From 8b7ba2e9378d9c5686dcedaea83db5daa71f8eea Mon Sep 17 00:00:00 2001
From: Qiao Ma
Date: Fri, 15 Dec 2023 11:47:49 +0800
Subject: [PATCH 001/953] anolis: configs: add default anolis_defconfig

These configs are imported from ANCK 5.10-016.1, and they were refreshed by:
    ARCH=${arch} CROSS_COMPILE=scripts/dummy-tools/ make olddefconfig

Signed-off-by: Qiao Ma
---
 arch/arm64/configs/anolis-debug_defconfig | 7168 +++++++++++++++++++
 arch/arm64/configs/anolis_defconfig       | 7124 +++++++++++++++++++
 arch/x86/configs/anolis-debug_defconfig   | 7783 +++++++++++++++++++++
 arch/x86/configs/anolis_defconfig         | 7710 ++++++++++++++++++++
 4 files changed, 29785 insertions(+)
 create mode 100644 arch/arm64/configs/anolis-debug_defconfig
 create mode 100644 arch/arm64/configs/anolis_defconfig
 create mode 100644 arch/x86/configs/anolis-debug_defconfig
 create mode 100644 arch/x86/configs/anolis_defconfig

diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig
new file mode 100644
index 000000000000..247a3d434dab
--- /dev/null
+++ b/arch/arm64/configs/anolis-debug_defconfig
@@ -0,0 +1,7168 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm64 6.6.7 Kernel Configuration
+#
+CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)"
+CONFIG_CC_IS_GCC=y
+CONFIG_GCC_VERSION=200000
+CONFIG_CLANG_VERSION=0
+CONFIG_AS_IS_GNU=y
+CONFIG_AS_VERSION=25000
+CONFIG_LD_IS_BFD=y
+CONFIG_LD_VERSION=25000
+CONFIG_LLD_VERSION=0
+CONFIG_CC_CAN_LINK=y
+CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
+CONFIG_TOOLS_SUPPORT_RELR=y
+CONFIG_CC_HAS_ASM_INLINE=y
+CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
+CONFIG_PAHOLE_VERSION=117
+CONFIG_CONSTRUCTORS=y
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_TABLE_SORT=y
+CONFIG_THREAD_INFO_IN_TASK=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+# CONFIG_COMPILE_TEST is not set
+# CONFIG_WERROR is not set
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_BUILD_SALT=""
+CONFIG_DEFAULT_INIT=""
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_WATCH_QUEUE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_GENERIC_IRQ_INJECTION=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y
+CONFIG_GENERIC_IRQ_IPI=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_IRQ_MSI_IOMMU=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_GENERIC_IRQ_DEBUGFS=y
+# end of IRQ subsystem
+
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+# CONFIG_NO_HZ_IDLE is not set
+CONFIG_NO_HZ_FULL=y
+CONFIG_CONTEXT_TRACKING_USER=y
+# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+# end of Timers subsystem
+
+CONFIG_BPF=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+
+#
+# BPF subsystem
+#
+CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set 
+CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +CONFIG_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_ARM64=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_PTE_SHIFT=4 +CONFIG_ARM64_CONT_PMD_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y +CONFIG_KASAN_SHADOW_OFFSET=0xdfff800000000000 + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_APPLE is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SPARX5 is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_KEEMBAY is not set +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_MA35 is not set +# CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_INTEL_SOCFPGA is not set +# CONFIG_ARCH_STM32 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VISCONTI is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y +CONFIG_ARM64_ERRATUM_1286807=y 
+CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_ARM64_ERRATUM_1508412=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_ROCKCHIP_ERRATUM_3588001=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +# end of ARM errata workarounds via the alternatives framework + +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_TRANS_TABLE=y +# CONFIG_XEN is not set +CONFIG_ARCH_FORCE_MAX_ORDER=10 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_AS_HAS_LSE_ATOMICS=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_AS_HAS_ARMV8_2=y +CONFIG_AS_HAS_SHA3=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# CONFIG_ARM64_PTR_AUTH is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y +CONFIG_AS_HAS_ARMV8_3=y +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y +CONFIG_AS_HAS_LDAPR=y +# end of ARMv8.3 architectural features + +# +# ARMv8.4 architectural features +# +# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_AS_HAS_ARMV8_4=y +CONFIG_ARM64_TLB_RANGE=y +# end of ARMv8.4 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_AS_HAS_ARMV8_5=y +# CONFIG_ARM64_BTI is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_AS_HAS_MTE=y +CONFIG_ARM64_MTE=y +# end of ARMv8.5 architectural features + +# +# ARMv8.7 architectural features +# +CONFIG_ARM64_EPAN=y +# end 
of ARMv8.7 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_PSEUDO_NMI=y +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_STACKPROTECTOR_PER_TASK=y +# end of Kernel Features + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +CONFIG_PM_TEST_SUSPEND=y +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG 
is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +CONFIG_ACPI_CONFIGFS=m +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_AGDI=y +CONFIG_ACPI_APMT=y +CONFIG_ACPI_PPTT=y +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_PRMT=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_NVHE_EL2_DEBUG is not set + +# +# General architecture-dependent options +# +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y +# CONFIG_SHADOW_CALL_STACK is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y 
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_COMPILER_H=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_ARCH_HAS_RELR=y +CONFIG_RELR=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_8B=y +CONFIG_FUNCTION_ALIGNMENT=8 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# 
CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_HAVE_ELF_PROT=y +CONFIG_ARCH_USE_GNU_PROPERTY=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not 
set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_USES_PG_ARCH_X=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y 
+CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m 
+CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m 
+CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m 
+CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# 
CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y 
+CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_HISI_ERR is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_QCOM is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m 
+CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_ARCH_NUMA=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +CONFIG_HISILICON_LPC=y +# CONFIG_QCOM_EBI2 is not set +# CONFIG_QCOM_SSC_BLOCK_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# end of ARM System Control and Management Interface Protocol + +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_ARM_FFA_TRANSPORT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +# CONFIG_EFI_ZBOOT is not set +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y +CONFIG_ARM_SMCCC_SOC_ID=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m 
+CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +# CONFIG_TIFM_7XX1 is not set +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_VMWARE_VMCI is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# 
CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# 
CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m 
+CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +# CONFIG_PDS_CORE is not set +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +# CONFIG_E1000 is not set +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y 
+CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_MLXBF_GIGE is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +CONFIG_NFP_DEBUG=y +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m 
+CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_AT803X_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_XGENE=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +CONFIG_MDIO_HISI_FEMAC=m +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_OCTEON=m +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# 
CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKELB is not set +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_PINEPHONE is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input 
device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_8250_PERICOM=y +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_NOZOMI is not set +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +# CONFIG_IPMI_IPMB is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +# CONFIG_SSIF_IPMI_BMC is not set +# CONFIG_IPMB_DEVICE_INTERFACE is not set +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISTB=y +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y +CONFIG_HW_RANDOM_CN10K=y +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +# CONFIG_DEVPORT is not set +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m 
+# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCF=m +CONFIG_I2C_ALGOPCA=m +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +CONFIG_I2C_GPIO_FAULT_INJECTOR=y +# CONFIG_I2C_HISI is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_QCOM_CCI is not set +CONFIG_I2C_QUP=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +CONFIG_I2C_XGENE_SLIMPRO=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_SLAVE_TESTUNIT is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +# CONFIG_SPI_CADENCE_QUADSPI is not set +CONFIG_SPI_DESIGNWARE=m +# CONFIG_SPI_DW_DMA is not set +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=m +# CONFIG_SPI_HISI_KUNPENG is not set +# CONFIG_SPI_HISI_SFC_V3XX is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set 
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +CONFIG_SPI_PL022=m +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_QCOM_QSPI is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_IPQ5018 is not set +# CONFIG_PINCTRL_IPQ5332 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ9574 is not set +# CONFIG_PINCTRL_MDM9607 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8953 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCM2290 is not set +# CONFIG_PINCTRL_QCS404 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QDU1000 is not set +# CONFIG_PINCTRL_SA8775P is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SC7280 is not set +# CONFIG_PINCTRL_SC8180X is not set +# CONFIG_PINCTRL_SC8280XP is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM670 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SDX75 is not set +# CONFIG_PINCTRL_SM6115 is not set +# CONFIG_PINCTRL_SM6125 is not set +# CONFIG_PINCTRL_SM6350 is not set +# CONFIG_PINCTRL_SM6375 is not set +# CONFIG_PINCTRL_SM7150 is not set +# CONFIG_PINCTRL_SM8150 is not set +# CONFIG_PINCTRL_SM8250 is not set +# CONFIG_PINCTRL_SM8350 is not set +# CONFIG_PINCTRL_SM8450 is not set +# CONFIG_PINCTRL_SM8550 is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_LPASS_LPI is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +# 
CONFIG_GPIO_CADENCE is not set +CONFIG_GPIO_DWAPB=m +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=m +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HISI is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +# CONFIG_GPIO_MB86S7X is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_REGULATOR is not set +CONFIG_POWER_RESET_RESTART=y +# CONFIG_POWER_RESET_VEXPRESS is not set +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# 
CONFIG_CHARGER_BQ256XX is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_RT9467 is not set +# CONFIG_CHARGER_RT9471 is not set +# CONFIG_CHARGER_UCS1002 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +CONFIG_SENSORS_POWR1220=m +# CONFIG_SENSORS_LINEAGE is not set +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +CONFIG_SENSORS_LTC4222=m +# CONFIG_SENSORS_LTC4245 is not set +CONFIG_SENSORS_LTC4260=m +# CONFIG_SENSORS_LTC4261 is not set +CONFIG_SENSORS_MAX1111=m +# CONFIG_SENSORS_MAX127 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +CONFIG_SENSORS_MAX31790=m +# CONFIG_SENSORS_MC34VR500 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_ADCXX=m +# CONFIG_SENSORS_LM63 is not set +CONFIG_SENSORS_LM70=m +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# 
CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NCT6683=m +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT6775_I2C is not set +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +# CONFIG_SENSORS_PMBUS is not set +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LT7182S is not set +# CONFIG_SENSORS_LTC2978 is not set +CONFIG_SENSORS_LTC3815=m +# CONFIG_SENSORS_MAX15301 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +# CONFIG_SENSORS_ZL6100 is not set +CONFIG_SENSORS_PWM_FAN=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +CONFIG_SENSORS_SHTC1=m +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +CONFIG_SENSORS_ADC128D818=m +# CONFIG_SENSORS_ADS7828 is not set +CONFIG_SENSORS_ADS7871=m +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +# 
CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VEXPRESS=m +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_FREQ_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=m + +# +# Qualcomm thermal drivers +# +# CONFIG_QCOM_LMH is not set +# end of Qualcomm thermal drivers + +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_XILINX_WINDOW_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +# CONFIG_ARM_SMC_WATCHDOG is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_HP_WATCHDOG is not set +CONFIG_MARVELL_GTI_WDT=y +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# 
CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_AW37503 is not set +# CONFIG_REGULATOR_DA9121 is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_FAN53880 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX77857 is not set +# 
CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8893 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MAX20086 is not set +# CONFIG_REGULATOR_MAX20411 is not set +# CONFIG_REGULATOR_MAX77826 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MP5416 is not set +# CONFIG_REGULATOR_MP8859 is not set +# CONFIG_REGULATOR_MP886X is not set +# CONFIG_REGULATOR_MPQ7920 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PCA9450 is not set +# CONFIG_REGULATOR_PF8X00 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_QCOM_REFGEN is not set +# CONFIG_REGULATOR_RAA215300 is not set +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set +# CONFIG_REGULATOR_RT4801 is not set +# CONFIG_REGULATOR_RT4803 is not set +# CONFIG_REGULATOR_RT5190A is not set +# CONFIG_REGULATOR_RT5739 is not set +# CONFIG_REGULATOR_RT5759 is not set +# CONFIG_REGULATOR_RT6160 is not set +# CONFIG_REGULATOR_RT6190 is not set +# CONFIG_REGULATOR_RT6245 is not set +# CONFIG_REGULATOR_RTQ2134 is not set +# CONFIG_REGULATOR_RTMV20 is not set +# CONFIG_REGULATOR_RTQ6752 is not set +# CONFIG_REGULATOR_RTQ2208 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_SY8827N is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS6286X is not set +# CONFIG_REGULATOR_TPS6287X is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set +# CONFIG_RC_CORE is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_SUPPORT is not set +# end of CEC support + +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +CONFIG_DRM=m +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# 
CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +CONFIG_HSA_AMD=y +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_NOUVEAU_DEBUG_MMU=y +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +# CONFIG_DRM_VMWGFX is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# 
CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LIMA is not set +# CONFIG_DRM_PANFROST is not set +# CONFIG_DRM_TIDSS is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=m +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# 
CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# 
CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set 
+# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_USB_QCOM_EUD is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +# CONFIG_USB_ONBOARD_HUB is not set +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +# CONFIG_TYPEC_FUSB302 is not set +# CONFIG_TYPEC_QCOM_PMIC is not set +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not 
set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_MMC=m +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_STM32_SDMMC=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set + +# +# Flash and Torch LED drivers +# +# 
CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_RT4505 is not set +# CONFIG_LEDS_RT8515 is not set +# CONFIG_LEDS_SGM3140 is not set + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +# CONFIG_LEDS_TRIGGER_AUDIO is not set +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HNS=m +# CONFIG_INFINIBAND_HNS_HIP08 is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +# CONFIG_EDAC_DMC520 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +# CONFIG_RTC_DRV_ABEOZ9 is not set +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not 
set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_HISI_DMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_BAM_DMA is not set +# CONFIG_QCOM_GPI_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +CONFIG_DMABUF_DEBUG=y +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +# +# VFIO support for platform devices 
+# +# CONFIG_VFIO_PLATFORM is not set +# CONFIG_VFIO_AMBA is not set +# end of VFIO support for platform devices + +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_PCI_LIB=m +CONFIG_VIRTIO_PCI_LIB_LEGACY=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +CONFIG_STAGING=y +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_ACPI is not set +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_EC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +# CONFIG_CROS_HPS_I2C is not set +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Clock driver for ARM Reference designs +# +# CONFIG_CLK_ICST is not set +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# end of Clock driver for ARM Reference designs + +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3559A=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y 
+CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_ARM_MHU_V2 is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +CONFIG_MAILBOX_TEST=m +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +# CONFIG_QCOM_IPCC is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +# CONFIG_IOMMUFD is not set +CONFIG_ARM_SMMU=y +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=y +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +CONFIG_ARM_SMMU_V3=y +# CONFIG_ARM_SMMU_V3_SVA is not set +# CONFIG_QCOM_IOMMU is not set +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_QUICC_ENGINE is not set +# CONFIG_FSL_RCPM is not set +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# CONFIG_A64FX_DIAG is not set +# end of fujitsu SoC drivers + +# +# Hisilicon SoC drivers +# +# CONFIG_KUNPENG_HCCS is not set +# end of Hisilicon SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_KRYO_L2_ACCESSORS=y +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_QCOM_SPM is not set +# CONFIG_QCOM_ICC_BWMON is not set +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_FSA9480 is not set +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_EXTCON_USBC_TUSB320 is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +CONFIG_PWM=y 
+CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +# CONFIG_XILINX_INTC is not set +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_QCOM_MPM is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set +# CONFIG_COMMON_RESET_HI3660 is not set +CONFIG_COMMON_RESET_HI6220=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HI3670_USB is not set +# CONFIG_PHY_HI3670_PCIE is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SS is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set +# CONFIG_PHY_TUSB1210 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_CMN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_PMUV3=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_THUNDERX2_PMU=m +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +# CONFIG_ARM_DMC620_PMU is not set +# CONFIG_MARVELL_CN10K_TAD_PMU is not set +CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_HISI_PMU=y +# CONFIG_HISI_PCIE_PMU is not set +# CONFIG_HNS3_PMU is not set +# CONFIG_MARVELL_CN10K_DDR_PMU is not set +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y 
+CONFIG_OF_PMEM=m +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_QCOM_QFPROM is not set +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +CONFIG_STM=m +# CONFIG_STM_PROTO_BASIC is not set +# CONFIG_STM_PROTO_SYS_T is not set +# CONFIG_STM_DUMMY is not set +# CONFIG_STM_SOURCE_CONSOLE is not set +# CONFIG_STM_SOURCE_HEARTBEAT is not set +# CONFIG_STM_SOURCE_FTRACE is not set +# CONFIG_INTEL_TH is not set +# CONFIG_HISI_PTT is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# CONFIG_CDX_BUS is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_DEBUG=y +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_DEBUG=y +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y 
+CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_TRUSTED_KEYS_TEE=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set 
+CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y 
+CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=y +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +CONFIG_CRYPTO_CHACHA20_NEON=m + +# +# Accelerated Cryptographic 
Algorithms for CPU (arm64) +# +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_POLY1305_NEON=m +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_NEON=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +# end of Accelerated Cryptographic Algorithms for CPU (arm64) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_LINEAR_RANGES=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y +CONFIG_INDIRECT_PIO=y +# CONFIG_TRACE_MMIO_ACCESS is not set + +# +# Crypto 
library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=y +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_DIRECT_REMAP=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y 
+CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_STACKDEPOT_ALWAYS_INIT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=m + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_TRAP is not set +CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_BOUNDS_STRICT=y +CONFIG_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_SHIFT=y +# CONFIG_UBSAN_DIV_ZERO is not set +# CONFIG_UBSAN_UNREACHABLE is not set +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_ENUM=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_TEST_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_DEBUG_PAGE_REF=y +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y 
+# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +# CONFIG_PER_VMA_LOCK_STATS is not set +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +# CONFIG_SHRINKER_DEBUG is not set +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +CONFIG_KASAN=y +CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y +CONFIG_KASAN_GENERIC=y +# CONFIG_KASAN_SW_TAGS is not set +# CONFIG_KASAN_HW_TAGS is not set +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +CONFIG_KASAN_STACK=y +CONFIG_KASAN_VMALLOC=y +# CONFIG_KASAN_MODULE_TEST is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_WQ_WATCHDOG=y +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +CONFIG_LOCKDEP_BITS=15 +CONFIG_LOCKDEP_CHAINS_BITS=16 +CONFIG_LOCKDEP_STACK_TRACE_BITS=19 +CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 +CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_LOCK_TORTURE_TEST=m +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_TRACE_IRQFLAGS=y +CONFIG_TRACE_IRQFLAGS_NMI=y +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +CONFIG_TORTURE_TEST=m +# CONFIG_RCU_SCALE_TEST is not set +CONFIG_RCU_TORTURE_TEST=m +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_LATENCYTOP=y +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +CONFIG_PID_IN_CONTEXTIDR=y +# CONFIG_DEBUG_EFI is not set +# 
CONFIG_ARM64_RELOC_TEST is not set +CONFIG_CORESIGHT=m +CONFIG_CORESIGHT_LINKS_AND_SINKS=m +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m +CONFIG_CORESIGHT_CATU=m +CONFIG_CORESIGHT_SINK_TPIU=m +CONFIG_CORESIGHT_SINK_ETBV10=m +CONFIG_CORESIGHT_SOURCE_ETM4X=m +CONFIG_ETM4X_IMPDEF_FEATURE=y +CONFIG_CORESIGHT_STM=m +CONFIG_CORESIGHT_CPU_DEBUG=m +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set +CONFIG_CORESIGHT_CTI=m +CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y +# CONFIG_CORESIGHT_TRBE is not set +# CONFIG_ULTRASOC_SMB is not set +# CONFIG_CORESIGHT_TPDM is not set +# CONFIG_CORESIGHT_TPDA is not set +# CONFIG_CORESIGHT_DUMMY is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +CONFIG_FAIL_MMC_REQUEST=y +# CONFIG_FAIL_SUNRPC is not set +# CONFIG_FAULT_INJECTION_CONFIGFS is not set +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +CONFIG_PERCPU_TEST=m +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig new file mode 100644 index 000000000000..ffd410167da4 --- /dev/null +++ b/arch/arm64/configs/anolis_defconfig @@ -0,0 +1,7124 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/arm64 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# 
CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +CONFIG_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_ARM64=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_PTE_SHIFT=4 +CONFIG_ARM64_CONT_PMD_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y 
+CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_APPLE is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SPARX5 is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_KEEMBAY is not set +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_MA35 is not set +# CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_INTEL_SOCFPGA is not set +# CONFIG_ARCH_STM32 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VISCONTI is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_ARM64_ERRATUM_1508412=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_ROCKCHIP_ERRATUM_3588001=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +# end of ARM errata workarounds via the alternatives framework + +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not 
set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_TRANS_TABLE=y +# CONFIG_XEN is not set +CONFIG_ARCH_FORCE_MAX_ORDER=10 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_AS_HAS_LSE_ATOMICS=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_AS_HAS_ARMV8_2=y +CONFIG_AS_HAS_SHA3=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# CONFIG_ARM64_PTR_AUTH is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y +CONFIG_AS_HAS_ARMV8_3=y +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y +CONFIG_AS_HAS_LDAPR=y +# end of ARMv8.3 architectural features + +# +# ARMv8.4 architectural features +# +# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_AS_HAS_ARMV8_4=y +CONFIG_ARM64_TLB_RANGE=y +# end of ARMv8.4 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_AS_HAS_ARMV8_5=y +# CONFIG_ARM64_BTI is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_AS_HAS_MTE=y +CONFIG_ARM64_MTE=y +# end of ARMv8.5 architectural features + +# +# ARMv8.7 architectural features +# +CONFIG_ARM64_EPAN=y +# end of ARMv8.7 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_PSEUDO_NMI=y +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_STACKPROTECTOR_PER_TASK=y +# end of Kernel Features + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +# 
CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_AGDI=y +CONFIG_ACPI_APMT=y +CONFIG_ACPI_PPTT=y +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_PRMT=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_NVHE_EL2_DEBUG is not set + +# +# General 
architecture-dependent options +# +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y +# CONFIG_SHADOW_CALL_STACK is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_COMPILER_H=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_RELR=y +CONFIG_RELR=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y 
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_8B=y +CONFIG_FUNCTION_ALIGNMENT=8 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y 
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_HAVE_ELF_PROT=y +CONFIG_ARCH_USE_GNU_PROPERTY=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y 
+CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_USES_PG_ARCH_X=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m 
+CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m 
+CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m 
+CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m 
+CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m 
+CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y 
+CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_HISI_ERR is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_QCOM is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_ARCH_NUMA=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set 
+CONFIG_HISILICON_LPC=y +# CONFIG_QCOM_EBI2 is not set +# CONFIG_QCOM_SSC_BLOCK_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# end of ARM System Control and Management Interface Protocol + +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_ARM_FFA_TRANSPORT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +# CONFIG_EFI_ZBOOT is not set +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y +CONFIG_ARM_SMCCC_SOC_ID=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m 
+CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +# CONFIG_TIFM_7XX1 is not set +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_VMWARE_VMCI is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# 
CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# 
CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m 
+CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +# CONFIG_PDS_CORE is not set +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +# CONFIG_E1000 is not set +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y 
+CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_MLXBF_GIGE is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m 
+CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_AT803X_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_XGENE=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +CONFIG_MDIO_HISI_FEMAC=m +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_OCTEON=m +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# 
CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKELB is not set +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_PINEPHONE is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input 
device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_8250_PERICOM=y +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_NOZOMI is not set +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +# CONFIG_IPMI_IPMB is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +# CONFIG_SSIF_IPMI_BMC is not set +# CONFIG_IPMB_DEVICE_INTERFACE is not set +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISTB=y +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y +CONFIG_HW_RANDOM_CN10K=y +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +# CONFIG_DEVPORT is not set +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m 
+# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCF=m +CONFIG_I2C_ALGOPCA=m +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +# CONFIG_I2C_HISI is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_QCOM_CCI is not set +CONFIG_I2C_QUP=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +CONFIG_I2C_XGENE_SLIMPRO=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_SLAVE_TESTUNIT is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +# CONFIG_SPI_CADENCE_QUADSPI is not set +CONFIG_SPI_DESIGNWARE=m +# CONFIG_SPI_DW_DMA is not set +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=m +# CONFIG_SPI_HISI_KUNPENG is not set +# CONFIG_SPI_HISI_SFC_V3XX is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# 
CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +CONFIG_SPI_PL022=m +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_QCOM_QSPI is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_IPQ5018 is not set +# CONFIG_PINCTRL_IPQ5332 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ9574 is not set +# CONFIG_PINCTRL_MDM9607 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8953 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCM2290 is not set +# CONFIG_PINCTRL_QCS404 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QDU1000 is not set +# CONFIG_PINCTRL_SA8775P is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SC7280 is not set +# CONFIG_PINCTRL_SC8180X is not set +# CONFIG_PINCTRL_SC8280XP is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM670 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SDX75 is not set +# CONFIG_PINCTRL_SM6115 is not set +# CONFIG_PINCTRL_SM6125 is not set +# CONFIG_PINCTRL_SM6350 is not set +# CONFIG_PINCTRL_SM6375 is not set +# CONFIG_PINCTRL_SM7150 is not set +# CONFIG_PINCTRL_SM8150 is not set +# CONFIG_PINCTRL_SM8250 is not set +# CONFIG_PINCTRL_SM8350 is not set +# CONFIG_PINCTRL_SM8450 is not set +# CONFIG_PINCTRL_SM8550 is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_LPASS_LPI is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not 
set +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +CONFIG_GPIO_DWAPB=m +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=m +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HISI is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +# CONFIG_GPIO_MB86S7X is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_REGULATOR is not set +CONFIG_POWER_RESET_RESTART=y +# CONFIG_POWER_RESET_VEXPRESS is not set +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not 
set +# CONFIG_CHARGER_BQ256XX is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_RT9467 is not set +# CONFIG_CHARGER_RT9471 is not set +# CONFIG_CHARGER_UCS1002 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +CONFIG_SENSORS_POWR1220=m +# CONFIG_SENSORS_LINEAGE is not set +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +CONFIG_SENSORS_LTC4222=m +# CONFIG_SENSORS_LTC4245 is not set +CONFIG_SENSORS_LTC4260=m +# CONFIG_SENSORS_LTC4261 is not set +CONFIG_SENSORS_MAX1111=m +# CONFIG_SENSORS_MAX127 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +CONFIG_SENSORS_MAX31790=m +# CONFIG_SENSORS_MC34VR500 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_ADCXX=m +# CONFIG_SENSORS_LM63 is not set +CONFIG_SENSORS_LM70=m +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# 
CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NCT6683=m +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT6775_I2C is not set +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +# CONFIG_SENSORS_PMBUS is not set +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LT7182S is not set +# CONFIG_SENSORS_LTC2978 is not set +CONFIG_SENSORS_LTC3815=m +# CONFIG_SENSORS_MAX15301 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +# CONFIG_SENSORS_ZL6100 is not set +CONFIG_SENSORS_PWM_FAN=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +CONFIG_SENSORS_SHTC1=m +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +CONFIG_SENSORS_ADC128D818=m +# CONFIG_SENSORS_ADS7828 is not set +CONFIG_SENSORS_ADS7871=m +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +# 
CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VEXPRESS=m +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_FREQ_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=m + +# +# Qualcomm thermal drivers +# +# CONFIG_QCOM_LMH is not set +# end of Qualcomm thermal drivers + +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_XILINX_WINDOW_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +# CONFIG_ARM_SMC_WATCHDOG is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_HP_WATCHDOG is not set +CONFIG_MARVELL_GTI_WDT=y +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# 
CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_AW37503 is not set +# CONFIG_REGULATOR_DA9121 is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_FAN53880 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX77857 is not set +# 
CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8893 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MAX20086 is not set +# CONFIG_REGULATOR_MAX20411 is not set +# CONFIG_REGULATOR_MAX77826 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MP5416 is not set +# CONFIG_REGULATOR_MP8859 is not set +# CONFIG_REGULATOR_MP886X is not set +# CONFIG_REGULATOR_MPQ7920 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PCA9450 is not set +# CONFIG_REGULATOR_PF8X00 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_QCOM_REFGEN is not set +# CONFIG_REGULATOR_RAA215300 is not set +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set +# CONFIG_REGULATOR_RT4801 is not set +# CONFIG_REGULATOR_RT4803 is not set +# CONFIG_REGULATOR_RT5190A is not set +# CONFIG_REGULATOR_RT5739 is not set +# CONFIG_REGULATOR_RT5759 is not set +# CONFIG_REGULATOR_RT6160 is not set +# CONFIG_REGULATOR_RT6190 is not set +# CONFIG_REGULATOR_RT6245 is not set +# CONFIG_REGULATOR_RTQ2134 is not set +# CONFIG_REGULATOR_RTMV20 is not set +# CONFIG_REGULATOR_RTQ6752 is not set +# CONFIG_REGULATOR_RTQ2208 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_SY8827N is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS6286X is not set +# CONFIG_REGULATOR_TPS6287X is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set +# CONFIG_RC_CORE is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_SUPPORT is not set +# end of CEC support + +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +CONFIG_DRM=m +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# 
CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +CONFIG_HSA_AMD=y +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +# CONFIG_DRM_VMWGFX is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# 
CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LIMA is not set +# CONFIG_DRM_PANFROST is not set +# CONFIG_DRM_TIDSS is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=m +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# 
CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# 
CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set 
+# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_USB_QCOM_EUD is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +# CONFIG_USB_ONBOARD_HUB is not set +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +# CONFIG_TYPEC_FUSB302 is not set +# CONFIG_TYPEC_QCOM_PMIC is not set +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not 
set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_MMC=m +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_STM32_SDMMC=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set + +# +# Flash and Torch LED drivers +# +# 
CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_RT4505 is not set +# CONFIG_LEDS_RT8515 is not set +# CONFIG_LEDS_SGM3140 is not set + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +# CONFIG_LEDS_TRIGGER_AUDIO is not set +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HNS=m +# CONFIG_INFINIBAND_HNS_HIP08 is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +# CONFIG_EDAC_DMC520 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +# CONFIG_RTC_DRV_ABEOZ9 is not set +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# 
CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_HISI_DMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_BAM_DMA is not set +# CONFIG_QCOM_GPI_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +# +# VFIO support 
for platform devices +# +# CONFIG_VFIO_PLATFORM is not set +# CONFIG_VFIO_AMBA is not set +# end of VFIO support for platform devices + +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_PCI_LIB=m +CONFIG_VIRTIO_PCI_LIB_LEGACY=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +CONFIG_STAGING=y +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_ACPI is not set +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_EC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +# CONFIG_CROS_HPS_I2C is not set +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Clock driver for ARM Reference designs +# +# CONFIG_CLK_ICST is not set +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# end of Clock driver for ARM Reference designs + +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3559A=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y 
+CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_ARM_MHU_V2 is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +# CONFIG_QCOM_IPCC is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +# CONFIG_IOMMUFD is not set +CONFIG_ARM_SMMU=y +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=y +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +CONFIG_ARM_SMMU_V3=y +# CONFIG_ARM_SMMU_V3_SVA is not set +# CONFIG_QCOM_IOMMU is not set +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_QUICC_ENGINE is not set +# CONFIG_FSL_RCPM is not set +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# CONFIG_A64FX_DIAG is not set +# end of fujitsu SoC drivers + +# +# Hisilicon SoC drivers +# +# CONFIG_KUNPENG_HCCS is not set +# end of Hisilicon SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_KRYO_L2_ACCESSORS=y +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_QCOM_SPM is not set +# CONFIG_QCOM_ICC_BWMON is not set +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_FSA9480 is not set +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_EXTCON_USBC_TUSB320 is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is 
not set +# CONFIG_NTB is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +# CONFIG_XILINX_INTC is not set +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_QCOM_MPM is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set +# CONFIG_COMMON_RESET_HI3660 is not set +CONFIG_COMMON_RESET_HI6220=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HI3670_USB is not set +# CONFIG_PHY_HI3670_PCIE is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SS is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set +# CONFIG_PHY_TUSB1210 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_CMN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_PMUV3=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_THUNDERX2_PMU=m +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +# CONFIG_ARM_DMC620_PMU is not set +# CONFIG_MARVELL_CN10K_TAD_PMU is not set +CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_HISI_PMU=y +# CONFIG_HISI_PCIE_PMU is not set +# CONFIG_HNS3_PMU is not set +# CONFIG_MARVELL_CN10K_DDR_PMU is not set +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m 
+CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_OF_PMEM=m +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_QCOM_QFPROM is not set +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +CONFIG_STM=m +# CONFIG_STM_PROTO_BASIC is not set +# CONFIG_STM_PROTO_SYS_T is not set +# CONFIG_STM_DUMMY is not set +# CONFIG_STM_SOURCE_CONSOLE is not set +# CONFIG_STM_SOURCE_HEARTBEAT is not set +# CONFIG_STM_SOURCE_FTRACE is not set +# CONFIG_INTEL_TH is not set +# CONFIG_HISI_PTT is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# CONFIG_CDX_BUS is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# 
+CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m 
+CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_TRUSTED_KEYS_TEE=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set 
+# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is 
not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=y +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is 
not set +CONFIG_CRYPTO_CHACHA20_NEON=m + +# +# Accelerated Cryptographic Algorithms for CPU (arm64) +# +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_POLY1305_NEON=m +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_NEON=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +# end of Accelerated Cryptographic Algorithms for CPU (arm64) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_LINEAR_RANGES=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y 
+CONFIG_INDIRECT_PIO=y +# CONFIG_TRACE_MMIO_ACCESS is not set + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=y +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_DIRECT_REMAP=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y 
+CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=m + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_PER_VMA_LOCK_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# 
CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +CONFIG_PID_IN_CONTEXTIDR=y +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +CONFIG_CORESIGHT=m +CONFIG_CORESIGHT_LINKS_AND_SINKS=m +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m 
+CONFIG_CORESIGHT_CATU=m +CONFIG_CORESIGHT_SINK_TPIU=m +CONFIG_CORESIGHT_SINK_ETBV10=m +CONFIG_CORESIGHT_SOURCE_ETM4X=m +CONFIG_ETM4X_IMPDEF_FEATURE=y +CONFIG_CORESIGHT_STM=m +CONFIG_CORESIGHT_CPU_DEBUG=m +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set +CONFIG_CORESIGHT_CTI=m +CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y +# CONFIG_CORESIGHT_TRBE is not set +# CONFIG_ULTRASOC_SMB is not set +# CONFIG_CORESIGHT_TPDM is not set +# CONFIG_CORESIGHT_TPDA is not set +# CONFIG_CORESIGHT_DUMMY is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig new file mode 100644 index 000000000000..714c90743cba --- /dev/null +++ b/arch/x86/configs/anolis-debug_defconfig @@ -0,0 +1,7783 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_DEBUGFS=y +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y 
+CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + 
+CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +# CONFIG_KEXEC_SIG_FORCE is not set +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_KEXEC_JUMP=y +CONFIG_CRASH_DUMP=y +CONFIG_CRASH_HOTPLUG=y +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 +# end of Kexec and crash features +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_AUDIT_ARCH=y +CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000 +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_SMP=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_PVHVM_GUEST=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +# CONFIG_ACRN_GUEST is not set +CONFIG_INTEL_TDX_GUEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +CONFIG_BOOT_VESA_SUPPORT=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=1024 +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m + +# +# Performance 
monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_PERF_EVENTS_AMD_UNCORE=y +CONFIG_PERF_EVENTS_AMD_BRS=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_MICROCODE=y +# CONFIG_MICROCODE_LATE_LOADING is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_CPA_STATISTICS=y +CONFIG_X86_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_X86_UMIP=y +CONFIG_CC_HAS_IBT=y +CONFIG_X86_CET=y +CONFIG_X86_KERNEL_IBT=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +CONFIG_X86_INTEL_TSX_MODE_AUTO=y +CONFIG_X86_SGX=y +# CONFIG_X86_USER_SHADOW_STACK is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_HANDOVER_PROTOCOL=y +CONFIG_EFI_MIXED=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y +CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +# CONFIG_ADDRESS_MASKING is not set +CONFIG_HOTPLUG_CPU=y +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_XONLY=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +# CONFIG_STRICT_SIGALTSTACK_SIZE is not set +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Processor type and features + +CONFIG_CC_HAS_SLS=y +CONFIG_CC_HAS_RETURN_THUNK=y +CONFIG_CC_HAS_ENTRY_PADDING=y +CONFIG_FUNCTION_PADDING_CFI=11 +CONFIG_FUNCTION_PADDING_BYTES=16 +CONFIG_CALL_PADDING=y +CONFIG_HAVE_CALL_THUNKS=y +CONFIG_CALL_THUNKS=y +CONFIG_PREFIX_SYMBOLS=y +CONFIG_SPECULATION_MITIGATIONS=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_RETPOLINE=y +CONFIG_RETHUNK=y +CONFIG_CPU_UNRET_ENTRY=y +CONFIG_CALL_DEPTH_TRACKING=y +# CONFIG_CALL_THUNKS_DEBUG is not set +CONFIG_CPU_IBPB_ENTRY=y +CONFIG_CPU_IBRS_ENTRY=y +CONFIG_CPU_SRSO=y +# CONFIG_SLS is not set +# CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_ARCH_HAS_ADD_PAGES=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y 
+CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_PLATFORM_PROFILE=m +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +# CONFIG_ACPI_DPTF is not set +CONFIG_ACPI_WATCHDOG=y +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +CONFIG_ACPI_CONFIGFS=m +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +CONFIG_PMIC_OPREGION=y +CONFIG_ACPI_PRMT=y +CONFIG_X86_PM_TIMER=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_AMD_PSTATE=y +CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3 +# CONFIG_X86_AMD_PSTATE_UT is not set +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +CONFIG_CPU_IDLE_GOV_HALTPOLL=y +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) 
+# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_MMCONF_FAM10H=y +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# end of Bus options (PCI etc.) + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32_ABI is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +# end of Binary Emulations + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_PFNCACHE=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_TSO=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_HAVE_KVM_PM_NOTIFIER=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_X86_SGX_KVM=y +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +CONFIG_KVM_SMM=y +# CONFIG_KVM_XEN is not set +CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_AS_AVX512=y +CONFIG_AS_SHA1_NI=y +CONFIG_AS_SHA256_NI=y +CONFIG_AS_TPAUSE=y +CONFIG_AS_GFNI=y +CONFIG_AS_WRUSS=y + +# +# General architecture-dependent options +# +CONFIG_HOTPLUG_SMT=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_HOTPLUG_CORE_SYNC_FULL=y +CONFIG_HOTPLUG_SPLIT_STARTUP=y +CONFIG_HOTPLUG_PARALLEL=y +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_STATIC_CALL_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_RUST=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y 
+CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_HAVE_OBJTOOL=y +CONFIG_HAVE_JUMP_LABEL_HACK=y +CONFIG_HAVE_NOINSTR_HACK=y +CONFIG_HAVE_NOINSTR_VALIDATION=y +CONFIG_HAVE_UACCESS_VALIDATION=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_ARCH_HAS_CC_PLATFORM=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y +CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y +CONFIG_DYNAMIC_SIGFRAME=y +CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y +CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_16B=y +CONFIG_FUNCTION_ALIGNMENT=16 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# 
CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_CGROUP_IOLATENCY=y +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BFQ_CGROUP_DEBUG=y +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set 
+CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_VMAP_PFN=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MAPPING_DIRTY_HELPERS=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_PTE_MARKER_UFFD_WP=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# 
CONFIG_XFRM_USER_COMPAT is not set +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y 
+CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m 
+CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m 
+CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# 
CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +CONFIG_BT_HCIBTUSB_BCM=y +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set 
+CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +CONFIG_CFG80211_DEBUGFS=y +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +CONFIG_MAC80211_MESSAGE_TRACING=y +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=y +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +CONFIG_VMD=y +CONFIG_PCI_HYPERV_INTERFACE=m + +# +# Cadence-based PCIe controllers +# +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# 
CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_DXE_MEM_ATTRIBUTES=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_EFI_RCI2_TABLE=y +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +CONFIG_EFI_COCO_SECRET=y +CONFIG_UNACCEPTED_MEMORY=y +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# 
CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_SGI_XP=m +CONFIG_HP_ILO=m +CONFIG_SGI_GRU=m +# CONFIG_SGI_GRU_DEBUG is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +CONFIG_SENSORS_LIS3_I2C=m + +# +# Altera FPGA firmware download module (requires I2C) +# +CONFIG_ALTERA_STAPL=m +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_INTEL_MEI_GSC is not set +# CONFIG_INTEL_MEI_HDCP is not set +# CONFIG_INTEL_MEI_PXP is not set +# CONFIG_INTEL_MEI_GSC_PROXY is not set +CONFIG_VMWARE_VMCI=m +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 
+CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_VMWARE_PVSCSI=m +# CONFIG_XEN_SCSI_FRONTEND is not set +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +CONFIG_FCOE_FNIC=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +CONFIG_SCSI_ISCI=m +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=m +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# 
PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_PARPORT is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# 
CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_AQTION=m +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_CX_ECAT is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +# CONFIG_BE2NET_BE2 is not set +# CONFIG_BE2NET_BE3 is not set +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_ICE_HWTS=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +# CONFIG_MICROSOFT_MANA is not set 
+CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +CONFIG_NFP_DEBUG=y +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_ATP is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m 
+CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +CONFIG_ATH_DEBUG=y +# CONFIG_ATH_TRACEPOINTS is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_COMMON_DEBUG=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_DEBUGFS=y +# CONFIG_ATH9K_STATION_STATISTICS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_ATH9K_COMMON_SPECTRAL is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +CONFIG_ATH10K_DEBUG=y +# CONFIG_ATH10K_DEBUGFS is not set +CONFIG_ATH10K_TRACING=y +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set +# 
CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMSMAC_LEDS=y +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +CONFIG_IWLWIFI_DEBUG=y +CONFIG_IWLWIFI_DEBUGFS=y +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set +CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +CONFIG_RTLWIFI_DEBUG=y +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +CONFIG_RTW88=m +CONFIG_RTW88_CORE=m +CONFIG_RTW88_PCI=m +CONFIG_RTW88_8822B=m +CONFIG_RTW88_8822C=m +CONFIG_RTW88_8822BE=m +# CONFIG_RTW88_8822BS is not set +# CONFIG_RTW88_8822BU is not set +CONFIG_RTW88_8822CE=m +# CONFIG_RTW88_8822CS is not set +# CONFIG_RTW88_8822CU is not set +# CONFIG_RTW88_8723DE is not set +# CONFIG_RTW88_8723DS is not set +# CONFIG_RTW88_8723DU is not set +# CONFIG_RTW88_8821CE is not set +# CONFIG_RTW88_8821CS is not set +# CONFIG_RTW88_8821CU is 
not set +CONFIG_RTW88_DEBUG=y +CONFIG_RTW88_DEBUGFS=y +# CONFIG_RTW89 is not set +# CONFIG_WLAN_VENDOR_RSI is not set +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set +CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_HYPERV_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=m +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_VMMOUSE=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m 
+CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +CONFIG_INPUT_PCSPKR=m +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_APANEL=m +# CONFIG_INPUT_GPIO_BEEPER is 
not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F3A is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +CONFIG_HYPERV_KEYBOARD=m +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +CONFIG_SERIAL_8250_PERICOM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_LANTIQ is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set 
+# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_HW_RANDOM_XIPHERA is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_DEVMEM=y +CONFIG_NVRAM=y +CONFIG_DEVPORT=y +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +# CONFIG_HPET_MMAP_DEFAULT is not set +CONFIG_HANGCHECK_TIMER=m +CONFIG_UV_MMTIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_XEN is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TELCLOCK=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_AMDPSP is not set +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_MLXCPLD=m +# CONFIG_I2C_VIRTIO 
is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=m +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_VMW is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set + +# +# Intel pinctrl drivers +# +CONFIG_PINCTRL_BAYTRAIL=y +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_LYNXPOINT is not set +CONFIG_PINCTRL_INTEL=y +# CONFIG_PINCTRL_ALDERLAKE is not set +CONFIG_PINCTRL_BROXTON=m +CONFIG_PINCTRL_CANNONLAKE=m +CONFIG_PINCTRL_CEDARFORK=m +CONFIG_PINCTRL_DENVERTON=m +# CONFIG_PINCTRL_ELKHARTLAKE is not set +# CONFIG_PINCTRL_EMMITSBURG is not set +CONFIG_PINCTRL_GEMINILAKE=m +CONFIG_PINCTRL_ICELAKE=m +# CONFIG_PINCTRL_JASPERLAKE is not set +# CONFIG_PINCTRL_LAKEFIELD is not set +CONFIG_PINCTRL_LEWISBURG=m +# CONFIG_PINCTRL_METEORLAKE is not set +CONFIG_PINCTRL_SUNRISEPOINT=m +# CONFIG_PINCTRL_TIGERLAKE is not set +# end of Intel pinctrl drivers + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +CONFIG_GPIO_ICH=m +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set +# end of Port-mapped I/O GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# CONFIG_GPIO_ELKHARTLAKE is not set +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# 
Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DELL_SMM=m +# CONFIG_I8K is not set +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FSCHMD=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m 
+# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX127 is not set +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_MLXREG_FAN is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_OXP is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set +CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set 
+CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +# CONFIG_SENSORS_ASUS_WMI is not set +# CONFIG_SENSORS_ASUS_EC is not set +# CONFIG_SENSORS_HP_WMI is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_ACPI=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +CONFIG_INTEL_POWERCLAMP=m +CONFIG_X86_THERMAL_VECTOR=y +CONFIG_INTEL_TCC=y +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +# CONFIG_INT3406_THERMAL is not set +CONFIG_PROC_THERMAL_MMIO_RAPL=m +# end of ACPI INT340X thermal drivers + +CONFIG_INTEL_PCH_THERMAL=m +# CONFIG_INTEL_TCC_COOLING is not set +# CONFIG_INTEL_HFI_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_MLX_WDT is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ADVANTECH_EC_WDT is not set +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +# CONFIG_EBC_C384_WDT is not set +# CONFIG_EXAR_WDT is not set 
+CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +# CONFIG_EUROTECH_WDT is not set +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +# CONFIG_WAFER_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +CONFIG_NV_TCO=m +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_TQMX86_WDT is not set +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_INTEL_MEI_WDT=m +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +CONFIG_MFD_INTEL_LPSS=m +CONFIG_MFD_INTEL_LPSS_ACPI=m +CONFIG_MFD_INTEL_LPSS_PCI=m +# CONFIG_MFD_INTEL_PMC_BXT is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ATC260X_I2C is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=m +# CONFIG_LIRC is not set +CONFIG_RC_MAP=m +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m 
+CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set +CONFIG_IR_SANYO_DECODER=m +# CONFIG_IR_SHARP_DECODER is not set +CONFIG_IR_SONY_DECODER=m +# CONFIG_IR_XMP_DECODER is not set +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +# CONFIG_IR_IGORPLUGUSB is not set +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +# CONFIG_IR_SERIAL_TRANSMITTER is not set +CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set +CONFIG_IR_TTUSBIR=m +CONFIG_IR_WINBOND_CIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_RC_LOOPBACK=m +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set +# CONFIG_CEC_GPIO is not set +# CONFIG_CEC_SECO is not set +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_SUPPORT_FILTER=y +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y + +# +# Media device types +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_PLATFORM_SUPPORT is not set +# CONFIG_MEDIA_TEST_SUPPORT is not set +# end of Media device types + +# +# Media drivers +# + +# +# Drivers filtered as selected at 'Filter media drivers' +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_MEDIA_PCI_SUPPORT=y +# CONFIG_IPU_BRIDGE is not set +# end of Media drivers + +CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y + +# +# Media ancillary drivers +# +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +# CONFIG_AGP is not set +CONFIG_INTEL_GTT=m +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +# CONFIG_HSA_AMD is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_NOUVEAU_DEBUG_MMU=y +# CONFIG_NOUVEAU_DEBUG_PUSH is not set 
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_I915=m +CONFIG_DRM_I915_FORCE_PROBE="" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT_KVMGT=m +CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 +CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 +CONFIG_DRM_I915_STOP_TIMEOUT=100 +CONFIG_DRM_I915_TIMESLICE_DURATION=1 +CONFIG_DRM_I915_GVT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +CONFIG_DRM_VMWGFX=m +# CONFIG_DRM_VMWGFX_MKSSTATS is not set +CONFIG_DRM_GMA500=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_DRM_XEN_FRONTEND is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_HYPERV is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +CONFIG_DRM_PRIVACY_SCREEN=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_HYPERV=m +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer 
Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BACKLIGHT_APPLE=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +# CONFIG_HID_NVIDIA_SHIELD is not set +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# 
CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set +# end of Intel ISH HID support + +# +# AMD SFH HID Support +# +# CONFIG_AMD_SFH_HID is not set +# end of AMD SFH HID Support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set +# CONFIG_USB_XEN_HCD is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y 
+CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set 
+CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_USB_ROLES_INTEL_XHCI=y +CONFIG_MMC=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_WBSD is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_AW200XX is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_INTEL_SS4200=m +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# 
CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +CONFIG_LEDS_TRIGGER_AUDIO=m +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +# CONFIG_EDAC_IGEN6 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# 
+CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IDXD_BUS=m +CONFIG_INTEL_IDXD=m +# CONFIG_INTEL_IDXD_COMPAT is not set +CONFIG_INTEL_IDXD_SVM=y +# CONFIG_INTEL_IDXD_PERFMON is not set +CONFIG_INTEL_IOATDMA=m +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +CONFIG_AMD_PTDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=y +# CONFIG_DW_EDMA is not set +CONFIG_HSU_DMA=y +# CONFIG_SF_PDMA is not set +# CONFIG_INTEL_LDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +CONFIG_DMABUF_DEBUG=y +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_DCA=m +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +# CONFIG_VFIO_PCI_IGD is not set +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_VFIO_MDEV=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VMGENID=y +# CONFIG_VBOXGUEST is not set +# CONFIG_NITRO_ENCLAVES is not set +CONFIG_EFI_SECRET=m +CONFIG_SEV_GUEST=m +CONFIG_TDX_GUEST_DRIVER=m +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +# 
CONFIG_HYPERV_VTL_MODE is not set +CONFIG_HYPERV_TIMER=y +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m +# end of Microsoft Hyper-V guest support + +# +# Xen driver support +# +# CONFIG_XEN_BALLOON is not set +CONFIG_XEN_DEV_EVTCHN=m +# CONFIG_XEN_BACKEND is not set +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +# CONFIG_XEN_GNTDEV is not set +# CONFIG_XEN_GRANT_DEV_ALLOC is not set +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +# CONFIG_XEN_PVCALLS_FRONTEND is not set +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_XEN_UNPOPULATED_ALLOC is not set +# CONFIG_XEN_VIRTIO is not set +# end of Xen driver support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +# CONFIG_STAGING is not set +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set +# CONFIG_MLXREG_LC is not set +# CONFIG_NVSW_SN2201 is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACPI_WMI=m +CONFIG_WMI_BMOF=m +# CONFIG_HUAWEI_WMI is not set +# CONFIG_UV_SYSFS is not set +CONFIG_MXM_WMI=m +# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set +# CONFIG_XIAOMI_WMI is not set +# CONFIG_GIGABYTE_WMI is not set +# CONFIG_YOGABOOK is not set +CONFIG_ACERHDF=m +# CONFIG_ACER_WIRELESS is not set +CONFIG_ACER_WMI=m +# CONFIG_AMD_PMF is not set +# CONFIG_AMD_PMC is not set +# CONFIG_AMD_HSMP is not set +# CONFIG_ADV_SWBUTTON is not set +CONFIG_APPLE_GMUX=m +CONFIG_ASUS_LAPTOP=m +# CONFIG_ASUS_WIRELESS is not set +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +# CONFIG_ASUS_TF103C_DOCK is not set +# CONFIG_MERAKI_MX100 is not set +CONFIG_EEEPC_LAPTOP=m +CONFIG_EEEPC_WMI=m +# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set +CONFIG_AMILO_RFKILL=m +CONFIG_FUJITSU_LAPTOP=m +CONFIG_FUJITSU_TABLET=m +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_X86_PLATFORM_DRIVERS_HP is not set +# CONFIG_WIRELESS_HOTKEY is not set +# CONFIG_IBM_RTL is not set +CONFIG_IDEAPAD_LAPTOP=m +# CONFIG_LENOVO_YMC is not set +CONFIG_SENSORS_HDAPS=m +CONFIG_THINKPAD_ACPI=m +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +# CONFIG_THINKPAD_LMI is not set +# CONFIG_INTEL_ATOMISP2_PM is not set +# CONFIG_INTEL_IFS is not set +# CONFIG_INTEL_SAR_INT1092 is not set +CONFIG_INTEL_PMC_CORE=m + +# +# Intel Speed Select Technology interface support +# +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m +# end of Intel Speed Select Technology interface support + +CONFIG_INTEL_WMI=y +# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set +CONFIG_INTEL_WMI_THUNDERBOLT=m + +# +# Intel Uncore Frequency Control +# +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set +# end of Intel Uncore Frequency Control + +CONFIG_INTEL_HID_EVENT=m +CONFIG_INTEL_VBTN=m +# CONFIG_INTEL_INT0002_VGPIO is not set +CONFIG_INTEL_OAKTRAIL=m +# CONFIG_INTEL_ISHTP_ECLITE is not set +# CONFIG_INTEL_PUNIT_IPC is not set +CONFIG_INTEL_RST=m +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_INTEL_VSEC is not set +# CONFIG_MSI_EC is not set +CONFIG_MSI_LAPTOP=m +CONFIG_MSI_WMI=m +# CONFIG_PCENGINES_APU2 is not set +# CONFIG_BARCO_P50_GPIO is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_SAMSUNG_Q10=m +# CONFIG_ACPI_TOSHIBA is not set 
+CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_TOSHIBA_WMI is not set +CONFIG_ACPI_CMPC=m +CONFIG_COMPAL_LAPTOP=m +# CONFIG_LG_LAPTOP is not set +CONFIG_PANASONIC_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +# CONFIG_SYSTEM76_ACPI is not set +CONFIG_TOPSTAR_LAPTOP=m +CONFIG_MLX_PLATFORM=m +CONFIG_INTEL_IPS=m +# CONFIG_INTEL_SCU_PCI is not set +# CONFIG_INTEL_SCU_PLATFORM is not set +# CONFIG_SIEMENS_SIMATIC_IPC is not set +# CONFIG_WINMATE_FM07_KEYS is not set +# CONFIG_SEL3350_PLATFORM is not set +CONFIG_P2SB=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_XILINX_VCU is not set +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +# end of Generic IOMMU Pagetable Support + +CONFIG_IOMMU_DEBUGFS=y +# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +# CONFIG_AMD_IOMMU_DEBUGFS is not set +CONFIG_DMAR_TABLE=y +CONFIG_DMAR_PERF=y +CONFIG_DMAR_DEBUG=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_DEBUGFS=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y +CONFIG_INTEL_IOMMU_PERF_EVENTS=y +# CONFIG_IOMMUFD is not set +CONFIG_IRQ_REMAP=y +CONFIG_HYPERV_IOMMU=y +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# Accelerometers +# +# CONFIG_ADXL313_I2C is 
not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7091R5 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD799X is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# end of Clock Generator/Distribution + +# 
+# Phase-Locked Loop (PLL) frequency synthesizers +# +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_BMI160_I2C is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m 
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# end of Resolver to digital converters + +# +# Temperature sensors +# +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# end of Temperature sensors + +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_AMD is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_INTEL is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_USB_LGM_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_INTEL_LGM_EMMC is not set +# end of PHY Subsystem + +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL_CORE=m +CONFIG_INTEL_RAPL=m +CONFIG_IDLE_INJECT=y +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y +CONFIG_RAS_CEC=y +# 
CONFIG_RAS_CEC_DEBUG is not set +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_DEV_DAX_PMEM=y +CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_HMEM_DEVICES=y +CONFIG_DEV_DAX_KMEM=y +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_INTEL_TH_DEBUG is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_TEE=m +CONFIG_AMDTEE=m +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_DEBUG=y +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m 
+# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_PROC_CPU_RESCTRL=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y 
+CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=y +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=y +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_INTEL_TXT=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# 
CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m 
+CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y 
+CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (x86) +# +CONFIG_CRYPTO_CURVE25519_X86=m +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m +# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set +CONFIG_CRYPTO_CHACHA20_X86_64=m +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +CONFIG_CRYPTO_BLAKE2S_X86=y +# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=y +CONFIG_CRYPTO_SM3_AVX_X86_64=y +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +# end of Accelerated Cryptographic Algorithms for CPU (x86) + +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_SP_PSP=y +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# 
CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_RANDOM32_SELFTEST=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y 
+CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_STACKDEPOT_ALWAYS_INIT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_OBJTOOL=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_EARLY_DEBUG=y +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_TRAP is not set +CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_BOUNDS_STRICT=y +CONFIG_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_SHIFT=y +# CONFIG_UBSAN_DIV_ZERO is not set +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_ENUM=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_TEST_UBSAN is not set +CONFIG_HAVE_ARCH_KCSAN=y +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_DEBUG_PAGE_REF=y +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set 
+CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +# CONFIG_PER_VMA_LOCK_STATS is not set +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +# CONFIG_SHRINKER_DEBUG is not set +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +CONFIG_DEBUG_VM_IRQSOFF=y +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_MAPLE_TREE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_DEBUG_VM_PGTABLE=y +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +CONFIG_KASAN=y +CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y +CONFIG_KASAN_GENERIC=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +CONFIG_KASAN_STACK=y +CONFIG_KASAN_VMALLOC=y +# CONFIG_KASAN_MODULE_TEST is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=100 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +CONFIG_HAVE_ARCH_KMSAN=y +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +CONFIG_LOCK_STAT=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +CONFIG_LOCKDEP_BITS=15 +CONFIG_LOCKDEP_CHAINS_BITS=16 +CONFIG_LOCKDEP_STACK_TRACE_BITS=19 +CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 +CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_LOCK_TORTURE_TEST=m +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_TRACE_IRQFLAGS=y +CONFIG_TRACE_IRQFLAGS_NMI=y +# CONFIG_NMI_CHECK_CPU is not set +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +CONFIG_TORTURE_TEST=m +# CONFIG_RCU_SCALE_TEST is not set +CONFIG_RCU_TORTURE_TEST=m +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_LATENCYTOP=y +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_OBJTOOL_MCOUNT=y +CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y +CONFIG_BUILDTIME_MCOUNT_SORT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_MMIOTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# 
CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_MMIOTRACE_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +CONFIG_FAIL_MMC_REQUEST=y +# CONFIG_FAIL_SUNRPC is not set +# CONFIG_FAULT_INJECTION_CONFIGFS is not set +# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# 
CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +CONFIG_TEST_LIVEPATCH=m +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig new file mode 100644 index 000000000000..3c8b51687fb7 --- /dev/null +++ b/arch/x86/configs/anolis_defconfig @@ -0,0 +1,7710 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y 
+CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" 
+CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +# CONFIG_KEXEC_SIG_FORCE is not set +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_KEXEC_JUMP=y +CONFIG_CRASH_DUMP=y +CONFIG_CRASH_HOTPLUG=y +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 +# end of Kexec and crash features +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_AUDIT_ARCH=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_SMP=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_PVHVM_GUEST=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +# CONFIG_ACRN_GUEST is not set +CONFIG_INTEL_TDX_GUEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y 
+CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +CONFIG_BOOT_VESA_SUPPORT=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=64 +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_PERF_EVENTS_AMD_UNCORE=y +CONFIG_PERF_EVENTS_AMD_BRS=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_MICROCODE=y +# CONFIG_MICROCODE_LATE_LOADING is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_CPA_STATISTICS=y +CONFIG_X86_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_X86_UMIP=y +CONFIG_CC_HAS_IBT=y +CONFIG_X86_CET=y +CONFIG_X86_KERNEL_IBT=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +CONFIG_X86_INTEL_TSX_MODE_AUTO=y +CONFIG_X86_SGX=y +# CONFIG_X86_USER_SHADOW_STACK is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_HANDOVER_PROTOCOL=y +CONFIG_EFI_MIXED=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y +CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +# CONFIG_ADDRESS_MASKING is not set +CONFIG_HOTPLUG_CPU=y +# CONFIG_COMPAT_VDSO is not set 
+CONFIG_LEGACY_VSYSCALL_XONLY=y
+# CONFIG_LEGACY_VSYSCALL_NONE is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_MODIFY_LDT_SYSCALL=y
+# CONFIG_STRICT_SIGALTSTACK_SIZE is not set
+CONFIG_HAVE_LIVEPATCH=y
+CONFIG_LIVEPATCH=y
+# end of Processor type and features
+
+CONFIG_CC_HAS_SLS=y
+CONFIG_CC_HAS_RETURN_THUNK=y
+CONFIG_CC_HAS_ENTRY_PADDING=y
+CONFIG_FUNCTION_PADDING_CFI=11
+CONFIG_FUNCTION_PADDING_BYTES=16
+CONFIG_CALL_PADDING=y
+CONFIG_HAVE_CALL_THUNKS=y
+CONFIG_CALL_THUNKS=y
+CONFIG_PREFIX_SYMBOLS=y
+CONFIG_SPECULATION_MITIGATIONS=y
+CONFIG_PAGE_TABLE_ISOLATION=y
+CONFIG_RETPOLINE=y
+CONFIG_RETHUNK=y
+CONFIG_CPU_UNRET_ENTRY=y
+CONFIG_CALL_DEPTH_TRACKING=y
+# CONFIG_CALL_THUNKS_DEBUG is not set
+CONFIG_CPU_IBPB_ENTRY=y
+CONFIG_CPU_IBRS_ENTRY=y
+CONFIG_CPU_SRSO=y
+# CONFIG_SLS is not set
+# CONFIG_GDS_FORCE_MITIGATION is not set
+CONFIG_ARCH_HAS_ADD_PAGES=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_ARCH_HIBERNATION_HEADER=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HIBERNATE_CALLBACKS=y
+CONFIG_HIBERNATION=y
+CONFIG_HIBERNATION_SNAPSHOT_DEV=y
+CONFIG_PM_STD_PARTITION=""
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_USERSPACE_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TRACE_RTC is not set
+CONFIG_PM_CLK=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+# CONFIG_ENERGY_MODEL is not set
+CONFIG_ARCH_SUPPORTS_ACPI=y
+CONFIG_ACPI=y
+CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
+CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
+CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
+# CONFIG_ACPI_DEBUGGER is not set
+CONFIG_ACPI_SPCR_TABLE=y
+# CONFIG_ACPI_FPDT is not set
+CONFIG_ACPI_LPIT=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
+CONFIG_ACPI_EC_DEBUGFS=m
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=m
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_TAD=m
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_CPU_FREQ_PSS=y
+CONFIG_ACPI_PROCESSOR_CSTATE=y
+CONFIG_ACPI_PROCESSOR_IDLE=y
+CONFIG_ACPI_CPPC_LIB=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
+CONFIG_ACPI_THERMAL=y
+CONFIG_ACPI_PLATFORM_PROFILE=m
+CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
+CONFIG_ACPI_TABLE_UPGRADE=y
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_CONTAINER=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_HOTPLUG_IOAPIC=y
+CONFIG_ACPI_SBS=m
+CONFIG_ACPI_HED=y
+# CONFIG_ACPI_CUSTOM_METHOD is not set
+CONFIG_ACPI_BGRT=y
+CONFIG_ACPI_NFIT=m
+# CONFIG_NFIT_SECURITY_DEBUG is not set
+CONFIG_ACPI_NUMA=y
+CONFIG_ACPI_HMAT=y
+CONFIG_HAVE_ACPI_APEI=y
+CONFIG_HAVE_ACPI_APEI_NMI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=m
+# CONFIG_ACPI_APEI_ERST_DEBUG is not set
+# CONFIG_ACPI_DPTF is not set
+CONFIG_ACPI_WATCHDOG=y
+CONFIG_ACPI_EXTLOG=m
+CONFIG_ACPI_ADXL=y
+# CONFIG_ACPI_CONFIGFS is not set
+# CONFIG_ACPI_PFRUT is not set
+CONFIG_ACPI_PCC=y
+# CONFIG_ACPI_FFH is not set
+CONFIG_PMIC_OPREGION=y
+CONFIG_ACPI_PRMT=y
+CONFIG_X86_PM_TIMER=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+
+#
+# CPU frequency scaling drivers
+#
+CONFIG_X86_INTEL_PSTATE=y
+# CONFIG_X86_PCC_CPUFREQ is not set
+CONFIG_X86_AMD_PSTATE=y
+CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3
+# CONFIG_X86_AMD_PSTATE_UT is not set
+CONFIG_X86_ACPI_CPUFREQ=m
+CONFIG_X86_ACPI_CPUFREQ_CPB=y
+CONFIG_X86_POWERNOW_K8=m
+CONFIG_X86_AMD_FREQ_SENSITIVITY=m
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_P4_CLOCKMOD=m
+
+#
+# shared options
+#
+CONFIG_X86_SPEEDSTEP_LIB=m
+# end of CPU Frequency scaling
+
+#
+# CPU Idle
+#
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_GOV_LADDER is not set
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_CPU_IDLE_GOV_TEO is not set
+CONFIG_CPU_IDLE_GOV_HALTPOLL=y
+CONFIG_HALTPOLL_CPUIDLE=y
+# end of CPU Idle
+
+CONFIG_INTEL_IDLE=y
+# end of Power management and ACPI options
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_XEN=y
+CONFIG_MMCONF_FAM10H=y
+CONFIG_ISA_DMA_API=y
+CONFIG_AMD_NB=y
+# end of Bus options (PCI etc.)
+
+#
+# Binary Emulations
+#
+CONFIG_IA32_EMULATION=y
+# CONFIG_X86_X32_ABI is not set
+CONFIG_COMPAT_32=y
+CONFIG_COMPAT=y
+CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
+# end of Binary Emulations
+
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_PFNCACHE=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQFD=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
+CONFIG_HAVE_KVM_DIRTY_RING=y
+CONFIG_HAVE_KVM_DIRTY_RING_TSO=y
+CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_MMIO=y
+CONFIG_KVM_ASYNC_PF=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
+CONFIG_KVM_VFIO=y
+CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
+CONFIG_KVM_COMPAT=y
+CONFIG_HAVE_KVM_IRQ_BYPASS=y
+CONFIG_HAVE_KVM_NO_POLL=y
+CONFIG_KVM_XFER_TO_GUEST_WORK=y
+CONFIG_HAVE_KVM_PM_NOTIFIER=y
+CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+CONFIG_X86_SGX_KVM=y
+CONFIG_KVM_AMD=m
+CONFIG_KVM_AMD_SEV=y
+CONFIG_KVM_SMM=y
+# CONFIG_KVM_XEN is not set
+CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y
+CONFIG_AS_AVX512=y
+CONFIG_AS_SHA1_NI=y
+CONFIG_AS_SHA256_NI=y
+CONFIG_AS_TPAUSE=y
+CONFIG_AS_GFNI=y
+CONFIG_AS_WRUSS=y
+
+#
+# General architecture-dependent options
+#
+CONFIG_HOTPLUG_SMT=y
+CONFIG_HOTPLUG_CORE_SYNC=y
+CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
+CONFIG_HOTPLUG_CORE_SYNC_FULL=y
+CONFIG_HOTPLUG_SPLIT_STARTUP=y
+CONFIG_HOTPLUG_PARALLEL=y
+CONFIG_GENERIC_ENTRY=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_STATIC_KEYS_SELFTEST is not set
+# CONFIG_STATIC_CALL_SELFTEST is not set
+CONFIG_OPTPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_UPROBES=y
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_KRETPROBE_ON_RETHOOK=y
+CONFIG_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
+CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
+CONFIG_HAVE_NMI=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
+CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
+CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y
+CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
+CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
+CONFIG_ARCH_WANTS_NO_INSTR=y
+CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_RUST=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_HAVE_OBJTOOL=y +CONFIG_HAVE_JUMP_LABEL_HACK=y +CONFIG_HAVE_NOINSTR_HACK=y +CONFIG_HAVE_NOINSTR_VALIDATION=y +CONFIG_HAVE_UACCESS_VALIDATION=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_ARCH_HAS_CC_PLATFORM=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y +CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y +CONFIG_DYNAMIC_SIGFRAME=y +CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y +CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y 
+# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_16B=y +CONFIG_FUNCTION_ALIGNMENT=16 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_CGROUP_IOLATENCY=y +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# 
+# Memory Management options
+#
+CONFIG_ZPOOL=y
+CONFIG_SWAP=y
+CONFIG_ZSWAP=y
+# CONFIG_ZSWAP_DEFAULT_ON is not set
+# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
+CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
+CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
+CONFIG_ZBUD=y
+# CONFIG_Z3FOLD is not set
+CONFIG_ZSMALLOC=y
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_ZSMALLOC_CHAIN_SIZE=8
+
+#
+# SLAB allocator options
+#
+# CONFIG_SLAB_DEPRECATED is not set
+CONFIG_SLUB=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+# CONFIG_SLAB_FREELIST_HARDENED is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# CONFIG_RANDOM_KMALLOC_CACHES is not set
+# end of SLAB allocator options
+
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y
+CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
+CONFIG_HAVE_FAST_GUP=y
+CONFIG_NUMA_KEEP_MEMINFO=y
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_HAVE_BOOTMEM_INFO_NODE=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MHP_MEMMAP_ON_MEMORY=y
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+CONFIG_MEMORY_BALLOON=y
+CONFIG_BALLOON_COMPACTION=y
+CONFIG_COMPACTION=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_PAGE_REPORTING=y
+CONFIG_MIGRATION=y
+CONFIG_DEVICE_MIGRATION=y
+CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
+CONFIG_ARCH_ENABLE_THP_MIGRATION=y
+CONFIG_CONTIG_ALLOC=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU_NOTIFIER=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_HWPOISON_INJECT=m
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
+CONFIG_ARCH_WANTS_THP_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+CONFIG_THP_SWAP=y
+CONFIG_READ_ONLY_THP_FOR_FS=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+# CONFIG_CMA_DEBUGFS is not set
+# CONFIG_CMA_SYSFS is not set
+CONFIG_CMA_AREAS=19
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
+CONFIG_PAGE_IDLE_FLAG=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y
+CONFIG_ARCH_HAS_PTE_DEVMAP=y
+CONFIG_ZONE_DMA=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ZONE_DEVICE=y
+CONFIG_HMM_MIRROR=y
+CONFIG_GET_FREE_REGION=y
+CONFIG_DEVICE_PRIVATE=y
+CONFIG_VMAP_PFN=y
+CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
+CONFIG_ARCH_HAS_PKEYS=y
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_TEST is not set
+# CONFIG_DMAPOOL_TEST is not set
+CONFIG_ARCH_HAS_PTE_SPECIAL=y
+CONFIG_MAPPING_DIRTY_HELPERS=y
+CONFIG_MEMFD_CREATE=y
+CONFIG_SECRETMEM=y
+# CONFIG_ANON_VMA_NAME is not set
+CONFIG_USERFAULTFD=y
+CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
+CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y
+CONFIG_PTE_MARKER_UFFD_WP=y
+CONFIG_LRU_GEN=y
+# CONFIG_LRU_GEN_ENABLED is not set
+# CONFIG_LRU_GEN_STATS is not set
+CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y
+CONFIG_PER_VMA_LOCK=y
+CONFIG_LOCK_MM_AND_FIND_VMA=y
+
+#
+# Data Access Monitoring
+#
+CONFIG_DAMON=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_PADDR=y
+# CONFIG_DAMON_SYSFS is not set
+CONFIG_DAMON_DBGFS=y
+# CONFIG_DAMON_RECLAIM is not set
+# CONFIG_DAMON_LRU_SORT is not set
+# end of Data Access Monitoring
+# end of Memory Management options
+
+CONFIG_NET=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_XGRESS=y
+CONFIG_NET_REDIRECT=y
+CONFIG_SKB_EXTENSIONS=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_SCM=y
+CONFIG_AF_UNIX_OOB=y
+CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+# CONFIG_TLS_TOE is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_USER_COMPAT is not set
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_AH=m
+CONFIG_XFRM_ESP=m
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_NET_HANDSHAKE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE_COMMON=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_NET_UDP_TUNNEL=m
+# CONFIG_NET_FOU is not set
+# CONFIG_NET_FOU_IP_TUNNELS is not set
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+# CONFIG_INET_ESPINTCP is not set
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TABLE_PERTURB_ORDER=16
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
+# CONFIG_INET_DIAG_DESTROY is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=m
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+# CONFIG_INET6_ESPINTCP is not set
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+# CONFIG_IPV6_ILA is not set
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_IPV6_SEG6_LWTUNNEL is not set
+# CONFIG_IPV6_SEG6_HMAC is not set
+# CONFIG_IPV6_RPL_LWTUNNEL is not set
+# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
+CONFIG_NETLABEL=y
+CONFIG_MPTCP=y
+CONFIG_INET_MPTCP_DIAG=m
+CONFIG_MPTCP_IPV6=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PTP_CLASSIFY=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=m
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_EGRESS=y
+CONFIG_NETFILTER_SKIP_EGRESS=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_FAMILY_BRIDGE=y
+CONFIG_NETFILTER_FAMILY_ARP=y
+CONFIG_NETFILTER_BPF_LINK=y
+# CONFIG_NETFILTER_NETLINK_HOOK is not set
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_NETLINK_OSF=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_SYSLOG=m
+CONFIG_NETFILTER_CONNCOUNT=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CONNTRACK_OVS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_REDIRECT=y
+CONFIG_NF_NAT_MASQUERADE=y
+CONFIG_NF_NAT_OVS=y
+CONFIG_NETFILTER_SYNPROXY=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_REJECT_INET=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+# CONFIG_NFT_SYNPROXY is not set
+CONFIG_NF_DUP_NETDEV=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+# CONFIG_NFT_REJECT_NETDEV is not set
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+# CONFIG_NF_FLOW_TABLE_PROCFS is not set
+CONFIG_NETFILTER_XTABLES=y
+# CONFIG_NETFILTER_XTABLES_COMPAT is not set
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
+
+#
+# Xtables targets
+#
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_NAT=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m 
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_DUP_IPV4=m
+CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
+CONFIG_NF_REJECT_IPV4=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+# end of IP: Netfilter Configuration
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_SOCKET_IPV6=m
+CONFIG_NF_TPROXY_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_REJECT_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_NF_DUP_IPV6=m
+CONFIG_NF_REJECT_IPV6=m
+CONFIG_NF_LOG_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+# CONFIG_IP6_NF_MATCH_SRH is not set
+# CONFIG_IP6_NF_TARGET_HL is not set
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+# end of IPv6: Netfilter Configuration
+
+CONFIG_NF_DEFRAG_IPV6=m
+CONFIG_NF_TABLES_BRIDGE=m
+# CONFIG_NFT_BRIDGE_META is not set
+CONFIG_NFT_BRIDGE_REJECT=m
+# CONFIG_NF_CONNTRACK_BRIDGE is not set
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+# CONFIG_BPFILTER is not set
+# CONFIG_IP_DCCP is not set
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_INET_SCTP_DIAG=m
+# CONFIG_RDS is not set
+CONFIG_TIPC=m
+CONFIG_TIPC_MEDIA_IB=y
+CONFIG_TIPC_MEDIA_UDP=y
+CONFIG_TIPC_CRYPTO=y
+CONFIG_TIPC_DIAG=m
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+# CONFIG_ATM_MPOA is not set
+CONFIG_ATM_BR2684=m
+# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_MRP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+# CONFIG_BRIDGE_MRP is not set
+# CONFIG_BRIDGE_CFM is not set
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+CONFIG_6LOWPAN=m
+# CONFIG_6LOWPAN_DEBUGFS is not set
+# CONFIG_6LOWPAN_NHC is not set
+CONFIG_IEEE802154=m
+# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
+CONFIG_IEEE802154_SOCKET=m
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_MAC802154=m
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+# CONFIG_NET_SCH_CBS is not set
+# CONFIG_NET_SCH_ETF is not set
+CONFIG_NET_SCH_MQPRIO_LIB=m
+# CONFIG_NET_SCH_TAPRIO is not set
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+# CONFIG_NET_SCH_SKBPRIO is not set
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=y
+# CONFIG_NET_SCH_CAKE is not set
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+# CONFIG_NET_SCH_FQ_PIE is not set
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+# CONFIG_NET_SCH_ETS is not set
+CONFIG_NET_SCH_DEFAULT=y
+# CONFIG_DEFAULT_FQ is not set
+# CONFIG_DEFAULT_CODEL is not set
+CONFIG_DEFAULT_FQ_CODEL=y
+# CONFIG_DEFAULT_SFQ is not set
+# CONFIG_DEFAULT_PFIFO_FAST is not set
+CONFIG_DEFAULT_NET_SCH="fq_codel"
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_IPSET=m
+# CONFIG_NET_EMATCH_IPT is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+# CONFIG_NET_ACT_MPLS is not set
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+# CONFIG_NET_ACT_CONNMARK is not set
+# CONFIG_NET_ACT_CTINFO is not set
+CONFIG_NET_ACT_SKBMOD=m
+# CONFIG_NET_ACT_IFE is not set
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
+# CONFIG_NET_ACT_GATE is not set
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_SCH_FIFO=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=m
+# CONFIG_BATMAN_ADV is not set
+CONFIG_OPENVSWITCH=m
+CONFIG_OPENVSWITCH_GRE=m
+CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_OPENVSWITCH_GENEVE=m
+CONFIG_VSOCKETS=m
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VSOCKETS_LOOPBACK=m
+CONFIG_VMWARE_VMCI_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
+CONFIG_HYPERV_VSOCKETS=m
+CONFIG_NETLINK_DIAG=m
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=y
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=y
+# CONFIG_HSR is not set
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_L3_MASTER_DEV=y
+# CONFIG_QRTR is not set
+# CONFIG_NET_NCSI is not set
+CONFIG_PCPU_DEV_REFCNT=y
+CONFIG_MAX_SKB_FRAGS=17
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_XPS=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_DROP_MONITOR=y
+# end of Network testing
+# end of Networking options
+ +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +CONFIG_BT_HCIBTUSB_BCM=y +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=y +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+CONFIG_HOTPLUG_PCI_SHPC=y
+
+#
+# PCI controller drivers
+#
+CONFIG_VMD=y
+CONFIG_PCI_HYPERV_INTERFACE=m
+
+#
+# Cadence-based PCIe controllers
+#
+# end of Cadence-based PCIe controllers
+
+#
+# DesignWare-based PCIe controllers
+#
+# CONFIG_PCI_MESON is not set
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+# end of DesignWare-based PCIe controllers
+
+#
+# Mobiveil-based PCIe controllers
+#
+# end of Mobiveil-based PCIe controllers
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# CONFIG_PCI_ENDPOINT is not set
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# CONFIG_PCI_SW_SWITCHTEC is not set
+# end of PCI switch controller drivers
+
+# CONFIG_CXL_BUS is not set
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=m
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+# CONFIG_RAPIDIO is not set
+
+#
+# Generic Driver Options
+#
+CONFIG_AUXILIARY_BUS=y
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEVTMPFS_SAFE is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_DEBUG=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+# CONFIG_FW_LOADER_COMPRESS is not set
+CONFIG_FW_CACHE=y
+# CONFIG_FW_UPLOAD is not set
+# end of Firmware loader
+
+CONFIG_WANT_DEV_COREDUMP=y
+CONFIG_ALLOW_DEV_COREDUMP=y
+CONFIG_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+CONFIG_HMEM_REPORTING=y
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_SYS_HYPERVISOR=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=m
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_MHI_BUS is not set
+# CONFIG_MHI_BUS_EP is not set
+# end of Bus devices
+
+#
+# Cache Drivers
+#
+# end of Cache Drivers
+
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+
+#
+# Firmware Drivers
+#
+
+#
+# ARM System Control and Management Interface Protocol
+#
+# end of ARM System Control and Management Interface Protocol
+
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DMIID=y
+CONFIG_DMI_SYSFS=y
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_IBFT=m
+CONFIG_FW_CFG_SYSFS=y
+# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
+CONFIG_SYSFB=y
+# CONFIG_SYSFB_SIMPLEFB is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_VARS_PSTORE=y
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+CONFIG_EFI_SOFT_RESERVE=y
+CONFIG_EFI_DXE_MEM_ATTRIBUTES=y
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+# CONFIG_EFI_BOOTLOADER_CONTROL is not set
+# CONFIG_EFI_CAPSULE_LOADER is not set
+# CONFIG_EFI_TEST is not set
+# CONFIG_APPLE_PROPERTIES is not set
+# CONFIG_RESET_ATTACK_MITIGATION is not set
+CONFIG_EFI_RCI2_TABLE=y
+# CONFIG_EFI_DISABLE_PCI_DMA is not set
+CONFIG_EFI_EARLYCON=y
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
+# CONFIG_EFI_DISABLE_RUNTIME is not set
+CONFIG_EFI_COCO_SECRET=y
+CONFIG_UNACCEPTED_MEMORY=y
+# end of EFI (Extensible Firmware Interface) Support
+
+CONFIG_UEFI_CPER=y
+CONFIG_UEFI_CPER_X86=y
+
+#
+# Tegra firmware driver
+#
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
+# CONFIG_GNSS is not set
+CONFIG_MTD=m
+# CONFIG_MTD_TESTS is not set
+
+#
+# Partition parsers
+#
+# CONFIG_MTD_AR7_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# end of Partition parsers
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+# CONFIG_MTD_BLOCK_RO is not set
+
+#
+# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK.
+#
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_SWAP is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# end of RAM/ROM/Flash chip drivers
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+# end of Mapping drivers for chip access
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# end of Self-contained MTD device drivers
+
+#
+# NAND
+#
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_RAW_NAND is not set
+
+#
+# ECC engine support
+#
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# end of ECC engine support
+# end of NAND
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# end of LPDDR & LPDDR2 PCM memory drivers
+
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_LIMIT=20
+# CONFIG_MTD_UBI_FASTMAP is not set
+# CONFIG_MTD_UBI_GLUEBI is not set
+# CONFIG_MTD_UBI_BLOCK is not set
+# CONFIG_MTD_HYPERBUS is not set
+# CONFIG_OF is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_SERIAL=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_NULL_BLK=m
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_CDROM=m
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+CONFIG_ZRAM=m
+CONFIG_ZRAM_DEF_COMP_LZORLE=y
+# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
+# CONFIG_ZRAM_DEF_COMP_LZO is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
+CONFIG_ZRAM_DEF_COMP="lzo-rle"
+CONFIG_ZRAM_WRITEBACK=y
+# CONFIG_ZRAM_MEMORY_TRACKING is not set
+# CONFIG_ZRAM_MULTI_COMP is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
+# CONFIG_BLK_DEV_DRBD is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_UBLK=m
+CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=m
+CONFIG_BLK_DEV_NVME=m
+CONFIG_NVME_MULTIPATH=y
+# CONFIG_NVME_VERBOSE_ERRORS is not set
+# CONFIG_NVME_HWMON is not set
+CONFIG_NVME_FABRICS=m
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TCP=m
+# CONFIG_NVME_AUTH is not set
+CONFIG_NVME_TARGET=m
+# CONFIG_NVME_TARGET_PASSTHRU is not set
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_FCLOOP=m
+CONFIG_NVME_TARGET_TCP=m
+# CONFIG_NVME_TARGET_AUTH is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+CONFIG_SENSORS_LIS3LV02D=m
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+# CONFIG_ICS932S401 is not set
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_SGI_XP=m
+CONFIG_HP_ILO=m
+CONFIG_SGI_GRU=m
+# CONFIG_SGI_GRU_DEBUG is not set
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+CONFIG_VMWARE_BALLOON=m
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+CONFIG_MISC_RTSX=m
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# end of EEPROM support
+
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# end of Texas Instruments shared transport line discipline
+
+CONFIG_SENSORS_LIS3_I2C=m
+
+#
+# Altera FPGA firmware download module (requires I2C)
+#
+CONFIG_ALTERA_STAPL=m
+CONFIG_INTEL_MEI=m
+CONFIG_INTEL_MEI_ME=m
+# CONFIG_INTEL_MEI_TXE is not set
+# CONFIG_INTEL_MEI_GSC is not set
+# CONFIG_INTEL_MEI_HDCP is not set
+# CONFIG_INTEL_MEI_PXP is not set
+# CONFIG_INTEL_MEI_GSC_PROXY is not set
+CONFIG_VMWARE_VMCI=m
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
+CONFIG_MISC_RTSX_PCI=m
+CONFIG_MISC_RTSX_USB=m
+# CONFIG_UACCE is not set
+CONFIG_PVPANIC=y
+# CONFIG_PVPANIC_MMIO is not set
+# CONFIG_PVPANIC_PCI is not set
+# CONFIG_GP_PCI1XXXX is not set
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_BLK_DEV_BSG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+# end of SCSI Transports
+
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2X_FCOE=m
+CONFIG_BE2ISCSI=m
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+CONFIG_SCSI_HPSA=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AACRAID=m
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+CONFIG_SCSI_MPT2SAS=m
+# CONFIG_SCSI_MPI3MR is not set
+CONFIG_SCSI_SMARTPQI=m
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_MYRB is not set
+# CONFIG_SCSI_MYRS is not set
+CONFIG_VMWARE_PVSCSI=m
+# CONFIG_XEN_SCSI_FRONTEND is not set
+CONFIG_HYPERV_STORAGE=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+# CONFIG_FCOE is not set
+CONFIG_FCOE_FNIC=m
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FDOMAIN_PCI is not set
+CONFIG_SCSI_ISCI=m
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA_FC=m
+# CONFIG_TCM_QLA2XXX is not set
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_QEDI=m
+CONFIG_QEDF=m
+CONFIG_SCSI_LPFC=m
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
+# CONFIG_SCSI_EFCT is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+CONFIG_SCSI_DEBUG=m
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_BFA_FC is not set
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+# end of SCSI device support
+
+CONFIG_ATA=m
+CONFIG_SATA_HOST=y
+CONFIG_PATA_TIMINGS=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_FORCE=y
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_MOBILE_LPM_POLICY=0
+CONFIG_SATA_AHCI_PLATFORM=m
+# CONFIG_AHCI_DWC is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=m
+# CONFIG_SATA_DWC is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_PARPORT is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
+CONFIG_ATA_GENERIC=m
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_BITMAP_FILE=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+# CONFIG_MD_MULTIPATH is not set
+CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+# CONFIG_DM_UNSTRIPED is not set
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
+CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+# CONFIG_DM_MULTIPATH_IOA is not set
+CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_FEC is not set
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+# CONFIG_DM_ZONED is not set
+CONFIG_DM_AUDIT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+# CONFIG_TCM_FC is not set
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TARGET_CXGB4=m
+# CONFIG_SBP_TARGET is not set
+# CONFIG_REMOTE_TARGET is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+# CONFIG_FUSION_FC is not set
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+# CONFIG_FUSION_CTL is not set
+CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_WIREGUARD=m
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ARCNET is not set
+# CONFIG_ATM_DRIVERS is not set
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+CONFIG_NET_VENDOR_AMAZON=y
+CONFIG_ENA_ETHERNET=m
+# CONFIG_NET_VENDOR_AMD is not set
+CONFIG_NET_VENDOR_AQUANTIA=y
+CONFIG_AQTION=m
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ASIX=y
+CONFIG_NET_VENDOR_ATHEROS=y
+CONFIG_ATL2=m
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_ALX=m
+# CONFIG_CX_ECAT is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=m
+CONFIG_CNIC=m
+CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
+CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_SYSTEMPORT is not set
+CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
+CONFIG_BNXT_DCB=y
+CONFIG_BNXT_HWMON=y
+# CONFIG_NET_VENDOR_CADENCE is not set
+CONFIG_NET_VENDOR_CAVIUM=y
+# CONFIG_THUNDER_NIC_PF is not set
+# CONFIG_THUNDER_NIC_VF is not set
+# CONFIG_THUNDER_NIC_BGX is not set
+# CONFIG_THUNDER_NIC_RGX is not set
+CONFIG_CAVIUM_PTP=y
+CONFIG_LIQUIDIO_CORE=m
+CONFIG_LIQUIDIO=m
+CONFIG_LIQUIDIO_VF=m
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4=m
+# CONFIG_CHELSIO_T4_DCB is not set
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+CONFIG_CHELSIO_IPSEC_INLINE=m
+# CONFIG_CHELSIO_TLS_DEVICE is not set
+CONFIG_NET_VENDOR_CISCO=y
+CONFIG_ENIC=m
+# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_NET_VENDOR_DAVICOM=y
+CONFIG_DNET=m
+CONFIG_NET_VENDOR_DEC=y
+# CONFIG_NET_TULIP is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+CONFIG_NET_VENDOR_EMULEX=y
+CONFIG_BE2NET=m
+CONFIG_BE2NET_HWMON=y
+# CONFIG_BE2NET_BE2 is not set
+# CONFIG_BE2NET_BE3 is not set
+CONFIG_BE2NET_LANCER=y
+CONFIG_BE2NET_SKYHAWK=y
+CONFIG_NET_VENDOR_ENGLEDER=y
+# CONFIG_TSNEP is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_NET_VENDOR_FUNGIBLE=y
+# CONFIG_FUN_ETH is not set
+CONFIG_NET_VENDOR_GOOGLE=y
+CONFIG_GVE=m
+CONFIG_NET_VENDOR_HUAWEI=y
+CONFIG_HINIC=m
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_E1000E_HWTS=y
+CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
+CONFIG_IGB_DCA=y
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_DCA=y
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBE_IPSEC=y
+CONFIG_IXGBEVF=m
+CONFIG_IXGBEVF_IPSEC=y
+CONFIG_I40E=m
+CONFIG_I40E_DCB=y
+CONFIG_IAVF=m
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_ICE_SWITCHDEV=y
+CONFIG_ICE_HWTS=y
+CONFIG_FM10K=m
+CONFIG_IGC=m
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_LITEX=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_EN_DCB=y
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_BRIDGE=y
+CONFIG_MLX5_CLS_ACT=y
+CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
+CONFIG_MLX5_CORE_EN_DCB=y
+# CONFIG_MLX5_CORE_IPOIB is not set
+# CONFIG_MLX5_MACSEC is not set
+# CONFIG_MLX5_EN_IPSEC is not set
+# CONFIG_MLX5_EN_TLS is not set
+CONFIG_MLX5_SW_STEERING=y
+# CONFIG_MLX5_SF is not set
+CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+CONFIG_NET_VENDOR_MICROSOFT=y
+# CONFIG_MICROSOFT_MANA is not set
+CONFIG_NET_VENDOR_MYRI=y
+CONFIG_MYRI10GE=m
+CONFIG_MYRI10GE_DCA=y
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+CONFIG_NET_VENDOR_NETRONOME=y
+CONFIG_NFP=m
+CONFIG_NFP_APP_FLOWER=y
+CONFIG_NFP_APP_ABM_NIC=y
+CONFIG_NFP_NET_IPSEC=y
+# CONFIG_NFP_DEBUG is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+CONFIG_NET_VENDOR_OKI=y
+CONFIG_ETHOC=m
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+CONFIG_QLA3XXX=m
+# CONFIG_QLCNIC is not set
+CONFIG_NETXEN_NIC=m
+CONFIG_QED=m
+CONFIG_QED_LL2=y
+CONFIG_QED_SRIOV=y
+CONFIG_QEDE=m
+CONFIG_QED_RDMA=y
+CONFIG_QED_ISCSI=y
+CONFIG_QED_FCOE=y
+CONFIG_QED_OOO=y
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_ATP is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=m
+# CONFIG_NET_VENDOR_RENESAS is not set
+CONFIG_NET_VENDOR_ROCKER=y
+CONFIG_ROCKER=m
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+CONFIG_NET_VENDOR_SOLARFLARE=y
+CONFIG_SFC=m
+CONFIG_SFC_MTD=y
+CONFIG_SFC_MCDI_MON=y
+CONFIG_SFC_SRIOV=y
+CONFIG_SFC_MCDI_LOGGING=y
+# CONFIG_SFC_FALCON is not set
+# CONFIG_SFC_SIENA is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NET_VENDOR_WANGXUN=y
+CONFIG_LIBWX=m
+CONFIG_NGBE=m
+CONFIG_TXGBE=m
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NET_VENDOR_XILINX=y
+# CONFIG_XILINX_EMACLITE is not set
+# CONFIG_XILINX_AXI_EMAC is not set
+# CONFIG_XILINX_LL_TEMAC is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLINK=m
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_FIXED_PHY=y
+CONFIG_SFP=m
+
+#
+# MII PHY device drivers
+#
+CONFIG_AMD_PHY=m
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
+CONFIG_AQUANTIA_PHY=m
+CONFIG_AX88796B_PHY=m
+CONFIG_BROADCOM_PHY=m
+# CONFIG_BCM54140_PHY is not set
+CONFIG_BCM7XXX_PHY=m
+# CONFIG_BCM84881_PHY is not set
+CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
+CONFIG_BCM_NET_PHYPTP=m
+CONFIG_CICADA_PHY=m
+CONFIG_CORTINA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_INTEL_XWAY_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+# CONFIG_MARVELL_88Q2XXX_PHY is not set
+# CONFIG_MARVELL_88X2222_PHY is not set
+# CONFIG_MAXLINEAR_GPHY is not set
+# CONFIG_MEDIATEK_GE_PHY is not set
+CONFIG_MICREL_PHY=m
+# CONFIG_MICROCHIP_T1S_PHY is not set
+CONFIG_MICROCHIP_PHY=m
+CONFIG_MICROCHIP_T1_PHY=m
+CONFIG_MICROSEMI_PHY=m
+# CONFIG_MOTORCOMM_PHY is not set
+CONFIG_NATIONAL_PHY=m
+# CONFIG_NXP_CBTX_PHY is not set
+# CONFIG_NXP_C45_TJA11XX_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_NCN26000_PHY is not set
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_RENESAS_PHY=m
+CONFIG_ROCKCHIP_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_STE10XP=m
+CONFIG_TERANETICS_PHY=m
+CONFIG_DP83822_PHY=m
+CONFIG_DP83TC811_PHY=m
+CONFIG_DP83848_PHY=m
+CONFIG_DP83867_PHY=m
+# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
+CONFIG_VITESSE_PHY=m
+CONFIG_XILINX_GMII2RGMII=m
+# CONFIG_PSE_CONTROLLER is not set
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_BUS=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_ACPI_MDIO=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_BCM_UNIMAC=m
+CONFIG_MDIO_CAVIUM=m
+# CONFIG_MDIO_GPIO is not set
+CONFIG_MDIO_I2C=m
+# CONFIG_MDIO_MVUSB is not set
+CONFIG_MDIO_THUNDER=m
+
+#
+# MDIO Multiplexers
+#
+
+#
+# PCS device drivers
+#
+CONFIG_PCS_XPCS=m
+# end of PCS device drivers
+
+# CONFIG_PLIP is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOE_HASH_BITS_1 is not set
+# CONFIG_PPPOE_HASH_BITS_2 is not set
+CONFIG_PPPOE_HASH_BITS_4=y
+# CONFIG_PPPOE_HASH_BITS_8 is not set
+CONFIG_PPPOE_HASH_BITS=4
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLHC=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_AX88179_178A=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SR9700 is not set
+# CONFIG_USB_NET_SR9800 is not set
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_USB_NET_CH9200=m
+# CONFIG_USB_NET_AQC111 is not set
+CONFIG_USB_RTL8153_ECM=m
+CONFIG_WLAN=y
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH_COMMON=m
+CONFIG_WLAN_VENDOR_ATH=y
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_COMMON_DEBUG=y
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+CONFIG_ATH9K_AHB=y
+CONFIG_ATH9K_DEBUGFS=y
+# CONFIG_ATH9K_STATION_STATISTICS is not set
+# CONFIG_ATH9K_DYNACK is not set
+CONFIG_ATH9K_WOW=y
+CONFIG_ATH9K_RFKILL=y
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
+CONFIG_ATH9K_PCOEM=y
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+# CONFIG_ATH9K_HWRNG is not set
+# CONFIG_ATH9K_COMMON_SPECTRAL is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
+CONFIG_ATH10K=m
+CONFIG_ATH10K_CE=y
+CONFIG_ATH10K_PCI=m
+# CONFIG_ATH10K_SDIO is not set
+# CONFIG_ATH10K_USB is not set
+# CONFIG_ATH10K_DEBUG is not set
+# CONFIG_ATH10K_DEBUGFS is not set
+# CONFIG_ATH10K_TRACING is not set
+# CONFIG_WCN36XX is not set
+# CONFIG_ATH11K is not set
+# CONFIG_ATH12K is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+CONFIG_WLAN_VENDOR_BROADCOM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_BRCMUTIL=m
+CONFIG_BRCMSMAC=m
+CONFIG_BRCMSMAC_LEDS=y
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_PROTO_BCDC=y
+CONFIG_BRCMFMAC_PROTO_MSGBUF=y
+CONFIG_BRCMFMAC_SDIO=y
+CONFIG_BRCMFMAC_USB=y
+CONFIG_BRCMFMAC_PCIE=y
+# CONFIG_BRCM_TRACING is not set
+# CONFIG_BRCMDBG is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+CONFIG_WLAN_VENDOR_INTEL=y
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_LEDS=y
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_OPMODE_MODULAR=y
+
+#
+# Debugging Options
+#
+# CONFIG_IWLWIFI_DEBUG is not set
+CONFIG_IWLWIFI_DEBUGFS=y
+# CONFIG_IWLWIFI_DEVICE_TRACING is not set
+# end of Debugging Options
+
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+CONFIG_WLAN_VENDOR_MARVELL=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_MWIFIEX_USB=m
+# CONFIG_MWL8K is not set
+CONFIG_WLAN_VENDOR_MEDIATEK=y
+CONFIG_MT7601U=m
+CONFIG_MT76_CORE=m
+CONFIG_MT76_LEDS=y
+CONFIG_MT76_USB=m
+CONFIG_MT76x02_LIB=m
+CONFIG_MT76x02_USB=m
+CONFIG_MT76x0_COMMON=m
+CONFIG_MT76x0U=m
+# CONFIG_MT76x0E is not set
+CONFIG_MT76x2_COMMON=m
+# CONFIG_MT76x2E is not set
+CONFIG_MT76x2U=m
+# CONFIG_MT7603E is not set
+# CONFIG_MT7615E is not set
+# CONFIG_MT7663U is not set
+# CONFIG_MT7663S is not set
+# CONFIG_MT7915E is not set
+# CONFIG_MT7921E is not set
+# CONFIG_MT7921S is not set
+# CONFIG_MT7921U is not set
+# CONFIG_MT7996E is not set
+CONFIG_WLAN_VENDOR_MICROCHIP=y
+# CONFIG_WILC1000_SDIO is not set
+CONFIG_WLAN_VENDOR_PURELIFI=y
+# CONFIG_PLFXLC is not set
+CONFIG_WLAN_VENDOR_RALINK=y
+CONFIG_RT2X00=m
+# CONFIG_RT2400PCI is not set
+# CONFIG_RT2500PCI is not set
+# CONFIG_RT61PCI is not set
+CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT33XX=y
+CONFIG_RT2800PCI_RT35XX=y
+CONFIG_RT2800PCI_RT53XX=y
+CONFIG_RT2800PCI_RT3290=y
+# CONFIG_RT2500USB is not set
+# CONFIG_RT73USB is not set
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
+CONFIG_RT2800USB_RT3573=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2800_LIB_MMIO=m
+CONFIG_RT2X00_LIB_MMIO=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2X00_LIB_DEBUGFS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WLAN_VENDOR_REALTEK=y
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+CONFIG_RTL_CARDS=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTLWIFI_PCI=m
+CONFIG_RTLWIFI_USB=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8192C_COMMON=m
+CONFIG_RTL8723_COMMON=m
+CONFIG_RTLBTCOEXIST=m
+CONFIG_RTL8XXXU=m
+# CONFIG_RTL8XXXU_UNTESTED is not set
+CONFIG_RTW88=m
+CONFIG_RTW88_CORE=m
+CONFIG_RTW88_PCI=m
+CONFIG_RTW88_8822B=m
+CONFIG_RTW88_8822C=m
+CONFIG_RTW88_8822BE=m
+# CONFIG_RTW88_8822BS is not set
+# CONFIG_RTW88_8822BU is not set
+CONFIG_RTW88_8822CE=m
+# CONFIG_RTW88_8822CS is not set
+# CONFIG_RTW88_8822CU is not set
+# CONFIG_RTW88_8723DE is not set
+# CONFIG_RTW88_8723DS is not set
+# CONFIG_RTW88_8723DU is not set
+# CONFIG_RTW88_8821CE is not set
+# CONFIG_RTW88_8821CS is not set
+# CONFIG_RTW88_8821CU is not set
+# CONFIG_RTW88_DEBUG is not set
+# CONFIG_RTW88_DEBUGFS is not set
+# CONFIG_RTW89 is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+CONFIG_WLAN_VENDOR_SILABS=y
+# CONFIG_WFX is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_WLAN_VENDOR_QUANTENNA=y
+# CONFIG_QTNFMAC_PCIE is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+CONFIG_MAC80211_HWSIM=m
+# CONFIG_VIRT_WIFI is not set
+CONFIG_WAN=y
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+# CONFIG_HDLC_RAW_ETH is not set
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+
+#
+# X.25/LAPB support is disabled
+#
+# CONFIG_PCI200SYN is not set
+# CONFIG_WANXL is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_FARSYNC is not set
+CONFIG_IEEE802154_DRIVERS=m
+CONFIG_IEEE802154_FAKELB=m
+# CONFIG_IEEE802154_ATUSB is not set
+# CONFIG_IEEE802154_HWSIM is not set
+
+#
+# Wireless WAN
+#
+# CONFIG_WWAN is not set
+# end of Wireless WAN
+
+CONFIG_XEN_NETDEV_FRONTEND=m
+CONFIG_VMXNET3=m
+CONFIG_FUJITSU_ES=m
+CONFIG_HYPERV_NET=m
+CONFIG_NETDEVSIM=m
+CONFIG_NET_FAILOVER=m
+CONFIG_ISDN=y
+CONFIG_ISDN_CAPI=y
+CONFIG_CAPI_TRACE=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+
+#
+# mISDN hardware drivers
+#
+CONFIG_MISDN_HFCPCI=m
+CONFIG_MISDN_HFCMULTI=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_MISDN_AVMFRITZ=m
+CONFIG_MISDN_SPEEDFAX=m
+CONFIG_MISDN_INFINEON=m
+CONFIG_MISDN_W6692=m
+CONFIG_MISDN_NETJET=m
+CONFIG_MISDN_HDLC=m
+CONFIG_MISDN_IPAC=m
+CONFIG_MISDN_ISAR=m
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_SPARSEKMAP=m
+# CONFIG_INPUT_MATRIXKMAP is not set
+CONFIG_INPUT_VIVALDIFMAP=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADC is not set
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1050 is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_DLINK_DIR685 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_BYD=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_PS2_FOCALTECH=y
+CONFIG_MOUSE_PS2_VMMOUSE=y
+CONFIG_MOUSE_PS2_SMBUS=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_ELAN_I2C=m
+CONFIG_MOUSE_ELAN_I2C_I2C=y
+# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
+CONFIG_MOUSE_VSXXXAA=m
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+# CONFIG_INPUT_JOYSTICK is not set
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+# CONFIG_TABLET_USB_HANWANG is not set
+CONFIG_TABLET_USB_KBTAB=m
+# CONFIG_TABLET_USB_PEGASUS is not set
+CONFIG_TABLET_SERIAL_WACOM4=m
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ADC is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_BU21029 is not set
+# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
+# CONFIG_TOUCHSCREEN_CYTTSP5 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set
+# CONFIG_TOUCHSCREEN_EXC3000 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GOODIX is not set
+# CONFIG_TOUCHSCREEN_HIDEEP is not set
+# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set
+# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_ILITEK is not set
+# CONFIG_TOUCHSCREEN_S6SY761 is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_EKTF2127 is not set
+# CONFIG_TOUCHSCREEN_ELAN is not set
+CONFIG_TOUCHSCREEN_ELO=m
+CONFIG_TOUCHSCREEN_WACOM_W8001=m
+CONFIG_TOUCHSCREEN_WACOM_I2C=m
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
+# CONFIG_TOUCHSCREEN_MSG2638 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set
+# CONFIG_TOUCHSCREEN_IMAGIS is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2004 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_RM_TS is not set
+# CONFIG_TOUCHSCREEN_SILEAD is not set
+# CONFIG_TOUCHSCREEN_SIS_I2C is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_STMFTS is not set
+# CONFIG_TOUCHSCREEN_SX8654 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_ZET6223 is not set
+# CONFIG_TOUCHSCREEN_ZFORCE is not set
+# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set
+# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set
+# CONFIG_TOUCHSCREEN_IQS5XX is not set
+# CONFIG_TOUCHSCREEN_IQS7211 is not set
+# CONFIG_TOUCHSCREEN_ZINITIX is not set
+# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+CONFIG_INPUT_PCSPKR=m
+# CONFIG_INPUT_MMA8450 is not set
+CONFIG_INPUT_APANEL=m
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_DECODER is not set
+# CONFIG_INPUT_GPIO_VIBRA is not set
+CONFIG_INPUT_ATLAS_BTNS=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+# CONFIG_INPUT_KXTJ9 is not set
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_PWM_VIBRA is not set
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+# CONFIG_INPUT_DA7280_HAPTICS is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_IQS269A is not set
+# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
+# CONFIG_INPUT_CMA3000 is not set
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
+# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+CONFIG_RMI4_CORE=m
+CONFIG_RMI4_I2C=m
+CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+# CONFIG_RMI4_F34 is not set
+# CONFIG_RMI4_F3A is not set
+CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_SERIO_ARC_PS2=m
+CONFIG_HYPERV_KEYBOARD=m
+# CONFIG_SERIO_GPIO_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LEGACY_TIOCSTI=y
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+# CONFIG_SERIAL_8250_16550A_VARIANTS is not set
+# CONFIG_SERIAL_8250_FINTEK is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCILIB=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_DWLIB=y
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_RT288X is not set
+CONFIG_SERIAL_8250_LPSS=y
+CONFIG_SERIAL_8250_MID=y
+CONFIG_SERIAL_8250_PERICOM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_KGDB_NMI is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_CONSOLE_POLL=y
+CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_LANTIQ is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+CONFIG_N_HDLC=m
+CONFIG_N_GSM=m
+CONFIG_NOZOMI=m
+# CONFIG_NULL_TTY is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
+# CONFIG_SERIAL_DEV_BUS is not set
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+CONFIG_PPDEV=m
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_PLAT_DATA=y
+CONFIG_IPMI_PANIC_EVENT=y
+CONFIG_IPMI_PANIC_STRING=y
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HW_RANDOM_INTEL=m
+CONFIG_HW_RANDOM_AMD=m
+# CONFIG_HW_RANDOM_BA431 is not set
+CONFIG_HW_RANDOM_VIA=m
+CONFIG_HW_RANDOM_VIRTIO=y
+# CONFIG_HW_RANDOM_XIPHERA is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_MWAVE is not set
+CONFIG_DEVMEM=y
+CONFIG_NVRAM=y
+CONFIG_DEVPORT=y
+CONFIG_HPET=y
+CONFIG_HPET_MMAP=y
+# CONFIG_HPET_MMAP_DEFAULT is not set
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_UV_MMTIMER=m
+CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
+CONFIG_TCG_TIS_CORE=y
+CONFIG_TCG_TIS=y
+# CONFIG_TCG_TIS_I2C is not set
+# CONFIG_TCG_TIS_I2C_CR50 is not set
+CONFIG_TCG_TIS_I2C_ATMEL=m
+CONFIG_TCG_TIS_I2C_INFINEON=m
+CONFIG_TCG_TIS_I2C_NUVOTON=m
+CONFIG_TCG_NSC=m
+CONFIG_TCG_ATMEL=m
+CONFIG_TCG_INFINEON=m
+# CONFIG_TCG_XEN is not set
+CONFIG_TCG_CRB=y
+# CONFIG_TCG_VTPM_PROXY is not set
+CONFIG_TCG_TIS_ST33ZP24=m
+CONFIG_TCG_TIS_ST33ZP24_I2C=m
+CONFIG_TELCLOCK=m
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# I2C support
+#
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_MUX_GPIO is not set
+# CONFIG_I2C_MUX_LTC4306 is not set
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_PCA954x is not set
+# CONFIG_I2C_MUX_REG is not set
+CONFIG_I2C_MUX_MLXCPLD=m
+# end of Multiplexer I2C Chip support
+
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+# CONFIG_I2C_AMD_MP2 is not set
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_ISMT=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_NFORCE2_S4985=m
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+
+#
+# ACPI drivers
+#
+CONFIG_I2C_SCMI=m
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE=m
+# CONFIG_I2C_DESIGNWARE_SLAVE is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+# CONFIG_I2C_DESIGNWARE_AMDPSP is not set
+CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_DIOLAN_U2C=m
+# CONFIG_I2C_CP2615 is not set
+CONFIG_I2C_PARPORT=m
+# CONFIG_I2C_PCI1XXXX is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_MLXCPLD=m
+# CONFIG_I2C_VIRTIO is not set
+# end of I2C Hardware Bus support
+
+CONFIG_I2C_STUB=m
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+# CONFIG_SPI is not set
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_DP83640_PHY=m
+# CONFIG_PTP_1588_CLOCK_INES is not set
+CONFIG_PTP_1588_CLOCK_KVM=m
+# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set
+# CONFIG_PTP_1588_CLOCK_IDTCM is not set
+# CONFIG_PTP_1588_CLOCK_MOCK is not set
+# CONFIG_PTP_1588_CLOCK_VMW is not set
+# CONFIG_PTP_1588_CLOCK_OCP is not set
+# end of PTP clock support
+
+CONFIG_PINCTRL=y
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_AMD is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+# CONFIG_PINCTRL_MCP23S08 is not set
+
+#
+# Intel pinctrl drivers
+#
+CONFIG_PINCTRL_BAYTRAIL=y
+# CONFIG_PINCTRL_CHERRYVIEW is not set
+# CONFIG_PINCTRL_LYNXPOINT is not set
+CONFIG_PINCTRL_INTEL=y
+# CONFIG_PINCTRL_ALDERLAKE is not set
+CONFIG_PINCTRL_BROXTON=m
+CONFIG_PINCTRL_CANNONLAKE=m
+CONFIG_PINCTRL_CEDARFORK=m
+CONFIG_PINCTRL_DENVERTON=m
+# CONFIG_PINCTRL_ELKHARTLAKE is not set
+# CONFIG_PINCTRL_EMMITSBURG is not set
+CONFIG_PINCTRL_GEMINILAKE=m
+CONFIG_PINCTRL_ICELAKE=m
+# CONFIG_PINCTRL_JASPERLAKE is not set
+# CONFIG_PINCTRL_LAKEFIELD is not set
+CONFIG_PINCTRL_LEWISBURG=m
+# CONFIG_PINCTRL_METEORLAKE is not set
+CONFIG_PINCTRL_SUNRISEPOINT=m
+# CONFIG_PINCTRL_TIGERLAKE is not set
+# end of Intel pinctrl drivers
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_FASTPATH_LIMIT=512
+CONFIG_GPIO_ACPI=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_CDEV_V1=y
+CONFIG_GPIO_GENERIC=m
+
+#
+# Memory mapped GPIO drivers
+#
+CONFIG_GPIO_AMDPT=m
+# CONFIG_GPIO_DWAPB is not set
+# CONFIG_GPIO_EXAR is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+CONFIG_GPIO_ICH=m
+# CONFIG_GPIO_MB86S7X is not set
+# CONFIG_GPIO_AMD_FCH is not set
+# end of Memory mapped GPIO drivers
+
+#
+# Port-mapped I/O GPIO drivers
+#
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_F7188X is not set
+# CONFIG_GPIO_IT87 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_SCH311X is not set
+# CONFIG_GPIO_WINBOND is not set
+# CONFIG_GPIO_WS16C48 is not set
+# end of Port-mapped I/O GPIO drivers
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_FXL6408 is not set
+# CONFIG_GPIO_DS4520 is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCA9570 is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TPIC2810 is not set
+# end of I2C GPIO expanders
+
+#
+# MFD GPIO expanders
+#
+# CONFIG_GPIO_ELKHARTLAKE is not set
+# end of MFD GPIO expanders
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_AMD8111 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_PCI_IDIO_16 is not set
+# CONFIG_GPIO_PCIE_IDIO_24 is not set
+# CONFIG_GPIO_RDC321X is not set
+# end of PCI GPIO expanders
+
+#
+# USB GPIO expanders
+#
+CONFIG_GPIO_VIPERBOARD=m
+# end of USB GPIO expanders
+
+#
+# Virtual GPIO drivers
+#
+# CONFIG_GPIO_AGGREGATOR is not set
+# CONFIG_GPIO_LATCH is not set
+# CONFIG_GPIO_MOCKUP is not set
+# CONFIG_GPIO_VIRTIO is not set
+# CONFIG_GPIO_SIM is not set
+# end of Virtual GPIO drivers
+
+# CONFIG_W1 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_RESTART is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_POWER_SUPPLY_HWMON=y
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_IP5XXX_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_MANAGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_LT3651 is not set
+# CONFIG_CHARGER_LTC4162L is not set
+# CONFIG_CHARGER_MAX77976 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ2515X is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_BQ25980 is not set
+# CONFIG_CHARGER_BQ256XX is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_RT9455 is not set
+# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=m
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+CONFIG_SENSORS_ABITUGURU=m
+CONFIG_SENSORS_ABITUGURU3=m
+CONFIG_SENSORS_AD7414=m
+CONFIG_SENSORS_AD7418=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+# CONFIG_SENSORS_ADM1177 is not set
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7X10=m
+CONFIG_SENSORS_ADT7410=m
+CONFIG_SENSORS_ADT7411=m
+CONFIG_SENSORS_ADT7462=m
+CONFIG_SENSORS_ADT7470=m
+CONFIG_SENSORS_ADT7475=m
+# CONFIG_SENSORS_AHT10 is not set
+# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set
+# CONFIG_SENSORS_AS370 is not set
+CONFIG_SENSORS_ASC7621=m
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+CONFIG_SENSORS_K8TEMP=m
+CONFIG_SENSORS_K10TEMP=m
+CONFIG_SENSORS_FAM15H_POWER=m
+CONFIG_SENSORS_APPLESMC=m
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_ATXP1=m
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_CORSAIR_PSU is not set
+# CONFIG_SENSORS_DRIVETEMP is not set
+CONFIG_SENSORS_DS620=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_DELL_SMM=m
+# CONFIG_I8K is not set
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_F75375S=m
+CONFIG_SENSORS_FSCHMD=m
+# CONFIG_SENSORS_FTSTEUTATES is not set
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_G760A=m
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_HS3001 is not set
+CONFIG_SENSORS_IBMAEM=m
+CONFIG_SENSORS_IBMPEX=m
+# CONFIG_SENSORS_IIO_HWMON is not set
+CONFIG_SENSORS_I5500=m
+CONFIG_SENSORS_CORETEMP=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_JC42=m
+# CONFIG_SENSORS_POWR1220 is not set
+CONFIG_SENSORS_LINEAGE=m
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC2992 is not set
+CONFIG_SENSORS_LTC4151=m
+CONFIG_SENSORS_LTC4215=m
+# CONFIG_SENSORS_LTC4222 is not set
+CONFIG_SENSORS_LTC4245=m
+# CONFIG_SENSORS_LTC4260 is not set
+CONFIG_SENSORS_LTC4261=m
+# CONFIG_SENSORS_MAX127 is not set
+CONFIG_SENSORS_MAX16065=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_MAX31827 is not set
+# CONFIG_SENSORS_MAX6620 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+CONFIG_SENSORS_MAX6639=m
+CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_MC34VR500 is not set
+CONFIG_SENSORS_MCP3021=m
+# CONFIG_SENSORS_MLXREG_FAN is not set
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_TPS23861 is not set
+# CONFIG_SENSORS_MR75203 is not set
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LM95234=m
+CONFIG_SENSORS_LM95241=m
+CONFIG_SENSORS_LM95245=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_NTC_THERMISTOR=m
+# CONFIG_SENSORS_NCT6683 is not set
+CONFIG_SENSORS_NCT6775_CORE=m
+CONFIG_SENSORS_NCT6775=m
+# CONFIG_SENSORS_NCT6775_I2C is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NCT7904 is not set
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_NZXT_KRAKEN2 is not set
+# CONFIG_SENSORS_NZXT_SMART2 is not set
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_OXP is not set
+CONFIG_SENSORS_PCF8591=m
+CONFIG_PMBUS=m
+CONFIG_SENSORS_PMBUS=m
+# CONFIG_SENSORS_ACBEL_FSG032 is not set
+# CONFIG_SENSORS_ADM1266 is not set
+CONFIG_SENSORS_ADM1275=m
+# CONFIG_SENSORS_BEL_PFE is not set
+# CONFIG_SENSORS_BPA_RS600 is not set
+# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set
+# CONFIG_SENSORS_FSP_3Y is not set
+# CONFIG_SENSORS_IBM_CFFPS is not set
+# CONFIG_SENSORS_DPS920AB is not set
+# CONFIG_SENSORS_INSPUR_IPSPS is not set
+# CONFIG_SENSORS_IR35221 is not set
+# CONFIG_SENSORS_IR36021 is not set
+# CONFIG_SENSORS_IR38064 is not set
+# CONFIG_SENSORS_IRPS5401 is not set
+# CONFIG_SENSORS_ISL68137 is not set
+CONFIG_SENSORS_LM25066=m
+# CONFIG_SENSORS_LT7182S is not set
+CONFIG_SENSORS_LTC2978=m
+# CONFIG_SENSORS_LTC3815 is not set
+# CONFIG_SENSORS_MAX15301 is not set
+CONFIG_SENSORS_MAX16064=m
+# CONFIG_SENSORS_MAX16601 is not set
+# CONFIG_SENSORS_MAX20730 is not set
+# CONFIG_SENSORS_MAX20751 is not set
+# CONFIG_SENSORS_MAX31785 is not set
+CONFIG_SENSORS_MAX34440=m
+CONFIG_SENSORS_MAX8688=m
+# CONFIG_SENSORS_MP2888 is not set
+# CONFIG_SENSORS_MP2975 is not set
+# CONFIG_SENSORS_MP5023 is not set
+# CONFIG_SENSORS_MPQ7932 is not set
+# CONFIG_SENSORS_PIM4328 is not set
+# CONFIG_SENSORS_PLI1209BC is not set
+# CONFIG_SENSORS_PM6764TR is not set
+# CONFIG_SENSORS_PXE1610 is not set
+# CONFIG_SENSORS_Q54SJ108A2 is not set
+# CONFIG_SENSORS_STPDDC60 is not set
+# CONFIG_SENSORS_TDA38640 is not set
+# CONFIG_SENSORS_TPS40422 is not set
+# CONFIG_SENSORS_TPS53679 is not set
+# CONFIG_SENSORS_TPS546D24 is not set
+CONFIG_SENSORS_UCD9000=m
+CONFIG_SENSORS_UCD9200=m
+# CONFIG_SENSORS_XDPE152 is not set
+# CONFIG_SENSORS_XDPE122 is not set
+CONFIG_SENSORS_ZL6100=m
+# CONFIG_SENSORS_SBTSI is not set
+# CONFIG_SENSORS_SBRMI is not set
+CONFIG_SENSORS_SHT15=m
+CONFIG_SENSORS_SHT21=m
+# CONFIG_SENSORS_SHT3x is not set
+# CONFIG_SENSORS_SHT4x is not set
+# CONFIG_SENSORS_SHTC1 is not set
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_DME1737=m
+CONFIG_SENSORS_EMC1403=m
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC2305 is not set
+CONFIG_SENSORS_EMC6W201=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_SMSC47M192=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_SCH56XX_COMMON=m
+CONFIG_SENSORS_SCH5627=m
+CONFIG_SENSORS_SCH5636=m
+# CONFIG_SENSORS_STTS751 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+CONFIG_SENSORS_ADS7828=m
+CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_INA209=m
+CONFIG_SENSORS_INA2XX=m
+# CONFIG_SENSORS_INA238 is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_SENSORS_TC74 is not set
+CONFIG_SENSORS_THMC50=m
+CONFIG_SENSORS_TMP102=m
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP108 is not set
+CONFIG_SENSORS_TMP401=m
+CONFIG_SENSORS_TMP421=m
+# CONFIG_SENSORS_TMP464 is not set
+# CONFIG_SENSORS_TMP513 is not set
+CONFIG_SENSORS_VIA_CPUTEMP=m
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+# CONFIG_SENSORS_W83773G is not set
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83795=m
+# CONFIG_SENSORS_W83795_FANCTRL is not set
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83L786NG=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_SENSORS_W83627EHF=m
+# CONFIG_SENSORS_XGENE is not set
+
+#
+# ACPI drivers
+#
+CONFIG_SENSORS_ACPI_POWER=m
+CONFIG_SENSORS_ATK0110=m
+# CONFIG_SENSORS_ASUS_WMI is not set
+# CONFIG_SENSORS_ASUS_EC is not set
+# CONFIG_SENSORS_HP_WMI is not set
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_NETLINK is not set
+# CONFIG_THERMAL_STATISTICS is not set
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_ACPI=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+# CONFIG_THERMAL_EMULATION is not set
+
+#
+# Intel thermal drivers
+#
+CONFIG_INTEL_POWERCLAMP=m
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_INTEL_TCC=y
+CONFIG_X86_PKG_TEMP_THERMAL=m
+CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
+# CONFIG_INTEL_SOC_DTS_THERMAL is not set
+
+#
+# ACPI INT340X thermal drivers
+#
+CONFIG_INT340X_THERMAL=m
+CONFIG_ACPI_THERMAL_REL=m
+# CONFIG_INT3406_THERMAL is not set
+CONFIG_PROC_THERMAL_MMIO_RAPL=m
+# end of ACPI INT340X thermal drivers
+
+CONFIG_INTEL_PCH_THERMAL=m
+# CONFIG_INTEL_TCC_COOLING is not set
+# CONFIG_INTEL_HFI_THERMAL is not set
+# end of Intel thermal drivers
+
+# CONFIG_GENERIC_ADC_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
+CONFIG_WATCHDOG_OPEN_TIMEOUT=0
+CONFIG_WATCHDOG_SYSFS=y
+# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set
+
+#
+# Watchdog Pretimeout Governors
+#
+# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_WDAT_WDT=m
+# CONFIG_XILINX_WATCHDOG is not set
+# CONFIG_ZIIRAVE_WATCHDOG is not set
+# CONFIG_MLX_WDT is not set
+# CONFIG_CADENCE_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ADVANTECH_EC_WDT is not set
+CONFIG_ALIM1535_WDT=m
+CONFIG_ALIM7101_WDT=m
+# CONFIG_EBC_C384_WDT is not set
+# CONFIG_EXAR_WDT is not set
+CONFIG_F71808E_WDT=m
+CONFIG_SP5100_TCO=m
+CONFIG_SBC_FITPC2_WATCHDOG=m
+# CONFIG_EUROTECH_WDT is not set
+CONFIG_IB700_WDT=m
+CONFIG_IBMASR=m
+# CONFIG_WAFER_WDT is not set
+CONFIG_I6300ESB_WDT=m
+CONFIG_IE6XX_WDT=m
+CONFIG_ITCO_WDT=m
+CONFIG_ITCO_VENDOR_SUPPORT=y
+CONFIG_IT8712F_WDT=m
+CONFIG_IT87_WDT=m
+CONFIG_HP_WATCHDOG=m
+CONFIG_HPWDT_NMI_DECODING=y
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+CONFIG_NV_TCO=m
+# CONFIG_60XX_WDT is not set
+# CONFIG_CPU5_WDT is not set
+CONFIG_SMSC_SCH311X_WDT=m
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_TQMX86_WDT is not set
+CONFIG_VIA_WDT=m
+CONFIG_W83627HF_WDT=m
+CONFIG_W83877F_WDT=m
+CONFIG_W83977F_WDT=m
+CONFIG_MACHZ_WDT=m
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+CONFIG_INTEL_MEI_WDT=m
+# CONFIG_NI903X_WDT is not set
+# CONFIG_NIC7018_WDT is not set
+# CONFIG_MEN_A21_WDT is not set
+CONFIG_XEN_WDT=m
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
+CONFIG_BCMA_HOST_PCI=y
+# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_BCMA_DRIVER_PCI=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
+# CONFIG_BCMA_DEBUG is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_SMPRO is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_CS42L43_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
+CONFIG_LPC_ICH=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_INTEL_LPSS=m
+CONFIG_MFD_INTEL_LPSS_ACPI=m
+CONFIG_MFD_INTEL_LPSS_PCI=m
+# CONFIG_MFD_INTEL_PMC_BXT is not set
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+CONFIG_MFD_VIPERBOARD=m
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_SY7636A is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT4831 is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
+# CONFIG_MFD_SI476X_CORE is not set
+CONFIG_MFD_SM501=m
+CONFIG_MFD_SM501_GPIO=y
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS6594_I2C is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TQMX86 is not set
+CONFIG_MFD_VX855=m
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ATC260X_I2C is not set
+# end of Multifunction device drivers
+
+# CONFIG_REGULATOR is not set
+CONFIG_RC_CORE=m
+# CONFIG_LIRC is not set
+CONFIG_RC_MAP=m
+CONFIG_RC_DECODERS=y
+CONFIG_IR_IMON_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+# CONFIG_IR_RCMM_DECODER is not set
+CONFIG_IR_SANYO_DECODER=m
+# CONFIG_IR_SHARP_DECODER is not set
+CONFIG_IR_SONY_DECODER=m
+# CONFIG_IR_XMP_DECODER is not set
+CONFIG_RC_DEVICES=y
+CONFIG_IR_ENE=m
+CONFIG_IR_FINTEK=m
+# CONFIG_IR_IGORPLUGUSB is not set
+CONFIG_IR_IGUANA=m
+CONFIG_IR_IMON=m
+CONFIG_IR_IMON_RAW=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_NUVOTON=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_SERIAL=m
+# CONFIG_IR_SERIAL_TRANSMITTER is not set
+CONFIG_IR_STREAMZAP=m
+# CONFIG_IR_TOY is not set
+CONFIG_IR_TTUSBIR=m
+CONFIG_IR_WINBOND_CIR=m
+CONFIG_RC_ATI_REMOTE=m
+# CONFIG_RC_LOOPBACK is not set
+# CONFIG_RC_XBOX_DVD is not set
+CONFIG_CEC_CORE=m
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_RC is not set
+CONFIG_MEDIA_CEC_SUPPORT=y
+# CONFIG_CEC_CH7322 is not set
+# CONFIG_CEC_GPIO is not set
+# CONFIG_CEC_SECO is not set
+CONFIG_USB_PULSE8_CEC=m
+CONFIG_USB_RAINSHADOW_CEC=m
+# end of CEC support
+
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_SUPPORT_FILTER=y
+CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
+
+#
+# Media device types
+#
+# CONFIG_MEDIA_CAMERA_SUPPORT is not set
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_PLATFORM_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+# end of Media device types
+
+#
+# Media drivers
+#
+
+#
+# Drivers filtered as selected at 'Filter media drivers'
+#
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_MEDIA_PCI_SUPPORT=y
+# CONFIG_IPU_BRIDGE is not set
+# end of Media drivers
+
+CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y
+
+#
+# Media ancillary drivers
+#
+# end of Media ancillary drivers
+
+#
+# Graphics support
+#
+CONFIG_APERTURE_HELPERS=y
+CONFIG_VIDEO_CMDLINE=y
+CONFIG_VIDEO_NOMODESET=y
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_PANEL is not set
+# CONFIG_AGP is not set
+CONFIG_INTEL_GTT=m
+CONFIG_VGA_SWITCHEROO=y
+CONFIG_DRM=m
+CONFIG_DRM_MIPI_DSI=y
+CONFIG_DRM_KMS_HELPER=m
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_DISPLAY_HELPER=m
+CONFIG_DRM_DISPLAY_DP_HELPER=y
+CONFIG_DRM_DISPLAY_HDCP_HELPER=y
+CONFIG_DRM_DISPLAY_HDMI_HELPER=y
+# CONFIG_DRM_DP_AUX_CHARDEV is not set
+# CONFIG_DRM_DP_CEC is not set
+CONFIG_DRM_TTM=m
+CONFIG_DRM_EXEC=m
+CONFIG_DRM_BUDDY=m
+CONFIG_DRM_VRAM_HELPER=m
+CONFIG_DRM_TTM_HELPER=m
+CONFIG_DRM_GEM_SHMEM_HELPER=m
+CONFIG_DRM_SUBALLOC_HELPER=m
+CONFIG_DRM_SCHED=m
+
+#
+# I2C encoder or helper chips
+#
+CONFIG_DRM_I2C_CH7006=m
+CONFIG_DRM_I2C_SIL164=m
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
+# end of I2C encoder or helper chips
+
+#
+# ARM devices
+#
+# end of ARM devices
+
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+# CONFIG_DRM_AMDGPU_SI is not set
+# CONFIG_DRM_AMDGPU_CIK is not set
+# CONFIG_DRM_AMDGPU_USERPTR is not set
+
+#
+# ACP (Audio CoProcessor) Configuration
+#
+# CONFIG_DRM_AMD_ACP is not set
+# end of ACP (Audio CoProcessor) Configuration
+
+#
+# Display Engine Configuration
+#
+CONFIG_DRM_AMD_DC=y
+CONFIG_DRM_AMD_DC_FP=y
+# CONFIG_DEBUG_KERNEL_DC is not set
+# CONFIG_DRM_AMD_SECURE_DISPLAY is not set
+# end of Display Engine Configuration
+
+# CONFIG_HSA_AMD is not set
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+# CONFIG_NOUVEAU_DEBUG_MMU is not set
+# CONFIG_NOUVEAU_DEBUG_PUSH is not set
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+CONFIG_DRM_I915=m
+CONFIG_DRM_I915_FORCE_PROBE=""
+CONFIG_DRM_I915_CAPTURE_ERROR=y
+CONFIG_DRM_I915_COMPRESS_ERROR=y
+CONFIG_DRM_I915_USERPTR=y
+CONFIG_DRM_I915_GVT_KVMGT=m
+CONFIG_DRM_I915_REQUEST_TIMEOUT=20000
+CONFIG_DRM_I915_FENCE_TIMEOUT=10000
+CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
+CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
+CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
+CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500
+CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
+CONFIG_DRM_I915_STOP_TIMEOUT=100
+CONFIG_DRM_I915_TIMESLICE_DURATION=1
+CONFIG_DRM_I915_GVT=y
+# CONFIG_DRM_VGEM is not set
+CONFIG_DRM_VKMS=m
+CONFIG_DRM_VMWGFX=m
+# CONFIG_DRM_VMWGFX_MKSSTATS is not set
+CONFIG_DRM_GMA500=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=m
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_VIRTIO_GPU_KMS=y
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
+# end of Display Interface Bridges
+
+# CONFIG_DRM_LOONGSON is not set
+# CONFIG_DRM_ETNAVIV is not set
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+# CONFIG_DRM_GM12U320 is not set
+# CONFIG_DRM_SIMPLEDRM is not set
+# CONFIG_DRM_XEN_FRONTEND is not set
+# CONFIG_DRM_VBOXVIDEO is not set
+# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
+# CONFIG_DRM_HYPERV is not set
+# CONFIG_DRM_LEGACY is not set
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+CONFIG_DRM_PRIVACY_SCREEN=y
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+CONFIG_FB_VESA=y
+CONFIG_FB_EFI=y
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_XEN_FBDEV_FRONTEND is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+CONFIG_FB_HYPERV=m
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_FB_CORE=y
+CONFIG_FB_NOTIFY=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_DEVICE=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
+# CONFIG_FB_MODE_HELPERS is not set
+CONFIG_FB_TILEBLITTING=y
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
+CONFIG_LCD_CLASS_DEVICE=m
+CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+# CONFIG_BACKLIGHT_PWM is not set
+CONFIG_BACKLIGHT_APPLE=m
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630A is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+CONFIG_BACKLIGHT_LP855X=m
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# end of Backlight & LCD device support
+
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
+# end of Console display driver support
+
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+# end of Graphics support
+
+# CONFIG_DRM_ACCEL is not set
+CONFIG_SOUND=m
+# CONFIG_SND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=m
+# CONFIG_HID_ACCUTOUCH is not set
+CONFIG_HID_ACRUX=m
+# CONFIG_HID_ACRUX_FF is not set
+CONFIG_HID_APPLE=m
+CONFIG_HID_APPLEIR=m
+CONFIG_HID_ASUS=m
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BETOP_FF=m
+# CONFIG_HID_BIGBEN_FF is not set
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CORSAIR=m
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
+CONFIG_HID_CMEDIA=m
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CREATIVE_SB0540 is not set
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
+CONFIG_HID_ELAN=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_ELO=m
+# CONFIG_HID_EVISION is not set
+CONFIG_HID_EZKEY=m
+# CONFIG_HID_FT260 is not set
+CONFIG_HID_GEMBIRD=m
+CONFIG_HID_GFRM=m
+# CONFIG_HID_GLORIOUS is not set
+CONFIG_HID_HOLTEK=m
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
+CONFIG_HID_GT683R=m
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
+CONFIG_HID_GYRATION=m
+CONFIG_HID_ICADE=m
+CONFIG_HID_ITE=m
+CONFIG_HID_JABRA=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
+CONFIG_HID_LENOVO=m
+# CONFIG_HID_LETSKETCH is not set
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
+# CONFIG_LOGIWHEELS_FF is not set
+CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
+# CONFIG_HID_REDRAGON is not set
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+# CONFIG_HID_NINTENDO is not set
+CONFIG_HID_NTI=m
+CONFIG_HID_NTRIG=y
+# CONFIG_HID_NVIDIA_SHIELD is not set
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PENMOUNT=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LCD=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_PICOLCD_CIR=y
+CONFIG_HID_PLANTRONICS=m
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
+CONFIG_HID_PRIMAX=m
+# CONFIG_HID_RETRODE is not set
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
+CONFIG_HID_SONY=m
+CONFIG_SONY_FF=y
+CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEAM is not set
+CONFIG_HID_STEELSERIES=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_RMI=m
+CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_HYPERV_MOUSE=m
+CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+# CONFIG_HID_TOPRE is not set
+CONFIG_HID_THINGM=m
+CONFIG_HID_THRUSTMASTER=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_U2FZERO is not set
+CONFIG_HID_WACOM=m
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_XINMO=m
+CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_ZYDACRON=m
+CONFIG_HID_SENSOR_HUB=y
+CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
+CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2221 is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# CONFIG_HID_BPF is not set
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+# end of USB HID support
+
+CONFIG_I2C_HID=m
+# CONFIG_I2C_HID_ACPI is not set
+# CONFIG_I2C_HID_OF is not set
+
+#
+# Intel ISH HID support
+#
+CONFIG_INTEL_ISH_HID=m
+# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set
+# end of Intel ISH HID support
+
+#
+# AMD SFH HID Support
+#
+# CONFIG_AMD_SFH_HID is not set
+# end of AMD SFH HID Support
+
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_LED_TRIG=y
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_PCI=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_AUTOSUSPEND_DELAY=2
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_PCI=y
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
+# CONFIG_USB_XHCI_PLATFORM is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+# CONFIG_USB_OHCI_HCD_PLATFORM is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+# CONFIG_USB_XEN_HCD is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_SIMPLE is not set
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+# CONFIG_USB_SERIAL_F81232 is not set
+CONFIG_USB_SERIAL_F8153X=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+# CONFIG_USB_SERIAL_WISHBONE is not set
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_XR is not set
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_USS720=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_APPLEDISPLAY=m
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_EZUSB_FX2=m
+# CONFIG_USB_HUB_USB251XB is not set
+CONFIG_USB_HSIC_USB3503=m
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# end of USB Physical Layer drivers
+
+# CONFIG_USB_GADGET is not set
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+# CONFIG_TYPEC_TCPCI is not set
+CONFIG_TYPEC_FUSB302=m
+CONFIG_TYPEC_UCSI=y
+# CONFIG_UCSI_CCG is not set
+CONFIG_UCSI_ACPI=y
+# CONFIG_UCSI_STM32G0 is not set
+CONFIG_TYPEC_TPS6598X=m
+# CONFIG_TYPEC_ANX7411 is not set
+# CONFIG_TYPEC_RT1719 is not set
+# CONFIG_TYPEC_HD3SS3220 is not set
+# CONFIG_TYPEC_STUSB160X is not set
+# CONFIG_TYPEC_WUSB3801 is not set
+
+#
+# USB Type-C Multiplexer/DeMultiplexer Switch support
+#
+# CONFIG_TYPEC_MUX_FSA4480 is not set
+# CONFIG_TYPEC_MUX_GPIO_SBU is not set
+CONFIG_TYPEC_MUX_PI3USB30532=m
+# CONFIG_TYPEC_MUX_NB7VPQ904M is not set
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support
+
+#
+# USB Type-C Alternate Mode drivers
+#
+CONFIG_TYPEC_DP_ALTMODE=m
+# CONFIG_TYPEC_NVIDIA_ALTMODE is not set
+# end of USB Type-C Alternate Mode drivers
+
+CONFIG_USB_ROLE_SWITCH=y
+CONFIG_USB_ROLES_INTEL_XHCI=y
+CONFIG_MMC=m
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
+CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_TIFM_SD=m
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+# CONFIG_MMC_USDHI6ROL0 is not set
+CONFIG_MMC_REALTEK_PCI=m
+CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_CQHCI=m
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
+# CONFIG_MMC_SDHCI_XENON is not set
+# CONFIG_SCSI_UFSHCD is not set
+CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
+CONFIG_MSPRO_BLOCK=m
+# CONFIG_MS_BLOCK is not set
+
+#
+# MemoryStick Host Controller Drivers
+#
+CONFIG_MEMSTICK_TIFM_MS=m
+CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_MEMSTICK_R592=m
+CONFIG_MEMSTICK_REALTEK_PCI=m
+CONFIG_MEMSTICK_REALTEK_USB=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_APU is not set
+# CONFIG_LEDS_AW200XX is not set
+CONFIG_LEDS_LM3530=m
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_LP3944=m
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP50XX is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_PCA995X is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+CONFIG_LEDS_INTEL_SS4200=m
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+CONFIG_LEDS_BLINKM=m
+CONFIG_LEDS_MLXCPLD=m
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_NIC78BX is not set
+
+#
+# Flash and Torch LED drivers
+#
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+CONFIG_LEDS_TRIGGER_DISK=y
+# CONFIG_LEDS_TRIGGER_MTD is not set
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
+CONFIG_LEDS_TRIGGER_AUDIO=m
+# CONFIG_LEDS_TRIGGER_TTY is not set
+
+#
+# Simple LED drivers
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_USER_MEM=y
+CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
+CONFIG_INFINIBAND_ADDR_TRANS=y
+CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
+CONFIG_INFINIBAND_VIRT_DMA=y
+CONFIG_INFINIBAND_BNXT_RE=m
+CONFIG_INFINIBAND_CXGB4=m
+# CONFIG_INFINIBAND_EFA is not set
+CONFIG_INFINIBAND_ERDMA=m
+CONFIG_INFINIBAND_HFI1=m
+# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
+# CONFIG_SDMA_VERBOSITY is not set
+# CONFIG_INFINIBAND_IRDMA is not set
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
+# CONFIG_INFINIBAND_MTHCA is not set
+# CONFIG_INFINIBAND_OCRDMA is not set
+CONFIG_INFINIBAND_QEDR=m
+# CONFIG_INFINIBAND_QIB is not set
+CONFIG_INFINIBAND_USNIC=m
+CONFIG_INFINIBAND_VMWARE_PVRDMA=m
+CONFIG_INFINIBAND_RDMAVT=m
+CONFIG_RDMA_RXE=m
+CONFIG_RDMA_SIW=m
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_SRPT=m
+CONFIG_INFINIBAND_ISER=m
+CONFIG_INFINIBAND_ISERT=m
+# CONFIG_INFINIBAND_RTRS_CLIENT is not set
+# CONFIG_INFINIBAND_RTRS_SERVER is not set
+CONFIG_INFINIBAND_OPA_VNIC=m
+CONFIG_EDAC_ATOMIC_SCRUB=y
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_DECODE_MCE=m
+CONFIG_EDAC_GHES=y
+CONFIG_EDAC_AMD64=m
+CONFIG_EDAC_E752X=m
+CONFIG_EDAC_I82975X=m
+CONFIG_EDAC_I3000=m
+CONFIG_EDAC_I3200=m
+CONFIG_EDAC_IE31200=m
+CONFIG_EDAC_X38=m
+CONFIG_EDAC_I5400=m
+CONFIG_EDAC_I7CORE=m
+CONFIG_EDAC_I5100=m
+CONFIG_EDAC_I7300=m
+CONFIG_EDAC_SBRIDGE=m
+CONFIG_EDAC_SKX=m
+CONFIG_EDAC_I10NM=m
+CONFIG_EDAC_PND2=m
+# CONFIG_EDAC_IGEN6 is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_MC146818_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_SYSTOHC is not set
+# CONFIG_RTC_DEBUG is not set
+CONFIG_RTC_NVMEM=y
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_ABB5ZES3 is not set
+# CONFIG_RTC_DRV_ABEOZ9 is not set
+# CONFIG_RTC_DRV_ABX80X is not set
+CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1307_CENTURY is not set
+CONFIG_RTC_DRV_DS1374=m
+# CONFIG_RTC_DRV_DS1374_WDT is not set
+CONFIG_RTC_DRV_DS1672=m
+CONFIG_RTC_DRV_MAX6900=m
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_ISL12022=m
+CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8523=m
+# CONFIG_RTC_DRV_PCF85063 is not set
+# CONFIG_RTC_DRV_PCF85363 is not set
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_M41T80_WDT=y
+CONFIG_RTC_DRV_BQ32K=m
+# CONFIG_RTC_DRV_S35390A is not set
+CONFIG_RTC_DRV_FM3130=m
+# CONFIG_RTC_DRV_RX8010 is not set
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RX8025=m
+CONFIG_RTC_DRV_EM3027=m
+# CONFIG_RTC_DRV_RV3028 is not set
+# CONFIG_RTC_DRV_RV3032 is not set
+# CONFIG_RTC_DRV_RV8803 is not set
+# CONFIG_RTC_DRV_SD3078 is not set
+
+#
+# SPI RTC drivers
+#
+CONFIG_RTC_I2C_AND_SPI=m
+
+#
+# SPI and I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_DS3232_HWMON=y
+# CONFIG_RTC_DRV_PCF2127 is not set
+CONFIG_RTC_DRV_RV3029C2=m
+# CONFIG_RTC_DRV_RV3029_HWMON is not set
+# CONFIG_RTC_DRV_RX6110 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1286=m
+CONFIG_RTC_DRV_DS1511=m
+CONFIG_RTC_DRV_DS1553=m
+# CONFIG_RTC_DRV_DS1685_FAMILY is not set
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_DS2404=m
+CONFIG_RTC_DRV_STK17TA8=m
+# CONFIG_RTC_DRV_M48T86 is not set
+CONFIG_RTC_DRV_M48T35=m
+CONFIG_RTC_DRV_M48T59=m
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_FTRTC010 is not set
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_RTC_DRV_GOLDFISH is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_VIRTUAL_CHANNELS=y
+CONFIG_DMA_ACPI=y
+# CONFIG_ALTERA_MSGDMA is not set
+CONFIG_INTEL_IDMA64=m
+CONFIG_INTEL_IDXD_BUS=m
+CONFIG_INTEL_IDXD=m
+# CONFIG_INTEL_IDXD_COMPAT is not set
+CONFIG_INTEL_IDXD_SVM=y
+# CONFIG_INTEL_IDXD_PERFMON is not set
+CONFIG_INTEL_IOATDMA=m
+# CONFIG_PLX_DMA is not set
+# CONFIG_XILINX_DMA is not set
+# CONFIG_XILINX_XDMA is not set
+CONFIG_AMD_PTDMA=m
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
+CONFIG_DW_DMAC_CORE=y
+CONFIG_DW_DMAC=m
+CONFIG_DW_DMAC_PCI=y
+# CONFIG_DW_EDMA is not set
+CONFIG_HSU_DMA=y
+# CONFIG_SF_PDMA is not set
+# CONFIG_INTEL_LDMA is not set
+
+#
+# DMA Clients
+#
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_DMATEST=m
+CONFIG_DMA_ENGINE_RAID=y
+
+#
+# DMABUF options
+#
+CONFIG_SYNC_FILE=y
+# CONFIG_SW_SYNC is not set
+# CONFIG_UDMABUF is not set
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
+# CONFIG_DMABUF_DEBUG is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_DMABUF_HEAPS is not set
+# CONFIG_DMABUF_SYSFS_STATS is not set
+# end of DMABUF options
+
+CONFIG_DCA=m
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_UIO_PDRV_GENIRQ=m
+# CONFIG_UIO_DMEM_GENIRQ is not set
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_UIO_NETX is not set
+# CONFIG_UIO_PRUSS is not set
+# CONFIG_UIO_MF624 is not set
+CONFIG_UIO_HV_GENERIC=m
+CONFIG_VFIO=m
+CONFIG_VFIO_GROUP=y
+CONFIG_VFIO_CONTAINER=y
+CONFIG_VFIO_IOMMU_TYPE1=m
+CONFIG_VFIO_NOIOMMU=y
+CONFIG_VFIO_VIRQFD=y
+
+#
+# VFIO support for PCI devices
+#
+CONFIG_VFIO_PCI_CORE=m
+CONFIG_VFIO_PCI_MMAP=y
+CONFIG_VFIO_PCI_INTX=y
+CONFIG_VFIO_PCI=m
+# CONFIG_VFIO_PCI_VGA is not set
+# CONFIG_VFIO_PCI_IGD is not set
+# CONFIG_MLX5_VFIO_PCI is not set
+# end of VFIO support for PCI devices
+
+CONFIG_VFIO_MDEV=m
+CONFIG_IRQ_BYPASS_MANAGER=m
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VMGENID=y
+# CONFIG_VBOXGUEST is not set
+# CONFIG_NITRO_ENCLAVES is not set
+CONFIG_EFI_SECRET=m
+CONFIG_SEV_GUEST=m
+CONFIG_TDX_GUEST_DRIVER=m
+CONFIG_VIRTIO_ANCHOR=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI_LIB=y
+CONFIG_VIRTIO_PCI_LIB_LEGACY=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_PMEM=m
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_MEM=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
+# CONFIG_VDPA is not set
+CONFIG_VHOST_IOTLB=m
+CONFIG_VHOST_TASK=y
+CONFIG_VHOST=m
+CONFIG_VHOST_MENU=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_HYPERV=m
+# CONFIG_HYPERV_VTL_MODE is not set
+CONFIG_HYPERV_TIMER=y
+CONFIG_HYPERV_UTILS=m
+CONFIG_HYPERV_BALLOON=m
+# end of Microsoft Hyper-V guest support
+
+#
+# Xen driver support
+#
+# CONFIG_XEN_BALLOON is not set
+CONFIG_XEN_DEV_EVTCHN=m
+# CONFIG_XEN_BACKEND is not set
+CONFIG_XENFS=m
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_SYS_HYPERVISOR=y
+CONFIG_XEN_XENBUS_FRONTEND=y
+# CONFIG_XEN_GNTDEV is not set
+# CONFIG_XEN_GRANT_DEV_ALLOC is not set
+# CONFIG_XEN_GRANT_DMA_ALLOC is not set
+# CONFIG_XEN_PVCALLS_FRONTEND is not set
+CONFIG_XEN_PRIVCMD=m
+CONFIG_XEN_EFI=y
+CONFIG_XEN_AUTO_XLATE=y
+CONFIG_XEN_ACPI=y
+# CONFIG_XEN_UNPOPULATED_ALLOC is not set
+# CONFIG_XEN_VIRTIO is not set
+# end of Xen driver support
+
+# CONFIG_GREYBUS is not set
+# CONFIG_COMEDI is not set
+# CONFIG_STAGING is not set
+# CONFIG_CHROME_PLATFORMS is not set
+CONFIG_MELLANOX_PLATFORM=y
+CONFIG_MLXREG_HOTPLUG=m
+# CONFIG_MLXREG_IO is not set
+# CONFIG_MLXREG_LC is not set
+# CONFIG_NVSW_SN2201 is not set
+CONFIG_SURFACE_PLATFORMS=y
+# CONFIG_SURFACE_3_POWER_OPREGION is not set
+# CONFIG_SURFACE_GPE is not set
+# CONFIG_SURFACE_HOTPLUG is not set
+# CONFIG_SURFACE_PRO3_BUTTON is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+CONFIG_ACPI_WMI=m
+CONFIG_WMI_BMOF=m
+# CONFIG_HUAWEI_WMI is not set
+# CONFIG_UV_SYSFS is not set
+CONFIG_MXM_WMI=m
+# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set
+# CONFIG_XIAOMI_WMI is not set
+# CONFIG_GIGABYTE_WMI is not set
+# CONFIG_YOGABOOK is not set
+CONFIG_ACERHDF=m
+# CONFIG_ACER_WIRELESS is not set
+CONFIG_ACER_WMI=m
+# CONFIG_AMD_PMF is not set
+# CONFIG_AMD_PMC is not set
+# CONFIG_AMD_HSMP is not set
+# CONFIG_ADV_SWBUTTON is not set
+CONFIG_APPLE_GMUX=m
+CONFIG_ASUS_LAPTOP=m
+# CONFIG_ASUS_WIRELESS is not set
+CONFIG_ASUS_WMI=m
+CONFIG_ASUS_NB_WMI=m
+# CONFIG_ASUS_TF103C_DOCK is not set
+# CONFIG_MERAKI_MX100 is not set
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_EEEPC_WMI=m
+# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set
+CONFIG_AMILO_RFKILL=m
+CONFIG_FUJITSU_LAPTOP=m
+CONFIG_FUJITSU_TABLET=m
+# CONFIG_GPD_POCKET_FAN is not set
+# CONFIG_X86_PLATFORM_DRIVERS_HP is not set
+# CONFIG_WIRELESS_HOTKEY is not set
+# CONFIG_IBM_RTL is not set
+CONFIG_IDEAPAD_LAPTOP=m
+# CONFIG_LENOVO_YMC is not set
+CONFIG_SENSORS_HDAPS=m
+CONFIG_THINKPAD_ACPI=m
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
+CONFIG_THINKPAD_ACPI_VIDEO=y
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+# CONFIG_THINKPAD_LMI is not set
+# CONFIG_INTEL_ATOMISP2_PM is not set
+# CONFIG_INTEL_IFS is not set
+# CONFIG_INTEL_SAR_INT1092 is not set
+CONFIG_INTEL_PMC_CORE=m
+
+#
+# Intel Speed Select Technology interface support
+#
+CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
+# end of Intel Speed Select Technology interface support
+
+CONFIG_INTEL_WMI=y
+# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set
+CONFIG_INTEL_WMI_THUNDERBOLT=m
+
+#
+# Intel Uncore Frequency Control
+#
+# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set
+# end of Intel Uncore Frequency Control
+
+CONFIG_INTEL_HID_EVENT=m
+CONFIG_INTEL_VBTN=m
+# CONFIG_INTEL_INT0002_VGPIO is not set
+CONFIG_INTEL_OAKTRAIL=m
+# CONFIG_INTEL_ISHTP_ECLITE is not set
+# CONFIG_INTEL_PUNIT_IPC is not set
+CONFIG_INTEL_RST=m
+# CONFIG_INTEL_SMARTCONNECT is not set
+CONFIG_INTEL_TURBO_MAX_3=y
+# CONFIG_INTEL_VSEC is not set
+# CONFIG_MSI_EC is not set
+CONFIG_MSI_LAPTOP=m
+CONFIG_MSI_WMI=m
+# CONFIG_PCENGINES_APU2 is not set
+# CONFIG_BARCO_P50_GPIO is not set
+CONFIG_SAMSUNG_LAPTOP=m
+CONFIG_SAMSUNG_Q10=m
+# CONFIG_ACPI_TOSHIBA is not set
+CONFIG_TOSHIBA_BT_RFKILL=m
+# CONFIG_TOSHIBA_HAPS is not set
+# CONFIG_TOSHIBA_WMI is not set
+CONFIG_ACPI_CMPC=m
+CONFIG_COMPAL_LAPTOP=m
+# CONFIG_LG_LAPTOP is not set
+CONFIG_PANASONIC_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+CONFIG_SONYPI_COMPAT=y
+# CONFIG_SYSTEM76_ACPI is not set
+CONFIG_TOPSTAR_LAPTOP=m
+CONFIG_MLX_PLATFORM=m
+CONFIG_INTEL_IPS=m
+# CONFIG_INTEL_SCU_PCI is not set
+# CONFIG_INTEL_SCU_PLATFORM is not set
+# CONFIG_SIEMENS_SIMATIC_IPC is not set
+# CONFIG_WINMATE_FM07_KEYS is not set
+# CONFIG_SEL3350_PLATFORM is not set
+CONFIG_P2SB=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+# CONFIG_COMMON_CLK_MAX9485 is not set
+# CONFIG_COMMON_CLK_SI5341 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI544 is not set
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CS2000_CP is not set
+# CONFIG_COMMON_CLK_PWM is not set
+# CONFIG_XILINX_VCU is not set
+CONFIG_HWSPINLOCK=y
+
+#
+# Clock Source drivers
+#
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# end of Clock Source drivers
+
+CONFIG_MAILBOX=y
+CONFIG_PCC=y
+# CONFIG_ALTERA_MBOX is not set
+CONFIG_IOMMU_IOVA=y
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+CONFIG_IOMMU_IO_PGTABLE=y
+# end of Generic IOMMU Pagetable Support
+
+# CONFIG_IOMMU_DEBUGFS is not set
+# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set
+# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
+CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y
+CONFIG_IOMMU_DMA=y
+CONFIG_IOMMU_SVA=y
+CONFIG_AMD_IOMMU=y
+CONFIG_AMD_IOMMU_V2=m
+CONFIG_DMAR_TABLE=y
+CONFIG_INTEL_IOMMU=y
+CONFIG_INTEL_IOMMU_SVM=y
+# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
+CONFIG_INTEL_IOMMU_FLOPPY_WA=y
+CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
+CONFIG_INTEL_IOMMU_PERF_EVENTS=y
+# CONFIG_IOMMUFD is not set
+CONFIG_IRQ_REMAP=y
+CONFIG_HYPERV_IOMMU=y
+# CONFIG_VIRTIO_IOMMU is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_REMOTEPROC is not set
+# end of Remoteproc drivers
+
+#
+# Rpmsg drivers
+#
+# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
+# CONFIG_RPMSG_VIRTIO is not set
+# end of Rpmsg drivers
+
+# CONFIG_SOUNDWIRE is not set
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+# end of Amlogic SoC drivers
+
+#
+# Broadcom SoC drivers
+#
+# end of Broadcom SoC drivers
+
+#
+# NXP/Freescale QorIQ SoC drivers
+#
+# end of NXP/Freescale QorIQ SoC drivers
+
+#
+# fujitsu SoC drivers
+#
+# end of fujitsu SoC drivers
+
+#
+# i.MX SoC drivers
+#
+# end of i.MX SoC drivers
+
+#
+# Enable LiteX SoC Builder specific drivers
+#
+# end of Enable LiteX SoC Builder specific drivers
+
+# CONFIG_WPCM450_SOC is not set
+
+#
+# Qualcomm SoC drivers
+#
+# end of Qualcomm SoC drivers
+
+# CONFIG_SOC_TI is not set
+
+#
+# Xilinx SoC drivers
+#
+# end of Xilinx SoC drivers
+# end of SOC (System On Chip) specific Drivers
+
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+# CONFIG_IIO_BUFFER_DMA is not set
+# CONFIG_IIO_BUFFER_DMAENGINE is not set
+# CONFIG_IIO_BUFFER_HW_CONSUMER is not set
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGERED_BUFFER=m
+# CONFIG_IIO_CONFIGFS is not set
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+# CONFIG_IIO_SW_DEVICE is not set
+# CONFIG_IIO_SW_TRIGGER is not set
+# CONFIG_IIO_TRIGGERED_EVENT is not set
+
+#
+# Accelerometers
+#
+# CONFIG_ADXL313_I2C is not set
+# CONFIG_ADXL345_I2C is not set
+# CONFIG_ADXL355_I2C is not set
+# CONFIG_ADXL367_I2C is not set
+# CONFIG_ADXL372_I2C is not set
+# CONFIG_BMA180 is not set
+# CONFIG_BMA400 is not set
+# CONFIG_BMC150_ACCEL is not set
+# CONFIG_DA280 is not set
+# CONFIG_DA311 is not set
+# CONFIG_DMARD06 is not set
+# CONFIG_DMARD09 is not set
+# CONFIG_DMARD10 is not set
+# CONFIG_FXLS8962AF_I2C is not set
+CONFIG_HID_SENSOR_ACCEL_3D=m
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+# CONFIG_IIO_KX022A_I2C is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_KXCJK1013 is not set
+# CONFIG_MC3230 is not set
+# CONFIG_MMA7455_I2C is not set
+# CONFIG_MMA7660 is not set
+# CONFIG_MMA8452 is not set
+# CONFIG_MMA9551 is not set
+# CONFIG_MMA9553 is not set
+# CONFIG_MSA311 is not set
+# CONFIG_MXC4005 is not set
+# CONFIG_MXC6255 is not set
+# CONFIG_STK8312 is not set
+# CONFIG_STK8BA50 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7091R5 is not set
+# CONFIG_AD7291 is not set
+# CONFIG_AD7606_IFACE_PARALLEL is not set
+# CONFIG_AD799X is not set
+# CONFIG_ENVELOPE_DETECTOR is not set
+# CONFIG_HX711 is not set
+# CONFIG_INA2XX_ADC is not set
+# CONFIG_LTC2471 is not set
+# CONFIG_LTC2485 is not set
+# CONFIG_LTC2497 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_MAX9611 is not set
+# CONFIG_MCP3422 is not set
+# CONFIG_NAU7802 is not set
+# CONFIG_RICHTEK_RTQ6056 is not set
+# CONFIG_SD_ADC_MODULATOR is not set
+# CONFIG_TI_ADC081C is not set
+# CONFIG_TI_ADS1015 is not set
+# CONFIG_TI_ADS7924 is not set
+# CONFIG_TI_ADS1100 is not set
+# CONFIG_VF610_ADC is not set
+# CONFIG_VIPERBOARD_ADC is not set
+# CONFIG_XILINX_XADC is not set
+# end of Analog to digital converters
+
+#
+# Analog to digital and digital to analog converters
+#
+# end of Analog to digital and digital to analog converters
+
+#
+# Analog Front Ends
+#
+# CONFIG_IIO_RESCALE is not set
+# end of Analog Front Ends
+
+#
+# Amplifiers
+#
+# CONFIG_HMC425 is not set
+# end of Amplifiers
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7746 is not set
+# end of Capacitance to digital converters
+
+#
+# Chemical Sensors
+#
+# CONFIG_ATLAS_PH_SENSOR is not set
+# CONFIG_ATLAS_EZO_SENSOR is not set
+# CONFIG_BME680 is not set
+# CONFIG_CCS811 is not set
+# CONFIG_IAQCORE is not set
+# CONFIG_SCD30_CORE is not set
+# CONFIG_SCD4X is not set
+# CONFIG_SENSIRION_SGP30 is not set
+# CONFIG_SENSIRION_SGP40 is not set
+# CONFIG_SPS30_I2C is not set
+# CONFIG_SENSEAIR_SUNRISE_CO2 is not set
+# CONFIG_VZ89X is not set
+# end of Chemical Sensors
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+# end of Hid Sensor IIO Common
+
+#
+# IIO SCMI Sensors
+#
+# end of IIO SCMI Sensors
+
+#
+# SSP Sensor Common
+#
+# end of SSP Sensor Common
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD5064 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5593R is not set
+# CONFIG_AD5696_I2C is not set
+# CONFIG_DPOT_DAC is not set
+# CONFIG_DS4424 is not set
+# CONFIG_M62332 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MAX5821 is not set
+# CONFIG_MCP4725 is not set
+# CONFIG_MCP4728 is not set
+# CONFIG_TI_DAC5571 is not set
+# CONFIG_VF610_DAC is not set
+# end of Digital to analog converters
+
+#
+# IIO dummy driver
+#
+# end of IIO dummy driver
+
+#
+# Filters
+#
+# end of Filters
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# end of Clock Generator/Distribution
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# end of Phase-Locked Loop (PLL) frequency synthesizers
+# end of Frequency Synthesizers DDS/PLL
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_BMG160 is not set
+# CONFIG_FXAS21002C is not set
+CONFIG_HID_SENSOR_GYRO_3D=m
+# CONFIG_MPU3050_I2C is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+# end of Digital gyroscope sensors
+
+#
+# Health Sensors
+#
+
+#
+# Heart Rate Monitors
+#
+# CONFIG_AFE4404 is not set
+# CONFIG_MAX30100 is not set
+# CONFIG_MAX30102 is not set
+# end of Heart Rate Monitors
+# end of Health Sensors
+
+#
+# Humidity sensors
+#
+# CONFIG_AM2315 is not set
+# CONFIG_DHT11 is not set
+# CONFIG_HDC100X is not set
+# CONFIG_HDC2010 is not set
+CONFIG_HID_SENSOR_HUMIDITY=m
+# CONFIG_HTS221 is not set
+# CONFIG_HTU21 is not set
+# CONFIG_SI7005 is not set
+# CONFIG_SI7020 is not set
+# end of Humidity sensors
+
+#
+# Inertial measurement units
+#
+# CONFIG_BMI160_I2C is not set
+# CONFIG_BOSCH_BNO055_I2C is not set
+# CONFIG_FXOS8700_I2C is not set
+# CONFIG_KMX61 is not set
+# CONFIG_INV_ICM42600_I2C is not set
+# CONFIG_INV_MPU6050_I2C is not set
+# CONFIG_IIO_ST_LSM6DSX is not set
+# CONFIG_IIO_ST_LSM9DS0 is not set
+# end of Inertial measurement units
+
+#
+# Light sensors
+#
+# CONFIG_ACPI_ALS is not set
+# CONFIG_ADJD_S311 is not set
+# CONFIG_ADUX1020 is not set
+# CONFIG_AL3010 is not set
+# CONFIG_AL3320A is not set
+# CONFIG_APDS9300 is not set
+# CONFIG_APDS9960 is not set
+# CONFIG_AS73211 is not set
+# CONFIG_BH1750 is not set
+# CONFIG_BH1780 is not set
+# CONFIG_CM32181 is not set
+# CONFIG_CM3232 is not set
+# CONFIG_CM3323 is not set
+# CONFIG_CM3605 is not set
+# CONFIG_CM36651 is not set
+# CONFIG_GP2AP002 is not set
+# CONFIG_GP2AP020A00F is not set
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_ISL29125 is not set
+CONFIG_HID_SENSOR_ALS=m
+CONFIG_HID_SENSOR_PROX=m
+# CONFIG_JSA1212 is not set
+# CONFIG_ROHM_BU27008 is not set
+# CONFIG_ROHM_BU27034 is not set
+# CONFIG_RPR0521 is not set
+# CONFIG_LTR501 is not set
+# CONFIG_LTRF216A is not set
+# CONFIG_LV0104CS is not set
+# CONFIG_MAX44000 is not set
+# CONFIG_MAX44009 is not set
+# CONFIG_NOA1305 is not set
+# CONFIG_OPT3001 is not set
+# CONFIG_OPT4001 is not set
+# CONFIG_PA12203001 is not set
+# CONFIG_SI1133 is not set
+# CONFIG_SI1145 is not set
+# CONFIG_STK3310 is not set
+# CONFIG_ST_UVIS25 is not set
+# CONFIG_TCS3414 is not set
+# CONFIG_TCS3472 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2591 is not set
+# CONFIG_TSL2772 is not set
+# CONFIG_TSL4531 is not set
+# CONFIG_US5182D is not set
+# CONFIG_VCNL4000 is not set
+# CONFIG_VCNL4035 is not set
+# CONFIG_VEML6030 is not set
+# CONFIG_VEML6070 is not set
+# CONFIG_VL6180 is not set
+# CONFIG_ZOPT2201 is not set
+# end of Light sensors
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8974 is not set
+# CONFIG_AK8975 is not set
+# CONFIG_AK09911 is not set
+# CONFIG_BMC150_MAGN_I2C is not set
+# CONFIG_MAG3110 is not set
+CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
+# CONFIG_MMC35240 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_SENSORS_HMC5843_I2C is not set
+# CONFIG_SENSORS_RM3100_I2C is not set
+# CONFIG_TI_TMAG5273 is not set
+# CONFIG_YAMAHA_YAS530 is not set
+# end of Magnetometer sensors
+
+#
+# Multiplexers
+#
+# CONFIG_IIO_MUX is not set
+# end of Multiplexers
+
+#
+# Inclinometer sensors
+#
+CONFIG_HID_SENSOR_INCLINOMETER_3D=m
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+# end of Inclinometer sensors
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_INTERRUPT_TRIGGER is not set
+# CONFIG_IIO_SYSFS_TRIGGER is not set
+# end of Triggers - standalone
+
+#
+# Linear and angular position sensors
+#
+# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set
+# end of Linear and angular position sensors
+
+#
+# Digital potentiometers
+#
+# CONFIG_AD5110 is not set
+# CONFIG_AD5272 is not set
+# CONFIG_DS1803 is not set
+# CONFIG_MAX5432 is not set
+# CONFIG_MCP4018 is not set
+# CONFIG_MCP4531 is not set
+# CONFIG_TPL0102 is not set
+# end of Digital potentiometers
+
+#
+# Digital potentiostats
+#
+# CONFIG_LMP91000 is not set
+# end of Digital potentiostats
+
+#
+# Pressure sensors
+#
+# CONFIG_ABP060MG is not set
+# CONFIG_BMP280 is not set
+# CONFIG_DLHL60D is not set
+# CONFIG_DPS310 is not set
+CONFIG_HID_SENSOR_PRESS=m
+# CONFIG_HP03 is not set
+# CONFIG_ICP10100 is not set
+# CONFIG_MPL115_I2C is not set
+# CONFIG_MPL3115 is not set
+# CONFIG_MPRLS0025PA is not set
+# CONFIG_MS5611 is not set
+# CONFIG_MS5637 is not set
+# CONFIG_IIO_ST_PRESS is not set
+# CONFIG_T5403 is not set
+# CONFIG_HP206C is not set
+# CONFIG_ZPA2326 is not set
+# end of Pressure sensors
+
+#
+# Lightning sensors
+#
+# end of Lightning sensors
+
+#
+# Proximity and distance sensors
+#
+# CONFIG_IRSD200 is not set
+# CONFIG_ISL29501 is not set
+# CONFIG_LIDAR_LITE_V2 is not set
+# CONFIG_MB1232 is not set
+# CONFIG_PING is not set
+# CONFIG_RFD77402 is not set
+# CONFIG_SRF04 is not set
+# CONFIG_SX9310 is not set
+# CONFIG_SX9324 is not set
+# CONFIG_SX9360 is not set
+# CONFIG_SX9500 is not set
+# CONFIG_SRF08 is not set
+# CONFIG_VCNL3020 is not set
+# CONFIG_VL53L0X_I2C is not set
+# end of Proximity and distance sensors
+
+#
+# Resolver to digital converters
+#
+# end of Resolver to digital converters
+
+#
+# Temperature sensors
+#
+CONFIG_HID_SENSOR_TEMP=m
+# CONFIG_MLX90614 is not set
+# CONFIG_MLX90632 is not set
+# CONFIG_TMP006 is not set
+# CONFIG_TMP007 is not set
+# CONFIG_TMP117 is not set
+# CONFIG_TSYS01 is not set
+# CONFIG_TSYS02D is not set
+# CONFIG_MAX30208 is not set
+# end of Temperature sensors
+
+CONFIG_NTB=m
+# CONFIG_NTB_MSI is not set
+# CONFIG_NTB_AMD is not set
+# CONFIG_NTB_IDT is not set
+# CONFIG_NTB_INTEL is not set
+# CONFIG_NTB_EPF is not set
+# CONFIG_NTB_SWITCHTEC is not set
+# CONFIG_NTB_PINGPONG is not set
+# CONFIG_NTB_TOOL is not set
+# CONFIG_NTB_PERF is not set
+# CONFIG_NTB_TRANSPORT is not set
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+# CONFIG_PWM_DEBUG is not set
+# CONFIG_PWM_CLK is not set
+# CONFIG_PWM_DWC is not set
+CONFIG_PWM_LPSS=m
+CONFIG_PWM_LPSS_PCI=m
+CONFIG_PWM_LPSS_PLATFORM=m
+# CONFIG_PWM_PCA9685 is not set
+
+#
+# IRQ chip support
+#
+# end of IRQ chip support
+
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# PHY Subsystem
+#
+# CONFIG_GENERIC_PHY is not set
+# CONFIG_USB_LGM_PHY is not set
+# CONFIG_PHY_CAN_TRANSCEIVER is not set
+
+#
+# PHY drivers for Broadcom platforms
+#
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# end of PHY drivers for Broadcom platforms
+
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_CPCAP_USB is not set
+# CONFIG_PHY_INTEL_LGM_EMMC is not set
+# end of PHY Subsystem
+
+CONFIG_POWERCAP=y
+CONFIG_INTEL_RAPL_CORE=m
+CONFIG_INTEL_RAPL=m
+CONFIG_IDLE_INJECT=y
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+# end of Performance monitor support
+
+CONFIG_RAS=y
+CONFIG_RAS_CEC=y
+# CONFIG_RAS_CEC_DEBUG is not set
+# CONFIG_USB4 is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID_BINDER_IPC is not set
+# end of Android
+
+CONFIG_LIBNVDIMM=y
+CONFIG_BLK_DEV_PMEM=y
+CONFIG_ND_CLAIM=y
+CONFIG_ND_BTT=y
+CONFIG_BTT=y
+CONFIG_ND_PFN=y
+CONFIG_NVDIMM_PFN=y
+CONFIG_NVDIMM_DAX=y
+CONFIG_NVDIMM_KEYS=y
+# CONFIG_NVDIMM_SECURITY_TEST is not set
+CONFIG_DAX=y
+CONFIG_DEV_DAX=y
+CONFIG_DEV_DAX_PMEM=y
+CONFIG_DEV_DAX_HMEM=y
+CONFIG_DEV_DAX_HMEM_DEVICES=y
+CONFIG_DEV_DAX_KMEM=y
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+
+#
+# Layout Types
+#
+# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set
+# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set
+# end of Layout Types
+
+# CONFIG_NVMEM_RMEM is not set
+
+#
+# HW tracing support
+#
+CONFIG_STM=m
+CONFIG_STM_PROTO_BASIC=m
+CONFIG_STM_PROTO_SYS_T=m
+CONFIG_STM_DUMMY=m
+CONFIG_STM_SOURCE_CONSOLE=m
+CONFIG_STM_SOURCE_HEARTBEAT=m
+CONFIG_STM_SOURCE_FTRACE=m
+CONFIG_INTEL_TH=m
+CONFIG_INTEL_TH_PCI=m
+CONFIG_INTEL_TH_ACPI=m
+CONFIG_INTEL_TH_GTH=m
+CONFIG_INTEL_TH_STH=m
+CONFIG_INTEL_TH_MSU=m
+CONFIG_INTEL_TH_PTI=m
+# CONFIG_INTEL_TH_DEBUG is not set
+# end of HW tracing support
+
+# CONFIG_FPGA is not set
+CONFIG_TEE=m
+CONFIG_AMDTEE=m
+# CONFIG_SIOX is not set
+# CONFIG_SLIMBUS is not set
+# CONFIG_INTERCONNECT is not set
+# CONFIG_COUNTER is not set
+# CONFIG_MOST is not set
+# CONFIG_PECI is not set
+# CONFIG_HTE is not set
+# end of Device Drivers
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_FS_IOMAP=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_LEGACY_DIRECT_IO=y
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_USE_FOR_EXT2=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_XFS_FS=m
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_ONLINE_SCRUB is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_ZONEFS_FS is not set
+CONFIG_FS_DAX=y
+CONFIG_FS_DAX_PMD=y
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_FS_ENCRYPTION is not set
+# CONFIG_FS_VERITY is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+CONFIG_AUTOFS_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
+CONFIG_FUSE_DAX=y
+CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
+CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
+CONFIG_OVERLAY_FS_INDEX=y
+# CONFIG_OVERLAY_FS_NFS_EXPORT is not set
+# CONFIG_OVERLAY_FS_XINO_AUTO is not set
+# CONFIG_OVERLAY_FS_METACOPY is not set
+# CONFIG_OVERLAY_FS_DEBUG is not set
+
+#
+# Caches
+#
+CONFIG_NETFS_SUPPORT=m
+CONFIG_NETFS_STATS=y
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_DEBUG is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_ERROR_INJECTION is not set
+CONFIG_CACHEFILES_ONDEMAND=y
+# end of Caches
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+# end of CD-ROM/DVD Filesystems
+
+#
+# DOS/FAT/EXFAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_FAT_DEFAULT_UTF8 is not set
+# CONFIG_EXFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS3_FS is not set
+# end of DOS/FAT/EXFAT/NT Filesystems
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_VMCORE_DEVICE_DUMP=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_PROC_PID_ARCH_STATUS=y
+CONFIG_PROC_CPU_RESCTRL=y
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_TMPFS_INODE64 is not set
+# CONFIG_TMPFS_QUOTA is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y
+CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+# end of Pseudo filesystems
+
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ORANGEFS_FS is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_UBIFS_FS is not set
+CONFIG_CRAMFS=m
+CONFIG_CRAMFS_BLOCKDEV=y
+# CONFIG_CRAMFS_MTD is not set
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_FILE_CACHE is not set
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+# CONFIG_SQUASHFS_LZ4 is not set
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
+CONFIG_PSTORE_COMPRESS=y
+CONFIG_PSTORE_CONSOLE=y
+# CONFIG_PSTORE_PMSG is not set
+# CONFIG_PSTORE_FTRACE is not set
+CONFIG_PSTORE_RAM=y
+# CONFIG_PSTORE_BLK is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_EROFS_FS=m
+# CONFIG_EROFS_FS_DEBUG is not set
+CONFIG_EROFS_FS_XATTR=y
+CONFIG_EROFS_FS_POSIX_ACL=y
+CONFIG_EROFS_FS_SECURITY=y
+CONFIG_EROFS_FS_ZIP=y
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_ZIP_DEFLATE=y
+CONFIG_EROFS_FS_ONDEMAND=y
+# CONFIG_EROFS_FS_PCPU_KTHREAD is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFS_V3=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+# CONFIG_NFS_SWAP is not set
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_FLEXFILE_LAYOUT=m
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
+# CONFIG_NFS_V4_1_MIGRATION is not set
+CONFIG_NFS_V4_SECURITY_LABEL=y
+CONFIG_NFS_FSCACHE=y
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFS_DEBUG=y
+CONFIG_NFS_DISABLE_UDP_SUPPORT=y
+# CONFIG_NFS_V4_2_READ_PLUS is not set
+CONFIG_NFSD=m
+# CONFIG_NFSD_V2 is not set
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_PNFS=y
+# CONFIG_NFSD_BLOCKLAYOUT is not set
+CONFIG_NFSD_SCSILAYOUT=y
+# CONFIG_NFSD_FLEXFILELAYOUT is not set
+# CONFIG_NFSD_V4_2_INTER_SSC is not set
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_GRACE_PERIOD=m
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_NFS_V4_2_SSC_HELPER=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_SUNRPC_BACKCHANNEL=y
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_SUNRPC_XPRT_RDMA=m
+CONFIG_CEPH_FS=m
+# CONFIG_CEPH_FSCACHE is not set
+CONFIG_CEPH_FS_POSIX_ACL=y
+# CONFIG_CEPH_FS_SECURITY_LABEL is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
+CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DEBUG=y
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
+CONFIG_CIFS_DFS_UPCALL=y
+# CONFIG_CIFS_SWN_UPCALL is not set
+# CONFIG_CIFS_SMB_DIRECT is not set
+# CONFIG_CIFS_FSCACHE is not set
+# CONFIG_SMB_SERVER is not set
+CONFIG_SMBFS=m
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=m
+CONFIG_NLS_UCS2_UTILS=m
+CONFIG_DLM=m
+CONFIG_DLM_DEBUG=y
+# CONFIG_UNICODE is not set
+CONFIG_IO_WQ=y
+# end of File systems
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_REQUEST_CACHE is not set
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_TRUSTED_KEYS=y
+CONFIG_TRUSTED_KEYS_TPM=y
+CONFIG_ENCRYPTED_KEYS=y
+# CONFIG_USER_DECRYPTED_DATA is not set
+# CONFIG_KEY_DH_OPERATIONS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_PATH=y
+CONFIG_INTEL_TXT=y
+CONFIG_LSM_MMAP_MIN_ADDR=65535
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
+# CONFIG_STATIC_USERMODEHELPER is not set
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
+CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
+# CONFIG_SECURITY_SELINUX_DEBUG is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_SECURITY_LOADPIN is not set
+CONFIG_SECURITY_YAMA=y
+# CONFIG_SECURITY_SAFESETID is not set
+# CONFIG_SECURITY_LOCKDOWN_LSM is not set
+# CONFIG_SECURITY_LANDLOCK is not set
+CONFIG_INTEGRITY=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_TRUSTED_KEYRING=y
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
+# CONFIG_INTEGRITY_MACHINE_KEYRING is not set
+CONFIG_LOAD_UEFI_KEYS=y
+CONFIG_INTEGRITY_AUDIT=y
+CONFIG_IMA=y
+# CONFIG_IMA_KEXEC is not set
+CONFIG_IMA_MEASURE_PCR_IDX=10
+CONFIG_IMA_LSM_RULES=y
+# CONFIG_IMA_NG_TEMPLATE is not set
+CONFIG_IMA_SIG_TEMPLATE=y
+CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig"
+# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
+# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
+# CONFIG_IMA_DEFAULT_HASH_SM3 is not set
+CONFIG_IMA_DEFAULT_HASH="sha256"
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IMA_READ_POLICY=y
+CONFIG_IMA_APPRAISE=y
+# CONFIG_IMA_ARCH_POLICY is not set
+CONFIG_IMA_APPRAISE_BUILD_POLICY=y
+# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set
+# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set
+# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set
+# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set
+CONFIG_IMA_APPRAISE_BOOTPARAM=y
+# CONFIG_IMA_APPRAISE_MODSIG is not set
+CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y
+CONFIG_IMA_BLACKLIST_KEYRING=y
+CONFIG_IMA_LOAD_X509=y
+CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der"
+# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set
+CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
+CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
+# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
+# CONFIG_IMA_DISABLE_HTABLE is not set
+CONFIG_EVM=y
+CONFIG_EVM_ATTR_FSUUID=y
+# CONFIG_EVM_ADD_XATTRS is not set
+CONFIG_EVM_LOAD_X509=y
+CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der"
+CONFIG_DEFAULT_SECURITY_SELINUX=y
+# CONFIG_DEFAULT_SECURITY_DAC is not set
+CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor"
+
+#
+# Kernel hardening options
+#
+
+#
+# Memory initialization
+#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
+CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
+# CONFIG_GCC_PLUGIN_STACKLEAK is not set
+# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
+# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
+# end of Memory initialization
+
+#
+# Hardening of kernel data structures
+#
+CONFIG_LIST_HARDENED=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+# end of Hardening of kernel data structures
+
+CONFIG_CC_HAS_RANDSTRUCT=y
+CONFIG_RANDSTRUCT_NONE=y
+# CONFIG_RANDSTRUCT_FULL is not set
+# CONFIG_RANDSTRUCT_PERFORMANCE is not set
+# end of Kernel hardening options
+# end of Security options
+
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API"
+# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG2=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_SKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=m
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SIMD=y
+# end of Crypto core or helper
+
+#
+# Public-key cryptography
+#
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_DH=m
+# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set
+CONFIG_CRYPTO_ECC=m
+CONFIG_CRYPTO_ECDH=m
+# CONFIG_CRYPTO_ECDSA is not set
+# CONFIG_CRYPTO_ECRDSA is not set
+CONFIG_CRYPTO_SM2=y
+# CONFIG_CRYPTO_CURVE25519 is not set
+# end of Public-key cryptography
+
+#
+# Block ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES_TI is not set
+CONFIG_CRYPTO_ANUBIS=m
+# CONFIG_CRYPTO_ARIA is not set
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST_COMMON=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=y
+CONFIG_CRYPTO_SM4_GENERIC=y
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+# end of Block ciphers
+
+#
+# Length-preserving ciphers and modes
+#
+# CONFIG_CRYPTO_ADIANTUM is not set
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CHACHA20=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_HCTR2 is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=y
+# end of Length-preserving ciphers and modes
+
+#
+# AEAD (authenticated encryption with associated data) ciphers
+#
+# CONFIG_CRYPTO_AEGIS128 is not set
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_GENIV=m
+CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_ESSIV=m
+# end of AEAD (authenticated encryption with associated data) ciphers
+
+#
+# Hashes, digests, and MACs
+#
+CONFIG_CRYPTO_BLAKE2B=m
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_POLY1305=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+CONFIG_CRYPTO_SM3=y
+CONFIG_CRYPTO_SM3_GENERIC=y
+# CONFIG_CRYPTO_STREEBOG is not set
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_XXHASH=m
+# end of Hashes, digests, and MACs
+
+#
+# CRCs (cyclic redundancy checks)
+#
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRC64_ROCKSOFT=m
+# end of CRCs (cyclic redundancy checks)
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_842 is not set
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
+# end of Compression
+
+#
+# Random number generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set
+# end of Random number generation
+
+#
+# Userspace interface
+#
+CONFIG_CRYPTO_USER_API=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_USER_API_RNG=y
+# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
+CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y
+# CONFIG_CRYPTO_STATS is not set
+# end of Userspace interface
+
+CONFIG_CRYPTO_HASH_INFO=y
+
+#
+# Accelerated Cryptographic Algorithms for CPU (x86)
+#
+CONFIG_CRYPTO_CURVE25519_X86=m
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_BLOWFISH_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
+CONFIG_CRYPTO_CAST5_AVX_X86_64=m
+CONFIG_CRYPTO_CAST6_AVX_X86_64=m
+CONFIG_CRYPTO_DES3_EDE_X86_64=m
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
+CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y
+CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y
+CONFIG_CRYPTO_TWOFISH_X86_64=m
+CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
+CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
+# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set
+# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set
+# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set
+CONFIG_CRYPTO_CHACHA20_X86_64=m
+# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set
+# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set
+# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set
+CONFIG_CRYPTO_BLAKE2S_X86=y
+# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set
+CONFIG_CRYPTO_POLY1305_X86_64=m
+CONFIG_CRYPTO_SHA1_SSSE3=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_CRYPTO_SHA512_SSSE3=y
+CONFIG_CRYPTO_SM3_AVX_X86_64=y
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
+CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_CRC32_PCLMUL=m
+CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
+# end of Accelerated Cryptographic Algorithms for CPU (x86)
+
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_PADLOCK=m
+CONFIG_CRYPTO_DEV_PADLOCK_AES=m
+CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
+# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
+# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
+CONFIG_CRYPTO_DEV_CCP=y
+CONFIG_CRYPTO_DEV_CCP_DD=m
+CONFIG_CRYPTO_DEV_SP_CCP=y
+CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
+CONFIG_CRYPTO_DEV_SP_PSP=y
+# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
+CONFIG_CRYPTO_DEV_NITROX=m
+CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+CONFIG_CRYPTO_DEV_QAT=m
+CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
+CONFIG_CRYPTO_DEV_QAT_C3XXX=m
+CONFIG_CRYPTO_DEV_QAT_C62X=m
+# CONFIG_CRYPTO_DEV_QAT_4XXX is not set
+CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
+CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
+CONFIG_CRYPTO_DEV_QAT_C62XVF=m
+CONFIG_CRYPTO_DEV_CHELSIO=m
+# CONFIG_CRYPTO_DEV_VIRTIO is not set
+# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
+# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_PKCS7_TEST_KEY is not set
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
+
+#
+# Certificates for signature checking
+#
+CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
+CONFIG_MODULE_SIG_KEY_TYPE_RSA=y
+# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
+CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192
+CONFIG_SECONDARY_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
+# CONFIG_SYSTEM_REVOCATION_LIST is not set
+# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set
+# end of Certificates for signature checking
+
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_RAID6_PQ=m
+CONFIG_RAID6_PQ_BENCHMARK=y
+# CONFIG_PACKING is not set
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+CONFIG_CORDIC=m
+# CONFIG_PRIME_NUMBERS is not set
+CONFIG_RATIONAL=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
+
+#
+# Crypto library routines
+#
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LIB_AES=y
+CONFIG_CRYPTO_LIB_ARC4=m
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m
+CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m
+CONFIG_CRYPTO_LIB_CHACHA=m
+CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_DES=m
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11
+CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
+CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
+CONFIG_CRYPTO_LIB_POLY1305=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+# end of Crypto library routines
+
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC64_ROCKSOFT=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+CONFIG_CRC64=m
+# CONFIG_CRC4 is not set
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+CONFIG_CRC8=m
+CONFIG_XXHASH=y
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_LZ4_COMPRESS=m
+CONFIG_LZ4HC_COMPRESS=m
+CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=m
+CONFIG_ZSTD_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_MICROLZMA=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_DECOMPRESS_LZ4=y
+CONFIG_DECOMPRESS_ZSTD=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_BTREE=y
+CONFIG_INTERVAL_TREE=y
+CONFIG_XARRAY_MULTI=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_DMA_OPS=y
+CONFIG_NEED_SG_DMA_FLAGS=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
+CONFIG_SWIOTLB=y
+# CONFIG_SWIOTLB_DYNAMIC is not set
+CONFIG_DMA_COHERENT_POOL=y
+CONFIG_DMA_CMA=y
+# CONFIG_DMA_NUMA_CMA is not set
+
+#
+# Default contiguous memory area size:
+#
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_CMA_SIZE_SEL_MBYTES=y
+# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
+# CONFIG_CMA_SIZE_SEL_MIN is not set
+# CONFIG_CMA_SIZE_SEL_MAX is not set
+CONFIG_CMA_ALIGNMENT=8
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_DMA_MAP_BENCHMARK is not set
+CONFIG_SGL_ALLOC=y
+CONFIG_CHECK_SIGNATURE=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_GLOB=y
+# CONFIG_GLOB_SELFTEST is not set
+CONFIG_NLATTR=y
+CONFIG_CLZ_TAB=y
+CONFIG_IRQ_POLL=y
+CONFIG_MPILIB=y
+CONFIG_SIGNATURE=y
+CONFIG_DIMLIB=y
+CONFIG_OID_REGISTRY=y
+CONFIG_UCS2_STRING=y
+CONFIG_HAVE_GENERIC_VDSO=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_VDSO_TIME_NS=y
+CONFIG_FONT_SUPPORT=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_SG_POOL=y
+CONFIG_ARCH_HAS_PMEM_API=y
+CONFIG_MEMREGION=y
+CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y
+CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
+CONFIG_ARCH_HAS_COPY_MC=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_STACKDEPOT=y
+CONFIG_SBITMAP=y
+CONFIG_PARMAN=m
+CONFIG_OBJAGG=m
+# end of Library routines
+
+CONFIG_PLDMFW=y
+CONFIG_ASN1_ENCODER=y
+
+#
+# Kernel hacking
+#
+
+#
+# printk and dmesg options
+#
+CONFIG_PRINTK_TIME=y
+# CONFIG_PRINTK_CALLER is not set
+# CONFIG_STACKTRACE_BUILD_ID is not set
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
+CONFIG_CONSOLE_LOGLEVEL_QUIET=4
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_SYMBOLIC_ERRNAME=y
+CONFIG_DEBUG_BUGVERBOSE=y
+# end of printk and dmesg options
+
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MISC=y
+
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+CONFIG_AS_HAS_NON_CONST_LEB128=y
+# CONFIG_DEBUG_INFO_NONE is not set
+# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
+CONFIG_DEBUG_INFO_DWARF4=y
+# CONFIG_DEBUG_INFO_DWARF5 is not set
+# CONFIG_DEBUG_INFO_REDUCED is not set
+CONFIG_DEBUG_INFO_COMPRESSED_NONE=y
+# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set
+# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set
+# CONFIG_DEBUG_INFO_SPLIT is not set
+CONFIG_DEBUG_INFO_BTF=y
+# CONFIG_GDB_SCRIPTS is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
+# CONFIG_HEADERS_INSTALL is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_OBJTOOL=y
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# end of Compile-time checks and compiler options
+
+#
+# Generic Kernel Debugging Instruments
+#
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_MAGIC_SYSRQ_SERIAL=y
+CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
+# CONFIG_DEBUG_FS_ALLOW_NONE is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_KGDB=y
+CONFIG_KGDB_HONOUR_BLOCKLIST=y
+CONFIG_KGDB_SERIAL_CONSOLE=y
+CONFIG_KGDB_TESTS=y
+# CONFIG_KGDB_TESTS_ON_BOOT is not set
+CONFIG_KGDB_LOW_LEVEL_TRAP=y
+CONFIG_KGDB_KDB=y
+CONFIG_KDB_DEFAULT_ENABLE=0x1
+CONFIG_KDB_KEYBOARD=y
+CONFIG_KDB_CONTINUE_CATASTROPHIC=0
+CONFIG_ARCH_HAS_EARLY_DEBUG=y
+CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
+# CONFIG_UBSAN is not set
+CONFIG_HAVE_ARCH_KCSAN=y
+CONFIG_HAVE_KCSAN_COMPILER=y
+# CONFIG_KCSAN is not set
+# end of Generic Kernel Debugging Instruments
+
+#
+# Networking Debugging
+#
+# CONFIG_NET_DEV_REFCNT_TRACKER is not set
+# CONFIG_NET_NS_REFCNT_TRACKER is not set
+# CONFIG_DEBUG_NET is not set
+# end of Networking Debugging
+
+#
+# Memory Debugging
+#
+# CONFIG_PAGE_EXTENSION is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_PAGE_OWNER is not set
+# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_PER_VMA_LOCK_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +CONFIG_HAVE_ARCH_KMSAN=y +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +# CONFIG_NMI_CHECK_CPU is not set +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_OBJTOOL_MCOUNT=y +CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y +CONFIG_BUILDTIME_MCOUNT_SORT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +# CONFIG_MMIOTRACE is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# 
CONFIG_RV is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +CONFIG_TEST_LIVEPATCH=m +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking -- Gitee From 3d7eac954041de74ac8a414dc3c4b563ac609898 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 15 Dec 2023 11:41:31 +0800 Subject: [PATCH 002/953] anolis: spec: add basic framework to generate rpm tree Signed-off-by: Qiao Ma --- anolis/.gitignore | 1 + anolis/Makefile | 87 ++ anolis/Makefile.variables | 80 ++ anolis/buildpkg.sh | 89 ++ anolis/changelog/000-changelog.base | 2 + anolis/genlog.sh | 65 ++ anolis/genrpmtree.sh | 34 + anolis/genspec.sh | 16 + anolis/rpm/cpupower.config | 3 + 
 anolis/rpm/cpupower.service | 13 +
 anolis/rpm/generate_bls_conf.sh | 35 +
 anolis/rpm/kernel.spec.template | 1581 +++++++++++++++++++++++++++
 12 files changed, 2006 insertions(+)
 create mode 100644 anolis/.gitignore
 create mode 100644 anolis/Makefile
 create mode 100644 anolis/Makefile.variables
 create mode 100644 anolis/buildpkg.sh
 create mode 100644 anolis/changelog/000-changelog.base
 create mode 100644 anolis/genlog.sh
 create mode 100644 anolis/genrpmtree.sh
 create mode 100644 anolis/genspec.sh
 create mode 100644 anolis/rpm/cpupower.config
 create mode 100644 anolis/rpm/cpupower.service
 create mode 100755 anolis/rpm/generate_bls_conf.sh
 create mode 100644 anolis/rpm/kernel.spec.template

diff --git a/anolis/.gitignore b/anolis/.gitignore
new file mode 100644
index 000000000000..9b1960e711fc
--- /dev/null
+++ b/anolis/.gitignore
@@ -0,0 +1 @@
+output/
\ No newline at end of file
diff --git a/anolis/Makefile b/anolis/Makefile
new file mode 100644
index 000000000000..069ceab2c498
--- /dev/null
+++ b/anolis/Makefile
@@ -0,0 +1,87 @@
+include Makefile.variables
+
+all: help examples
+
+dist-check:
+	@if [ "${DIST_BUILD_MODE}" == "official" ]; then \
+		if [ "$(shell git describe --tags --exact-match HEAD 2>/dev/null)" != "${DIST_ANOLIS_VERSION}" ]; then \
+			echo "Error: For official build, the tag ${DIST_ANOLIS_VERSION} should point to HEAD"; \
+			exit 1; \
+		fi \
+	fi
+	@if [ "${DIST_BUILD_MODE}" == "diy" ] && [ -z "${DIST_DIY}" ]; then \
+		echo "Error: For diy build, the variable DIST_DIY should not be empty"; \
+		exit 1; \
+	fi
+
+dist-genlog:
+	sh genlog.sh
+
+dist-genspec: dist-check
+	sh genspec.sh
+
+dist-genrpmtree: dist-check
+	sh genrpmtree.sh
+
+dist-rpms: dist-genrpmtree dist-check
+	sh buildpkg.sh
+
+clean:
+	rm -rf $(DIST_OUTPUT)
+
+dist-version:
+	@echo $(DIST_ANOLIS_VERSION)
+
+examples:
+	@echo ''
+	@echo 'Build Examples:'
+	@echo '- *RECOMMEND* devel build with basic rpm packages'
+	@echo '    DIST_BUILD_MODE=devel DIST_BUILD_EXTRA=base make dist-rpms'
+	@echo '- *RECOMMEND* devel build with full rpm packages'
+	@echo '    DIST_BUILD_MODE=devel make dist-rpms'
+	@echo '- *RECOMMEND* nightly build with full rpm packages'
+	@echo '    DIST_BUILD_MODE=nightly make dist-rpms'
+	@echo '- diy build with basic rpm packages'
+	@echo '    DIST_BUILD_MODE=diy DIST_DIY="your_diy_name" DIST_BUILD_EXTRA=base make dist-rpms'
+	@echo ''
+	@echo 'Kernel Version Examples:'
+	@echo '- show the kernel version of devel mode'
+	@echo '    DIST_BUILD_MODE=devel make dist-version'
+	@echo '- show the kernel version of diy mode'
+	@echo '    DIST_BUILD_MODE=diy DIST_DIY="your_diy_name" make dist-version'
+	@echo ''
+	@echo 'Other Examples:'
+	@echo '- only generate rpm tree in devel mode, but do not build rpm packages'
+	@echo '    DIST_BUILD_MODE=devel make dist-genrpmtree'
+	@echo '- cleanup'
+	@echo '    make clean'
+
+help:
+	@echo 'For anolis release'
+	@echo ''
+	@echo 'RUN `make examples` for some examples'
+	@echo '--------------------------------'
+	@echo 'generic commands:'
+	@echo '  dist-genspec    - generate the kernel spec file from kernel.spec.template and the changelog files'
+	@echo '  dist-genlog     - generate changelogs'
+	@echo '  dist-genrpmtree - generate rpm tree'
+	@echo '  dist-rpms       - build the kernel rpm packages; they will be auto-generated in $(DIST_SHORT_OUTPUT)'
+	@echo '  dist-version    - show dist version'
+	@echo '  clean           - cleanup output dir'
+	@echo '  examples        - show some examples'
+	@echo ''
+	@echo '-------------------------------'
+	@echo 'the environment variables that can be overridden:'
+	@echo '  DIST                    - the distribution suffix, eg: .an7, .an8, .an23'
+	@echo '  DIST_OUTPUT             - the output directory, default: $(DIST_SHORT_OUTPUT)'
+	@echo '  DIST_BUILD_MODE         - the build mode. optional: official/nightly/devel/diy'
+	@echo '      !!! NOTE: BE CAUTIOUS ABOUT USING official BUILD !!!'
+	@echo '      - official build. kernel version: $(DIST_KERNELVERSION)-$(DIST_OFFICIAL_PKGRELEASEVERION), with srpm'
+	@echo '      - nightly build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), with srpm'
+	@echo '      - devel build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), without srpm'
+	@echo '      - diy build. kernel version: $(DIST_KERNELVERSION)-$(DIST_DIY_PKGRELEASEVERION), with srpm'
+	@echo '  DIST_BUILD_NUMBER       - the build number for unofficial build, eg: 1/2'
+	@echo '  DIST_DIY                - the kernel version for diy build'
+	@echo '  DIST_BUILD_VARIANT & DIST_BUILD_EXTRA - see comments in buildpkg.sh'
+
+export
\ No newline at end of file
diff --git a/anolis/Makefile.variables b/anolis/Makefile.variables
new file mode 100644
index 000000000000..d806e7988230
--- /dev/null
+++ b/anolis/Makefile.variables
@@ -0,0 +1,80 @@
+# the global environment variables, which will be passed to shell scripts
+# all variables start with DIST_, to avoid influencing the kernel build
+
+# the dist suffix, eg: an7, an8, an23
+DIST ?= .an23
+
+# build mode:
+# - official build, the kernel version looks like: 5.10.134-15.1_rc1, and also generates a source rpm
+# - nightly build, the kernel version looks like: 5.10.134-1.git.6235a991a61d, and also generates a source rpm
+# - devel build, same as nightly build, without source rpm
+DIST_BUILD_MODE ?= devel
+
+# the package release version.
+# eg: for ANCK 5.10-015.1, the major version is 15, the minor version is 1
+DIST_RELEASE_MAJOR_VERSION = 1
+DIST_RELEASE_MINOR_VERSION =
+
+# testing stage.
+# eg: alpha, beta, rc
+DIST_TESTING_STAGE = rc
+DIST_TESTING_STAGE_MAJOR_VERSION = 1
+DIST_TESTING_STAGE_MINOR_VERSION =
+
+# special versions, eg: the pgo version
+DIST_SPECIAL_VERSION_NAME =
+DIST_SPECIAL_VERSION_MAJOR =
+DIST_SPECIAL_VERSION_MINOR =
+
+# build number
+DIST_BUILD_NUMBER ?= 1
+
+# the kernel root
+DIST_SRCROOT = $(shell realpath ..)/
+DIST_SOURCES = $(DIST_SRCROOT)anolis/
+DIST_RPM = $(DIST_SOURCES)rpm/
+DIST_CHANGELOG = $(DIST_SOURCES)changelog/
+
+# the output directory
+DIST_OUTPUT ?= $(DIST_SOURCES)output/
+DIST_RPMBUILDDIR_OUTPUT = ${DIST_OUTPUT}/rpmbuild
+DIST_SHORT_OUTPUT=$(subst $(DIST_SRCROOT),,$(DIST_OUTPUT))
+
+DIST_SPEC_TEMPLATE = kernel.spec.template
+DIST_SPEC_FILE = kernel.spec
+
+# generate anolis kernel version
+
+# kernel version for official build
+DIST_RELEASE_VERSION = $(DIST_RELEASE_MAJOR_VERSION)$(if $(DIST_RELEASE_MINOR_VERSION),.$(DIST_RELEASE_MINOR_VERSION))
+DIST_SPECIAL_VERSION = $(if $(DIST_SPECIAL_VERSION_NAME),.$(DIST_SPECIAL_VERSION_NAME)$(if $(DIST_SPECIAL_VERSION_MAJOR),.$(DIST_SPECIAL_VERSION_MAJOR))$(if $(DIST_SPECIAL_VERSION_MINOR),.$(DIST_SPECIAL_VERSION_MINOR)))
+DIST_TESTING_VERSION = $(if $(DIST_TESTING_STAGE),_$(DIST_TESTING_STAGE)$(if $(DIST_TESTING_STAGE_MAJOR_VERSION),$(DIST_TESTING_STAGE_MAJOR_VERSION))$(if $(DIST_TESTING_STAGE_MINOR_VERSION),.$(DIST_TESTING_STAGE_MINOR_VERSION)))
+DIST_LINUXVERSION:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^VERSION\ =\ /{s///;p;q}')
+DIST_LINUXKPATCHLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^PATCHLEVEL\ =\ /{s///;p;q}')
+DIST_LINUXKSUBLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^SUBLEVEL\ =\ /{s///;p;q}')
+DIST_KERNELVERSION = $(DIST_LINUXVERSION).$(DIST_LINUXKPATCHLEVEL).$(DIST_LINUXKSUBLEVEL)
+DIST_OFFICIAL_PKGRELEASEVERION = $(DIST_RELEASE_VERSION)$(DIST_SPECIAL_VERSION)$(DIST_TESTING_VERSION)
+
+# kernel version for unofficial build
+DIST_GIT_HEAD_SHORT_COMMIT_ID = $(shell git rev-parse --short HEAD)
+DIST_GIT_HEAD_FULL_COMMIT_ID = $(shell git rev-parse HEAD)
+DIST_UNOFFICIAL_PKGRELEASEVERION = ${DIST_BUILD_NUMBER}.git.$(DIST_GIT_HEAD_SHORT_COMMIT_ID)
+
+# kernel version for diy build
+DIST_DIY_PKGRELEASEVERION = ${DIST_DIY}.diy
+
+# final kernel version
+ifeq ("${DIST_BUILD_MODE}", "official")
+DIST_PKGRELEASEVERION = $(DIST_OFFICIAL_PKGRELEASEVERION)
+else ifeq ("${DIST_BUILD_MODE}", "diy")
+DIST_PKGRELEASEVERION = $(DIST_DIY_PKGRELEASEVERION)
+else
+DIST_PKGRELEASEVERION = $(DIST_UNOFFICIAL_PKGRELEASEVERION)
+endif
+DIST_ANOLIS_VERSION = $(DIST_KERNELVERSION)-$(DIST_PKGRELEASEVERION)
+
+# the commit id used to compress the kernel tarball:
+# for official build, we compress the tarball from the tag
+# for unofficial build, we compress the tarball from git HEAD
+DIST_PKG_COMMIT_ID = $(if $(DIST_OFFICIAL_BUILD),$(DIST_ANOLIS_VERSION),$(DIST_GIT_HEAD_FULL_COMMIT_ID))
+
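Worked example: with the defaults above on a 6.6.7 source tree (release major version 1, testing stage "rc" with stage major version 1, no special version), the pieces assemble to the same release string that appears in changelog/000-changelog.base below. A minimal shell sketch of the expansion:

    # DIST_RELEASE_VERSION=1, DIST_TESTING_VERSION=_rc1, DIST_SPECIAL_VERSION empty
    DIST_KERNELVERSION=6.6.7
    DIST_PKGRELEASEVERION="1_rc1"
    echo "${DIST_KERNELVERSION}-${DIST_PKGRELEASEVERION}"    # -> 6.6.7-1_rc1
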
diff --git a/anolis/buildpkg.sh b/anolis/buildpkg.sh
new file mode 100644
index 000000000000..34ff6a929f1d
--- /dev/null
+++ b/anolis/buildpkg.sh
@@ -0,0 +1,89 @@
+set -xe
+
+function do_rpmbuild() {
+	if [ "$DIST_BUILD_MODE" == "official" ] || \
+	   [ "$DIST_BUILD_MODE" == "nightly" ] || \
+	   [ "$DIST_BUILD_MODE" == "diy" ]; then
+		CMD="-ba"
+	else
+		CMD="-bb"
+	fi
+
+	# Now we have:
+	# + variants: default, only-debug, with-debug
+	# + extras: base, with-debuginfo, full
+	# + modes: official, nightly, devel
+	# TODO: add with-gcov
+	#
+	# Matrix
+	#
+	# | BuildMode | KernelName      | GenerateSrpm |
+	# |-----------|-----------------|--------------|
+	# | official  | without sha id  | Yes          |
+	# | nightly   | with git sha id | Yes          |
+	# | devel     | with git sha id | No           |
+	#
+	# | Extra\Var | Default  | Only-debug | With-debug |
+	# |-----------|----------|------------|------------|
+	# | Base      | +default | -default   | +default   |
+	# |           | -debug   | +debug     | +debug     |
+	# |           | +headers |
+	# |-----------|------------------------------------|
+	# | debuginfo | +debuginfo                         |
+	# |-----------|------------------------------------|
+	# | full      | +tools +doc +perf                  |
+	#
+	# Note: pre-release mode will always be "full" and "with-debug" by default
+
+	build_opts="--with headers --without bpftool --without signmodules"
+
+	if [ "_${DIST_BUILD_VARIANT}" == "_only-debug" ]; then
+		build_opts="$build_opts --without default --with debug"
+	elif [ "_${DIST_BUILD_VARIANT}" == "_with-debug" ]; then
+		build_opts="$build_opts --with default --with debug"
+	else # assume default
+		build_opts="$build_opts --with default --without debug"
+	fi
+
+	if [ "_${DIST_BUILD_EXTRA}" == "_debuginfo" ]; then
+		build_opts="$build_opts --with debuginfo --without tools --without doc --without perf"
+	elif [ "_${DIST_BUILD_EXTRA}" == "_base" ]; then
+		build_opts="$build_opts --without debuginfo --without tools --without doc --without perf"
+	else # assume full
+		build_opts="$build_opts --with debuginfo --with tools --with doc --with perf"
+	fi
+
+	# launch a new shell to clear current environment variables passed by Makefile
+	rpmbuild \
+		--define "%_smp_mflags -j$(nproc)" \
+		--define "%packager " \
+		--define "%_topdir ${DIST_RPMBUILDDIR_OUTPUT}" \
+		${build_opts} \
+		${CMD} ${DIST_RPMBUILDDIR_OUTPUT}/SPECS/kernel.spec \
+		--target=$(uname -m) || exit 1
+}
+
+function output() {
+	if [ -z "$DIST_OFFICIAL_BUILD" ]; then
+		targetdir=${DIST_BUILD_NUMBER}
+	else
+		targetdir=${DIST_ANOLIS_VERSION}
+	fi
+
+	mkdir -p ${DIST_OUTPUT}/${targetdir}
+
+	cp ${DIST_RPMBUILDDIR_OUTPUT}/RPMS/$(uname -m)/*.rpm ${DIST_OUTPUT}/${targetdir}/
+
+	# copy srpm packages if and only if they exist.
+	if ls ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm > /dev/null 2>&1; then
+		cp ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm ${DIST_OUTPUT}/${targetdir}
+	fi
+
+	ls ${DIST_OUTPUT}/${targetdir}/*.rpm
+
+	rpm_num=$(ls ${DIST_OUTPUT}/${targetdir}/*.rpm | wc -l)
+	echo "${rpm_num} rpm(s) copied."
+}
+
+do_rpmbuild
+output
\ No newline at end of file
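For instance, a devel-mode run with DIST_BUILD_VARIANT=with-debug and DIST_BUILD_EXTRA=base resolves, per the matrix above, to roughly the following invocation (a sketch; the --define flags from do_rpmbuild are elided):

    rpmbuild --with headers --without bpftool --without signmodules \
        --with default --with debug \
        --without debuginfo --without tools --without doc --without perf \
        -bb ${DIST_RPMBUILDDIR_OUTPUT}/SPECS/kernel.spec --target=$(uname -m)
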
diff --git a/anolis/changelog/000-changelog.base b/anolis/changelog/000-changelog.base
new file mode 100644
index 000000000000..7bf7526d60c1
--- /dev/null
+++ b/anolis/changelog/000-changelog.base
@@ -0,0 +1,2 @@
+* Fri Dec 15 2023 Qiao Ma [6.6.7-1_rc1%%DIST%%]
+- anolis: bump kernel to 6.6.7 (Qiao Ma)
\ No newline at end of file
diff --git a/anolis/genlog.sh b/anolis/genlog.sh
new file mode 100644
index 000000000000..a11293855fa5
--- /dev/null
+++ b/anolis/genlog.sh
@@ -0,0 +1,65 @@
+# by default, it generates changelogs from the latest tag to HEAD
+function get_changelog_start_end() {
+	if [ -z "$CHANGELOG_START" ]; then
+		CHANGELOG_START=$(git describe --tags --abbrev=0)
+	fi
+	if [ -z "$CHANGELOG_START" ]; then
+		echo "cannot determine CHANGELOG_START"
+		exit 1
+	fi
+
+	if [ -z "$CHANGELOG_END" ]; then
+		CHANGELOG_END=$(git log --format="%H" -1 HEAD)
+	fi
+}
+
+function get_author_sign() {
+	if [ -z "$AUTHOR_SIGN" ]; then
+		AUTHOR_SIGN=$(git var GIT_COMMITTER_IDENT |sed 's/>.*/>/')
+	fi
+	if [ -z "$AUTHOR_SIGN" ]; then
+		echo "unknown AUTHOR_SIGN"
+		exit 1
+	fi
+}
+
+function get_changelog_file_name() {
+	local file_base_name="changelog.${DIST_ANOLIS_VERSION}"
+	local files_num=$(ls ${DIST_CHANGELOG} | grep -E '[0-9]+-changelog.*' | wc -l)
+	local file_name=$(printf "%03d-${file_base_name}\n" ${files_num})
+	CHANGELOG_FILE=${DIST_CHANGELOG}/${file_name}
+}
+
+function generate_changelog() {
+	get_changelog_start_end
+	get_author_sign
+	get_changelog_file_name
+
+	touch ${CHANGELOG_FILE}
+	echo "* $(date +"%a %b %d %Y") ${AUTHOR_SIGN} [${DIST_ANOLIS_VERSION}%%DIST%%]" > ${CHANGELOG_FILE}
+
+	# TODO:
+	# 1. if config changes, add kernel config refresh log
+	# 2. if linux upstream kernel version updated, add related log
+
+	local commits=$(git rev-list ${CHANGELOG_START}..${CHANGELOG_END})
+	for commit in $commits
+	do
+		## eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu)
+		local log=$(git log --format='- %s (%an)' -1 ${commit})
+
+		## eg: {CVE-2022-32250}
+		## xargs is used to strip spaces
+		local cve_list=$(git log --format='%b' -1 ${commit} | grep -Eio '^[[:blank:]]*Fixes:[[:blank:]]*CVE-.*[[:blank:]]*$' | sed 's/fixes://ig' | xargs | sed 's/[[:blank:]]/,/')
+		local cve_fmt=""
+		if [ -n "${cve_list}" ]; then
+			cve_fmt=$(cat <<< "${cve_list}" | paste -sd "," -)
+			cve_fmt=" {${cve_fmt}}"
+		fi
+		## merge them together, eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu) {CVE-2022-32250}
+		echo "${log}${cve_fmt}" >> ${CHANGELOG_FILE}
+	done
+	echo "" >> ${CHANGELOG_FILE}
+}
+
+generate_changelog
\ No newline at end of file
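The CVE extraction in generate_changelog() can be sanity-checked in isolation by feeding the same pipeline a sample commit body; the message below reuses the CVE id from the script's own comments:

    printf 'Fixes: CVE-2022-32250\n' \
        | grep -Eio '^[[:blank:]]*Fixes:[[:blank:]]*CVE-.*[[:blank:]]*$' \
        | sed 's/fixes://ig' | xargs
    # -> CVE-2022-32250, which the entry renders as "... (Kangjie Xu) {CVE-2022-32250}"
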
diff --git a/anolis/genrpmtree.sh b/anolis/genrpmtree.sh
new file mode 100644
index 000000000000..dd19f111f649
--- /dev/null
+++ b/anolis/genrpmtree.sh
@@ -0,0 +1,34 @@
+#! /bin/bash
+
+set -xe
+
+function do_prep() {
+	mkdir -p ${DIST_RPMBUILDDIR_OUTPUT}
+	mkdir -p ${DIST_RPMBUILDDIR_OUTPUT}/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
+
+	cp ${DIST_RPM}/cpupower* ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/
+	cp ${DIST_RPM}/generate_bls_conf.sh ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/
+
+	# for official build, the corresponding tag should exist
+	if [ -n "$DIST_OFFICIAL_BUILD" ]; then
+		if ! git tag | grep -q -x "${DIST_PKG_COMMIT_ID}"; then
+			echo "cannot find official build tag: ${DIST_PKG_COMMIT_ID}"
+			exit 1
+		fi
+	fi
+
+	pkgname="linux-${DIST_ANOLIS_VERSION}${DIST}"
+	pushd ${DIST_SRCROOT} > /dev/null
+	git archive --format=tar --prefix="${pkgname}/" ${DIST_PKG_COMMIT_ID} | xz -T$(nproc) > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz
+	md5sum ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/download
+	popd > /dev/null
+	DIST_OUTPUT=${DIST_RPMBUILDDIR_OUTPUT}/SPECS/ sh genspec.sh
+
+	cp ${DIST_SRCROOT}/arch/x86/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64.config
+	cp ${DIST_SRCROOT}/arch/x86/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config
+	cp ${DIST_SRCROOT}/arch/arm64/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config
+	cp ${DIST_SRCROOT}/arch/arm64/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config
+
+}
+
+do_prep
\ No newline at end of file
diff --git a/anolis/genspec.sh b/anolis/genspec.sh
new file mode 100644
index 000000000000..cfd9f58c2a4e
--- /dev/null
+++ b/anolis/genspec.sh
@@ -0,0 +1,16 @@
+#! /bin/bash
+# generate the kernel spec from the spec template and the changelog files.
+# it is called from the Makefile; do not run it directly.
+
+mkdir -p ${DIST_OUTPUT}
+cp -f ${DIST_RPM}/${DIST_SPEC_TEMPLATE} ${DIST_OUTPUT}/${DIST_SPEC_FILE}
+
+for changelog_file in $(ls ${DIST_CHANGELOG} | sort)
+do
+	sed -i "/%changelog/r ${DIST_CHANGELOG}/${changelog_file}" ${DIST_OUTPUT}/${DIST_SPEC_FILE}
+done
+
+sed -i -e "
+	s/%%DIST%%/$DIST/
+	s/%%DIST_KERNELVERSION%%/$DIST_KERNELVERSION/
+	s/%%DIST_PKGRELEASEVERION%%/$DIST_PKGRELEASEVERION/" ${DIST_OUTPUT}/${DIST_SPEC_FILE}
\ No newline at end of file
diff --git a/anolis/rpm/cpupower.config b/anolis/rpm/cpupower.config
new file mode 100644
index 000000000000..8629a4a3ede7
--- /dev/null
+++ b/anolis/rpm/cpupower.config
@@ -0,0 +1,3 @@
+# See 'cpupower help' and cpupower(1) for more info
+CPUPOWER_START_OPTS="frequency-set -g performance"
+CPUPOWER_STOP_OPTS="frequency-set -g ondemand"
diff --git a/anolis/rpm/cpupower.service b/anolis/rpm/cpupower.service
new file mode 100644
index 000000000000..5f10ab7ee39a
--- /dev/null
+++ b/anolis/rpm/cpupower.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Configure CPU power related settings
+After=syslog.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+EnvironmentFile=/etc/sysconfig/cpupower
+ExecStart=/usr/bin/cpupower $CPUPOWER_START_OPTS
+ExecStop=/usr/bin/cpupower $CPUPOWER_STOP_OPTS
+
+[Install]
+WantedBy=multi-user.target
diff --git a/anolis/rpm/generate_bls_conf.sh b/anolis/rpm/generate_bls_conf.sh
new file mode 100755
index 000000000000..878696c12f33
--- /dev/null
+++ b/anolis/rpm/generate_bls_conf.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -e
+
+. /etc/os-release
+
+if [ "${ID}" == "anolis" ]; then
+	VERSION=${VERSION%%.*}
+fi
+
+kernelver=$1 && shift
+rootfs=$1 && shift
+variant=$1 && shift
+
+output="${rootfs}/lib/modules/${kernelver}/bls.conf"
+date=$(date -u +%Y%m%d%H%M%S)
+
+if [ "${variant:-5}" = "debug" ]; then
+	debugname=" with debugging"
+	debugid="-debug"
+else
+	debugname=""
+	debugid=""
+fi
+
+cat >${output} < in your rpmbuild command or force values
+# to 0 in here to disable them.
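These %{?_without_*}/%{?!_without_*} pairs below follow rpm's bcond convention: passing --without debug to rpmbuild defines the %_without_debug macro, which flips with_debug from 1 to 0. A quick check outside a build (sketch):

    rpm --eval '%{?_without_debug: 0} %{?!_without_debug: 1}'                              # -> 1
    rpm --define '_without_debug 1' --eval '%{?_without_debug: 0} %{?!_without_debug: 1}'  # -> 0
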
+# +# standard kernel +%define with_up %{?_without_up: 0} %{?!_without_up: 1} +# kernel-debug +%define with_debug %{?_without_debug: 0} %{?!_without_debug: 1} +# kernel-doc +%define with_doc %{?_without_doc: 0} %{?!_without_doc: 1} +# kernel-headers +%define with_headers %{?_without_headers: 0} %{?!_without_headers: 1} +# perf +%define with_perf %{?_without_perf: 0} %{?!_without_perf: 1} +# tools +%define with_tools %{?_without_tools: 0} %{?!_without_tools: 1} +# bpf tool +%define with_bpftool %{?_without_bpftool: 0} %{?!_without_bpftool: 1} +# kernel-debuginfo +%define with_debuginfo %{?_without_debuginfo: 0} %{?!_without_debuginfo: 1} +# +# Additional options for user-friendly one-off kernel building: +# +# Only build the base kernel (--with baseonly): +%define with_baseonly %{?_with_baseonly: 1} %{?!_with_baseonly: 0} +# Only build the debug kernel (--with dbgonly): +%define with_dbgonly %{?_with_dbgonly: 1} %{?!_with_dbgonly: 0} +# +# should we do C=1 builds with sparse +%define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} + +# The kernel tarball/base version +%define kversion 5.10 + +%define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0} + +# turn off debug kernel for gcov builds +%if %{with_gcov} +%define with_debug 0 +%endif + +%define make_target bzImage +%define image_install_path boot + +%define KVERREL %{version}-%{release}.%{_target_cpu} +%define KVERREL_RE %(echo %KVERREL | sed 's/+/[+]/g') +%define hdrarch %_target_cpu +%define asmarch %_target_cpu + +%if !%{with_debuginfo} +%define _enable_debug_packages 0 +%endif +%define debuginfodir /usr/lib/debug +# Needed because we override almost everything involving build-ids +# and debuginfo generation. Currently we rely on the old alldebug setting. +%global _build_id_links alldebug + +# if requested, only build base kernel +%if %{with_baseonly} +%define with_debug 0 +%define with_perf 0 +%define with_tools 0 +%define with_bpftool 0 +%endif + +# if requested, only build debug kernel +%if %{with_dbgonly} +%define with_up 0 +%define with_tools 0 +%define with_perf 0 +%define with_bpftool 0 +%endif + +# Overrides for generic default options + +# only package docs noarch +%ifnarch noarch +%define with_doc 0 +%define doc_build_fail true +%endif + +# don't build noarch kernels or headers (duh) +%ifarch noarch +%define with_up 0 +%define with_headers 0 +%define with_tools 0 +%define with_perf 0 +%define with_bpftool 0 +%define with_debug 0 +%define with_doc 0 +%define all_arch_configs %{name}-%{version}-*.config +%endif + +# Per-arch tweaks + +%ifarch x86_64 +%define asmarch x86 +%define all_arch_configs %{name}-%{version}-x86_64*.config +%define kernel_image arch/x86/boot/bzImage +%endif + +%ifarch aarch64 +%define all_arch_configs %{name}-%{version}-aarch64*.config +%define asmarch arm64 +%define hdrarch arm64 +%define make_target Image.gz +%define kernel_image arch/arm64/boot/Image.gz +%endif + +# To temporarily exclude an architecture from being built, add it to +# %%nobuildarches. Do _NOT_ use the ExclusiveArch: line, because if we +# don't build kernel-headers then the new build system will no longer let +# us use the previous build of that package -- it'll just be completely AWOL. +# Which is a BadThing(tm). + +# We only build kernel-headers on the following... 
+%define nobuildarches i386 i686 + +%ifarch %nobuildarches +%define with_up 0 +%define with_debug 0 +%define with_debuginfo 0 +%define with_perf 0 +%define with_tools 0 +%define with_bpftool 0 +%define _enable_debug_packages 0 +%endif + +# Architectures we build tools/cpupower on +%define cpupowerarchs x86_64 aarch64 + + +# +# Packages that need to be installed before the kernel is, because the %%post +# scripts use them. +# +%define kernel_prereq coreutils, systemd >= 203-2, /usr/bin/kernel-install +%define initrd_prereq dracut >= 027 + + +Name: kernel%{?variant} +Group: System Environment/Kernel +License: GPLv2 and Redistributable, no modification permitted +URL: http://www.kernel.org/ +Version: %{rpmversion} +Release: %{pkg_release} +Summary: The Linux kernel, based on version %{version}, heavily modified with backports +# DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. +# SET %%nobuildarches (ABOVE) INSTEAD +ExclusiveArch: noarch i686 x86_64 aarch64 +ExclusiveOS: Linux + + +# +# List the packages used during the kernel build +# +BuildRequires: kmod, patch, bash, coreutils, tar, git, which +BuildRequires: bzip2, xz, findutils, gzip, m4, perl-interpreter, perl-Carp, perl-devel, perl-generators, make, diffutils, gawk +BuildRequires: gcc, binutils, system-rpm-config, hmaccalc, python3-devel +BuildRequires: net-tools, hostname, bc, bison, flex, elfutils-devel, dwarves +BuildRequires: libnl3-devel +%if %{with_doc} +BuildRequires: xmlto, asciidoc, python3-sphinx +%endif +%if %{with_headers} +BuildRequires: rsync +%endif +%if %{with_sparse} +BuildRequires: sparse +%endif +%if %{with_perf} +BuildRequires: zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex xz-devel +BuildRequires: audit-libs-devel +BuildRequires: java-devel +BuildRequires: libbpf-devel +BuildRequires: libbabeltrace-devel +BuildRequires: libtraceevent-devel +BuildRequires: numactl-devel +%ifarch aarch64 +BuildRequires: opencsd-devel >= 1.0.0 +%endif +%endif +%if %{with_tools} +BuildRequires: gettext ncurses-devel +BuildRequires: libcap-devel libcap-ng-devel +BuildRequires: pciutils-devel +BuildRequires: openssl-devel +%endif +%if %{with_bpftool} +BuildRequires: python3-docutils +BuildRequires: zlib-devel binutils-devel +%endif +BuildConflicts: rhbuildsys(DiskFree) < 500Mb +%if %{with_debuginfo} +BuildRequires: rpm-build, elfutils +#BuildConflicts: rpm < 4.13.0.1-19 +# Most of these should be enabled after more investigation +%undefine _include_minidebuginfo +%undefine _find_debuginfo_dwz_opts +%undefine _unique_build_ids +%undefine _unique_debug_names +%undefine _unique_debug_srcs +%undefine _debugsource_packages +%undefine _debuginfo_subpackages +%global _find_debuginfo_opts -r --keep-section .BTF* +%global _missing_build_ids_terminate_build 1 +%global _no_recompute_build_ids 1 +%endif + +BuildRequires: openssl openssl-devel + +# These below are required to build man pages +%if %{with_perf} +BuildRequires: xmlto +%endif +%if %{with_perf} || %{with_tools} +BuildRequires: asciidoc +%endif + +Source0: linux-%{rpmversion}-%{pkg_release}.tar.xz + +%define modsign_cmd %{SOURCE18} + +Source20: kernel-%{version}-aarch64.config +Source21: kernel-%{version}-aarch64-debug.config +Source39: kernel-%{version}-x86_64.config +Source40: kernel-%{version}-x86_64-debug.config +Source43: generate_bls_conf.sh + + + +# Sources for kernel-tools +Source2000: cpupower.service +Source2001: cpupower.config + +## Patches needed for building this package + +# %%PATCH_LIST%% + +# END OF PATCH 
DEFINITIONS
+
+BuildRoot: %{_tmppath}/%{name}-%{KVERREL}-root
+
+%description
+This is the package which provides the Linux kernel for Alibaba Cloud Linux.
+It is based on upstream Linux at version %{version} and maintains kABI
+compatibility of a set of approved symbols, however it is heavily modified with
+backports and fixes pulled from newer upstream Linux %{name} releases. This means
+this is not a %{version} kernel anymore: it includes several components which come
+from newer upstream linux versions, while maintaining a well tested and stable
+core. Some of the components/backports that may be pulled in are: changes like
+updates to the core kernel (eg.: scheduler, cgroups, memory management, security
+fixes and features), updates to block layer, supported filesystems, major driver
+updates for supported hardware in Alibaba Cloud Linux, enhancements for
+enterprise customers, etc.
+
+#
+# This macro does requires, provides, conflicts, obsoletes for a kernel package.
+# %%kernel_reqprovconf
+# It uses any kernel_<subpackage>_conflicts and kernel_<subpackage>_obsoletes
+# macros defined above.
+#
+%define kernel_reqprovconf \
+Provides: %{name} = %{rpmversion}-%{pkg_release}\
+Provides: %{name}-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:+%{1}}\
+Provides: kernel-drm-nouveau = 16\
+Provides: %{name}-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\
+Requires(pre): %{kernel_prereq}\
+Requires(pre): %{initrd_prereq}\
+Requires(pre): linux-firmware >= 20190516-94.git711d3297\
+Requires(preun): systemd >= 200\
+Conflicts: xfsprogs < 4.3.0-1\
+Conflicts: xorg-x11-drv-vmmouse < 13.0.99\
+%{expand:%%{?kernel%{?1:_%{1}}_conflicts:Conflicts: %%{kernel%{?1:_%{1}}_conflicts}}}\
+%{expand:%%{?kernel%{?1:_%{1}}_obsoletes:Obsoletes: %%{kernel%{?1:_%{1}}_obsoletes}}}\
+%{expand:%%{?kernel%{?1:_%{1}}_provides:Provides: %%{kernel%{?1:_%{1}}_provides}}}\
+# We can't let RPM do the dependencies automatically because it'll then pick up\
+# a correct but undesirable perl dependency from the module headers which\
+# isn't required for the kernel proper to function\
+AutoReq: no\
+AutoProv: yes\
+%{nil}
+
+
+%package doc
+Summary: Various documentation bits found in the kernel source
+Group: Documentation
+%description doc
+This package contains documentation files from the kernel
+source. Various bits of information about the Linux kernel and the
+device drivers shipped with it are documented in these files.
+
+You'll want to install this package if you need a reference to the
+options that can be passed to Linux kernel modules at load time.
+
+
+%package headers
+Summary: Header files for the Linux kernel for use by glibc
+Group: Development/System
+Obsoletes: glibc-kernheaders < 3.0-46
+Provides: glibc-kernheaders = 3.0-46
+%if "0%{?variant}"
+Obsoletes: kernel-headers < %{rpmversion}-%{pkg_release}
+Provides: kernel-headers = %{rpmversion}-%{pkg_release}
+%endif
+%description headers
+Kernel-headers includes the C header files that specify the interface
+between the Linux kernel and userspace libraries and programs. The
+header files define structures and constants that are needed for
+building most standard programs and are also needed for rebuilding the
+glibc package.
+
+%package debuginfo-common-%{_target_cpu}
+Summary: Kernel source files used by %{name}-debuginfo packages
+Group: Development/Debug
+Provides: installonlypkg(kernel)
+%description debuginfo-common-%{_target_cpu}
+This package is required by %{name}-debuginfo subpackages.
+It provides the kernel source files common to all builds.
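The %{?1:+%{1}} constructs in kernel_reqprovconf above append a +variant suffix only when the macro is invoked with an argument; this is how a variant build ends up with kernel-uname-r = %{KVERREL}+debug style provides. The behaviour can be observed outside a spec (flav is an illustrative macro name):

    rpm --define 'flav debug' --eval '%{?flav:+%{flav}}'   # prints: +debug
    rpm --eval '%{?flav:+%{flav}}'                         # flav undefined: prints nothing
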
+ +%if %{with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +Group: Development/System +Requires: bzip2 +License: GPLv2 +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n perf-debuginfo +Summary: Debug information for package perf +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n perf-debuginfo +This package provides debug information for the perf package. + +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|.*%%{_libdir}/libperf-jvmti.so(\.debug)?|XXX' -o perf-debuginfo.list} + +%package -n python3-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python3-perf +The python3-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%package -n python3-perf-debuginfo +Summary: Debug information for package perf python bindings +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n python3-perf-debuginfo +This package provides debug information for the perf python bindings. + +# the python_sitearch macro should already be defined from above +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{python3_sitearch}/perf.*so(\.debug)?|XXX' -o python3-perf-debuginfo.list} + +# with_perf +%endif + +%if %{with_tools} +%package -n %{name}-tools +Summary: Assortment of tools for the Linux kernel +Group: Development/System +License: GPLv2 +%ifarch %{cpupowerarchs} +Provides: cpupowerutils = 1:009-0.6.p1 +Obsoletes: cpupowerutils < 1:009-0.6.p1 +Provides: cpufreq-utils = 1:009-0.6.p1 +Provides: cpufrequtils = 1:009-0.6.p1 +Obsoletes: cpufreq-utils < 1:009-0.6.p1 +Obsoletes: cpufrequtils < 1:009-0.6.p1 +Obsoletes: cpuspeed < 1:1.5-16 +Requires: %{name}-tools-libs = %{version}-%{release} +%endif +%define __requires_exclude ^%{_bindir}/python +%description -n %{name}-tools +This package contains the tools/ directory from the kernel source +and the supporting documentation. + +%package -n %{name}-tools-libs +Summary: Libraries for the %{name}-tools +Group: Development/System +License: GPLv2 +%description -n %{name}-tools-libs +This package contains the libraries built from the tools/ directory +from the kernel source. + +%package -n %{name}-tools-libs-devel +Summary: Assortment of tools for the Linux kernel +Group: Development/System +License: GPLv2 +Requires: %{name}-tools = %{version}-%{release} +%ifarch %{cpupowerarchs} +Provides: cpupowerutils-devel = 1:009-0.6.p1 +Obsoletes: cpupowerutils-devel < 1:009-0.6.p1 +%endif +Requires: %{name}-tools-libs = %{version}-%{release} +Provides: %{name}-tools-devel +%description -n %{name}-tools-libs-devel +This package contains the development files for the tools/ directory from +the kernel source. 
+
+%package -n %{name}-tools-debuginfo
+Summary: Debug information for package %{name}-tools
+Group: Development/Debug
+Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}
+AutoReqProv: no
+%description -n %{name}-tools-debuginfo
+This package provides debug information for package %{name}-tools.
+
+# Note that this pattern only works right to match the .build-id
+# symlinks because of the trailing nonmatching alternation and
+# the leading .*, because of find-debuginfo.sh's buggy handling
+# of matching the pattern against the symlinks file.
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/centrino-decode(\.debug)?|.*%%{_bindir}/powernow-k8-decode(\.debug)?|.*%%{_bindir}/cpupower(\.debug)?|.*%%{_libdir}/libcpupower.*|.*%%{_bindir}/turbostat(\.debug)?|.*%%{_bindir}/x86_energy_perf_policy(\.debug)?|.*%%{_bindir}/tmon(\.debug)?|.*%%{_bindir}/lsgpio(\.debug)?|.*%%{_bindir}/gpio-hammer(\.debug)?|.*%%{_bindir}/gpio-event-mon(\.debug)?|.*%%{_bindir}/iio_event_monitor(\.debug)?|.*%%{_bindir}/iio_generic_buffer(\.debug)?|.*%%{_bindir}/lsiio(\.debug)?|XXX' -o %{name}-tools-debuginfo.list}
+
+# with_tools
+%endif
+
+%if %{with_bpftool}
+
+%package -n bpftool
+Summary: Inspection and simple manipulation of eBPF programs and maps
+License: GPLv2
+%description -n bpftool
+This package contains the bpftool, which allows inspection and simple
+manipulation of eBPF programs and maps.
+
+%package -n bpftool-debuginfo
+Summary: Debug information for package bpftool
+Group: Development/Debug
+Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}
+AutoReqProv: no
+%description -n bpftool-debuginfo
+This package provides debug information for the bpftool package.
+
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_sbindir}/bpftool(\.debug)?|XXX' -o bpftool-debuginfo.list}
+
+# with_bpftool
+%endif
+
+%if %{with_gcov}
+%package gcov
+Summary: gcov graph and source files for coverage data collection.
+Group: Development/System
+%description gcov
+kernel-gcov includes the gcov graph and source files for gcov coverage collection.
+%endif
+
+#
+# This macro creates a kernel-<subpackage>-debuginfo package.
+# %%kernel_debuginfo_package <subpackage>
+#
+%define kernel_debuginfo_package() \
+%package %{?1:%{1}-}debuginfo\
+Summary: Debug information for package %{name}%{?1:-%{1}}\
+Group: Development/Debug\
+Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}\
+Provides: %{name}%{?1:-%{1}}-debuginfo-%{_target_cpu} = %{version}-%{release}\
+Provides: installonlypkg(kernel)\
+AutoReqProv: no\
+%description %{?1:%{1}-}debuginfo\
+This package provides debug information for package %{name}%{?1:-%{1}}.\
+This is required to use SystemTap with %{name}%{?1:-%{1}}-%{KVERREL}.\
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '/.*/%%{KVERREL_RE}%{?1:[+]%{1}}/.*|/.*%%{KVERREL_RE}%{?1:\+%{1}}(\.debug)?' -o debuginfo%{?1}.list}\
+%{nil}
+
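Once genspec.sh has rendered a concrete kernel.spec, the subpackage set that this and the surrounding helper macros expand to can be listed without building anything; a sketch (output abridged and illustrative):

    rpmspec -q --queryformat '%{name}\n' ${DIST_RPMBUILDDIR_OUTPUT}/SPECS/kernel.spec
    # kernel
    # kernel-devel
    # kernel-debug
    # kernel-debug-devel
    # ...
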
+#
+# This macro creates a kernel-<subpackage>-devel package.
+# %%kernel_devel_package <subpackage> <pretty-name>
+%define kernel_devel_package() \
+%package %{?1:%{1}-}devel\
+Summary: Development package for building kernel modules to match the %{?2:%{2} }kernel\
+Group: System Environment/Kernel\
+Provides: %{name}%{?1:-%{1}}-devel-%{_target_cpu} = %{version}-%{release}\
+Provides: %{name}-devel-%{_target_cpu} = %{version}-%{release}%{?1:+%{1}}\
+Provides: %{name}-devel-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\
+Provides: installonlypkg(kernel)\
+AutoReqProv: no\
+Requires(pre): findutils\
+Requires: findutils\
+Requires: perl-interpreter\
+%description %{?1:%{1}-}devel\
+This package provides kernel headers and makefiles sufficient to build modules\
+against the %{?2:%{2} }kernel package.\
+%{nil}
+
+#
+# This macro creates a %%{name}-<subpackage> and its -devel and -debuginfo too.
+# %%define variant_summary The Linux kernel compiled for <configuration>
+# %%kernel_variant_package [-n <pretty-name>] <subpackage>
+#
+%define kernel_variant_package(n:) \
+%package %{?1:%{1}}\
+Summary: %{variant_summary}\
+Group: System Environment/Kernel\
+Provides: installonlypkg(kernel)\
+Requires: grubby \
+%{expand:%%kernel_reqprovconf}\
+%{expand:%%kernel_devel_package %{?1:%{1}} %{!?-n:%{?1:%{1}}}%{?-n:%{-n*}}}\
+%{expand:%%kernel_debuginfo_package %{?1:%{1}}}\
+%{nil}
+
+# First the auxiliary packages of the main kernel package.
+%kernel_devel_package
+%kernel_debuginfo_package
+
+# Now, each variant package.
+
+%define variant_summary The Linux kernel compiled with extra debugging enabled
+%kernel_variant_package debug
+%description debug
+The kernel package contains the Linux kernel (vmlinuz), the core of any
+Linux operating system. The kernel handles the basic functions
+of the operating system: memory allocation, process allocation, device
+input and output, etc.
+
+This variant of the kernel has numerous debugging options enabled.
+It should only be installed when trying to gather additional information
+on kernel bugs, as some of these options impact performance noticeably.
+
+%prep
+# do a few sanity-checks for --with *only builds
+%if %{with_baseonly}
+%if !%{with_up}
+echo "Cannot build --with baseonly, up build is disabled"
+exit 1
+%endif
+%endif
+
+# more sanity checking; do it quietly
+if [ "%{patches}" != "%%{patches}" ] ; then
+  for patch in %{patches} ; do
+    if [ ! -f $patch ] ; then
+      echo "ERROR: Patch ${patch##/*/} listed in specfile but is missing"
+      exit 1
+    fi
+  done
+fi 2>/dev/null
+
+patch_command='patch -p1 -F1 -s'
+ApplyPatch()
+{
+  local patch=$1
+  shift
+  if [ ! -f $RPM_SOURCE_DIR/$patch ]; then
+    exit 1
+  fi
+  if ! grep -E "^Patch[0-9]+: $patch\$" %{_specdir}/${RPM_PACKAGE_NAME%%%%%{?variant}}.spec ; then
+    if [ "${patch:0:8}" != "patch-4." ] ; then
+      echo "ERROR: Patch $patch not listed as a source patch in specfile"
+      exit 1
+    fi
+  fi 2>/dev/null
+  case "$patch" in
+  *.bz2) bunzip2 < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;;
+  *.gz) gunzip < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;;
+  *.xz) unxz < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;;
+  *) sed -n '/^---$/,$p' "$RPM_SOURCE_DIR/$patch" | $patch_command ${1+"$@"} ;;
+  esac
+}
+
+# don't apply patch if it's empty
+ApplyOptionalPatch()
+{
+  local patch=$1
+  shift
+  if [ ! 
-f $RPM_SOURCE_DIR/$patch ]; then + exit 1 + fi + local C=$(wc -l $RPM_SOURCE_DIR/$patch | awk '{print $1}') + if [ "$C" -gt 9 ]; then + ApplyPatch $patch ${1+"$@"} + fi +} + +%setup -q -n %{name}-%{rpmversion}-%{pkg_release} -c +mv linux-%{rpmversion}-%{pkg_release} linux-%{KVERREL} + +cd linux-%{KVERREL} + +# Drop some necessary files from the source dir into the buildroot +cp $RPM_SOURCE_DIR/kernel-%{version}-*.config . + +# %%PATCH_APPLICATION%% + +# END OF PATCH APPLICATIONS + +# Any further pre-build tree manipulations happen here. + +chmod +x scripts/checkpatch.pl +mv COPYING COPYING-%{version} + +# This Prevents scripts/setlocalversion from mucking with our version numbers. +touch .scmversion + +# Do not use "ambiguous" python shebangs. RHEL 8 now has a new script +# (/usr/lib/rpm/redhat/brp-mangle-shebangs), which forces us to specify a +# "non-ambiguous" python shebang for scripts we ship in buildroot. This +# script throws an error like below: +# *** ERROR: ambiguous python shebang in /usr/bin/kvm_stat: #!/usr/bin/python. Change it to python3 (or python2) explicitly. +# We patch all sources below for which we got a report/error. +pathfix.py -i "%{__python3} %{py3_shbang_opts}" -p -n \ + tools/kvm/kvm_stat/kvm_stat \ + scripts/show_delta \ + scripts/diffconfig \ + scripts/bloat-o-meter \ + scripts/jobserver-exec \ + tools \ + Documentation \ + scripts/clang-tools + +%define make make HOSTCFLAGS="%{?build_hostcflags}" HOSTLDFLAGS="%{?build_hostldflags}" + +# only deal with configs if we are going to build for the arch +%ifnarch %nobuildarches + +rm -rf configs +mkdir configs + +# Remove configs not for the buildarch +for cfg in kernel-%{version}-*.config; do + if [ `echo %{all_arch_configs} | grep -c $cfg` -eq 0 ]; then + rm -f $cfg + fi +done + +# enable GCOV kernel config options if gcov is on +%if %{with_gcov} +for i in *.config +do + sed -i 's/# CONFIG_GCOV_KERNEL is not set/CONFIG_GCOV_KERNEL=y\nCONFIG_GCOV_PROFILE_ALL=y\n/' $i +done +%endif + +# now run oldconfig over all the config files +for i in *.config +do + mv $i .config + Arch=`sed -n 3p .config | cut -d' ' -f2 | cut -d'/' -f2` + make ARCH=$Arch listnewconfig | grep -E '^CONFIG_' >.newoptions || true + if [ -s .newoptions ]; then + cat .newoptions + #exit 1 + fi + rm -f .newoptions + make ARCH=$Arch olddefconfig + echo "# $Arch" > configs/$i + cat .config >> configs/$i +done +# end of kernel config +%endif + +# # End of Configs stuff + +# get rid of unwanted files resulting from patch fuzz +find . \( -name "*.orig" -o -name "*~" \) -exec rm -f {} \; >/dev/null + +# remove unnecessary SCM files +find . -name .gitignore -exec rm -f {} \; >/dev/null + +cd .. + +### +### build +### +%build + +%if %{with_sparse} +%define sparse_mflags C=1 +%endif + +cp_vmlinux() +{ + eu-strip --remove-comment -o "$2" "$1" +} + +BuildKernel() { + MakeTarget=$1 + KernelImage=$2 + Flavour=$3 + Flav=${Flavour:++${Flavour}} + InstallName=${5:-vmlinuz} + + DoModules=1 + + # Pick the right config file for the kernel we're building + Config=kernel-%{version}-%{_target_cpu}${Flavour:+-${Flavour}}.config + DevelDir=/usr/src/kernels/%{KVERREL}${Flav} + + # When the bootable image is just the ELF kernel, strip it. + # We already copy the unstripped file into the debuginfo package. + if [ "$KernelImage" = vmlinux ]; then + CopyKernel=cp_vmlinux + else + CopyKernel=cp + fi + + KernelVer=%{version}-%{release}.%{_target_cpu}${Flav} + echo BUILDING A KERNEL FOR ${Flavour} %{_target_cpu}... 
+ + # make sure EXTRAVERSION says what we want it to say + perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -%{release}.%{_target_cpu}${Flav}/" Makefile + + # and now to start the build process + + %{make} -s %{?_smp_mflags} mrproper + cp configs/$Config .config + + %if %{signmodules} + cp %{SOURCE11} certs/. + cp %{SOURCE12} certs/. + %endif + + Arch=`head -1 .config | cut -b 3-` + echo USING ARCH=$Arch + + KCFLAGS="%{?kcflags}" + + # add kpatch flags for base kernel + if [ "$Flavour" == "" ]; then + KCFLAGS="$KCFLAGS %{?kpatch_kcflags}" + fi + + %{make} -s ARCH=$Arch olddefconfig >/dev/null + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" $MakeTarget %{?sparse_mflags} %{?kernel_mflags} + if [ $DoModules -eq 1 ]; then + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" modules %{?sparse_mflags} || exit 1 + fi + + mkdir -p $RPM_BUILD_ROOT/%{image_install_path} + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer +%if %{with_debuginfo} + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/%{image_install_path} +%endif + +%ifarch aarch64 + %{make} -s ARCH=$Arch V=1 dtbs dtbs_install INSTALL_DTBS_PATH=$RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer + cp -r $RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/dtb + find arch/$Arch/boot/dts -name '*.dtb' -type f | xargs rm -f +%endif + + # Start installing the results + install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer + install -m 644 .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/config + install -m 644 System.map $RPM_BUILD_ROOT/boot/System.map-$KernelVer + install -m 644 System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/System.map + + # We estimate the size of the initramfs because rpm needs to take this size + # into consideration when performing disk space calculations. (See bz #530778) + dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-$KernelVer.img bs=1M count=20 + + if [ -f arch/$Arch/boot/zImage.stub ]; then + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/%{image_install_path}/zImage.stub-$KernelVer || : + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/lib/modules/$KernelVer/zImage.stub-$KernelVer || : + fi + + $CopyKernel $KernelImage \ + $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + chmod 755 $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + cp $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/$InstallName + + # hmac sign the kernel for FIPS + echo "Creating hmac file: $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac" + ls -l $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + sha512hmac $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer | sed -e "s,$RPM_BUILD_ROOT,," > $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac; + cp $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac $RPM_BUILD_ROOT/lib/modules/$KernelVer/.vmlinuz.hmac + + if [ $DoModules -eq 1 ]; then + # Override $(mod-fw) because we don't want it to install any firmware + # we'll get it from the linux-firmware package and we don't want conflicts + %{make} -s %{?_smp_mflags} ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=$KernelVer mod-fw= + fi + +%if %{with_gcov} + # install gcov-needed files to $BUILDROOT/$BUILD/...: + # gcov_info->filename is absolute path + # gcno references to sources can use absolute paths (e.g. 
in out-of-tree builds) + # sysfs symlink targets (set up at compile time) use absolute paths to BUILD dir + find . \( -name '*.gcno' -o -name '*.[chS]' \) -exec install -D '{}' "$RPM_BUILD_ROOT/$(pwd)/{}" \; +%endif + + if [ $DoVDSO -ne 0 ]; then + %{make} -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT vdso_install KERNELRELEASE=$KernelVer + if [ ! -s ldconfig-kernel.conf ]; then + echo > ldconfig-kernel.conf "\ + # Placeholder file, no vDSO hwcap entries used in this kernel." + fi + %{__install} -D -m 444 ldconfig-kernel.conf \ + $RPM_BUILD_ROOT/etc/ld.so.conf.d/%{name}-$KernelVer.conf + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/vdso/.build-id + fi + + # And save the headers/makefiles etc for building modules against + # + # This all looks scary, but the end result is supposed to be: + # * all arch relevant include/ files + # * all Makefile/Kconfig files + # * all script/ files + + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/source + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + (cd $RPM_BUILD_ROOT/lib/modules/$KernelVer ; ln -s build source) + # dirs for additional modules per module-init-tools, kbuild/modules.txt + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/updates + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/weak-updates + # first copy everything + cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp Module.symvers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + if [ -s Module.markers ]; then + cp Module.markers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + fi + + # create the kABI metadata for use in packaging + # NOTENOTE: the name symvers is used by the rpm backend + # NOTENOTE: to discover and run the /usr/lib/rpm/fileattrs/kabi.attr + # NOTENOTE: script which dynamically adds exported kernel symbol + # NOTENOTE: checksums to the rpm metadata provides list. 
+ # NOTENOTE: if you change the symvers name, update the backend too + echo "**** GENERATING kernel ABI metadata ****" + gzip -c9 < Module.symvers > $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz + cp $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz $RPM_BUILD_ROOT/lib/modules/$KernelVer/symvers.gz + + # then drop all but the needed Makefiles/Kconfig files + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Documentation + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include + cp .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp -a scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/tracing + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/spdxcheck.py + if [ -f tools/objtool/objtool ]; then + cp -a tools/objtool/objtool $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/tools/objtool/ || : + fi + if [ -d arch/$Arch/scripts ]; then + cp -a arch/$Arch/scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch} || : + fi + if [ -f arch/$Arch/*lds ]; then + cp -a arch/$Arch/*lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch}/ || : + fi + if [ -f arch/%{asmarch}/kernel/module.lds ]; then + cp -a --parents arch/%{asmarch}/kernel/module.lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*.o + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*/*.o + if [ -d arch/%{asmarch}/include ]; then + cp -a --parents arch/%{asmarch}/include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi +%ifarch aarch64 + # arch/arm64/include/asm/xen references arch/arm + cp -a --parents arch/arm/include/asm/xen $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + # arch/arm64/include/asm/opcodes.h references arch/arm + cp -a --parents arch/arm/include/asm/opcodes.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + cp -a include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include +%ifarch x86_64 + # files for 'make prepare' to succeed with kernel-devel + cp -a --parents arch/x86/entry/syscalls/syscall_32.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/entry/syscalls/syscall_64.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_32.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_64.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_common.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents tools/include/tools/le_byteshift.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/purgatory.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/stack.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/setup-x86_64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/entry64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/ctype.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + + cp -a --parents scripts/syscalltbl.sh 
$RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents scripts/syscallhdr.sh $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + # Make sure the Makefile, version.h, and auto.conf have a matching + # timestamp so that external modules can be built + touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Makefile \ + $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/generated/uapi/linux/version.h \ + $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/config/auto.conf + +%if %{with_debuginfo} + eu-readelf -n vmlinux | grep "Build ID" | awk '{print $NF}' > vmlinux.id + cp vmlinux.id $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/vmlinux.id + + # + # save the vmlinux file for kernel debugging into the kernel-debuginfo rpm + # + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer + cp vmlinux $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer +%endif + + find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name "*.ko" -type f >modnames + + # mark modules executable so that strip-to-file can strip them + xargs --no-run-if-empty chmod u+x < modnames + + # Generate a list of modules for block and networking. + + grep -F /drivers/ modnames | xargs --no-run-if-empty nm -upA | + sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + + collect_modules_list() + { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + if [ ! -z "$3" ]; then + sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + fi + } + + collect_modules_list networking \ + 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt(l_|2x00)(pci|usb)_probe|register_netdevice' + collect_modules_list block \ + 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko' + collect_modules_list drm \ + 'drm_open|drm_init' + collect_modules_list modesetting \ + 'drm_crtc_init' + + # detect missing or incorrect license tags + ( find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name '*.ko' | xargs /sbin/modinfo -l | \ + grep -E -v 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' ) && exit 1 + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Copy the System.map file for depmod to use, and create a backup of the + # full module tree so we can restore it after we're done filtering + cp System.map $RPM_BUILD_ROOT/. + pushd $RPM_BUILD_ROOT + + if [ $DoModules -eq 1 ]; then + + # Run depmod on the resulting module tree and make sure it isn't broken + depmod -b . 
-aeF ./System.map $KernelVer &> depmod.out + if [ -s depmod.out ]; then + echo "Depmod failure" + cat depmod.out + exit 1 + else + rm depmod.out + fi + else + # Ensure important files/directories exist to let the packaging succeed + mkdir -p lib/modules/$KernelVer/kernel + # Add files usually created by make modules, needed to prevent errors + # thrown by depmod during package installation + touch lib/modules/$KernelVer/modules.order + touch lib/modules/$KernelVer/modules.builtin + fi + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Cleanup + rm System.map + popd + +%if %{signmodules} + if [ $DoModules -eq 1 ]; then + # Save the signing keys so we can sign the modules in __modsign_install_post + cp certs/signing_key.pem certs/signing_key.pem.sign${Flav} + cp certs/signing_key.x509 certs/signing_key.x509.sign${Flav} + fi +%endif + + # Move the devel headers out of the root file system + mkdir -p $RPM_BUILD_ROOT/usr/src/kernels + mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir + + # This is going to create a broken link during the build, but we don't use + # it after this point. We need the link to actually point to something + # when kernel-devel is installed, and a relative link doesn't work across + # the F17 UsrMove feature. + ln -sf $DevelDir $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + + # prune junk from kernel-devel + find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd" -exec rm -f {} \; + + # build a BLS config for this kernel + %{SOURCE43} "$KernelVer" "$RPM_BUILD_ROOT" "%{?variant}" +} + +### +# DO it... +### + +# prepare directories +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/boot +mkdir -p $RPM_BUILD_ROOT%{_libexecdir} + +cd linux-%{KVERREL} + + +%if %{with_debug} +BuildKernel %make_target %kernel_image debug +%endif + +%if %{with_up} +BuildKernel %make_target %kernel_image +%endif + +%global perf_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" -C tools/perf V=1 NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 NO_BIONIC=1 LIBBPF_DYNAMIC=1 LIBTRACEEVENT_DYNAMIC=1 %{?perf_build_extra_opts} prefix=%{_prefix} PYTHON=%{__python3} +%if %{with_perf} +# perf +# make sure check-headers.sh is executable +chmod +x tools/perf/check-headers.sh +%{perf_make} DESTDIR=$RPM_BUILD_ROOT all +%endif + +%global tools_make \ + %{make} V=1 CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" + +%if %{with_tools} +%ifarch %{cpupowerarchs} +# cpupower +# make sure version-gen.sh is executable. 
+chmod +x tools/power/cpupower/utils/version-gen.sh +%{tools_make} -C tools/power/cpupower CPUFREQ_BENCH=false DEBUG=false +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + %{tools_make} centrino-decode powernow-k8-decode + popd +%endif +%ifarch x86_64 + pushd tools/power/x86/x86_energy_perf_policy/ + %{tools_make} + popd + pushd tools/power/x86/turbostat + %{tools_make} + popd + pushd tools/power/x86/intel-speed-select + %{make} + popd +%endif +%endif +pushd tools/thermal/tmon/ +%{make} V=1 +popd +pushd tools/iio/ +%{make} V=1 +popd +pushd tools/gpio/ +%{make} V=1 +popd +# build MM tools +pushd tools/mm/ +%{make} V=1 slabinfo page_owner_sort page-types +popd +%endif + +%global bpftool_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" EXTRA_LDFLAGS="%{__global_ldflags}" DESTDIR=$RPM_BUILD_ROOT V=1 +%if %{with_bpftool} +pushd tools/bpf/bpftool +%{bpftool_make} +popd +%endif + +%if %{with_doc} +# Make the HTML pages. +make htmldocs || %{doc_build_fail} + +# sometimes non-world-readable files sneak into the kernel source tree +chmod -R a=rX Documentation +find Documentation -type d | xargs chmod u+w +%endif + +# In the modsign case, we do 3 things. 1) We check the "flavour" and hard +# code the value in the following invocations. This is somewhat sub-optimal +# but we're doing this inside of an RPM macro and it isn't as easy as it +# could be because of that. 2) We restore the .tmp_versions/ directory from +# the one we saved off in BuildKernel above. This is to make sure we're +# signing the modules we actually built/installed in that flavour. 3) We +# grab the arch and invoke mod-sign.sh command to actually sign the modules. +# +# We have to do all of those things _after_ find-debuginfo runs, otherwise +# that will strip the signature off of the modules. + +%define __modsign_install_post \ + if [ "%{signmodules}" -eq "1" ]; then \ + if [ "%{with_debug}" -ne "0" ]; then \ + %{modsign_cmd} certs/signing_key.pem.sign+debug certs/signing_key.x509.sign+debug $RPM_BUILD_ROOT/lib/modules/%{KVERREL}+debug/ \ + fi \ + if [ "%{with_up}" -ne "0" ]; then \ + %{modsign_cmd} certs/signing_key.pem.sign certs/signing_key.x509.sign $RPM_BUILD_ROOT/lib/modules/%{KVERREL}/ \ + fi \ + fi \ +%{nil} + +### +### Special hacks for debuginfo subpackages. +### + +# This macro is used by %%install, so we must redefine it before that. +%define debug_package %{nil} + +%if %{with_debuginfo} + +%ifnarch noarch +%global __debug_package 1 +%files -f debugfiles.list debuginfo-common-%{_target_cpu} +%defattr(-,root,root) +%endif + +%endif + +# +# Disgusting hack alert! We need to ensure we sign modules *after* all +# invocations of strip occur, which is in __debug_install_post if +# find-debuginfo.sh runs, and __os_install_post if not. +# +%define __spec_install_post \ + %{?__debug_package:%{__debug_install_post}}\ + %{__arch_install_post}\ + %{__os_install_post}\ + %{__modsign_install_post} + +### +### install +### + +%install + +cd linux-%{KVERREL} + +%if %{with_doc} +docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{rpmversion} + +# copy the source over +mkdir -p $docdir +tar -h -f - --exclude=man --exclude='.*' -c Documentation | tar xf - -C $docdir + +# with_doc +%endif + +# We have to do the headers install before the tools install because the +# kernel headers_install will remove any header files in /usr/include that +# it doesn't install itself. 
+ +%if %{with_headers} +# Install kernel headers +%{make} ARCH=%{hdrarch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install + +find $RPM_BUILD_ROOT/usr/include \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete + +%endif + +%if %{with_perf} +# perf tool binary and supporting scripts/binaries +%{perf_make} DESTDIR=$RPM_BUILD_ROOT lib=%{_lib} install-bin +# remove the 'trace' symlink. +rm -f %{buildroot}%{_bindir}/trace + +# For both of the below, yes, this should be using a macro but right now +# it's hard coded and we don't actually want it anyway right now. +# Whoever wants examples can fix it up! + +# remove examples +rm -rf %{buildroot}/usr/lib/perf/examples +rm -rf %{buildroot}/usr/lib/perf/include + +# python-perf extension +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext + +# perf man pages (note: implicit rpm magic compresses them later) +mkdir -p %{buildroot}/%{_mandir}/man1 +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man + +# remove any tracevent files, eg. its plugins still gets built and installed, +# even if we build against system's libtracevent during perf build (by setting +# LIBTRACEEVENT_DYNAMIC=1 above in perf_make macro). Those files should already +# ship with libtraceevent package. +rm -rf %{buildroot}%{_libdir}/traceevent +%endif + +%if %{with_tools} +%ifarch %{cpupowerarchs} +%{make} -C tools/power/cpupower DESTDIR=$RPM_BUILD_ROOT libdir=%{_libdir} mandir=%{_mandir} CPUFREQ_BENCH=false install +rm -f %{buildroot}%{_libdir}/*.{a,la} +%find_lang cpupower +mv cpupower.lang ../ +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + install -m755 centrino-decode %{buildroot}%{_bindir}/centrino-decode + install -m755 powernow-k8-decode %{buildroot}%{_bindir}/powernow-k8-decode + popd +%endif +chmod 0755 %{buildroot}%{_libdir}/libcpupower.so* +mkdir -p %{buildroot}%{_unitdir} %{buildroot}%{_sysconfdir}/sysconfig +install -m644 %{SOURCE2000} %{buildroot}%{_unitdir}/cpupower.service +install -m644 %{SOURCE2001} %{buildroot}%{_sysconfdir}/sysconfig/cpupower +%endif +%ifarch x86_64 + mkdir -p %{buildroot}%{_mandir}/man8 + pushd tools/power/x86/x86_energy_perf_policy + %{tools_make} DESTDIR=%{buildroot} install + popd + pushd tools/power/x86/turbostat + %{tools_make} DESTDIR=%{buildroot} install + popd + pushd tools/power/x86/intel-speed-select + %{make} DESTDIR=%{buildroot} install + popd +%endif +pushd tools/thermal/tmon +%{make} V=1 INSTALL_ROOT=%{buildroot} install +popd +pushd tools/iio +%{make} V=1 DESTDIR=%{buildroot} install +popd +pushd tools/gpio +%{make} V=1 DESTDIR=%{buildroot} install +popd +pushd tools/kvm/kvm_stat +make INSTALL_ROOT=%{buildroot} install-tools +make INSTALL_ROOT=%{buildroot} install-man +popd +# install MM tools +pushd tools/mm/ +install -m755 slabinfo %{buildroot}%{_bindir}/slabinfo +install -m755 page_owner_sort %{buildroot}%{_bindir}/page_owner_sort +install -m755 page-types %{buildroot}%{_bindir}/page-types +popd +%endif + +%if %{with_bpftool} +pushd tools/bpf/bpftool +%{bpftool_make} prefix=%{_prefix} bash_compdir=%{_sysconfdir}/bash_completion.d/ mandir=%{_mandir} install doc-install +popd +%endif + +# We have to do the headers checksum calculation after the tools install because +# these might end up installing their own set of headers on top of kernel's +%if %{with_headers} +# compute a content hash to export as Provides: kernel-headers-checksum +HEADERS_CHKSUM=$(export LC_ALL=C; find $RPM_BUILD_ROOT/usr/include -type f -name "*.h" \ + ! 
-path $RPM_BUILD_ROOT/usr/include/linux/version.h | \ + sort | xargs cat | sha1sum - | cut -f 1 -d ' '); +# export the checksum via usr/include/linux/version.h, so the dynamic +# find-provides can grab the hash to update it accordingly +echo "#define KERNEL_HEADERS_CHECKSUM \"$HEADERS_CHKSUM\"" >> $RPM_BUILD_ROOT/usr/include/linux/version.h +%endif + +### +### clean +### + +%clean +rm -rf $RPM_BUILD_ROOT + +### +### scripts +### + +%if %{with_tools} +%post -n %{name}-tools-libs +/sbin/ldconfig + +%postun -n %{name}-tools-libs +/sbin/ldconfig +%endif + +# +# This macro defines a %%post script for a kernel*-devel package. +# %%kernel_devel_post [] +# +%define kernel_devel_post() \ +%{expand:%%post %{?1:%{1}-}devel}\ +if [ -f /etc/sysconfig/kernel ]\ +then\ + . /etc/sysconfig/kernel || exit $?\ +fi\ +if [ "$HARDLINK" != "no" -a -x /usr/sbin/hardlink ]\ +then\ + (cd /usr/src/kernels/%{KVERREL}%{?1:+%{1}} &&\ + /usr/bin/find . -type f | while read f; do\ + hardlink -c /usr/src/kernels/*%{?dist}.*/$f $f\ + done)\ +fi\ +%{nil} + +# This macro defines a %%posttrans script for a kernel package. +# %%kernel_variant_posttrans [] +# More text can follow to go at the end of this variant's %%post. +# +%define kernel_variant_posttrans() \ +%{expand:%%posttrans %{?1:%{1}}}\ +if [ -x %{_sbindir}/weak-modules ]\ +then\ + %{_sbindir}/weak-modules --add-kernel %{KVERREL}%{?1:+%{1}} || exit $?\ +fi\ +/bin/kernel-install add %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ +%{nil} + +# +# This macro defines a %%post script for a kernel package and its devel package. +# %%kernel_variant_post [-v ] [-r ] +# More text can follow to go at the end of this variant's %%post. +# +%define kernel_variant_post(v:r:) \ +%{expand:%%kernel_devel_post %{?-v*}}\ +%{expand:%%kernel_variant_posttrans %{?-v*}}\ +%{expand:%%post %{?-v*}}\ +%{-r:\ +if [ `uname -i` == "x86_64" -o `uname -i` == "i386" ] &&\ + [ -f /etc/sysconfig/kernel ]; then\ + /bin/sed -r -i -e 's/^DEFAULTKERNEL=%{-r*}$/DEFAULTKERNEL=kernel%{?-v:-%{-v*}}/' /etc/sysconfig/kernel || exit $?\ +fi}\ +%{nil} + +# +# This macro defines a %%preun script for a kernel package. +# %%kernel_variant_preun +# +%define kernel_variant_preun() \ +%{expand:%%preun %{?1}}\ +/bin/kernel-install remove %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ +if [ -x %{_sbindir}/weak-modules ]\ +then\ + %{_sbindir}/weak-modules --remove-kernel %{KVERREL}%{?1:+%{1}} || exit $?\ +fi\ +%{nil} + +%kernel_variant_preun +%kernel_variant_post -r kernel-smp + +%kernel_variant_preun debug +%kernel_variant_post -v debug + +if [ -x /sbin/ldconfig ] +then + /sbin/ldconfig -X || exit $? 
+fi + +### +### file lists +### + +%if %{with_headers} +%files headers +%defattr(-,root,root) +/usr/include/* +%endif + +# only some architecture builds need kernel-doc +%if %{with_doc} +%files doc +%defattr(-,root,root) +%{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation/* +%dir %{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation +%dir %{_datadir}/doc/kernel-doc-%{rpmversion} +%endif + +%if %{with_perf} +%files -n perf +%{_bindir}/perf +%{_libdir}/libperf-jvmti.so +%dir %{_libexecdir}/perf-core +%{_libexecdir}/perf-core/* +%{_datadir}/perf-core/* +%{_mandir}/man[1-8]/perf* +%{_sysconfdir}/bash_completion.d/perf +%doc linux-%{KVERREL}/tools/perf/Documentation/examples.txt +%{_docdir}/perf-tip/tips.txt + +%files -n python3-perf +%{python3_sitearch}/* + +%if %{with_debuginfo} +%files -f perf-debuginfo.list -n perf-debuginfo + +%files -f python3-perf-debuginfo.list -n python3-perf-debuginfo +%endif +# with_perf +%endif + +%if %{with_tools} +%ifarch %{cpupowerarchs} +%defattr(-,root,root) +%files -n %{name}-tools -f cpupower.lang +%{_bindir}/cpupower +%{_datadir}/bash-completion/completions/cpupower +%ifarch x86_64 +%{_bindir}/centrino-decode +%{_bindir}/powernow-k8-decode +%endif +%{_unitdir}/cpupower.service +%{_mandir}/man[1-8]/cpupower* +%config(noreplace) %{_sysconfdir}/sysconfig/cpupower +%ifarch x86_64 +%{_bindir}/x86_energy_perf_policy +%{_mandir}/man8/x86_energy_perf_policy* +%{_bindir}/turbostat +%{_mandir}/man8/turbostat* +%{_bindir}/intel-speed-select +%endif +# !cpupowerarchs +%else +%files -n %{name}-tools +%defattr(-,root,root) +# cpupowerarchs +%endif +%{_bindir}/tmon +%{_bindir}/iio_event_monitor +%{_bindir}/iio_generic_buffer +%{_bindir}/lsiio +%{_bindir}/lsgpio +%{_bindir}/gpio-hammer +%{_bindir}/gpio-event-mon +%{_bindir}/gpio-watch +%{_mandir}/man1/kvm_stat* +%{_bindir}/kvm_stat +%{_bindir}/page_owner_sort +%{_bindir}/slabinfo +%{_bindir}/page-types + +%if %{with_debuginfo} +%files -f %{name}-tools-debuginfo.list -n %{name}-tools-debuginfo +%defattr(-,root,root) +%endif + +%ifarch %{cpupowerarchs} +%files -n %{name}-tools-libs +%{_libdir}/libcpupower.so.1 +%{_libdir}/libcpupower.so.0.0.1 + +%files -n %{name}-tools-libs-devel +%{_libdir}/libcpupower.so +%{_includedir}/cpufreq.h +%endif +# with_tools +%endif + +%if %{with_bpftool} +%files -n bpftool +%{_sbindir}/bpftool +%{_sysconfdir}/bash_completion.d/bpftool +%{_mandir}/man8/bpftool-cgroup.8.* +%{_mandir}/man8/bpftool-map.8.* +%{_mandir}/man8/bpftool-prog.8.* +%{_mandir}/man8/bpftool-perf.8.* +%{_mandir}/man8/bpftool.8.* +%{_mandir}/man8/bpftool-btf.8.* +%{_mandir}/man8/bpftool-feature.8.* +%{_mandir}/man8/bpftool-gen.8.* +%{_mandir}/man8/bpftool-iter.8.* +%{_mandir}/man8/bpftool-link.8.* +%{_mandir}/man8/bpftool-net.8.* +%{_mandir}/man8/bpftool-struct_ops.8.* + +%if %{with_debuginfo} +%files -f bpftool-debuginfo.list -n bpftool-debuginfo +%defattr(-,root,root) +%endif +%endif + +# empty meta-package +%ifnarch %nobuildarches noarch +%files +%defattr(-,root,root) +%endif + +%if %{with_gcov} +%ifarch x86_64 aarch64 +%files gcov +%defattr(-,root,root) +%{_builddir} +%endif +%endif + +# This is %%{image_install_path} on an arch where that includes ELF files, +# or empty otherwise. +%define elf_image_install_path %{?kernel_image_elf:%{image_install_path}} + +# +# This macro defines the %%files sections for a kernel package +# and its devel and debuginfo packages. 
# %%kernel_variant_files [-k vmlinux]
#
%define kernel_variant_files(k:) \
%if %{1}\
%{expand:%%files %{?2}}\
%defattr(-,root,root)\
%{!?_licensedir:%global license %%doc}\
%license linux-%{KVERREL}/COPYING-%{version}\
/lib/modules/%{KVERREL}%{?2:+%{2}}/%{?-k:%{-k*}}%{!?-k:vmlinuz}\
%ghost /%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:+%{2}}\
/lib/modules/%{KVERREL}%{?2:+%{2}}/.vmlinuz.hmac \
%ghost /%{image_install_path}/.vmlinuz-%{KVERREL}%{?2:+%{2}}.hmac \
%ifarch aarch64\
/lib/modules/%{KVERREL}%{?2:+%{2}}/dtb \
%ghost /%{image_install_path}/dtb-%{KVERREL}%{?2:+%{2}} \
%endif\
%attr(0600, root, root) /lib/modules/%{KVERREL}%{?2:+%{2}}/System.map\
%attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\
/lib/modules/%{KVERREL}%{?2:+%{2}}/symvers.gz\
/lib/modules/%{KVERREL}%{?2:+%{2}}/config\
%attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\
%attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\
%attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\
%dir /lib/modules\
%dir /lib/modules/%{KVERREL}%{?2:+%{2}}\
/lib/modules/%{KVERREL}%{?2:+%{2}}/kernel\
/lib/modules/%{KVERREL}%{?2:+%{2}}/build\
/lib/modules/%{KVERREL}%{?2:+%{2}}/source\
/lib/modules/%{KVERREL}%{?2:+%{2}}/updates\
/lib/modules/%{KVERREL}%{?2:+%{2}}/weak-updates\
/lib/modules/%{KVERREL}%{?2:+%{2}}/bls.conf\
/lib/modules/%{KVERREL}%{?2:+%{2}}/modules.*\
%{expand:%%files %{?2:%{2}-}devel}\
%defattr(-,root,root)\
%defverify(not mtime)\
/usr/src/kernels/%{KVERREL}%{?2:+%{2}}\
%if %{with_debuginfo}\
%ifnarch noarch\
%{expand:%%files -f debuginfo%{?2}.list %{?2:%{2}-}debuginfo}\
%defattr(-,root,root)\
%endif\
%endif\
%endif\
%{nil}

%kernel_variant_files %{with_up}
%kernel_variant_files %{with_debug} debug

# plz don't put in a version string unless you're going to tag
# and build.
#
#
%changelog

--
Gitee

From 8cd51efdcbfea0b59d930d6e7ebb9ba6e9f7da1a Mon Sep 17 00:00:00 2001
From: Qiao Ma
Date: Fri, 29 Dec 2023 14:33:05 +0800
Subject: [PATCH 003/953] anolis: spec: avoid overriding the system
 %rpmversion definition

ANBZ: #7810

yum-builddep reports the following error on an Anolis 23 baseos:
> # yum-builddep -y kernel.spec
> Last metadata expiration check: 0:21:45 ago on Thu Dec 28 09:02:26 2023.
> RPM: error: kernel.spec: line 11: Macro %rpmversion is a built-in (%define)
> Failed to open: 'kernel.spec', not a valid spec file: can't parse specfile
>
> Error: Some packages could not be found.

The root cause is that newer versions of rpmbuild define %rpmversion as
a built-in macro, which conflicts with the definition in kernel.spec.
So rename the spec variable %rpmversion to %kernelversion.
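On an affected host the collision is easy to confirm by expanding the
macro directly (illustrative session; the exact version printed depends
on the installed rpm):
> # rpm --eval '%rpmversion'
> 4.18.0

Because built-in macros are resolved by rpm itself, the spec cannot
redefine %rpmversion; renaming the spec-local macro is the only clean
fix.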
Fixes: 94293ca20de2("anolis: spec: add basic framework to generate rpm tree") Signed-off-by: Qiao Ma --- anolis/rpm/kernel.spec.template | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 0c270e7981fa..ff6c6285960b 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -14,7 +14,7 @@ # define buildid .local %global dist %%DIST%% -%define rpmversion %%DIST_KERNELVERSION%% +%define kernelversion %%DIST_KERNELVERSION%% %define pkgrelease %%DIST_PKGRELEASEVERION%% # allow pkg_release to have configurable %%{?dist} tag @@ -58,9 +58,6 @@ # should we do C=1 builds with sparse %define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} -# The kernel tarball/base version -%define kversion 5.10 - %define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0} # turn off debug kernel for gcov builds @@ -171,7 +168,7 @@ Name: kernel%{?variant} Group: System Environment/Kernel License: GPLv2 and Redistributable, no modification permitted URL: http://www.kernel.org/ -Version: %{rpmversion} +Version: %{kernelversion} Release: %{pkg_release} Summary: The Linux kernel, based on version %{version}, heavily modified with backports # DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. @@ -246,7 +243,7 @@ BuildRequires: xmlto BuildRequires: asciidoc %endif -Source0: linux-%{rpmversion}-%{pkg_release}.tar.xz +Source0: linux-%{kernelversion}-%{pkg_release}.tar.xz %define modsign_cmd %{SOURCE18} @@ -290,8 +287,8 @@ enterprise customers, etc. # macros defined above. # %define kernel_reqprovconf \ -Provides: %{name} = %{rpmversion}-%{pkg_release}\ -Provides: %{name}-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:+%{1}}\ +Provides: %{name} = %{kernelversion}-%{pkg_release}\ +Provides: %{name}-%{_target_cpu} = %{kernelversion}-%{pkg_release}%{?1:+%{1}}\ Provides: kernel-drm-nouveau = 16\ Provides: %{name}-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\ Requires(pre): %{kernel_prereq}\ @@ -329,8 +326,8 @@ Group: Development/System Obsoletes: glibc-kernheaders < 3.0-46 Provides: glibc-kernheaders = 3.0-46 %if "0%{?variant}" -Obsoletes: kernel-headers < %{rpmversion}-%{pkg_release} -Provides: kernel-headers = %{rpmversion}-%{pkg_release} +Obsoletes: kernel-headers < %{kernelversion}-%{pkg_release} +Provides: kernel-headers = %{kernelversion}-%{pkg_release} %endif %description headers Kernel-headers includes the C header files that specify the interface @@ -611,8 +608,8 @@ ApplyOptionalPatch() fi } -%setup -q -n %{name}-%{rpmversion}-%{pkg_release} -c -mv linux-%{rpmversion}-%{pkg_release} linux-%{KVERREL} +%setup -q -n %{name}-%{kernelversion}-%{pkg_release} -c +mv linux-%{kernelversion}-%{pkg_release} linux-%{KVERREL} cd linux-%{KVERREL} @@ -1172,7 +1169,7 @@ find Documentation -type d | xargs chmod u+w cd linux-%{KVERREL} %if %{with_doc} -docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{rpmversion} +docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{kernelversion} # copy the source over mkdir -p $docdir @@ -1396,9 +1393,9 @@ fi %if %{with_doc} %files doc %defattr(-,root,root) -%{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation/* -%dir %{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation -%dir %{_datadir}/doc/kernel-doc-%{rpmversion} +%{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation/* +%dir %{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation +%dir %{_datadir}/doc/kernel-doc-%{kernelversion} %endif %if %{with_perf} -- Gitee 
From 9a3801c10beec95da19e074e2680fbcbe6b9c79e Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:01:10 +0800 Subject: [PATCH 004/953] anolis: x86/cpu/hygon: Fix __max_die_per_package for Hygon family 18h model 4h ANBZ: #5455 From model 4h, Hygon processors use CPUID leaf 0xB to derive the core ID, socket ID and APIC ID with the SMT and CORE level types. But still set __max_die_per_package to nodes_per_socket because of lacking the DIE level type. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/cpu/hygon.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 6e738759779e..f0482c9d49fd 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -80,12 +80,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) c->x86_max_cores /= smp_num_siblings; /* - * In case leaf B is available, use it to derive + * From model 0x4, leaf B is available, so use it to derive * topology information. */ err = detect_extended_topology(c); - if (!err) + if (!err) { c->x86_coreid_bits = get_count_order(c->x86_max_cores); + __max_die_per_package = nodes_per_socket; + } /* * Socket ID is ApicId[6] for the processors with model <= 0x3 -- Gitee From 53aa4a43aa6012d252f9f2933277d919e48de6e1 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:02:06 +0800 Subject: [PATCH 005/953] anolis: x86/microcode/hygon: Add microcode loading support for Hygon processors ANBZ: #5455 Add support for loading Hygon microcode, which is compatible with AMD one. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- Documentation/arch/x86/microcode.rst | 13 +++++- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/microcode/amd.c | 52 ++++++++++++++++++++---- arch/x86/kernel/cpu/microcode/core.c | 22 +++++++++- arch/x86/kernel/cpu/microcode/internal.h | 12 ++++++ 5 files changed, 90 insertions(+), 11 deletions(-) diff --git a/Documentation/arch/x86/microcode.rst b/Documentation/arch/x86/microcode.rst index b627c6f36bcf..69c04052861d 100644 --- a/Documentation/arch/x86/microcode.rst +++ b/Documentation/arch/x86/microcode.rst @@ -35,6 +35,8 @@ on Intel: kernel/x86/microcode/GenuineIntel.bin on AMD : kernel/x86/microcode/AuthenticAMD.bin +on Hygon: + kernel/x86/microcode/HygonGenuine.bin During BSP (BootStrapping Processor) boot (pre-SMP), the kernel scans the microcode file in the initrd. If microcode matching the @@ -69,6 +71,10 @@ here for future reference only). cd $TMPDIR mkdir -p $DSTDIR + if [ -d /lib/firmware/hygon-ucode ]; then + cat /lib/firmware/hygon-ucode/microcode_hygon*.bin > $DSTDIR/HygonGenuine.bin + fi + if [ -d /lib/firmware/amd-ucode ]; then cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin fi @@ -217,7 +223,8 @@ currently supported. Here's an example:: - CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin" + CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 \ + amd-ucode/microcode_amd_fam15h.bin hygon-ucode/microcode_hygon_fam18h.bin" CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware" This basically means, you have the following tree structure locally:: @@ -227,6 +234,10 @@ This basically means, you have the following tree structure locally:: ... | |-- microcode_amd_fam15h.bin ... + |-- hygon-ucode + ... + | |-- microcode_hygon_fam18h.bin + ... |-- intel-ucode ... 
| |-- 06-3a-09 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4b81e884a614..c148b13ef777 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1311,7 +1311,7 @@ config X86_REBOOTFIXUPS config MICROCODE def_bool y - depends on CPU_SUP_AMD || CPU_SUP_INTEL + depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON config MICROCODE_LATE_LOADING bool "Late microcode loading (DANGEROUS)" diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index bbd1dc38ea03..296b1f327d24 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -493,15 +493,18 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct firmware fw; if (IS_ENABLED(CONFIG_X86_32)) return false; - if (family >= 0x15) + if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", family); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; @@ -521,11 +524,18 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data if (IS_ENABLED(CONFIG_X86_32)) { uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - path = (const char *)__pa_nodebug(ucode_path); + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = (const char *)__pa_nodebug( + "kernel/x86/microcode/HygonGenuine.bin"); + else + path = (const char *)__pa_nodebug(ucode_path); use_pa = true; } else { uci = ucode_cpu_info; - path = ucode_path; + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = "kernel/x86/microcode/HygonGenuine.bin"; + else + path = ucode_path; use_pa = false; } @@ -561,8 +571,14 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; + const char *path; - cp = find_microcode_in_initrd(ucode_path, false); + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = "kernel/x86/microcode/HygonGenuine.bin"; + else + path = ucode_path; + + cp = find_microcode_in_initrd(path, false); if (!(cp.data && cp.size)) return -EINVAL; @@ -904,13 +920,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz */ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; - if (c->x86 >= 0x15) - snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15) + snprintf(fw_name, sizeof(fw_name), + "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", c->x86); if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); @@ -960,6 +980,22 @@ struct microcode_ops * __init init_amd_microcode(void) return µcode_amd_ops; } +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops * __init init_hygon_microcode(void) +{ + struct cpuinfo_x86 *c = 
&boot_cpu_data; + + if (c->x86_vendor != X86_VENDOR_HYGON) + return NULL; + + if (ucode_new_rev) + pr_info_once("microcode updated early to new patch_level=0x%08x\n", + ucode_new_rev); + + return µcode_amd_ops; +} +#endif + void __exit exit_amd_microcode(void) { cleanup(); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a4ebd5e0ae82..98245c19a90d 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -41,7 +41,11 @@ #define DRIVER_VERSION "2.2" +#ifdef CONFIG_CPU_SUP_HYGON +static const struct microcode_ops *microcode_ops; +#else static struct microcode_ops *microcode_ops; +#endif static bool dis_ucode_ldr = true; bool initrd_gone; @@ -125,7 +129,8 @@ static bool __init check_loader_disabled_bsp(void) if (native_cpuid_ecx(1) & BIT(31)) return *res; - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { + if (x86_cpuid_vendor() == X86_VENDOR_AMD || + x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) return *res; } @@ -158,6 +163,10 @@ void __init load_ucode_bsp(void) intel = false; break; + case X86_VENDOR_HYGON: + intel = false; + break; + default: return; } @@ -198,6 +207,9 @@ void load_ucode_ap(void) if (x86_family(cpuid_1_eax) >= 0x10) load_ucode_amd_early(cpuid_1_eax); break; + case X86_VENDOR_HYGON: + load_ucode_amd_early(cpuid_1_eax); + break; default: break; } @@ -222,6 +234,9 @@ static int __init save_microcode_in_initrd(void) if (c->x86 >= 0x10) ret = save_microcode_in_initrd_amd(cpuid_eax(1)); break; + case X86_VENDOR_HYGON: + ret = save_microcode_in_initrd_amd(cpuid_eax(1)); + break; default: break; } @@ -316,6 +331,9 @@ static void reload_early_microcode(unsigned int cpu) if (family >= 0x10) reload_ucode_amd(cpu); break; + case X86_VENDOR_HYGON: + reload_ucode_amd(cpu); + break; default: break; } @@ -642,6 +660,8 @@ static int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); + else if (c->x86_vendor == X86_VENDOR_HYGON) + microcode_ops = init_hygon_microcode(); else pr_err("no support for this CPU vendor\n"); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index bf883aa71233..9e76fe430812 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -55,6 +55,9 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa); #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') +#define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') +#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') +#define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c)))) @@ -81,6 +84,9 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) return X86_VENDOR_AMD; + if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) + return X86_VENDOR_HYGON; + return X86_VENDOR_UNKNOWN; } @@ -114,6 +120,12 @@ static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } static inline void exit_amd_microcode(void) { } #endif /* !CONFIG_CPU_SUP_AMD */ +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops *init_hygon_microcode(void); +#else /* CONFIG_CPU_SUP_HYGON */ +static const inline struct microcode_ops *init_hygon_microcode(void) { return NULL; } +#endif /* 
!CONFIG_CPU_SUP_HYGON */ + #ifdef CONFIG_CPU_SUP_INTEL void load_ucode_intel_bsp(void); void load_ucode_intel_ap(void); -- Gitee From 6982d38a2fb50653d389b7df2b89832116419833 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:02:41 +0800 Subject: [PATCH 006/953] anolis: x86/amd_nb: Add Hygon family 18h model 4h PCI IDs ANBZ: #5455 Add the PCI device IDs for Hygon family 18h model 4h processors. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/amd_nb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index cab4d8b1535d..5250de895e04 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -123,16 +123,19 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, {} }; -- Gitee From a4f2a7bf3f702b4fb6d1b03a17573398c0179040 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:03:10 +0800 Subject: [PATCH 007/953] anolis: x86/amd_nb: Add northbridge support for Hygon family 18h model 4h ANBZ: #5455 Add dedicated functions to initialize the northbridge for Hygon family 18h model 4h processors. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/include/asm/amd_nb.h | 8 ++ arch/x86/kernel/amd_nb.c | 193 ++++++++++++++++++++++++++++++++++ 2 files changed, 201 insertions(+) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index ed0eaf65c437..4230a80a5a9d 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -82,6 +82,10 @@ u16 amd_nb_num(void); bool amd_nb_has_feature(unsigned int feature); struct amd_northbridge *node_to_amd_nb(int node); +bool hygon_f18h_m4h(void); +u16 hygon_nb_num(void); +int get_df_id(struct pci_dev *misc, u8 *id); + static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) { struct pci_dev *misc; @@ -119,6 +123,10 @@ static inline bool amd_gart_present(void) #define node_to_amd_nb(x) NULL #define amd_gart_present(x) false +#define hygon_f18h_m4h false +#define hygon_nb_num(x) 0 +#define get_df_id(x, y) NULL + #endif diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 5250de895e04..6c30cbd7a59b 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -44,10 +44,13 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 + /* Protect the PCI config register pairs used for SMN. 
*/ static DEFINE_MUTEX(smn_mutex); static u32 *flush_words; +static u16 nb_num; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, @@ -222,6 +225,191 @@ int amd_smn_write(u16 node, u32 address, u32 value) } EXPORT_SYMBOL_GPL(amd_smn_write); +bool hygon_f18h_m4h(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(hygon_f18h_m4h); + +u16 hygon_nb_num(void) +{ + return nb_num; +} +EXPORT_SYMBOL_GPL(hygon_nb_num); + +static int get_df1_register(struct pci_dev *misc, int offset, u32 *value) +{ + struct pci_dev *df_f1 = NULL; + int err; + + while ((df_f1 = pci_get_device(misc->vendor, + PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1, df_f1))) + if (pci_domain_nr(df_f1->bus) == pci_domain_nr(misc->bus) && + df_f1->bus->number == misc->bus->number && + PCI_SLOT(df_f1->devfn) == PCI_SLOT(misc->devfn)) + break; + + if (!df_f1) { + pr_warn("Error getting DF F1 device.\n"); + return -ENODEV; + } + + err = pci_read_config_dword(df_f1, offset, value); + if (err) + pr_warn("Error reading DF F1 register.\n"); + + return err; +} + +int get_df_id(struct pci_dev *misc, u8 *id) +{ + u32 value; + int ret; + + /* F1x200[23:20]: DF ID */ + ret = get_df1_register(misc, 0x200, &value); + *id = (value >> 20) & 0xf; + + return ret; +} +EXPORT_SYMBOL_GPL(get_df_id); + +static u8 get_socket_num(struct pci_dev *misc) +{ + u32 value; + int ret; + + /* F1x200[7:0]: Which socket is present. */ + ret = get_df1_register(misc, 0x200, &value); + + return ret ? 0 : hweight8(value & 0xff); +} + +static int northbridge_init_f18h_m4h(const struct pci_device_id *root_ids, + const struct pci_device_id *misc_ids, + const struct pci_device_id *link_ids) +{ + struct pci_dev *root, *misc, *link; + struct pci_dev *root_first = NULL; + struct amd_northbridge *nb; + u16 roots_per_socket = 0; + u16 miscs_per_socket = 0; + u16 socket_num = 0; + u16 root_count = 0; + u16 misc_count = 0; + int err = -ENODEV; + u8 i, j, m, n; + u8 id; + + pr_info("Hygon Fam%xh Model%xh NB driver.\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + misc = next_northbridge(NULL, misc_ids); + if (misc != NULL) { + socket_num = get_socket_num(misc); + pr_info("Socket number: %d\n", socket_num); + if (!socket_num) { + err = -ENODEV; + goto ret; + } + } else { + err = -ENODEV; + goto ret; + } + + misc = NULL; + while ((misc = next_northbridge(misc, misc_ids)) != NULL) + misc_count++; + + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (!root_count || !misc_count) { + err = -ENODEV; + goto ret; + } + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface, and M DF/SMN interfaces in one socket. + */ + roots_per_socket = root_count / socket_num; + miscs_per_socket = misc_count / socket_num; + + if (!roots_per_socket || !miscs_per_socket) { + err = -ENODEV; + goto ret; + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); + if (!nb) { + err = -ENOMEM; + goto ret; + } + + amd_northbridges.nb = nb; + amd_northbridges.num = misc_count; + + link = misc = root = NULL; + j = m = n = 0; + for (i = 0; i < amd_northbridges.num; i++) { + misc = next_northbridge(misc, misc_ids); + link = next_northbridge(link, link_ids); + + /* Only save the first PCI root device for each socket. 
*/ + if (!(i % miscs_per_socket)) { + root_first = next_northbridge(root, root_ids); + root = root_first; + j = 1; + } + + if (get_df_id(misc, &id)) { + err = -ENODEV; + goto err; + } + pr_info("DF ID: %d\n", id); + + if (id < 4) { + /* Add the devices with id<4 from the tail. */ + node_to_amd_nb(misc_count - m - 1)->misc = misc; + node_to_amd_nb(misc_count - m - 1)->link = link; + node_to_amd_nb(misc_count - m - 1)->root = root_first; + m++; + } else { + node_to_amd_nb(n)->misc = misc; + node_to_amd_nb(n)->link = link; + node_to_amd_nb(n)->root = root_first; + n++; + } + + /* Skip the redundant PCI root devices per socket. */ + while (j < roots_per_socket) { + root = next_northbridge(root, root_ids); + j++; + } + } + nb_num = n; + + return 0; + +err: + kfree(nb); + amd_northbridges.nb = NULL; + +ret: + pr_err("Hygon Fam%xh Model%xh northbridge init failed(%d)!\n", + boot_cpu_data.x86, boot_cpu_data.x86_model, err); + return err; +} static int amd_cache_northbridges(void) { @@ -242,6 +430,11 @@ static int amd_cache_northbridges(void) root_ids = hygon_root_ids; misc_ids = hygon_nb_misc_ids; link_ids = hygon_nb_link_ids; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return northbridge_init_f18h_m4h(root_ids, + misc_ids, link_ids); } misc = NULL; -- Gitee From d64149427958e4f214e972ba5205bfb767f1bba4 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:03:39 +0800 Subject: [PATCH 008/953] anolis: iommu/hygon: Add support for Hygon family 18h model 4h IOAPIC ANBZ: #5455 The SB IOAPIC is on the device 0xb from Hygon family 18h model 4h. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/iommu/amd/init.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index a2ad2dbd04d9..254744ac786f 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3004,6 +3004,9 @@ static void __init free_iommu_resources(void) /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) +/* SB IOAPIC for Hygon family 18h model 4h is on the device 0xb */ +#define IOAPIC_SB_DEVID_FAM18H_M4H ((0x00 << 8) | PCI_DEVFN(0xb, 0)) + static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; @@ -3029,7 +3032,12 @@ static bool __init check_ioapic_information(void) pr_err("%s: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; - } else if (devid == IOAPIC_SB_DEVID) { + } else if (devid == IOAPIC_SB_DEVID || + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf && + devid == IOAPIC_SB_DEVID_FAM18H_M4H)) { has_sb_ioapic = true; ret = true; } -- Gitee From 04d7ccd2441ef5ec2bd29f7e2b03f9c16ea6ebfb Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:04:15 +0800 Subject: [PATCH 009/953] anolis: EDAC/amd64: Get UMC channel from the 6th nibble for Hygon ANBZ: #5455 On Hygon family 18h platforms, we look at the 6th nibble(bit 20~23) in the instance_id to derive the channel number. 
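
A worked example with an illustrative IPID value (not from real
hardware): for m->ipid = 0x96300000, the old expression
(m->ipid & GENMASK(31, 0)) >> 20 yields 0x963, smearing the high IPID
bits into the channel number, while the new
(m->ipid & GENMASK(23, 0)) >> 20 isolates the 6th nibble and yields
channel 3.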
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9b6642d00871..b782ab937f08 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3057,7 +3057,11 @@ static inline void decode_bus_error(int node_id, struct mce *m) */ static void umc_get_err_info(struct mce *m, struct err_info *err) { - err->channel = (m->ipid & GENMASK(31, 0)) >> 20; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + err->channel = (m->ipid & GENMASK(23, 0)) >> 20; + else + err->channel = (m->ipid & GENMASK(31, 0)) >> 20; err->csrow = m->synd & 0x7; } -- Gitee From d78f813ddc5ec629226f780308763b9680a834ce Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:04:42 +0800 Subject: [PATCH 010/953] anolis: EDAC/amd64: Add support for Hygon family 18h model 4h ANBZ: #5455 Add support for Hygon family 18h model 4h to get UMC base, instance number and determine DDR memory types. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 61 ++++++++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 10 deletions(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index b782ab937f08..bd9dc02aa8cc 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -96,6 +96,17 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, return err; } +static u32 get_umc_base_f18h_m4h(u16 node, u8 channel) +{ + struct pci_dev *f3 = node_to_amd_nb(node)->misc; + u8 df_id; + + get_df_id(f3, &df_id); + df_id -= 4; + + return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id)); +} + /* * Select DCT to which PCI cfg accesses are routed */ @@ -1608,7 +1619,10 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt) u32 i, tmp, umc_base; for_each_umc(i) { - umc_base = get_umc_base(i); + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); @@ -1717,11 +1731,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) u32 mask_reg, mask_reg_sec; u32 *base, *base_sec; u32 *mask, *mask_sec; + u32 umc_base; int cs, umc; for_each_umc(umc) { - umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; - umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC; + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + else + umc_base = get_umc_base(umc); + + umc_base_reg = umc_base + UMCCH_BASE_ADDR; + umc_base_reg_sec = umc_base + UMCCH_BASE_ADDR_SEC; for_each_chip_select(cs, umc, pvt) { base = &pvt->csels[umc].csbases[cs]; @@ -1739,8 +1759,8 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) umc, cs, *base_sec, base_reg_sec); } - umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; - umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); + umc_mask_reg = umc_base + UMCCH_ADDR_MASK; + umc_mask_reg_sec = umc_base + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); for_each_chip_select_mask(cs, umc, pvt) { mask = &pvt->csels[umc].csmasks[cs]; @@ -1823,7 +1843,8 @@ static void umc_determine_memory_type(struct amd64_pvt *pvt) * Check if the system supports the "DDR Type" field in UMC Config * and has DDR5 DIMMs in use. 
*/ - if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if ((pvt->flags.zn_regs_v2 || hygon_f18h_m4h()) && + ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { if (umc->dimm_cfg & BIT(5)) umc->dram_type = MEM_LRDDR5; else if (umc->dimm_cfg & BIT(4)) @@ -3176,8 +3197,11 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) /* Read registers from each UMC */ for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); - umc_base = get_umc_base(i); umc = &pvt->umc[i]; amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); @@ -4108,6 +4132,11 @@ static int per_family_init(struct amd64_pvt *pvt) break; case 0x18: + if (pvt->model == 0x4) { + pvt->ctl_name = "F18h_M04h"; + pvt->max_mcs = 3; + break; + } pvt->ctl_name = "F18h"; break; @@ -4371,6 +4400,7 @@ static int __init amd64_edac_init(void) { const char *owner; int err = -ENODEV; + u16 instance_num; int i; if (ghes_get_devices()) @@ -4388,8 +4418,13 @@ static int __init amd64_edac_init(void) opstate_init(); + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + err = -ENOMEM; - ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(instance_num, sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; @@ -4397,7 +4432,7 @@ static int __init amd64_edac_init(void) if (!msrs) goto err_free; - for (i = 0; i < amd_nb_num(); i++) { + for (i = 0; i < instance_num; i++) { err = probe_one_instance(i); if (err) { /* unwind properly */ @@ -4442,6 +4477,7 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { + u16 instance_num; int i; if (pci_ctl) @@ -4453,7 +4489,12 @@ static void __exit amd64_edac_exit(void) else amd_unregister_ecc_decoder(decode_bus_error); - for (i = 0; i < amd_nb_num(); i++) + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + + for (i = 0; i < instance_num; i++) remove_one_instance(i); kfree(ecc_stngs); -- Gitee From 1f5204db363378d86f177f63c90e8bd3c9c7a177 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:04:58 +0800 Subject: [PATCH 011/953] anolis: EDAC/amd64: Adjust address translation for Hygon family 18h model 4h ANBZ: #5455 Add Hygon family 18h model 4h processor support for DramOffset and HiAddrOffset, and get the socket interleaving number from DramBase- Address(D18F0x110). Update intlv_num_chan and num_intlv_bits support for Hygon family 18h model 4h processor. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 40 ++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index bd9dc02aa8cc..5ba8791b2631 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1146,8 +1146,11 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr ctx.nid = nid; ctx.inst_id = umc; - /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ - if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) + /* Read DramOffset, check if base 1 is used. 
*/ + if (hygon_f18h_m4h() && + df_indirect_read_instance(nid, 0, 0x214, umc, &ctx.tmp)) + goto out_err; + else if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) goto out_err; /* Remove HiAddrOffset from normalized address, if enabled: */ @@ -1171,6 +1174,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr goto out_err; } + intlv_num_sockets = 0; + if (hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 2) & 0x3; lgcy_mmio_hole_en = ctx.tmp & BIT(1); intlv_num_chan = (ctx.tmp >> 4) & 0xF; intlv_addr_sel = (ctx.tmp >> 8) & 0x7; @@ -1187,7 +1193,8 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp)) goto out_err; - intlv_num_sockets = (ctx.tmp >> 8) & 0x1; + if (!hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 8) & 0x1; intlv_num_dies = (ctx.tmp >> 10) & 0x3; dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); @@ -1205,6 +1212,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr hash_enabled = true; break; default: + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x4 && + intlv_num_chan == 2) + break; pr_err("%s: Invalid number of interleaved channels %d.\n", __func__, intlv_num_chan); goto out_err; @@ -1223,8 +1233,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* Add a bit if sockets are interleaved. */ num_intlv_bits += intlv_num_sockets; - /* Assert num_intlv_bits <= 4 */ - if (num_intlv_bits > 4) { + /* Assert num_intlv_bits in the correct range. */ + if ((hygon_f18h_m4h() && num_intlv_bits > 7) || + (!hygon_f18h_m4h() && num_intlv_bits > 4)) { pr_err("%s: Invalid interleave bits %d.\n", __func__, num_intlv_bits); goto out_err; @@ -1243,7 +1254,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp)) goto out_err; - cs_fabric_id = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) + cs_fabric_id = (ctx.tmp >> 8) & 0x7FF; + else + cs_fabric_id = (ctx.tmp >> 8) & 0xFF; die_id_bit = 0; /* If interleaved over more than 1 channel: */ @@ -1263,8 +1277,13 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 die. */ if (intlv_num_dies) { sock_id_bit = die_id_bit + intlv_num_dies; - die_id_shift = (ctx.tmp >> 24) & 0xF; - die_id_mask = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) { + die_id_shift = (ctx.tmp >> 12) & 0xF; + die_id_mask = ctx.tmp & 0x7FF; + } else { + die_id_shift = (ctx.tmp >> 24) & 0xF; + die_id_mask = (ctx.tmp >> 8) & 0xFF; + } cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; } @@ -1272,7 +1291,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 socket. 
*/ if (intlv_num_sockets) { socket_id_shift = (ctx.tmp >> 28) & 0xF; - socket_id_mask = (ctx.tmp >> 16) & 0xFF; + if (hygon_f18h_m4h()) + socket_id_mask = (ctx.tmp >> 16) & 0x7FF; + else + socket_id_mask = (ctx.tmp >> 16) & 0xFF; cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; } -- Gitee From aceaa22bdd6dafb543274127e16a67021de410cb Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:05:40 +0800 Subject: [PATCH 012/953] anolis: EDAC/mce_amd: Use struct cpuinfo_x86.logical_die_id for Hygon NodeId ANBZ: #5455 The cpuinfo_x86.cpu_die_id is obtained from CPUID or an MSR in commit 028c221ed190 ("x86/CPU/AMD: Save AMD NodeId as cpu_die_id"). But the value may not be continuous for Hygon model 4h~6h processors. Using cpuinfo_x86.logical_die_id always yields continuous die (or node) IDs, because it converts the physical die ID to a logical die ID. So use topology_logical_die_id() instead of topology_die_id() to decode UMC ECC errors for Hygon processors. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/mce_amd.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 9215c06783df..06e29d2b51d1 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1187,8 +1187,13 @@ static void decode_smca_error(struct mce *m) pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && - xec == 0 && decode_dram_ecc) - decode_dram_ecc(topology_die_id(m->extcpu), m); + xec == 0 && decode_dram_ecc) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + decode_dram_ecc(topology_logical_die_id(m->extcpu), m); + else + decode_dram_ecc(topology_die_id(m->extcpu), m); + } } static inline void amd_decode_err_code(u16 ec) -- Gitee From f6fb58dc29c16b33db8aaa3d9de4d10a44303d66 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:06:12 +0800 Subject: [PATCH 013/953] anolis: hwmon/k10temp: Add support for Hygon family 18h model 4h ANBZ: #5455 The DF F3 device ID used to get the temperature for Hygon family 18h model 4h processor is the same as 17H_M30H, but with different offsets, which may span two distributed ranges. The second offset range can be considered as private to Hygon, so use struct hygon_private to describe it. Add a pointer priv in k10temp_data to point to the private data. Add functions k10temp_get_ccd_support_2nd() and hygon_read_temp() to support reading the second offset range.
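For illustration (not part of the patch), a minimal user-space sketch of how a Tccd channel selects one of the two SMN offset ranges described above. The values index_2nd = 3 and offset_2nd = 0x2f8 come from k10temp_probe() below; the SMN base constant mirrors the driver's ZEN_CCD_TEMP() layout and should be treated as an assumption to verify against the driver source.

#include <stdint.h>
#include <stdio.h>

#define ZEN_REPORTED_TEMP_CTRL_BASE 0x00059800u /* assumed, as in upstream k10temp */
#define ZEN_CCD_TEMP(offset, x) (ZEN_REPORTED_TEMP_CTRL_BASE + (offset) + ((x) * 4))

/* Mirror of hygon_read_temp()'s address selection: CCDs below index_2nd
 * use the first range (ccd_offset), the rest use the second, private range. */
static uint32_t hygon_ccd_reg(int channel, uint32_t ccd_offset,
			      uint32_t index_2nd, uint32_t offset_2nd)
{
	uint32_t ccd = (uint32_t)(channel - 2);	/* Tccd channels start at 2 */

	if (ccd < index_2nd)
		return ZEN_CCD_TEMP(ccd_offset, ccd);
	return ZEN_CCD_TEMP(offset_2nd, ccd - index_2nd);
}

int main(void)
{
	for (int ch = 2; ch < 10; ch++)
		printf("Tccd%d -> SMN 0x%08x\n", ch - 1,
		       hygon_ccd_reg(ch, 0x154, 3, 0x2f8));
	return 0;
}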
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/hwmon/k10temp.c | 84 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 82 insertions(+), 2 deletions(-) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index bae0becfa24b..ed953a33ed5c 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -84,6 +84,11 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); */ #define AMD_I3255_STR "3255" +struct hygon_private { + u32 index_2nd; + u32 offset_2nd; +}; + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -94,6 +99,7 @@ struct k10temp_data { bool is_zen; u32 ccd_offset; bool disp_negative; + void *priv; }; #define TCTL_BIT 0 @@ -201,6 +207,23 @@ static int k10temp_read_labels(struct device *dev, return 0; } +static void hygon_read_temp(struct k10temp_data *data, int channel, + u32 *regval) +{ + struct hygon_private *h_priv; + + h_priv = (struct hygon_private *)data->priv; + if ((channel - 2) < h_priv->index_2nd) + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + regval); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + channel - 2 - h_priv->index_2nd), + regval); +} + static int k10temp_read_temp(struct device *dev, u32 attr, int channel, long *val) { @@ -221,7 +244,10 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, *val = 0; break; case 2 ... 13: /* Tccd{1-12} */ - amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + if (hygon_f18h_m4h()) + hygon_read_temp(data, channel, &regval); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), ZEN_CCD_TEMP(data->ccd_offset, channel - 2), &regval); *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; @@ -388,14 +414,48 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev, } } +static void k10temp_get_ccd_support_2nd(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + struct hygon_private *h_priv; + u32 regval; + int i; + + h_priv = (struct hygon_private *)data->priv; + for (i = h_priv->index_2nd; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + i - h_priv->index_2nd), + &regval); + if (regval & ZEN_CCD_TEMP_VALID) + data->show_temp |= BIT(TCCD_BIT(i)); + } +} + static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; + struct hygon_private *h_priv; struct k10temp_data *data; struct device *hwmon_dev; + u8 df_id; int i; + if (hygon_f18h_m4h()) { + if (get_df_id(pdev, &df_id)) { + pr_err("Get DF ID failed.\n"); + return -ENODEV; + } + + /* + * The temperature should be read from the devices + * with id < 4.
+ */ + if (df_id >= 4) + return 0; + } + if (unreliable) { if (!force) { dev_err(dev, @@ -423,7 +483,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) (boot_cpu_data.x86_model & 0xf0) == 0x70)) { data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; - } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { + } else if (boot_cpu_data.x86 == 0x17) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; data->is_zen = true; @@ -448,6 +508,25 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) k10temp_get_ccd_support(pdev, data, 8); break; } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) { + data->ccd_offset = 0x154; + data->priv = devm_kzalloc(dev, sizeof(*h_priv), + GFP_KERNEL); + if (!data->priv) + return -ENOMEM; + h_priv = (struct hygon_private *)data->priv; + h_priv->offset_2nd = 0x2f8; + h_priv->index_2nd = 3; + k10temp_get_ccd_support(pdev, data, h_priv->index_2nd); + k10temp_get_ccd_support_2nd(pdev, data, 8); + } } else if (boot_cpu_data.x86 == 0x19) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; @@ -528,6 +607,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); -- Gitee From b762a7346017e429ccb81c0c1bba0c905e5a1820 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:06:38 +0800 Subject: [PATCH 014/953] anolis: i2c-piix4: Remove the IMC detection for Hygon SMBus ANBZ: #5455 Remove the IMC detection path for Hygon processors. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/i2c/busses/i2c-piix4.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 809fbd014cd6..cc170c114e10 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -1043,8 +1043,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) bool notify_imc = false; is_sb800 = true; - if ((dev->vendor == PCI_VENDOR_ID_AMD || - dev->vendor == PCI_VENDOR_ID_HYGON) && + if (dev->vendor == PCI_VENDOR_ID_AMD && dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { u8 imc; -- Gitee From c3680058751071f26fd9b3792058529ec2fc99fd Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:07:05 +0800 Subject: [PATCH 015/953] anolis: x86/cpu: Get LLC ID for Hygon family 18h model 5h ANBZ: #5455 Add support to calculate the LLC ID from the number of threads sharing the cache for Hygon family 18h model 5h processor.
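As a worked illustration of the arithmetic in the new else-branch (not part of the patch; get_count_order() is re-implemented here as a stand-in for the kernel helper): with EAX[25:14] = 31 at CPUID leaf 0x8000001d, 32 threads share the LLC, so five APIC-ID bits are stripped.

#include <stdio.h>

/* stand-in for the kernel's get_count_order(): ceil(log2(count)) */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

/* the model >= 0x5 computation from cacheinfo_hygon_init_llc_id() above */
static unsigned int hygon_llc_id(unsigned int apicid, unsigned int eax)
{
	unsigned int num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

	return apicid >> get_count_order(num_sharing_cache);
}

int main(void)
{
	unsigned int eax = 31u << 14;	/* EAX[25:14] = 31: 32 threads share the LLC */

	printf("llc_id = %u\n", hygon_llc_id(0x47, eax));	/* 71 >> 5 = 2 */
	return 0;
}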
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/cpu/cacheinfo.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 8f86eacf69f7..7c4ce361c728 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -708,11 +708,30 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) if (!cpuid_edx(0x80000006)) return; - /* - * LLC is at the core complex level. - * Core complex ID is ApicId[3] for these processors. - */ - per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + if (c->x86_model < 0x5) { + /* + * LLC is at the core complex level. + * Core complex ID is ApicId[3] for these processors. + */ + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + } else { + /* + * LLC ID is calculated from the number of threads + * sharing the cache. + */ + u32 eax, ebx, ecx, edx, num_sharing_cache = 0; + u32 llc_index = find_num_cache_leaves(c) - 1; + + cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx); + if (eax) + num_sharing_cache = ((eax >> 14) & 0xfff) + 1; + + if (num_sharing_cache) { + int bits = get_count_order(num_sharing_cache); + + per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; + } + } } void init_amd_cacheinfo(struct cpuinfo_x86 *c) -- Gitee From 5ac7fbd6d7ed4d070983559d98a8dfdc75368e3a Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:07:27 +0800 Subject: [PATCH 016/953] anolis: x86/amd_nb: Add support for Hygon family 18h model 5h ANBZ: #5455 Add root and DF F1/F3/F4 device IDs for Hygon family 18h model 5h processors. But some model 5h processors have the legacy (M04H) DF devices, so add an if conditional to read the DF F1 register. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/amd_nb.c | 24 ++++++++++++++++++++++-- include/linux/pci_ids.h | 1 + 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6c30cbd7a59b..0b337c572937 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -44,7 +44,10 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 #define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 /* Protect the PCI config register pairs used for SMN.
*/ static DEFINE_MUTEX(smn_mutex); @@ -127,18 +130,21 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, {} }; @@ -248,10 +254,24 @@ EXPORT_SYMBOL_GPL(hygon_nb_num); static int get_df1_register(struct pci_dev *misc, int offset, u32 *value) { struct pci_dev *df_f1 = NULL; + u32 device; int err; - while ((df_f1 = pci_get_device(misc->vendor, - PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1, df_f1))) + switch (boot_cpu_data.x86_model) { + case 0x4: + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + default: + return -ENODEV; + } + + while ((df_f1 = pci_get_device(misc->vendor, device, df_f1))) if (pci_domain_nr(df_f1->bus) == pci_domain_nr(misc->bus) && df_f1->bus->number == misc->bus->number && PCI_SLOT(df_f1->devfn) == PCI_SLOT(misc->devfn)) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a6c66ea80194..004a341837f2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2596,6 +2596,7 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad -- Gitee From 119ebd6ae39245264e2b36efbc4c5671f1c7332d Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:07:50 +0800 Subject: [PATCH 017/953] anolis: EDAC/amd64: Add support for Hygon family 18h model 5h ANBZ: #5455 Add Hygon family 18h model 5h processor support for amd64_edac. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 5ba8791b2631..e6f0701cc67b 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4158,6 +4158,10 @@ static int per_family_init(struct amd64_pvt *pvt) pvt->ctl_name = "F18h_M04h"; pvt->max_mcs = 3; break; + } else if (pvt->model == 0x5) { + pvt->ctl_name = "F18h_M05h"; + pvt->max_mcs = 1; + break; } pvt->ctl_name = "F18h"; break; -- Gitee From a3c00b65682dc451e0ca0e11d352679626b49bdb Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:08:20 +0800 Subject: [PATCH 018/953] anolis: hwmon/k10temp: Add support for Hygon family 18h model 5h ANBZ: #5455 Add 18H_M05H DF F3 device ID to get the temperature for Hygon family 18h model 5h processor. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/hwmon/k10temp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index ed953a33ed5c..faf3955a311f 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -608,6 +608,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); -- Gitee From f44c6bcd779eaaf07742f5e952ec6d7250dd1edf Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:08:52 +0800 Subject: [PATCH 019/953] anolis: x86/amd_nb: Add support for Hygon family 18h model 6h ANBZ: #5455 Hygon family 18h model 6h processor has the same DF F1 device ID as M05H_DF_F1, but should get DF ID from DF F5 device. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/amd_nb.c | 70 ++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 0b337c572937..9f759c7bf037 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -48,6 +48,7 @@ #define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 /* Protect the PCI config register pairs used for SMN. */ static DEFINE_MUTEX(smn_mutex); @@ -251,40 +252,55 @@ u16 hygon_nb_num(void) } EXPORT_SYMBOL_GPL(hygon_nb_num); -static int get_df1_register(struct pci_dev *misc, int offset, u32 *value) +static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value) { - struct pci_dev *df_f1 = NULL; + struct pci_dev *df_func = NULL; u32 device; int err; - switch (boot_cpu_data.x86_model) { - case 0x4: - device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; - break; - case 0x5: - if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) - device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; - else + if (func == 1) { + switch (boot_cpu_data.x86_model) { + case 0x4: device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; - break; - default: + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + break; + default: + return -ENODEV; + } + } else if (func == 5) { + switch (boot_cpu_data.x86_model) { + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5; + break; + default: + return -ENODEV; + } + } else { return -ENODEV; } - while ((df_f1 = pci_get_device(misc->vendor, device, df_f1))) - if (pci_domain_nr(df_f1->bus) == pci_domain_nr(misc->bus) && - df_f1->bus->number == misc->bus->number && - PCI_SLOT(df_f1->devfn) == PCI_SLOT(misc->devfn)) + while ((df_func = pci_get_device(misc->vendor, device, df_func))) + if (pci_domain_nr(df_func->bus) == pci_domain_nr(misc->bus) && + df_func->bus->number == misc->bus->number && + PCI_SLOT(df_func->devfn) == PCI_SLOT(misc->devfn)) break; - if (!df_f1) { - pr_warn("Error getting DF F1 device.\n"); + if (!df_func) { + pr_warn("Error getting DF F%d device.\n", func); return -ENODEV; } - err = pci_read_config_dword(df_f1, offset, 
value); + err = pci_read_config_dword(df_func, offset, value); if (err) - pr_warn("Error reading DF F1 register.\n"); + pr_warn("Error reading DF F%d register.\n", func); return err; } @@ -294,9 +310,15 @@ int get_df_id(struct pci_dev *misc, u8 *id) u32 value; int ret; - /* F1x200[23:20]: DF ID */ - ret = get_df1_register(misc, 0x200, &value); - *id = (value >> 20) & 0xf; + if (boot_cpu_data.x86_model == 0x6) { + /* F5x180[19:16]: DF ID */ + ret = get_df_register(misc, 5, 0x180, &value); + *id = (value >> 16) & 0xf; + } else { + /* F1x200[23:20]: DF ID */ + ret = get_df_register(misc, 1, 0x200, &value); + *id = (value >> 20) & 0xf; + } return ret; } @@ -308,7 +330,7 @@ static u8 get_socket_num(struct pci_dev *misc) int ret; /* F1x200[7:0]: Which socket is present. */ - ret = get_df1_register(misc, 0x200, &value); + ret = get_df_register(misc, 1, 0x200, &value); return ret ? 0 : hweight8(value & 0xff); } -- Gitee From d148f1c2fe7fca476a4ea2c66642d1542034ac52 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:09:21 +0800 Subject: [PATCH 020/953] anolis: EDAC/amd64: Add support for Hygon family 18h model 6h ANBZ: #5455 Add Hygon family 18h model 6h processor support for amd64_edac. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index e6f0701cc67b..d3761289044d 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4162,6 +4162,9 @@ static int per_family_init(struct amd64_pvt *pvt) pvt->ctl_name = "F18h_M05h"; pvt->max_mcs = 1; break; + } else if (pvt->model == 0x6) { + pvt->ctl_name = "F18h_M06h"; + break; } pvt->ctl_name = "F18h"; break; -- Gitee From 515fd234019fdfb1c334c91f04b7a1906b98f180 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:09:47 +0800 Subject: [PATCH 021/953] anolis: EDAC/amd64: Adjust UMC channel for Hygon family 18h model 6h ANBZ: #5455 Hygon family 18h model 6h has 2 cs mapped to 1 umc, so adjust for it. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index d3761289044d..f02b70c911f1 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3115,6 +3115,7 @@ static void decode_umc_error(int node_id, struct mce *m) struct amd64_pvt *pvt; struct err_info err; u64 sys_addr; + u8 umc; node_id = fixup_node_id(node_id, m); @@ -3145,7 +3146,12 @@ static void decode_umc_error(int node_id, struct mce *m) pvt->ops->get_err_info(m, &err); - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x6) + umc = err.channel << 1; + else + umc = err.channel; + + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, umc, &sys_addr)) { err.err_code = ERR_NORM_ADDR; goto log_error; } -- Gitee From 88d8f0018e26c33840aca042956fb4de392b4513 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:10:11 +0800 Subject: [PATCH 022/953] anolis: perf/x86/uncore: Add L3 PMU support for Hygon family 18h model 6h ANBZ: #5455 Adjust the L3 PMU slicemask and threadmask for Hygon family 18h model 6h processor. 
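For illustration (not part of the patch), a minimal sketch of the default-mask widening that the adjusted l3_thread_slice_mask() performs for model 6h, using the HYGON_L3_* field positions this series defines (slicemask in config:28-31, threadmask in config:32-63). The GNU `?:` shorthand matches the kernel source.

#include <stdint.h>
#include <stdio.h>

#define HYGON_L3_SLICE_MASK	(0xFULL << 28)		/* config:28-31 */
#define HYGON_L3_THREAD_MASK	(0xFFFFFFFFULL << 32)	/* config:32-63 */

/* any field the user left zero is widened to "count all slices/threads" */
static uint64_t hygon_m6h_thread_slice_mask(uint64_t config)
{
	return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) |
	       ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK);
}

int main(void)
{
	uint64_t cfg = 1ULL << 28;	/* slice 0 requested, no threadmask given */

	printf("effective config bits: %#llx\n",
	       (unsigned long long)hygon_m6h_thread_slice_mask(cfg));
	return 0;
}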
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/events/amd/uncore.c | 52 +++++++++++++++++++++++++++++-- arch/x86/include/asm/perf_event.h | 8 +++++ 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 83f15fe411b3..5100469fef32 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -196,10 +196,21 @@ static void amd_uncore_del(struct perf_event *event, int flags) */ static u64 l3_thread_slice_mask(u64 config) { - if (boot_cpu_data.x86 <= 0x18) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 <= 0x18) return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86_model == 0x6) + return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) | + ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK); + else + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + } + /* * If the user doesn't specify a threadmask, they're not trying to * count core 0, so we enable all cores & threads. @@ -268,6 +279,13 @@ amd_f17h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) attr->mode : 0; } +static umode_t +hygon_f18h_m6h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return boot_cpu_data.x86 == 0x18 && boot_cpu_data.x86_model == 0x6 ? + attr->mode : 0; +} + static umode_t amd_f19h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -325,6 +343,8 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(slicemask4, slicemask, "config:28-31"); /* F18h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask, "config:32-63"); /* F18h L3 */ /* Common DF and NB attributes */ static struct attribute *amd_uncore_df_format_attr[] = { @@ -347,6 +367,12 @@ static struct attribute *amd_f17h_uncore_l3_format_attr[] = { NULL, }; +/* F18h M06h unique L3 attributes */ +static struct attribute *hygon_f18h_m6h_uncore_l3_format_attr[] = { + &format_attr_slicemask4.attr, /* slicemask */ + NULL, +}; + /* F19h unique L3 attributes */ static struct attribute *amd_f19h_uncore_l3_format_attr[] = { &format_attr_coreid.attr, /* coreid */ @@ -372,6 +398,12 @@ static struct attribute_group amd_f17h_uncore_l3_format_group = { .is_visible = amd_f17h_uncore_is_visible, }; +static struct attribute_group hygon_f18h_m6h_uncore_l3_format_group = { + .name = "format", + .attrs = hygon_f18h_m6h_uncore_l3_format_attr, + .is_visible = hygon_f18h_m6h_uncore_is_visible, +}; + static struct attribute_group amd_f19h_uncore_l3_format_group = { .name = "format", .attrs = amd_f19h_uncore_l3_format_attr, @@ -396,6 +428,11 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = { NULL, }; +static const struct attribute_group *hygon_uncore_l3_attr_update[] = { + &hygon_f18h_m6h_uncore_l3_format_group, + NULL, +}; + static struct pmu amd_nb_pmu = { .task_ctx_nr = perf_invalid_context, .attr_groups = amd_uncore_df_attr_groups, @@ -709,10 +746,21 @@ static 
int __init amd_uncore_init(void) *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask2.attr; - } else if (boot_cpu_data.x86 >= 0x17) { + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0x17) { *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask8.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask8.attr; + if (boot_cpu_data.x86_model == 0x6) { + *l3_attr++ = &format_attr_threadmask32.attr; + amd_llc_pmu.attr_update = hygon_uncore_l3_attr_update; + } else { + *l3_attr++ = &format_attr_threadmask8.attr; + } } amd_uncore_llc = alloc_percpu(struct amd_uncore *); diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 85a9fd5a3ec3..0c4a93712ef5 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -60,6 +60,14 @@ #define INTEL_ARCH_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) +#define HYGON_L3_SLICE_SHIFT 28 +#define HYGON_L3_SLICE_MASK \ + (0xFULL << HYGON_L3_SLICE_SHIFT) + +#define HYGON_L3_THREAD_SHIFT 32 +#define HYGON_L3_THREAD_MASK \ + (0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT) + #define AMD64_L3_SLICE_SHIFT 48 #define AMD64_L3_SLICE_MASK \ (0xFULL << AMD64_L3_SLICE_SHIFT) -- Gitee From a5f4c14c77bafcc2c97127476cd5963464d779e9 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:11:01 +0800 Subject: [PATCH 023/953] anolis: x86/resctrl: Add Hygon QoS support ANBZ: #5455 Add support for Hygon QoS feature. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/cpu/hygon.c | 2 ++ arch/x86/kernel/cpu/resctrl/core.c | 10 +++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index f0482c9d49fd..b6f932d2d6aa 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "cpu.h" @@ -242,6 +243,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; } } + resctrl_cpu_detect(c); } static void early_init_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 8073fd304293..d99584690124 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -755,7 +755,8 @@ static __init bool get_mem_config(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return __get_mem_config_intel(&hw_res->r_resctrl); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return __rdt_get_mem_config_amd(&hw_res->r_resctrl); return false; @@ -907,7 +908,8 @@ static __init void rdt_init_res_defs(void) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) rdt_init_res_defs_intel(); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) rdt_init_res_defs_amd(); } @@ -938,7 +940,9 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) c->x86_cache_occ_scale = ebx; c->x86_cache_mbm_width_offset = eax & 0xff; - if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) + if ((c->x86_vendor == 
X86_VENDOR_AMD || + c->x86_vendor == X86_VENDOR_HYGON) && + !c->x86_cache_mbm_width_offset) c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; } } -- Gitee From 8702c0e4c5aa56e76f1c811d9dbe7beb2de1ebf8 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:11:52 +0800 Subject: [PATCH 024/953] anolis: ALSA: hda: Add support for Hygon family 18h model 5h HD-Audio ANBZ: #5455 Add the new PCI ID 0x1d94 0x14a9 for Hygon family 18h model 5h HDA controller. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- include/linux/pci_ids.h | 1 + sound/pci/hda/hda_intel.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 004a341837f2..966f465f825f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2596,6 +2596,7 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index a6a9d353fe63..5348c26b91aa 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -238,6 +238,7 @@ enum { AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, AZX_DRIVER_LOONGSON, + AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, AZX_NUM_DRIVERS, /* keep this as last entry */ }; @@ -350,6 +351,7 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", [AZX_DRIVER_LOONGSON] = "HDA Loongson", + [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", }; @@ -2753,6 +2755,9 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_LOONGSON }, { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDMI), .driver_data = AZX_DRIVER_LOONGSON }, + /* Hygon HDAudio */ + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_HDA), + .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_NO_MSI }, { 0, } }; MODULE_DEVICE_TABLE(pci, azx_ids); -- Gitee From 7667feb7943ba4eb77b2937520b0fd096087124d Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:12:21 +0800 Subject: [PATCH 025/953] anolis: ALSA: hda: Fix single byte writing issue for Hygon family 18h model 5h ANBZ: #5455 On Hygon family 18h model 5h controller, some registers such as GCTL, SD_CTL and SD_CTL_3B must be accessed as dwords, or the writes will fail.
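For illustration (not part of the patch), a self-contained sketch of the widening this fix applies: instead of a single-byte store, the affected registers are updated with a 32-bit read-modify-write of the containing dword. The accessors below act on a fake register file and stand in for the snd_hdac_*_updatel() helpers.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4];	/* fake register file */

static uint32_t readl_reg(unsigned int off)		{ return regs[off / 4]; }
static void writel_reg(uint32_t val, unsigned int off)	{ regs[off / 4] = val; }

/* dword read-modify-write: the only update form this controller accepts */
static void updatel_reg(unsigned int off, uint32_t mask, uint32_t val)
{
	writel_reg((readl_reg(off) & ~mask) | (val & mask), off);
}

int main(void)
{
	enum { GCTL = 0x00, AZX_GCTL_RESET = 1u << 0 };

	updatel_reg(GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); /* leave controller reset */
	printf("GCTL = %#x\n", readl_reg(GCTL));
	return 0;
}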
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- include/sound/hdaudio.h | 1 + sound/hda/hdac_controller.c | 10 ++++++++-- sound/hda/hdac_stream.c | 39 ++++++++++++++++++++++++++++--------- sound/pci/hda/hda_intel.c | 4 ++++ 4 files changed, 43 insertions(+), 11 deletions(-) diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 32c59053b48e..101183b8d3bc 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -350,6 +350,7 @@ struct hdac_bus { bool needs_damn_long_delay:1; bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */ + bool hygon_dword_access:1; int poll_count; diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 7f3a000fab0c..df37a85cf27c 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -410,7 +410,10 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus) { unsigned long timeout; - snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + if (bus->hygon_dword_access) + snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + else + snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); timeout = jiffies + msecs_to_jiffies(100); while (!snd_hdac_chip_readb(bus, GCTL) && time_before(jiffies, timeout)) @@ -475,7 +478,10 @@ static void azx_int_disable(struct hdac_bus *bus) /* disable interrupts in stream descriptor */ list_for_each_entry(azx_dev, &bus->stream_list, list) - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_INT_MASK, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); /* disable SIE for all streams & disable controller CIE and GIE */ snd_hdac_chip_writel(bus, INTCTL, 0); diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index fe0958f9969c..2312266939b2 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -146,11 +146,15 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev) stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream); else stripe_ctl = 0; - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + stripe_ctl); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, stripe_ctl); } /* set DMA start and interrupt mask */ - if (bus->access_sdnctl_in_dword) + if (bus->access_sdnctl_in_dword || bus->hygon_dword_access) snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_DMA_START | SD_INT_MASK); else @@ -166,11 +170,21 @@ EXPORT_SYMBOL_GPL(snd_hdac_stream_start); */ static void snd_hdac_stream_clear(struct hdac_stream *azx_dev) { - snd_hdac_stream_updateb(azx_dev, SD_CTL, - SD_CTL_DMA_START | SD_INT_MASK, 0); - snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ - if (azx_dev->stripe) - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + struct hdac_bus *bus = azx_dev->bus; + + if (bus->hygon_dword_access) { + snd_hdac_stream_updatel(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ + if (azx_dev->stripe) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + } else { + snd_hdac_stream_updateb(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ + 
if (azx_dev->stripe) + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + } azx_dev->running = false; } @@ -225,12 +239,16 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) { unsigned char val; int dma_run_state; + struct hdac_bus *bus = azx_dev->bus; snd_hdac_stream_clear(azx_dev); dma_run_state = snd_hdac_stream_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START; - snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); /* wait for hardware to report that the stream entered reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, (val & SD_CTL_STREAM_RESET), 3, 300); @@ -238,7 +256,10 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) if (azx_dev->bus->dma_stop_delay && dma_run_state) udelay(azx_dev->bus->dma_stop_delay); - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); /* wait for hardware to report that the stream is out of reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, !(val & SD_CTL_STREAM_RESET), 3, 300); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 5348c26b91aa..6176fc743e50 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1880,6 +1880,10 @@ static int azx_first_init(struct azx *chip) bus->access_sdnctl_in_dword = 1; } + if (chip->driver_type == AZX_DRIVER_HYGON && + chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) + bus->hygon_dword_access = 1; + err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; -- Gitee From 74437ccf004d3d58f1268a095881a77a38b7eef8 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:05:02 +0800 Subject: [PATCH 026/953] anolis: ata: libata: disable PhyRdy Change Interrupt based on actual LPM capability ANBZ: #7809 The AHCI spec mentions that PhyRdy Change Interrupt and Link Power Management (LPM) do not coexist. However, before enabling LPM, the driver did not check whether the host supports LPM, but directly disabled the PhyRdy Change Interrupt. Add a check for actual LPM support, and disable the PhyRdy Change Interrupt only when LPM is supported. Signed-off-by: leoliu-oc Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/anck-next/pulls/23 --- drivers/ata/libata-eh.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 1168e29cae86..e03308b45456 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3374,6 +3374,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_device **r_failed_dev) { struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct device *device = ap ? ap->host->dev : NULL; + struct pci_dev *pdev = (!device || !dev_is_pci(device)) ?
NULL : to_pci_dev(device); struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; @@ -3382,6 +3384,11 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, unsigned int err_mask; int rc; + /* if the controller does not support LPM, set the no-LPM flag */ + if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && + !(~ap->host->flags & (ATA_HOST_NO_PART | ATA_HOST_NO_SSC | ATA_HOST_NO_DEVSLP))) + link->flags |= ATA_LFLAG_NO_LPM; + /* if the link or host doesn't do LPM, noop */ if (!IS_ENABLED(CONFIG_SATA_HOST) || (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) -- Gitee From 7823679b0de659e41404ed69d90a4cb9852de700 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:05:09 +0800 Subject: [PATCH 027/953] anolis: Add support for Zhaoxin I2C controller ANBZ: #7809 The Zhaoxin I2C Linux driver supports all bidirectional bus protocol speeds specified in the I2C Specification 7.0. Signed-off-by: leoliu-oc Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/anck-next/pulls/27 --- drivers/i2c/busses/Kconfig | 11 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-zhaoxin.c | 561 +++++++++++++++++++++++++++++++ 3 files changed, 573 insertions(+) create mode 100644 drivers/i2c/busses/i2c-zhaoxin.c diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 97d27e01a6ee..1ddff0ec93fc 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -334,6 +334,17 @@ config I2C_VIAPRO This driver can also be built as a module. If so, the module will be called i2c-viapro. +config I2C_ZHAOXIN + tristate "Zhaoxin I2C controller driver" + depends on PCI + select I2C_ALGOBIT + help + If you say yes to this option, support will be included for the + Zhaoxin I2C interface. + + This driver can also be built as a module. If so, the module + will be called i2c-zhaoxin. + if ACPI comment "ACPI drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 9be9fdb07f3d..bef7c205433b 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o obj-$(CONFIG_I2C_VIA) += i2c-via.o obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o +obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o # Mac SMBus host controller drivers obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o diff --git a/drivers/i2c/busses/i2c-zhaoxin.c b/drivers/i2c/busses/i2c-zhaoxin.c new file mode 100644 index 000000000000..3d4cb36c1f17 --- /dev/null +++ b/drivers/i2c/busses/i2c-zhaoxin.c @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2021 Shanghai Zhaoxin Semiconductor Corporation. + * All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.5.1" + +#define ZX_I2C_NAME "i2c_zhaoxin" + +/* REG_CR Bit fields */ +#define ZXI2C_REG_CR 0x00 +#define ZXI2C_CR_ENABLE BIT(0) +#define ZXI2C_CR_RX_END BIT(1) +#define ZXI2C_CR_TX_END BIT(2) +#define ZXI2C_CR_END_MASK GENMASK(2, 1) +#define ZXI2C_CR_CPU_RDY BIT(3) +#define ZXI2C_CR_MST_RST BIT(7) +#define ZXI2C_CR_FIFO_MODE BIT(14) + +/* REG_TCR Bit fields */ +#define ZXI2C_REG_TCR 0x02 +#define ZXI2C_TCR_HS_MODE BIT(13) +#define ZXI2C_TCR_MASTER_READ BIT(14) +#define ZXI2C_TCR_FAST BIT(15) + +/* REG_CSR Bit fields */ +#define ZXI2C_REG_CSR 0x04 +#define ZXI2C_CSR_RCV_NOT_ACK BIT(0) +#define ZXI2C_CSR_READY_MASK BIT(1) + +/* REG_ISR Bit fields */ +#define ZXI2C_REG_ISR 0x06 +#define ZXI2C_ISR_NACK_ADDR BIT(0) +#define ZXI2C_ISR_BYTE_END BIT(1) +#define ZXI2C_ISR_SCL_TIMEOUT BIT(2) +#define ZXI2C_ISR_MASK_ALL GENMASK(2, 0) +#define ZXI2C_IRQ_FIFOEND BIT(3) +#define ZXI2C_IRQ_FIFONACK BIT(4) +#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL | ZXI2C_IRQ_FIFOEND | ZXI2C_IRQ_FIFONACK) + +/* REG_IMR Bit fields */ +#define ZXI2C_REG_IMR 0x08 +#define ZXI2C_IMR_ADDRNACK BIT(0) +#define ZXI2C_IMR_BYTE BIT(1) +#define ZXI2C_IMR_SCL_TIMEOUT BIT(2) +#define ZXI2C_IMR_ENABLE_ALL GENMASK(2, 0) + +#define ZXI2C_REG_CLK 0x10 +#define ZXI2C_CLK_50M BIT(0) +#define ZXI2C_REG_REV 0x11 +#define ZXI2C_REG_HCR 0x12 +#define ZXI2C_HCR_RST_FIFO GENMASK(1, 0) +#define ZXI2C_REG_HTDR 0x13 +#define ZXI2C_REG_HRDR 0x14 +#define ZXI2C_REG_HTLR 0x15 +#define ZXI2C_REG_HRLR 0x16 +#define ZXI2C_REG_HWCNTR 0x18 +#define ZXI2C_REG_HRCNTR 0x19 + +#define ZXI2C_REG_CDR 0x0A +#define ZXI2C_REG_TR 0x0C +#define ZXI2C_REG_MCR 0x0E + +struct zxi2c { + struct i2c_adapter adapter; + struct completion complete; + struct device *dev; + void __iomem *base; + struct clk *clk; + u16 tcr; + int irq; + u16 cmd_status; + u16 tr; + u16 mcr; + u16 csr; + u8 fstp; + u8 hrv; +}; + +/* parameters Constants */ +#define ZXI2C_GOLD_FSTP_100K 0xF3 +#define ZXI2C_GOLD_FSTP_400K 0x38 +#define ZXI2C_GOLD_FSTP_1M 0x13 +#define ZXI2C_GOLD_FSTP_3400K 0x37 + +#define ZXI2C_HS_MASTER_CODE (0x08 << 8) +#define ZXI2C_FIFO_SIZE 32 + +static int zxi2c_wait_bus_ready(struct zxi2c *i2c) +{ + unsigned long timeout; + void __iomem *base = i2c->base; + u16 tmp; + + timeout = jiffies + msecs_to_jiffies(200); + while (!(readw(base + ZXI2C_REG_CSR) & ZXI2C_CSR_READY_MASK)) { + if (time_after(jiffies, timeout)) { + dev_warn(i2c->dev, "timeout waiting for bus ready\n"); + return -EBUSY; + } + tmp = ioread16(i2c->base + ZXI2C_REG_CR); + iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR); + + msleep(20); + } + + return 0; +} + +static int zxi2c_wait_status(struct zxi2c *i2c, u8 status) +{ + unsigned long time_left; + + time_left = wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(500)); + if (time_left <= 1) + return -ETIMEDOUT; + + if (i2c->cmd_status & status) + return 0; + + return -EIO; +} + +static irqreturn_t zxi2c_isr(int irq, void *data) +{ + struct zxi2c *i2c = data; + + /* save the status and write-clear it */ + i2c->cmd_status = readw(i2c->base + ZXI2C_REG_ISR); + if (!i2c->cmd_status) + return IRQ_NONE; + + writew(i2c->cmd_status, i2c->base + ZXI2C_REG_ISR); + + complete(&i2c->complete); + + return IRQ_HANDLED; +} + +static int zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, bool last) +{ + u16 val, tcr_val = i2c->tcr; + int xfer_len = 0; + void __iomem *base = i2c->base; + + writew(msg->buf[0] & 0xFF, base + 
ZXI2C_REG_CDR); + reinit_completion(&i2c->complete); + writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); + + while (xfer_len < msg->len) { + int err; + + err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); + if (err) + return err; + + xfer_len++; + + val = readw(base + ZXI2C_REG_CSR); + if (val & ZXI2C_CSR_RCV_NOT_ACK) { + dev_dbg(i2c->dev, "write RCV NACK error\n"); + return -EIO; + } + + if (msg->len == 0) { + val = ZXI2C_CR_TX_END | ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE; + writew(val, base + ZXI2C_REG_CR); + break; + } + + if (xfer_len == msg->len) { + if (last) + writeb(ZXI2C_CR_TX_END, base + ZXI2C_REG_CR); + } else { + writew(msg->buf[xfer_len] & 0xFF, base + ZXI2C_REG_CDR); + writew(ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE, base + ZXI2C_REG_CR); + } + } + + return 0; +} + +static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first) +{ + u16 val, tcr_val = i2c->tcr; + u32 xfer_len = 0; + void __iomem *base = i2c->base; + + val = readw(base + ZXI2C_REG_CR); + val &= ~(ZXI2C_CR_TX_END | ZXI2C_CR_RX_END); + + if (msg->len == 1) + val |= ZXI2C_CR_RX_END; + + writew(val, base + ZXI2C_REG_CR); + + reinit_completion(&i2c->complete); + + tcr_val |= ZXI2C_TCR_MASTER_READ | msg->addr; + + writew(tcr_val, base + ZXI2C_REG_TCR); + + if (!first) { + val = readw(base + ZXI2C_REG_CR); + val |= ZXI2C_CR_CPU_RDY; + writew(val, base + ZXI2C_REG_CR); + } + + while (xfer_len < msg->len) { + int err; + + err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); + if (err) + return err; + + msg->buf[xfer_len] = readw(base + ZXI2C_REG_CDR) >> 8; + xfer_len++; + + val = readw(base + ZXI2C_REG_CR) | ZXI2C_CR_CPU_RDY; + if (xfer_len == msg->len - 1) + val |= ZXI2C_CR_RX_END; + writew(val, base + ZXI2C_REG_CR); + } + + return 0; +} + +static int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + struct i2c_msg *msg; + int i; + int ret = 0; + struct zxi2c *i2c = i2c_get_adapdata(adap); + + for (i = 0; ret >= 0 && i < num; i++) { + msg = &msgs[i]; + if (msg->len == 0) { + dev_dbg(i2c->dev, "zero len unsupported\n"); + return -ENODEV; + } + if (msg->flags & I2C_M_RD) + ret = zxi2c_read(i2c, msg, i == 0); + else + ret = zxi2c_write(i2c, msg, i == (num - 1)); + } + + return (ret < 0) ? ret : i; +} + +static int zxi2c_fifo_xfer(struct zxi2c *i2c, struct i2c_msg *msg) +{ + u16 xfered_len = 0; + u16 byte_left = msg->len; + u16 tcr_val = i2c->tcr; + void __iomem *base = i2c->base; + bool read = !!(msg->flags & I2C_M_RD); + + while (byte_left) { + u16 i; + u8 tmp; + int error; + u16 xfer_len = min_t(u16, byte_left, ZXI2C_FIFO_SIZE); + + byte_left -= xfer_len; + + /* reset fifo buffer */ + tmp = ioread8(base + ZXI2C_REG_HCR); + iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR); + + /* set xfer len */ + if (read) { + iowrite8(xfer_len - 1, base + ZXI2C_REG_HRLR); + } else { + iowrite8(xfer_len - 1, base + ZXI2C_REG_HTLR); + /* set write data */ + for (i = 0; i < xfer_len; i++) + iowrite8(msg->buf[xfered_len + i], base + ZXI2C_REG_HTDR); + } + + /* prepare to stop transmission */ + if (i2c->hrv && !byte_left) { + tmp = ioread8(i2c->base + ZXI2C_REG_CR); + tmp |= read ? ZXI2C_CR_RX_END : ZXI2C_CR_TX_END; + iowrite8(tmp, base + ZXI2C_REG_CR); + } + + reinit_completion(&i2c->complete); + + if (xfered_len) { + /* continue transmission */ + tmp = ioread8(i2c->base + ZXI2C_REG_CR); + iowrite8(tmp |= ZXI2C_CR_CPU_RDY, i2c->base + ZXI2C_REG_CR); + } else { + /* start transmission */ + tcr_val |= (read ? 
ZXI2C_TCR_MASTER_READ : 0); + writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); + } + + error = zxi2c_wait_status(i2c, ZXI2C_IRQ_FIFOEND); + if (error) + return error; + + /* get the received data */ + if (read) + for (i = 0; i < xfer_len; i++) + msg->buf[xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR); + + xfered_len += xfer_len; + } + + return 1; +} + +static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + u8 tmp; + int ret; + struct zxi2c *i2c = (struct zxi2c *)i2c_get_adapdata(adap); + + ret = zxi2c_wait_bus_ready(i2c); + if (ret) + return ret; + + tmp = ioread8(i2c->base + ZXI2C_REG_CR); + tmp &= ~(ZXI2C_CR_RX_END | ZXI2C_CR_TX_END); + + if (num == 1 && msgs->len >= 2 && (i2c->hrv || msgs->len <= ZXI2C_FIFO_SIZE)) { + /* enable fifo mode */ + iowrite16(ZXI2C_CR_FIFO_MODE | tmp, i2c->base + ZXI2C_REG_CR); + /* clear irq status */ + iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR); + /* enable fifo irq */ + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, i2c->base + ZXI2C_REG_IMR); + ret = zxi2c_fifo_xfer(i2c, msgs); + } else { + /* enable byte mode */ + iowrite16(tmp, i2c->base + ZXI2C_REG_CR); + /* clear irq status */ + iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR); + /* enable byte irq */ + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, i2c->base + ZXI2C_REG_IMR); + ret = zxi2c_xfer(adap, msgs, num); + if (ret < 0) + iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR); + /* make sure the state machine is stopped */ + usleep_range(1, 2); + } + /* disable interrupts */ + iowrite8(0, i2c->base + ZXI2C_REG_IMR); + + /* timeout may be caused by another high-priority process, try again */ + if (ret == -ETIMEDOUT) + ret = -EAGAIN; + + return ret; +} + +static u32 zxi2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm zxi2c_algorithm = { + .master_xfer = zxi2c_master_xfer, + .functionality = zxi2c_func, +}; + +static const struct i2c_adapter_quirks zxi2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_COMB_WRITE_THEN_READ, +}; + +static const u32 zxi2c_speed_params_table[][3] = { + /* speed, ZXI2C_TCR, ZXI2C_FSTP */ + { I2C_MAX_STANDARD_MODE_FREQ, 0, ZXI2C_GOLD_FSTP_100K }, + { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_400K }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_1M }, + { I2C_MAX_HIGH_SPEED_MODE_FREQ, ZXI2C_TCR_HS_MODE | ZXI2C_TCR_FAST, + ZXI2C_GOLD_FSTP_3400K }, + /* never reached, keep for debug. freq src is 27M mode */ + { I2C_MAX_STANDARD_MODE_FREQ, 0, 0x83 }, + { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, 0x1e }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, 10 } +}; + +static void zxi2c_set_bus_speed(struct zxi2c *i2c) +{ + iowrite16(i2c->tr, i2c->base + ZXI2C_REG_TR); + iowrite8(ZXI2C_CLK_50M, i2c->base + ZXI2C_REG_CLK); + iowrite16(i2c->mcr, i2c->base + ZXI2C_REG_MCR); +} + +static void zxi2c_get_bus_speed(struct zxi2c *i2c) +{ + u8 i, count; + u8 fstp; + const u32 *params; + + u32 acpi_speed = i2c_acpi_find_bus_speed(i2c->dev); + + count = ARRAY_SIZE(zxi2c_speed_params_table); + for (i = 0; i < count; i++) + if (acpi_speed == zxi2c_speed_params_table[i][0]) + break; + /* if not found, use 400k as default */ + i = i < count ?
i : 1; + + params = zxi2c_speed_params_table[i]; + fstp = ioread8(i2c->base + ZXI2C_REG_TR); + if (abs(fstp - params[2]) > 0x10) { + /* + * if BIOS setting value far from golden value, + * use golden value and warn user + */ + dev_warn(i2c->dev, "speed:%d, fstp:0x%x, golden:0x%x\n", + params[0], fstp, params[2]); + i2c->tr = params[2] | 0xff00; + } else + i2c->tr = fstp | 0xff00; + + i2c->tcr = params[1]; + i2c->mcr = ioread16(i2c->base + ZXI2C_REG_MCR); + /* for Hs-mode, use 0000 1000 as master code */ + if (params[0] == I2C_MAX_HIGH_SPEED_MODE_FREQ) + i2c->mcr |= ZXI2C_HS_MASTER_CODE; + + dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0])); +} + +static int zxi2c_init(struct platform_device *pdev, struct zxi2c **pi2c) +{ + int err; + struct zxi2c *i2c; + struct resource *res; + + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (IS_ERR(res)) { + dev_err(&pdev->dev, "IORESOURCE_MEM failed\n"); + return -ENODEV; + } + i2c->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c->base)) + return PTR_ERR(i2c->base); + + i2c->irq = platform_get_irq(pdev, 0); + if (i2c->irq < 0) + return i2c->irq; + + err = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr, IRQF_SHARED, pdev->name, i2c); + if (err) + return dev_err_probe(&pdev->dev, err, "failed to request irq %i\n", i2c->irq); + + i2c->dev = &pdev->dev; + init_completion(&i2c->complete); + platform_set_drvdata(pdev, i2c); + + *pi2c = i2c; + return 0; +} + +static int zxi2c_probe(struct platform_device *pdev) +{ + int error; + struct zxi2c *i2c; + struct i2c_adapter *adap; + + error = zxi2c_init(pdev, &i2c); + if (error) + return error; + + zxi2c_get_bus_speed(i2c); + zxi2c_set_bus_speed(i2c); + i2c->hrv = ioread8(i2c->base + ZXI2C_REG_REV); + + adap = &i2c->adapter; + adap->owner = THIS_MODULE; + adap->algo = &zxi2c_algorithm; + adap->retries = 2; + adap->quirks = &zxi2c_quirks; + + adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(i2c->dev)); + i2c_set_adapdata(adap, i2c); + + error = i2c_add_adapter(adap); + if (error) + return error; + + dev_info(i2c->dev, "adapter /dev/i2c-%d registered. 
version %s\n", + adap->nr, DRIVER_VERSION); + + return 0; +} + +static int zxi2c_remove(struct platform_device *pdev) +{ + struct zxi2c *i2c = platform_get_drvdata(pdev); + + devm_free_irq(&pdev->dev, i2c->irq, i2c); + + i2c_del_adapter(&i2c->adapter); + + platform_set_drvdata(pdev, NULL); + + devm_kfree(&pdev->dev, i2c); + + return 0; +} + +static int zxi2c_resume(struct device *dev) +{ + struct zxi2c *i2c = dev_get_drvdata(dev); + + iowrite8(ZXI2C_CR_MST_RST, i2c->base + ZXI2C_REG_CR); + zxi2c_set_bus_speed(i2c); + + return 0; +} + +static const struct dev_pm_ops zxi2c_pm = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, zxi2c_resume) +}; + +static const struct acpi_device_id zxi2c_acpi_match[] = { + {"IIC1D17", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, zxi2c_acpi_match); + +static struct platform_driver zxi2c_driver = { + .probe = zxi2c_probe, + .remove = zxi2c_remove, + .driver = { + .name = ZX_I2C_NAME, + .acpi_match_table = ACPI_PTR(zxi2c_acpi_match), + .pm = &zxi2c_pm, + }, +}; + +module_platform_driver(zxi2c_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_AUTHOR("HansHu@zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin IIC driver"); +MODULE_LICENSE("GPL"); -- Gitee From 6a54ff1e75d277aee7a81b549480ac282ded46c6 Mon Sep 17 00:00:00 2001 From: Xingrui Yi Date: Tue, 9 Jan 2024 11:11:13 +0800 Subject: [PATCH 028/953] anolis: config: change nr cpus to 1024 ANBZ: #7888 On the x86 platform, the config NR_CPUS is 64, which is too small for servers. NR_CPUS should be changed to 1024, the same value as in 5.10. Signed-off-by: Xingrui Yi Reviewed-by: Qiao Ma Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/35 --- arch/x86/configs/anolis_defconfig | 5 +++-- lib/Kconfig | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 3c8b51687fb7..4fb6c9d9d8b7 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -408,9 +408,9 @@ CONFIG_DMI=y CONFIG_BOOT_VESA_SUPPORT=y # CONFIG_MAXSMP is not set CONFIG_NR_CPUS_RANGE_BEGIN=2 -CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 -CONFIG_NR_CPUS=64 +CONFIG_NR_CPUS=1024 CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y @@ -7263,6 +7263,7 @@ CONFIG_CMA_ALIGNMENT=8 # CONFIG_DMA_MAP_BENCHMARK is not set CONFIG_SGL_ALLOC=y CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y diff --git a/lib/Kconfig b/lib/Kconfig index c686f4adc124..777ba070bec7 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -532,7 +532,7 @@ config CHECK_SIGNATURE bool config CPUMASK_OFFSTACK - bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS + bool "Force CPU masks off stack" help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids -- Gitee From 81efa502992e25ce0253548aa50939b455cca65d Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 16 Nov 2023 06:22:42 -0800 Subject: [PATCH 029/953] perf/x86/intel/cstate: Cleanup duplicate attr_groups ANBZ: #8007 commit 243218ca93037631f0224fdbefea045912cb761a upstream. The events of the cstate_core and cstate_pkg PMU have the same format. They both need to create an "events" group (with empty attrs). The attr_groups can be shared. Remove the dedicated attr_groups for each cstate PMU. Use the shared cstate_attr_groups instead.
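A minimal sketch of the sharing pattern this cleanup establishes (not part of the patch; the structs are simplified stand-ins for the kernel's struct attribute_group and struct pmu): both PMUs now reference one attr_groups array instead of carrying per-PMU copies.

#include <stdio.h>

struct attribute_group { const char *name; };
struct pmu { const char *name; const struct attribute_group **attr_groups; };

static struct attribute_group events_group = { .name = "events" }; /* attrs filled at init */
static struct attribute_group format_group = { .name = "format" };

static const struct attribute_group *cstate_attr_groups[] = {
	&events_group, &format_group, NULL,
};

static struct pmu cstate_core_pmu = { "cstate_core", cstate_attr_groups };
static struct pmu cstate_pkg_pmu  = { "cstate_pkg",  cstate_attr_groups };

int main(void)
{
	printf("%s and %s share one attr_groups array: %s\n",
	       cstate_core_pmu.name, cstate_pkg_pmu.name,
	       cstate_core_pmu.attr_groups == cstate_pkg_pmu.attr_groups ?
	       "yes" : "no");
	return 0;
}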
Intel-SIG: commit 243218ca9303 perf/x86/intel/cstate: Cleanup duplicate attr_groups Backport Sierra Forest (SRF) perf cstate support to kernel v6.6. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231116142245.1233485-1-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2681 --- arch/x86/events/intel/cstate.c | 44 +++++++++------------------------- 1 file changed, 11 insertions(+), 33 deletions(-) diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 96fffb2d521d..934cd7103891 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -189,20 +189,20 @@ static struct attribute *attrs_empty[] = { * "events" group (with empty attrs) before updating * it with detected events. */ -static struct attribute_group core_events_attr_group = { +static struct attribute_group cstate_events_attr_group = { .name = "events", .attrs = attrs_empty, }; -DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63"); -static struct attribute *core_format_attrs[] = { - &format_attr_core_event.attr, +DEFINE_CSTATE_FORMAT_ATTR(cstate_event, event, "config:0-63"); +static struct attribute *cstate_format_attrs[] = { + &format_attr_cstate_event.attr, NULL, }; -static struct attribute_group core_format_attr_group = { +static struct attribute_group cstate_format_attr_group = { .name = "format", - .attrs = core_format_attrs, + .attrs = cstate_format_attrs, }; static cpumask_t cstate_core_cpu_mask; @@ -217,9 +217,9 @@ static struct attribute_group cpumask_attr_group = { .attrs = cstate_cpumask_attrs, }; -static const struct attribute_group *core_attr_groups[] = { - &core_events_attr_group, - &core_format_attr_group, +static const struct attribute_group *cstate_attr_groups[] = { + &cstate_events_attr_group, + &cstate_format_attr_group, &cpumask_attr_group, NULL, }; @@ -268,30 +268,8 @@ static struct perf_msr pkg_msr[] = { [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr }, }; -static struct attribute_group pkg_events_attr_group = { - .name = "events", - .attrs = attrs_empty, -}; - -DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63"); -static struct attribute *pkg_format_attrs[] = { - &format_attr_pkg_event.attr, - NULL, -}; -static struct attribute_group pkg_format_attr_group = { - .name = "format", - .attrs = pkg_format_attrs, -}; - static cpumask_t cstate_pkg_cpu_mask; -static const struct attribute_group *pkg_attr_groups[] = { - &pkg_events_attr_group, - &pkg_format_attr_group, - &cpumask_attr_group, - NULL, -}; - static ssize_t cstate_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) @@ -475,7 +453,7 @@ static const struct attribute_group *pkg_attr_update[] = { }; static struct pmu cstate_core_pmu = { - .attr_groups = core_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = core_attr_update, .name = "cstate_core", .task_ctx_nr = perf_invalid_context, @@ -490,7 +468,7 @@ static struct pmu cstate_core_pmu = { static struct pmu cstate_pkg_pmu = { - .attr_groups = pkg_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = pkg_attr_update, .name = "cstate_pkg", .task_ctx_nr = perf_invalid_context, -- Gitee From a59153cf5ab2850826b1781546b0ff2a536a4542 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 16 Nov 2023 06:22:43 -0800 Subject: [PATCH 030/953] x86/smp: Export symbol cpu_clustergroup_mask() ANBZ: #8007 commit
c3dd1995620cdcd65cf4944c4164b0dbc16e557c upstream. The Intel cstate PMU driver will invoke topology_cluster_cpumask() to retrieve the CPU mask of a cluster. A modpost error is triggered since the symbol cpu_clustergroup_mask is not exported. Intel-SIG: commit c3dd1995620c x86/smp: Export symbol cpu_clustergroup_mask() Backport Sierra Forest (SRF) perf cstate support to kernel v6.6. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231116142245.1233485-2-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2681 --- arch/x86/kernel/smpboot.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2a187c0cbd5b..54125b4cc46a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -747,6 +747,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu) { return cpu_l2c_shared_mask(cpu); } +EXPORT_SYMBOL_GPL(cpu_clustergroup_mask); static void impress_friends(void) { -- Gitee From 49d194cd28d7eddea01724ce8ff3eda501f6e7e3 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 16 Nov 2023 06:22:44 -0800 Subject: [PATCH 031/953] perf/x86/intel/cstate: Add Sierra Forest support ANBZ: #8007 commit 3877d55a0db2688c2e4ab8a319614a0c81f8e2d2 upstream. A new module C6 Residency Counter is introduced in the Sierra Forest. The scope of the new counter is the module (a cluster of cores sharing an L2 cache). Create a brand new cstate_module PMU to profile the new counter. The only differences between the new cstate_module PMU and the existing cstate PMU are the scope and events. Regarding the choice of the new cstate_module PMU name, the current naming rule of a cstate PMU is "cstate_" + the scope of the PMU. The scope of the PMU is the cores sharing an L2. On SRF, Intel calls it "module", while the internal Linux sched code calls it "cluster". The "cstate_module" is used as the new PMU name, because - The cstate PMU driver is an Intel-specific driver. It doesn't impact other ARCHs. The name makes it consistent with the documentation. - The "cluster" term is mainly used by scheduler developers, while the user of the cstate PMU is more likely a researcher reading HW docs and optimizing power. - In Intel's SDM, the "cluster" has a different meaning/scope for topology. Using it would mislead the end users. Besides the module C6, the core C1/C6 and pkg C6 residency counters are supported in the Sierra Forest as well. Intel-SIG: commit 3877d55a0db2 perf/x86/intel/cstate: Add Sierra Forest support Backport Sierra Forest (SRF) perf cstate support to kernel v6.6.
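To picture the module scope, a hedged sketch (not code from this patch; it assumes the MSR_MODULE_C6_RES_MS definition from msr-index.h and the kernel's rdmsrl_on_cpu() helper):

/* Sketch: any one CPU of a module can report the module-wide count. */
static u64 read_module_c6(unsigned int cpu)
{
	u64 res = 0;

	/* Return value of rdmsrl_on_cpu() ignored for brevity. */
	rdmsrl_on_cpu(cpu, MSR_MODULE_C6_RES_MS, &res);
	return res;
}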
Suggested-by: Artem Bityutskiy Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231116142245.1233485-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2681 --- arch/x86/events/intel/cstate.c | 113 +++++++++++++++++++++++++++++++-- 1 file changed, 109 insertions(+), 4 deletions(-) diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 934cd7103891..f2651a251c02 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -41,7 +41,7 @@ * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL - * MTL + * MTL,SRF * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -52,7 +52,7 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 @@ -75,7 +75,7 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 @@ -97,6 +97,10 @@ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, * TNT,RKL,ADL,RPL,MTL * Scope: Package (physical package) + * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter. + * perf code: 0x00 + * Available model: SRF + * Scope: A cluster of cores shared L2 cache * */ @@ -130,6 +134,7 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, struct cstate_model { unsigned long core_events; unsigned long pkg_events; + unsigned long module_events; unsigned long quirks; }; @@ -270,6 +275,28 @@ static struct perf_msr pkg_msr[] = { static cpumask_t cstate_pkg_cpu_mask; +/* cstate_module PMU */ +static struct pmu cstate_module_pmu; +static bool has_cstate_module; + +enum perf_cstate_module_events { + PERF_CSTATE_MODULE_C6_RES = 0, + + PERF_CSTATE_MODULE_EVENT_MAX, +}; + +PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_module_c6, "event=0x00"); + +static unsigned long module_msr_mask; + +PMU_EVENT_GROUP(events, cstate_module_c6); + +static struct perf_msr module_msr[] = { + [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr }, +}; + +static cpumask_t cstate_module_cpu_mask; + static ssize_t cstate_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) @@ -280,6 +307,8 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask); else if (pmu == &cstate_pkg_pmu) return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask); + else if (pmu == &cstate_module_pmu) + return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask); else return 0; } @@ -317,6 +346,15 @@ static int cstate_pmu_event_init(struct perf_event *event) event->hw.event_base = pkg_msr[cfg].msr; cpu = cpumask_any_and(&cstate_pkg_cpu_mask, topology_die_cpumask(event->cpu)); + } else if (event->pmu == &cstate_module_pmu) { + if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX) + return -EINVAL; + cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_MODULE_EVENT_MAX); + if (!(module_msr_mask & (1 << cfg))) + return -EINVAL; + event->hw.event_base = 
module_msr[cfg].msr; + cpu = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(event->cpu)); } else { return -ENOENT; } @@ -404,6 +442,17 @@ static int cstate_cpu_exit(unsigned int cpu) perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target); } } + + if (has_cstate_module && + cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) { + + target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu); + /* Migrate events if there is a valid target */ + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &cstate_module_cpu_mask); + perf_pmu_migrate_context(&cstate_module_pmu, cpu, target); + } + } return 0; } @@ -430,6 +479,15 @@ static int cstate_cpu_init(unsigned int cpu) if (has_cstate_pkg && target >= nr_cpu_ids) cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); + /* + * If this is the first online thread of that cluster, set it + * in the cluster cpu mask as the designated reader. + */ + target = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(cpu)); + if (has_cstate_module && target >= nr_cpu_ids) + cpumask_set_cpu(cpu, &cstate_module_cpu_mask); + return 0; } @@ -452,6 +510,11 @@ static const struct attribute_group *pkg_attr_update[] = { NULL, }; +static const struct attribute_group *module_attr_update[] = { + &group_cstate_module_c6, + NULL +}; + static struct pmu cstate_core_pmu = { .attr_groups = cstate_attr_groups, .attr_update = core_attr_update, @@ -482,6 +545,21 @@ static struct pmu cstate_pkg_pmu = { .module = THIS_MODULE, }; +static struct pmu cstate_module_pmu = { + .attr_groups = cstate_attr_groups, + .attr_update = module_attr_update, + .name = "cstate_module", + .task_ctx_nr = perf_invalid_context, + .event_init = cstate_pmu_event_init, + .add = cstate_pmu_event_add, + .del = cstate_pmu_event_del, + .start = cstate_pmu_event_start, + .stop = cstate_pmu_event_stop, + .read = cstate_pmu_event_update, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, + .module = THIS_MODULE, +}; + static const struct cstate_model nhm_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C3_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -596,6 +674,15 @@ static const struct cstate_model glm_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), }; +static const struct cstate_model srf_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES), + + .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES), + + .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), +}; + static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates), @@ -648,6 +735,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates), @@ -689,10 +777,14 @@ static int __init cstate_probe(const struct cstate_model *cm) pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX, true, (void *) &cm->pkg_events); + module_msr_mask = perf_msr_probe(module_msr, PERF_CSTATE_MODULE_EVENT_MAX, + true, (void *) &cm->module_events); + has_cstate_core = !!core_msr_mask; has_cstate_pkg = !!pkg_msr_mask; + has_cstate_module = !!module_msr_mask; - return (has_cstate_core || has_cstate_pkg) ? 
0 : -ENODEV; + return (has_cstate_core || has_cstate_pkg || has_cstate_module) ? 0 : -ENODEV; } static inline void cstate_cleanup(void) @@ -705,6 +797,9 @@ static inline void cstate_cleanup(void) if (has_cstate_pkg) perf_pmu_unregister(&cstate_pkg_pmu); + + if (has_cstate_module) + perf_pmu_unregister(&cstate_module_pmu); } static int __init cstate_init(void) @@ -741,6 +836,16 @@ static int __init cstate_init(void) return err; } } + + if (has_cstate_module) { + err = perf_pmu_register(&cstate_module_pmu, cstate_module_pmu.name, -1); + if (err) { + has_cstate_module = false; + pr_info("Failed to register cstate cluster pmu\n"); + cstate_cleanup(); + return err; + } + } return 0; } -- Gitee From 6aaa70225cef6557f3721ebb64635d5ed98ed6b4 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 16 Nov 2023 06:22:45 -0800 Subject: [PATCH 032/953] perf/x86/intel/cstate: Add Grand Ridge support ANBZ: #8007 commit bbb968696d0f3442ab823598def3b756cf4735c6 upstream. Like the Sierra Forest, the Grand Ridge supports core C1/C6 and module C6, but it doesn't support the pkg C6 residency counter. Intel-SIG: commit bbb968696d0f perf/x86/intel/cstate: Add Grand Ridge support Backport Sierra Forest (SRF) perf cstate support to kernel v6.6. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231116142245.1233485-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2681 --- arch/x86/events/intel/cstate.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index f2651a251c02..6b65ee785da9 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -41,7 +41,7 @@ * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL - * MTL,SRF + * MTL,SRF,GRR * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -52,7 +52,8 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, + * GRR * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 @@ -99,7 +100,7 @@ * Scope: Package (physical package) * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter.
* perf code: 0x00 - * Available model: SRF + * Available model: SRF,GRR * Scope: A cluster of cores shared L2 cache * */ @@ -674,6 +675,13 @@ static const struct cstate_model glm_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), }; +static const struct cstate_model grr_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES), + + .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), +}; + static const struct cstate_model srf_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -736,6 +744,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &grr_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates), -- Gitee From 11d3b2851c7f535b86a621a864eff11b93dd4326 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sat, 23 Dec 2023 11:40:56 +0800 Subject: [PATCH 033/953] sched: Add cpus_share_resources API ANBZ: #8001 commit b95303e0aeaf446b65169dd4142cacdaeb7d4c8b upstream. Add the cpus_share_resources() API. This is the preparation for the optimization of select_idle_cpu() on platforms with a cluster scheduler level. On a machine with clusters, cpus_share_resources() will test whether two cpus are within the same cluster. On a non-cluster machine it behaves the same as cpus_share_cache(). So we use "resources" here for cache resources. Intel-SIG: commit b95303e0aeaf sched: Add cpus_share_resources API. Cluster based task wakeup optimization backport. Signed-off-by: Barry Song Signed-off-by: Yicong Yang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Gautham R. Shenoy Reviewed-by: Tim Chen Reviewed-by: Vincent Guittot Tested-and-reviewed-by: Chen Yu Tested-by: K Prateek Nayak Link: https://lkml.kernel.org/r/20231019033323.54147-2-yangyicong@huawei.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2678 --- include/linux/sched/sd_flags.h | 7 +++++++ include/linux/sched/topology.h | 8 +++++++- kernel/sched/core.c | 12 ++++++++++++ kernel/sched/sched.h | 1 + kernel/sched/topology.c | 13 +++++++++++++ 5 files changed, 40 insertions(+), 1 deletion(-) diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h index fad77b5172e2..a8b28647aafc 100644 --- a/include/linux/sched/sd_flags.h +++ b/include/linux/sched/sd_flags.h @@ -109,6 +109,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) */ SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) +/* + * Domain members share CPU cluster (LLC tags or L2 cache) + * + * NEEDS_GROUPS: Clusters are shared between groups. + */ +SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS) + /* * Domain members share CPU package resources (i.e.
caches) * diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 67b573d5bf28..4c14fe127223 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void) #ifdef CONFIG_SCHED_CLUSTER static inline int cpu_cluster_flags(void) { - return SD_SHARE_PKG_RESOURCES; + return SD_CLUSTER | SD_SHARE_PKG_RESOURCES; } #endif @@ -179,6 +179,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms); void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); bool cpus_share_cache(int this_cpu, int that_cpu); +bool cpus_share_resources(int this_cpu, int that_cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); typedef int (*sched_domain_flags_f)(void); @@ -232,6 +233,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +static inline bool cpus_share_resources(int this_cpu, int that_cpu) +{ + return true; +} + #endif /* !CONFIG_SMP */ #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1f91e2c12731..de44275a2b56 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3960,6 +3960,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu) return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); } +/* + * Whether CPUs are share cache resources, which means LLC on non-cluster + * machines and LLC tag or L2 on machines with clusters. + */ +bool cpus_share_resources(int this_cpu, int that_cpu) +{ + if (this_cpu == that_cpu) + return true; + + return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); +} + static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) { /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 04846272409c..f64f0d0df273 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1868,6 +1868,7 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); DECLARE_PER_CPU(int, sd_llc_size); DECLARE_PER_CPU(int, sd_llc_id); +DECLARE_PER_CPU(int, sd_share_id); DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 423d08947962..8e3d6bea306e 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -666,6 +666,7 @@ static void destroy_sched_domains(struct sched_domain *sd) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); DEFINE_PER_CPU(int, sd_llc_size); DEFINE_PER_CPU(int, sd_llc_id); +DEFINE_PER_CPU(int, sd_share_id); DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); @@ -691,6 +692,17 @@ static void update_top_cache_domain(int cpu) per_cpu(sd_llc_id, cpu) = id; rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); + sd = lowest_flag_domain(cpu, SD_CLUSTER); + if (sd) + id = cpumask_first(sched_domain_span(sd)); + + /* + * This assignment should be placed after the sd_llc_id as + * we want this id equals to cluster id on cluster machines + * but equals to LLC id on non-Cluster machines. 
+ */ + per_cpu(sd_share_id, cpu) = id; + sd = lowest_flag_domain(cpu, SD_NUMA); rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); @@ -1548,6 +1560,7 @@ static struct cpumask ***sched_domains_numa_masks; */ #define TOPOLOGY_SD_FLAGS \ (SD_SHARE_CPUCAPACITY | \ + SD_CLUSTER | \ SD_SHARE_PKG_RESOURCES | \ SD_NUMA | \ SD_ASYM_PACKING) -- Gitee From de51c72fe82fdaaa4d89e79e963b3341294b62d8 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sat, 23 Dec 2023 11:51:13 +0800 Subject: [PATCH 034/953] sched/fair: Scan cluster before scanning LLC in wake-up path ANBZ: #8001 commit 8881e1639f1f899b64e9bccf6cc14d51c1d3c822 upstream. For platforms having clusters like Kunpeng920, CPUs within the same cluster have lower latency when synchronizing and accessing shared resources like cache. Thus, this patch tries to find an idle cpu within the cluster of the target CPU before scanning the whole LLC, to gain lower latency. This is implemented in 2 steps in select_idle_sibling() (a simplified sketch of the resulting scan order follows after the benchmark results below):
1. When the prev_cpu/recent_used_cpu are good wakeup candidates, use them if they share a cluster with the target CPU. Otherwise, try to scan for an idle CPU in the target's cluster.
2. Scan the cluster prior to the LLC of the target CPU for an idle CPU to wake up on.
Testing has been done on Kunpeng920 by pinning tasks to one NUMA node and to two NUMA nodes. On Kunpeng920, each NUMA node has 8 clusters and each cluster has 4 CPUs. With this patch, we noticed enhancement on tbench and netperf within one NUMA node or across two NUMA nodes, on top of tip-sched-core commit 9b46f1abc6d4 ("sched/debug: Print 'tgid' in sched_show_task()")
tbench results (node 0):
baseline patched
1: 327.2833 372.4623 ( 13.80%)
4: 1320.5933 1479.8833 ( 12.06%)
8: 2638.4867 2921.5267 ( 10.73%)
16: 5282.7133 5891.5633 ( 11.53%)
32: 9810.6733 9877.3400 ( 0.68%)
64: 7408.9367 7447.9900 ( 0.53%)
128: 6203.2600 6191.6500 ( -0.19%)
tbench results (node 0-1):
baseline patched
1: 332.0433 372.7223 ( 12.25%)
4: 1325.4667 1477.6733 ( 11.48%)
8: 2622.9433 2897.9967 ( 10.49%)
16: 5218.6100 5878.2967 ( 12.64%)
32: 10211.7000 11494.4000 ( 12.56%)
64: 13313.7333 16740.0333 ( 25.74%)
128: 13959.1000 14533.9000 ( 4.12%)
netperf results TCP_RR (node 0):
baseline patched
1: 76546.5033 90649.9867 ( 18.42%)
4: 77292.4450 90932.7175 ( 17.65%)
8: 77367.7254 90882.3467 ( 17.47%)
16: 78519.9048 90938.8344 ( 15.82%)
32: 72169.5035 72851.6730 ( 0.95%)
64: 25911.2457 25882.2315 ( -0.11%)
128: 10752.6572 10768.6038 ( 0.15%)
netperf results TCP_RR (node 0-1):
baseline patched
1: 76857.6667 90892.2767 ( 18.26%)
4: 78236.6475 90767.3017 ( 16.02%)
8: 77929.6096 90684.1633 ( 16.37%)
16: 77438.5873 90502.5787 ( 16.87%)
32: 74205.6635 88301.5612 ( 19.00%)
64: 69827.8535 71787.6706 ( 2.81%)
128: 25281.4366 25771.3023 ( 1.94%)
netperf results UDP_RR (node 0):
baseline patched
1: 96869.8400 110800.8467 ( 14.38%)
4: 97744.9750 109680.5425 ( 12.21%)
8: 98783.9863 110409.9637 ( 11.77%)
16: 99575.0235 110636.2435 ( 11.11%)
32: 95044.7250 97622.8887 ( 2.71%)
64: 32925.2146 32644.4991 ( -0.85%)
128: 12859.2343 12824.0051 ( -0.27%)
netperf results UDP_RR (node 0-1):
baseline patched
1: 97202.4733 110190.1200 ( 13.36%)
4: 95954.0558 106245.7258 ( 10.73%)
8: 96277.1958 105206.5304 ( 9.27%)
16: 97692.7810 107927.2125 ( 10.48%)
32: 79999.6702 103550.2999 ( 29.44%)
64: 80592.7413 87284.0856 ( 8.30%)
128: 27701.5770 29914.5820 ( 7.99%)
Note neither Kunpeng920 nor x86 Jacobsville supports SMT, so the SMT branch in the code has not been tested, but it is supposed to work.
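The scan order of step 2 can be pictured with a simplified sketch (illustrative only; the real select_idle_cpu() below also honors has_idle_core and the scan-depth budget nr):

/* Sketch: prefer the target's cluster, then the rest of the LLC. */
static int sketch_scan_order(struct task_struct *p, int target,
			     const struct cpumask *llc,
			     const struct cpumask *cluster)
{
	int cpu;

	/* Pass 1: CPUs sharing the target's cluster (lower latency). */
	for_each_cpu_wrap(cpu, cluster, target + 1)
		if (cpumask_test_cpu(cpu, p->cpus_ptr) && idle_cpu(cpu))
			return cpu;

	/* Pass 2: the remaining CPUs of the LLC. */
	for_each_cpu_wrap(cpu, llc, target + 1)
		if (!cpumask_test_cpu(cpu, cluster) &&
		    cpumask_test_cpu(cpu, p->cpus_ptr) && idle_cpu(cpu))
			return cpu;

	return -1;
}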
Chen Yu also noticed this will improve the performance of tbench and netperf on a 24-CPU Jacobsville machine, where 4 CPUs in one cluster share the L2 cache. Intel-SIG: commit 8881e1639f1f sched/fair: Scan cluster before scanning LLC in wake-up path. Cluster based task wakeup optimization backport. [https://lore.kernel.org/lkml/Ytfjs+m1kUs0ScSn@worktop.programming.kicks-ass.net] Suggested-by: Peter Zijlstra Signed-off-by: Barry Song Signed-off-by: Yicong Yang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Tim Chen Reviewed-by: Chen Yu Reviewed-by: Gautham R. Shenoy Reviewed-by: Vincent Guittot Tested-and-reviewed-by: Chen Yu Tested-by: K Prateek Nayak Link: https://lkml.kernel.org/r/20231019033323.54147-3-yangyicong@huawei.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2678 --- kernel/sched/fair.c | 40 ++++++++++++++++++++++++++++++++++---- kernel/sched/sched.h | 1 + kernel/sched/topology.c | 12 ++++++++++++ 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 69fe62126a28..f835680f2be8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7309,6 +7309,30 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool } } + if (static_branch_unlikely(&sched_cluster_active)) { + struct sched_group *sg = sd->groups; + + if (sg->flags & SD_CLUSTER) { + for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) { + if (!cpumask_test_cpu(cpu, cpus)) + continue; + + if (has_idle_core) { + i = select_idle_core(p, cpu, cpus, &idle_cpu); + if ((unsigned int)i < nr_cpumask_bits) + return i; + } else { + if (--nr <= 0) + return -1; + idle_cpu = __select_idle_cpu(cpu, p); + if ((unsigned int)idle_cpu < nr_cpumask_bits) + return idle_cpu; + } + } + cpumask_andnot(cpus, cpus, sched_group_span(sg)); + } + } + for_each_cpu_wrap(cpu, cpus, target + 1) { if (has_idle_core) { i = select_idle_core(p, cpu, cpus, &idle_cpu); @@ -7316,7 +7340,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool return i; } else { - if (!--nr) + if (--nr <= 0) return -1; idle_cpu = __select_idle_cpu(cpu, p); if ((unsigned int)idle_cpu < nr_cpumask_bits) @@ -7445,8 +7469,12 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) */ if (prev != target && cpus_share_cache(prev, target) && (available_idle_cpu(prev) || sched_idle_cpu(prev)) && - asym_fits_cpu(task_util, util_min, util_max, prev)) - return prev; + asym_fits_cpu(task_util, util_min, util_max, prev)) { + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(prev, target)) + return prev; + } /* * Allow a per-cpu kthread to stack with the wakee if the @@ -7473,7 +7501,11 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) { - return recent_used_cpu; + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(recent_used_cpu, target)) + return recent_used_cpu; + } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index f64f0d0df273..1a02d4c602b1 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1874,6 +1874,7 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DECLARE_PER_CPU(struct
sched_domain __rcu *, sd_asym_cpucapacity); extern struct static_key_false sched_asym_cpucapacity; +extern struct static_key_false sched_cluster_active; static __always_inline bool sched_asym_cpucap_active(void) { diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 8e3d6bea306e..53b9ab660257 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -671,7 +671,9 @@ DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); + DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); +DEFINE_STATIC_KEY_FALSE(sched_cluster_active); static void update_top_cache_domain(int cpu) { @@ -2379,6 +2381,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att struct rq *rq = NULL; int i, ret = -ENOMEM; bool has_asym = false; + bool has_cluster = false; if (WARN_ON(cpumask_empty(cpu_map))) goto error; @@ -2504,12 +2507,18 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); cpu_attach_domain(sd, d.rd, i); + + if (lowest_flag_domain(i, SD_CLUSTER)) + has_cluster = true; } rcu_read_unlock(); if (has_asym) static_branch_inc_cpuslocked(&sched_asym_cpucapacity); + if (has_cluster) + static_branch_inc_cpuslocked(&sched_cluster_active); + if (rq && sched_debug_verbose) { pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); @@ -2609,6 +2618,9 @@ static void detach_destroy_domains(const struct cpumask *cpu_map) if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) static_branch_dec_cpuslocked(&sched_asym_cpucapacity); + if (static_branch_unlikely(&sched_cluster_active)) + static_branch_dec_cpuslocked(&sched_cluster_active); + rcu_read_lock(); for_each_cpu(i, cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); -- Gitee From 44e01bbac1c279840e654b33ef849c15b8bcbfe2 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 19 Oct 2023 11:33:23 +0800 Subject: [PATCH 035/953] sched/fair: Use candidate prev/recent_used CPU if scanning failed for cluster wakeup ANBZ: #8001 commit 22165f61d0c4092adf40f967c899e5d8b8a0d703 upstream. Chen Yu reports a hackbench regression of cluster wakeup when the number of hackbench threads equals the CPU number [1]. Analysis shows it's because we wake up tasks on the target CPU more often, even if the prev_cpu is a good wakeup candidate, which leads to a decrease of the CPU utilization. Generally if the task's prev_cpu is idle we'll wake up the task on it without scanning. On cluster machines we'll try to wake up the task in the same cluster as the target for better cache affinity, so if the prev_cpu is idle but not sharing the same cluster with the target we'll still try to find an idle CPU within the cluster. This will improve the performance at low loads on cluster machines. But in the issue above, if the prev_cpu is idle but not in the cluster with the target CPU, we'll try to scan an idle one in the cluster. But since the system is busy, we're likely to fail the scanning and use target instead, even if the prev_cpu is idle. This then leads to the regression. This patch solves this in 2 steps: o record the prev_cpu/recent_used_cpu if they're good wakeup candidates but not sharing the cluster with the target.
o on scanning failure use the prev_cpu/recent_used_cpu if they're recorded as idle [1] https://lore.kernel.org/all/ZGzDLuVaHR1PAYDt@chenyu5-mobl1/ Intel-SIG: commit 22165f61d0c4 Use candidate prev/recent_used CPU if scanning failed for cluster wakeup. Cluster based task wakeup optimization backport Closes: https://lore.kernel.org/all/ZGsLy83wPIpamy6x@chenyu5-mobl1/ Reported-by: Chen Yu Signed-off-by: Yicong Yang Tested-and-reviewed-by: Chen Yu Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Vincent Guittot Link: https://lkml.kernel.org/r/20231019033323.54147-4-yangyicong@huawei.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2678 --- kernel/sched/fair.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f835680f2be8..9773437c4f2f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7442,7 +7442,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) bool has_idle_core = false; struct sched_domain *sd; unsigned long task_util, util_min, util_max; - int i, recent_used_cpu; + int i, recent_used_cpu, prev_aff = -1; /* * On asymmetric system, update task utilization because we will check @@ -7474,6 +7474,8 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if (!static_branch_unlikely(&sched_cluster_active) || cpus_share_resources(prev, target)) return prev; + + prev_aff = prev; } /* @@ -7506,6 +7508,8 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) cpus_share_resources(recent_used_cpu, target)) return recent_used_cpu; + } else { + recent_used_cpu = -1; } /* @@ -7546,6 +7550,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if ((unsigned)i < nr_cpumask_bits) return i; + /* + * For cluster machines which have lower sharing cache like L2 or + * LLC Tag, we tend to find an idle CPU in the target's cluster + * first. But prev_cpu or recent_used_cpu may also be a good candidate, + * use them if possible when no idle CPU found in select_idle_cpu(). + */ + if ((unsigned int)prev_aff < nr_cpumask_bits) + return prev_aff; + if ((unsigned int)recent_used_cpu < nr_cpumask_bits) + return recent_used_cpu; + return target; } -- Gitee From 369ec456709a90961e121d603efbbe7d8853a7d6 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:19 -0700 Subject: [PATCH 036/953] perf: Add branch stack counters ANBZ: #8006 commit 571d91dcadfa3cef499010b4eddb9b58b0da4d24 upstream. Currently, the additional information of a branch entry is stored in a u64 space. With more and more information added, the space is running out. For example, the information of occurrences of events will be added for each branch. Two places were suggested to append the counters. https://lore.kernel.org/lkml/20230802215814.GH231007@hirez.programming.kicks-ass.net/ One place is right after the flags of each branch entry. It changes the existing struct perf_branch_entry. Later arch-specific implementations would have to be really careful to consistently pick the right struct. The other place is right after the entire struct perf_branch_stack. The disadvantage is that the pointer of the extra space has to be recorded. The common interface perf_sample_save_brstack() has to be updated. The latter is much more straightforward, and should be easy to understand and maintain. It is implemented in this patch.
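Schematically, the sample payload with the extension looks as follows (a sketch matching the uapi comment added later in this patch):

/*
 * PERF_SAMPLE_BRANCH_STACK payload with PERF_SAMPLE_BRANCH_COUNTERS:
 *
 *	u64 nr;
 *	{ u64 hw_idx; }				# if PERF_SAMPLE_BRANCH_HW_INDEX
 *	struct perf_branch_entry lbr[nr];	# from, to, flags per branch
 *	u64 counters[nr];			# appended extension space
 */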
Add a new branch sample type, PERF_SAMPLE_BRANCH_COUNTERS, to indicate the event which is recorded in the branch info. The "u64 counters" may store the occurrences of several events. The information regarding the number of events/counters and the width of each counter should be exposed via sysfs as a reference for the perf tool. Define the branch_counter_nr and branch_counter_width ABI here. The support will be implemented later in the Intel-specific patch. Intel-SIG: commit 571d91dcadfa perf: Add branch stack counters Backport LBR branch counter support to kernel v6.6. Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-1-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- .../testing/sysfs-bus-event_source-devices-caps | 6 ++++++ arch/powerpc/perf/core-book3s.c | 2 +- arch/x86/events/amd/core.c | 2 +- arch/x86/events/core.c | 2 +- arch/x86/events/intel/core.c | 2 +- arch/x86/events/intel/ds.c | 4 ++-- include/linux/perf_event.h | 17 ++++++++++++++++- include/uapi/linux/perf_event.h | 10 ++++++++++ kernel/events/core.c | 8 ++++++++ 9 files changed, 46 insertions(+), 7 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps index 8757dcf41c08..a5f506f7d481 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps @@ -16,3 +16,9 @@ Description: Example output in powerpc: grep . /sys/bus/event_source/devices/cpu/caps/* /sys/bus/event_source/devices/cpu/caps/pmu_name:POWER9 + + The "branch_counter_nr" in the supported platform exposes the + maximum number of counters which can be shown in the u64 counters + of PERF_SAMPLE_BRANCH_COUNTERS, while the "branch_counter_width" + exposes the width of each counter. Both of them can be used by + the perf tool to parse the logged counters in each branch. 
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 10b946e9c6e7..b7ff680cde96 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2312,7 +2312,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct cpu_hw_events *cpuhw; cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_bhrb_read(event, cpuhw); - perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack); + perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack, NULL); } if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 5365d6acbf09..1b24c0ff5d8b 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -939,7 +939,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) continue; if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 185f902e5f28..2919bb5a53a0 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1702,7 +1702,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index bc4fcf0d9405..6037c11fd101 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3058,7 +3058,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index eb8dd8b8a1e8..7566190389f0 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1755,7 +1755,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, setup_pebs_time(event, data, pebs->tsc); if (has_branch_stack(event)) - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); } static void adaptive_pebs_save_regs(struct pt_regs *regs, @@ -1912,7 +1912,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, if (has_branch_stack(event)) { intel_pmu_store_pebs_lbrs(lbr); - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); } } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e846f87e2d09..247871bcd71f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1143,6 +1143,10 @@ static inline bool branch_sample_priv(const struct perf_event *event) return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; } +static inline bool branch_sample_counters(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; +} struct perf_sample_data { /* @@ -1177,6 +1181,7 @@ struct perf_sample_data { struct perf_callchain_entry *callchain; struct perf_raw_record 
*raw; struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; @@ -1254,7 +1259,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data, static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, - struct perf_branch_stack *brs) + struct perf_branch_stack *brs, + u64 *brs_cntr) { int size = sizeof(u64); /* nr */ @@ -1262,7 +1268,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, size += sizeof(u64); size += brs->nr * sizeof(struct perf_branch_entry); + /* + * The extension space for counters is appended after the + * struct perf_branch_stack. It is used to store the occurrences + * of events of each branch. + */ + if (brs_cntr) + size += brs->nr * sizeof(u64); + data->br_stack = brs; + data->br_stack_cntr = brs_cntr; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; } diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 39c6a250dd1b..4461f380425b 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. + * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi diff --git a/kernel/events/core.c b/kernel/events/core.c index fe543e7898f5..6da3f590eb20 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7385,6 +7385,14 @@ void perf_output_sample(struct perf_output_handle *handle, if (branch_sample_hw_index(event)) perf_output_put(handle, data->br_stack->hw_idx); perf_output_copy(handle, data->br_stack->entries, size); + /* + * Add the extension space which is appended + * right after the struct perf_branch_stack. + */ + if (data->br_stack_cntr) { + size = data->br_stack->nr * sizeof(u64); + perf_output_copy(handle, data->br_stack_cntr, size); + } } else { /* * we always store at least the value of nr -- Gitee From 7f1f50f70462f346f8a4744120a2179c8e5965ca Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:20 -0700 Subject: [PATCH 037/953] perf/x86: Add PERF_X86_EVENT_NEEDS_BRANCH_STACK flag ANBZ: #8006 commit 85846b27072defc7ab3dcee7ff36563a040079dc upstream. Currently, branch_sample_type != 0 is used to check whether a branch stack setup is required. But it doesn't check the sample type, so an unnecessary branch stack setup may be done for a counting event. E.g., perf record -e "{branch-instructions,branch-misses}:S" -j any Also, an event with only the new PERF_SAMPLE_BRANCH_COUNTERS branch sample type may not require a branch stack setup either. Add a new flag NEEDS_BRANCH_STACK to indicate whether the event requires a branch stack setup.
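The fix boils down to a single predicate, condensed here from the intel_pmu_hw_config() hunk below:

	/* Only a sampling event that asks for branches needs the LBR setup. */
	if (needs_branch_stack(event) && is_sampling_event(event))
		event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;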
Replace the needs_branch_stack() check with a check of the new flag. The counting event check is implemented here. A later patch will take the new PERF_SAMPLE_BRANCH_COUNTERS into account. Intel-SIG: commit 85846b27072d perf/x86: Add PERF_X86_EVENT_NEEDS_BRANCH_STACK flag Backport LBR branch counter support to kernel v6.6. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-2-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/intel/core.c | 14 +++++++++++--- arch/x86/events/perf_event_flags.h | 1 + 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6037c11fd101..aadc4457f0db 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2518,9 +2518,14 @@ static void intel_pmu_assign_event(struct perf_event *event, int idx) perf_report_aux_output_id(event, idx); } +static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event) +{ + return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK; +} + static void intel_pmu_del_event(struct perf_event *event) { - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_del(event); if (event->attr.precise_ip) intel_pmu_pebs_del(event); @@ -2831,7 +2836,7 @@ static void intel_pmu_add_event(struct perf_event *event) { if (event->attr.precise_ip) intel_pmu_pebs_add(event); - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_add(event); } @@ -3908,7 +3913,10 @@ static int intel_pmu_hw_config(struct perf_event *event) x86_pmu.pebs_aliases(event); } - if (needs_branch_stack(event)) { + if (needs_branch_stack(event) && is_sampling_event(event)) + event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + if (intel_pmu_needs_branch_stack(event)) { ret = intel_pmu_setup_lbr_filter(event); if (ret) return ret; diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h index 1dc19b9b4426..a1685981c520 100644 --- a/arch/x86/events/perf_event_flags.h +++ b/arch/x86/events/perf_event_flags.h @@ -20,3 +20,4 @@ PERF_ARCH(TOPDOWN, 0x04000) /* Count Topdown slots/metrics events */ PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */ PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */ PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */ +PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */ -- Gitee From 1b17dfa4aea87d713f12552e4b1455f35ef0a059 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:21 -0700 Subject: [PATCH 038/953] perf: Add branch_sample_call_stack ANBZ: #8006 commit 1f2376cd03dd3b965d130ed46a7c92769d614ba1 upstream. Add a helper function to check the call stack sample type. A later patch will invoke the function in several places. Intel-SIG: commit 1f2376cd03dd perf: Add branch_sample_call_stack Backport LBR branch counter support to kernel v6.6.
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/core.c | 2 +- include/linux/perf_event.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 2919bb5a53a0..b56e9c3a0cc8 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -601,7 +601,7 @@ int x86_pmu_hw_config(struct perf_event *event) } } - if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) + if (branch_sample_call_stack(event)) event->attach_state |= PERF_ATTACH_TASK_DATA; /* diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 247871bcd71f..b22a10fe04fd 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1148,6 +1148,11 @@ static inline bool branch_sample_counters(const struct perf_event *event) return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; } +static inline bool branch_sample_call_stack(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; +} + struct perf_sample_data { /* * Fields set by perf_sample_data_init() unconditionally, -- Gitee From fb3d84f802425865d7c09c4fe814de96f0d31c72 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:22 -0700 Subject: [PATCH 039/953] perf/x86/intel: Reorganize attrs and is_visible ANBZ: #8006 commit 318c4985911245508f7e0bab5265e208a38b5f18 upstream. Some attrs and is_visible implementations are rather far away from one another which makes the whole thing hard to interpret. There are only two attribute groups which have both .attrs and .is_visible, group_default and group_caps_lbr. Move them together. No functional changes. Intel-SIG: commit 318c49859112 perf/x86/intel: Reorganize attrs and is_visible Backport LBR branch counter support to kernel v6.6. Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/intel/core.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index aadc4457f0db..38335d60f31f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5481,6 +5481,12 @@ static struct attribute *lbr_attrs[] = { NULL }; +static umode_t +lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return x86_pmu.lbr_nr ? attr->mode : 0; +} + static char pmu_name_str[30]; static ssize_t pmu_name_show(struct device *cdev, @@ -5507,6 +5513,15 @@ static struct attribute *intel_pmu_attrs[] = { NULL, }; +static umode_t +default_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + if (attr == &dev_attr_allow_tsx_force_abort.attr) + return x86_pmu.flags & PMU_FL_TFA ? 
attr->mode : 0; + + return attr->mode; +} + static umode_t tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -5528,27 +5543,12 @@ mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) return pebs_is_visible(kobj, attr, i); } -static umode_t -lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - return x86_pmu.lbr_nr ? attr->mode : 0; -} - static umode_t exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) { return x86_pmu.version >= 2 ? attr->mode : 0; } -static umode_t -default_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - if (attr == &dev_attr_allow_tsx_force_abort.attr) - return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; - - return attr->mode; -} - static struct attribute_group group_events_td = { .name = "events", }; -- Gitee From e7d793f1db41b2241f71b48aecbd0541d4807f96 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:23 -0700 Subject: [PATCH 040/953] perf/x86/intel: Support branch counters logging ANBZ: #8006 commit 33744916196b4ed7a50f6f47af7c3ad46b730ce6 upstream. The branch counters logging (A.K.A LBR event logging) introduces a per-counter indication of precise event occurrences in LBRs. It can provide a means to attribute exposed retirement latency to combinations of events across a block of instructions. It also provides a means of attributing Timed LBR latencies to events. The feature is first introduced on SRF/GRR. It is an enhancement of the ARCH LBR. It adds new fields in the LBR_INFO MSRs to log the occurrences of events on the GP counters. The information is logged in counter order. The design proposed in this patch requires that the events which are logged must be in a group with the event that has LBR. If there is more than one LBR group, only the counter-logging information from the current (overflowed) group is stored for the perf tool; otherwise the perf tool cannot know which other groups are scheduled, and when, especially when multiplexing is triggered. The user can ensure it uses the maximum number of counters that support LBR info (4 for now) by making the group large enough. The HW logs events only in counter order. That order may differ from the order in which the events were enabled, which is what the perf tool understands. When parsing the information of each branch entry, convert the counter order to the enabled order, and store the enabled order in the extension space. Unconditionally reset LBRs for an LBR event group when it's deleted. The logged counter information is only valid for the current LBR group. If another LBR group is scheduled later, the information from the stale LBRs would otherwise be wrongly interpreted. Add a sanity check in intel_pmu_hw_config(). Disable the feature if other counter filters (inv, cmask, edge, in_tx) are set or LBR call stack mode is enabled. (For the LBR call stack mode, we cannot simply flush the LBR, since it will break the call stack. Also, there is no obvious usage with the call stack mode for now.) Applying only PERF_SAMPLE_BRANCH_COUNTERS doesn't require any branch stack setup. Expose the maximum number of supported counters and the width of the counters via sysfs. The perf tool can use the information to parse the logged counters in each branch. Intel-SIG: commit 33744916196b perf/x86/intel: Support branch counters logging Backport LBR branch counter support to kernel v6.6.
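On the consuming side, splitting one logged u64 into per-event counts is a plain shift-and-mask walk. A hedged user-space sketch (decode_branch_counters() is a made-up helper; nr and width would be read from the new branch_counter_nr and branch_counter_width caps files):

#include <stdio.h>

/* Sketch: decode one logged u64 into per-counter occurrence counts. */
static void decode_branch_counters(unsigned long long counters,
				   int nr, int width)
{
	unsigned long long mask = (1ULL << width) - 1;
	int i;

	for (i = 0; i < nr; i++)
		printf("counter %d: %llu occurrences\n",
		       i, (counters >> (i * width)) & mask);
}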
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-5-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/intel/core.c | 103 +++++++++++++++++++++++++++-- arch/x86/events/intel/ds.c | 2 +- arch/x86/events/intel/lbr.c | 85 +++++++++++++++++++++++- arch/x86/events/perf_event.h | 12 ++++ arch/x86/events/perf_event_flags.h | 1 + arch/x86/include/asm/msr-index.h | 5 ++ arch/x86/include/asm/perf_event.h | 4 ++ include/uapi/linux/perf_event.h | 3 + 8 files changed, 207 insertions(+), 8 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 38335d60f31f..8814a757e612 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2803,6 +2803,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event) static void intel_pmu_enable_event(struct perf_event *event) { + u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -2811,8 +2812,10 @@ static void intel_pmu_enable_event(struct perf_event *event) switch (idx) { case 0 ... INTEL_PMC_IDX_FIXED - 1: + if (branch_sample_counters(event)) + enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; intel_set_masks(event, idx); - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); + __x86_pmu_enable_event(hwc, enable_mask); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: @@ -3063,7 +3066,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); + intel_pmu_lbr_save_brstack(&data, cpuc, event); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); @@ -3628,6 +3631,13 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, if (cpuc->excl_cntrs) return intel_get_excl_constraints(cpuc, event, idx, c2); + /* Not all counters support the branch counter feature. */ + if (branch_sample_counters(event)) { + c2 = dyn_constraint(cpuc, c2, idx); + c2->idxmsk64 &= x86_pmu.lbr_counters; + c2->weight = hweight64(c2->idxmsk64); + } + return c2; } @@ -3916,6 +3926,58 @@ static int intel_pmu_hw_config(struct perf_event *event) if (needs_branch_stack(event) && is_sampling_event(event)) event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + if (branch_sample_counters(event)) { + struct perf_event *leader, *sibling; + int num = 0; + + if (!(x86_pmu.flags & PMU_FL_BR_CNTR) || + (event->attr.config & ~INTEL_ARCH_EVENT_MASK)) + return -EINVAL; + + /* + * The branch counter logging is not supported in the call stack + * mode yet, since we cannot simply flush the LBR during e.g., + * multiplexing. Also, there is no obvious usage with the call + * stack mode. Simply forbids it for now. + * + * If any events in the group enable the branch counter logging + * feature, the group is treated as a branch counter logging + * group, which requires the extra space to store the counters. 
+ */ + leader = event->group_leader; + if (branch_sample_call_stack(leader)) + return -EINVAL; + if (branch_sample_counters(leader)) + num++; + leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; + + for_each_sibling_event(sibling, leader) { + if (branch_sample_call_stack(sibling)) + return -EINVAL; + if (branch_sample_counters(sibling)) + num++; + } + + if (num > fls(x86_pmu.lbr_counters)) + return -EINVAL; + /* + * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't + * require any branch stack setup. + * Clear the bit to avoid unnecessary branch stack setup. + */ + if (0 == (event->attr.branch_sample_type & + ~(PERF_SAMPLE_BRANCH_PLM_ALL | + PERF_SAMPLE_BRANCH_COUNTERS))) + event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + /* + * Force the leader to be a LBR event. So LBRs can be reset + * with the leader event. See intel_pmu_lbr_del() for details. + */ + if (!intel_pmu_needs_branch_stack(leader)) + return -EINVAL; + } + if (intel_pmu_needs_branch_stack(event)) { ret = intel_pmu_setup_lbr_filter(event); if (ret) @@ -4399,8 +4461,13 @@ cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, */ if (event->attr.precise_ip == 3) { /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ - if (constraint_match(&fixed0_constraint, event->hw.config)) - return &fixed0_counter0_1_constraint; + if (constraint_match(&fixed0_constraint, event->hw.config)) { + /* The fixed counter 0 doesn't support LBR event logging. */ + if (branch_sample_counters(event)) + return &counter0_1_constraint; + else + return &fixed0_counter0_1_constraint; + } switch (c->idxmsk64 & 0x3ull) { case 0x1: @@ -4579,7 +4646,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) goto err; } - if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) { size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); @@ -5476,15 +5543,39 @@ static ssize_t branches_show(struct device *cdev, static DEVICE_ATTR_RO(branches); +static ssize_t branch_counter_nr_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters)); +} + +static DEVICE_ATTR_RO(branch_counter_nr); + +static ssize_t branch_counter_width_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS); +} + +static DEVICE_ATTR_RO(branch_counter_width); + static struct attribute *lbr_attrs[] = { &dev_attr_branches.attr, + &dev_attr_branch_counter_nr.attr, + &dev_attr_branch_counter_width.attr, NULL }; static umode_t lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { - return x86_pmu.lbr_nr ? attr->mode : 0; + /* branches */ + if (i == 0) + return x86_pmu.lbr_nr ? attr->mode : 0; + + return (x86_pmu.flags & PMU_FL_BR_CNTR) ? 
attr->mode : 0; } static char pmu_name_str[30]; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 7566190389f0..5ff81dbf8aa3 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1912,7 +1912,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, if (has_branch_stack(event)) { intel_pmu_store_pebs_lbrs(lbr); - perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); + intel_pmu_lbr_save_brstack(data, cpuc, event); } } diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index c3b0d15a9841..78cd5084104e 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -676,6 +676,25 @@ void intel_pmu_lbr_del(struct perf_event *event) WARN_ON_ONCE(cpuc->lbr_users < 0); WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); perf_sched_cb_dec(event->pmu); + + /* + * The logged occurrences information is only valid for the + * current LBR group. If another LBR group is scheduled in + * later, the information from the stale LBRs will be wrongly + * interpreted. Reset the LBRs here. + * + * Only clear once for a branch counter group with the leader + * event. Because + * - Cannot simply reset the LBRs with the !cpuc->lbr_users. + * Because it's possible that the last LBR user is not in a + * branch counter group, e.g., a branch_counters group + + * several normal LBR events. + * - The LBR reset can be done with any one of the events in a + * branch counter group, since they are always scheduled together. + * It's easy to force the leader event to be an LBR event. + */ + if (is_branch_counters_group(event) && event == event->group_leader) + intel_pmu_lbr_reset(); } static inline bool vlbr_exclude_host(void) @@ -866,6 +885,8 @@ static __always_inline u16 get_lbr_cycles(u64 info) return cycles; } +static_assert((64 - PERF_BRANCH_ENTRY_INFO_BITS_MAX) > LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS); + static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, struct lbr_entry *entries) { @@ -898,11 +919,67 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, e->abort = !!(info & LBR_INFO_ABORT); e->cycles = get_lbr_cycles(info); e->type = get_lbr_br_type(info); + + /* + * Leverage the reserved field of cpuc->lbr_entries[i] to + * temporarily store the branch counters information. + * The later code will decide what content can be disclosed + * to the perf tool. Please see intel_pmu_lbr_counters_reorder(). + */ + e->reserved = (info >> LBR_INFO_BR_CNTR_OFFSET) & LBR_INFO_BR_CNTR_FULL_MASK; } cpuc->lbr_stack.nr = i; } +/* + * The enabled order may be different from the counter order. + * Update the lbr_counters with the enabled order.
+ */ +static void intel_pmu_lbr_counters_reorder(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + int i, j, pos = 0, order[X86_PMC_IDX_MAX]; + struct perf_event *leader, *sibling; + u64 src, dst, cnt; + + leader = event->group_leader; + if (branch_sample_counters(leader)) + order[pos++] = leader->hw.idx; + + for_each_sibling_event(sibling, leader) { + if (!branch_sample_counters(sibling)) + continue; + order[pos++] = sibling->hw.idx; + } + + WARN_ON_ONCE(!pos); + + for (i = 0; i < cpuc->lbr_stack.nr; i++) { + src = cpuc->lbr_entries[i].reserved; + dst = 0; + for (j = 0; j < pos; j++) { + cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK; + dst |= cnt << j * LBR_INFO_BR_CNTR_BITS; + } + cpuc->lbr_counters[i] = dst; + cpuc->lbr_entries[i].reserved = 0; + } +} + +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_branch_counters_group(event)) { + intel_pmu_lbr_counters_reorder(cpuc, event); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, cpuc->lbr_counters); + return; + } + + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); +} + static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc) { intel_pmu_store_lbr(cpuc, NULL); @@ -1173,8 +1250,10 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) for (i = 0; i < cpuc->lbr_stack.nr; ) { if (!cpuc->lbr_entries[i].from) { j = i; - while (++j < cpuc->lbr_stack.nr) + while (++j < cpuc->lbr_stack.nr) { cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; + cpuc->lbr_counters[j-1] = cpuc->lbr_counters[j]; + } cpuc->lbr_stack.nr--; if (!cpuc->lbr_entries[i].from) continue; @@ -1525,8 +1604,12 @@ void __init intel_pmu_arch_lbr_init(void) x86_pmu.lbr_mispred = ecx.split.lbr_mispred; x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr; x86_pmu.lbr_br_type = ecx.split.lbr_br_type; + x86_pmu.lbr_counters = ecx.split.lbr_counters; x86_pmu.lbr_nr = lbr_nr; + if (!!x86_pmu.lbr_counters) + x86_pmu.flags |= PMU_FL_BR_CNTR; + if (x86_pmu.lbr_mispred) static_branch_enable(&x86_lbr_mispred); if (x86_pmu.lbr_timed_lbr) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index c8ba2be7585d..b8a2d3ba4ccd 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -110,6 +110,11 @@ static inline bool is_topdown_event(struct perf_event *event) return is_metric_event(event) || is_slots_event(event); } +static inline bool is_branch_counters_group(struct perf_event *event) +{ + return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ @@ -283,6 +288,7 @@ struct cpu_hw_events { int lbr_pebs_users; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; + u64 lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */ union { struct er_account *lbr_sel; struct er_account *lbr_ctl; @@ -881,6 +887,7 @@ struct x86_pmu { unsigned int lbr_mispred:1; unsigned int lbr_timed_lbr:1; unsigned int lbr_br_type:1; + unsigned int lbr_counters:4; void (*lbr_reset)(void); void (*lbr_read)(struct cpu_hw_events *cpuc); @@ -1005,6 +1012,7 @@ do { \ #define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */ #define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */ #define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */ +#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */ #define 
EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr @@ -1545,6 +1553,10 @@ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); void intel_ds_init(void); +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event); + void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, struct perf_event_pmu_context *next_epc); diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h index a1685981c520..6c977c19f2cd 100644 --- a/arch/x86/events/perf_event_flags.h +++ b/arch/x86/events/perf_event_flags.h @@ -21,3 +21,4 @@ PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */ PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */ PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */ PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */ +PERF_ARCH(BRANCH_COUNTERS, 0x80000) /* logs the counters in the extra space of each branch */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index c75cc5610be3..246f6d3d9afa 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -244,6 +244,11 @@ #define LBR_INFO_CYCLES 0xffff #define LBR_INFO_BR_TYPE_OFFSET 56 #define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET) +#define LBR_INFO_BR_CNTR_OFFSET 32 +#define LBR_INFO_BR_CNTR_NUM 4 +#define LBR_INFO_BR_CNTR_BITS 2 +#define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0) +#define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0) #define MSR_ARCH_LBR_CTL 0x000014ce #define ARCH_LBR_CTL_LBREN BIT(0) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 0c4a93712ef5..9450594f1709 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -31,6 +31,7 @@ #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22) #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL +#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35) #define INTEL_FIXED_BITS_MASK 0xFULL #define INTEL_FIXED_BITS_STRIDE 4 @@ -224,6 +225,9 @@ union cpuid28_ecx { unsigned int lbr_timed_lbr:1; /* Branch Type Field Supported */ unsigned int lbr_br_type:1; + unsigned int reserved:13; + /* Branch counters (Event Logging) Supported */ + unsigned int lbr_counters:4; } split; unsigned int full; }; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4461f380425b..3a64499b0f5d 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1437,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) -- Gitee From 5cc713ef08fc4125fc5420f293c1f5ff5497558b Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:35 -0800 Subject: [PATCH 041/953] perf/x86/intel/uncore: Generic uncore_get_uncores and MMIO format of SPR ANBZ: #8008 commit cf35791476fcb3230b98a42241a56242d60ebdd3 upstream. Factor out SPR_UNCORE_MMIO_COMMON_FORMAT which can be reused by Granite Rapids in the following patch. Granite Rapids have more uncore units than Sapphire Rapids. Add new parameters to support adjustable uncore units. No functional change. 
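To make the factoring concrete, here is a simplified sketch of the pattern (condensed from the hunks below, not a drop-in definition): a shared initializer macro supplies the common format plus the MMIO ops, so each unit type only spells out its unit-specific fields, and Granite Rapids can later reuse the same macro for its own MMIO units.

#define SPR_UNCORE_MMIO_COMMON_FORMAT()	\
	SPR_UNCORE_COMMON_FORMAT(),	\
	.ops = &spr_uncore_mmio_ops

static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_MMIO_COMMON_FORMAT(),	/* shared format + MMIO ops */
	.name = "imc",				/* only unit-specific fields follow */
};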
Intel-SIG: commit cf35791476fc perf/x86/intel/uncore: Generic uncore_get_uncores and MMIO format of SPR Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-1-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore_snbep.c | 34 +++++++++++++++++++--------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 49bc27ab26ad..38260ef1340d 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6085,13 +6085,16 @@ static struct uncore_event_desc spr_uncore_imc_events[] = { { /* end: all zeroes */ }, }; +#define SPR_UNCORE_MMIO_COMMON_FORMAT() \ + SPR_UNCORE_COMMON_FORMAT(), \ + .ops = &spr_uncore_mmio_ops + static struct intel_uncore_type spr_uncore_imc = { - SPR_UNCORE_COMMON_FORMAT(), + SPR_UNCORE_MMIO_COMMON_FORMAT(), .name = "imc", .fixed_ctr_bits = 48, .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, - .ops = &spr_uncore_mmio_ops, .event_descs = spr_uncore_imc_events, }; @@ -6418,7 +6421,8 @@ static void uncore_type_customized_copy(struct intel_uncore_type *to_type, static struct intel_uncore_type ** uncore_get_uncores(enum uncore_access_type type_id, int num_extra, - struct intel_uncore_type **extra) + struct intel_uncore_type **extra, int max_num_types, + struct intel_uncore_type **uncores) { struct intel_uncore_type **types, **start_types; int i; @@ -6427,9 +6431,9 @@ uncore_get_uncores(enum uncore_access_type type_id, int num_extra, /* Only copy the customized features */ for (; *types; types++) { - if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES) + if ((*types)->type_id >= max_num_types) continue; - uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]); + uncore_type_customized_copy(*types, uncores[(*types)->type_id]); } for (i = 0; i < num_extra; i++, types++) @@ -6476,7 +6480,9 @@ void spr_uncore_cpu_init(void) uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, UNCORE_SPR_MSR_EXTRA_UNCORES, - spr_msr_uncores); + spr_msr_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA); if (type) { @@ -6558,7 +6564,9 @@ int spr_uncore_pci_init(void) spr_update_device_location(UNCORE_SPR_M3UPI); uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, UNCORE_SPR_PCI_EXTRA_UNCORES, - spr_pci_uncores); + spr_pci_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); return 0; } @@ -6566,12 +6574,16 @@ void spr_uncore_mmio_init(void) { int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true); - if (ret) - uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL); - else { + if (ret) { + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); + } else { uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, UNCORE_SPR_MMIO_EXTRA_UNCORES, - spr_mmio_uncores); + spr_mmio_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2; } -- Gitee From ffe4c8c62af6dbe85a669257615e221e7e7d5d3e Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:36 -0800 Subject: 
[PATCH 042/953] perf/x86/uncore: Use u64 to replace unsigned for the uncore offsets array ANBZ: #8008 commit b560e0cd882b11921c84307efe139f1247434c5e upstream. The current perf doesn't save the complete address of an uncore unit. The complete address of each unit is calculated by the base address + offset. The type of the base address is u64, while the type of offset is unsigned. In the old platforms (without the discovery table method), the base address and offset are hard coded in the driver. Perf can always use the lowest address as the base address. Everything works well. In the new platforms (starting from SPR), the discovery table provides a complete address for all uncore units. To follow the current framework/codes, when parsing the discovery table, the complete address of the first box is stored as a base address. The offset of the following units is calculated by the complete address of the unit minus the base address (the address of the first unit). On GNR, the latter units may have a lower address compared to the first unit. So the offset is a negative value. The upper 32 bits are lost when casting a negative u64 to an unsigned type. Use u64 to replace unsigned for the uncore offsets array to correct the above case. There is no functional change. Intel-SIG: commit b560e0cd882b perf/x86/uncore: Use u64 to replace unsigned for the uncore offsets array Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-2-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore.h | 6 +++--- arch/x86/events/intel/uncore_discovery.c | 5 +++-- arch/x86/events/intel/uncore_discovery.h | 2 +- arch/x86/events/intel/uncore_nhmex.c | 2 +- arch/x86/events/intel/uncore_snbep.c | 6 +++--- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index c30fb5bb1222..7428ecaddf72 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -72,9 +72,9 @@ struct intel_uncore_type { unsigned single_fixed:1; unsigned pair_ctr_ctl:1; union { - unsigned *msr_offsets; - unsigned *pci_offsets; - unsigned *mmio_offsets; + u64 *msr_offsets; + u64 *pci_offsets; + u64 *mmio_offsets; }; unsigned *box_ids; struct event_constraint unconstrainted; diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index cb488e41807c..9a698a92962a 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -125,7 +125,8 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, int die, bool parsed) { struct intel_uncore_discovery_type *type; - unsigned int *box_offset, *ids; + unsigned int *ids; + u64 *box_offset; int i; if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) { @@ -153,7 +154,7 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, if (!type) return; - box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); + box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL); if (!box_offset) return; diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 6ee80ad3423e..22e769a81103 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -125,7 +125,7 
@@ struct intel_uncore_discovery_type { u8 ctr_offset; /* Counter 0 offset */ u16 num_boxes; /* number of boxes for the uncore block */ unsigned int *ids; /* Box IDs */ - unsigned int *box_offset; /* Box offset */ + u64 *box_offset; /* Box offset */ }; bool intel_uncore_has_discovery_tables(int *ignore); diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 173e2674be6e..56eea2c66cfb 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -306,7 +306,7 @@ static const struct attribute_group nhmex_uncore_cbox_format_group = { }; /* msr offset for each instance of cbox */ -static unsigned nhmex_cbox_msr_offsets[] = { +static u64 nhmex_cbox_msr_offsets[] = { 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 38260ef1340d..e1e6ab6bf244 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5278,7 +5278,7 @@ void snr_uncore_mmio_init(void) /* ICX uncore support */ -static unsigned icx_cha_msr_offsets[] = { +static u64 icx_cha_msr_offsets[] = { 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, @@ -5326,7 +5326,7 @@ static struct intel_uncore_type icx_uncore_chabox = { .format_group = &snr_uncore_chabox_format_group, }; -static unsigned icx_msr_offsets[] = { +static u64 icx_msr_offsets[] = { 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, }; @@ -6190,7 +6190,7 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = { */ #define SPR_UNCORE_UPI_NUM_BOXES 4 -static unsigned int spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { +static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { 0, 0x8000, 0x10000, 0x18000 }; -- Gitee From e47a5707854ecd0912bf0f260755e713547aa03d Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:37 -0800 Subject: [PATCH 043/953] perf/x86/intel/uncore: Support Granite Rapids ANBZ: #8008 commit 632c4bf6d007862307440b177d9fee829857e8bb upstream. Like Sapphire Rapids, Granite Rapids also supports the discovery table feature. All the basic uncore PMON information can be retrieved from the discovery table which resides in the BIOS. There are 4 new units added on Granite Rapids: b2cmi, b2cxl, ubox, and mdf_sbo. The layout of the counters is exactly the same as the generic uncore counters. Only add a name for the new units. All the details can be retrieved from the discovery table. The description of the new units can be found at https://www.intel.com/content/www/us/en/secure/content-details/772943/content-details.html The other units, e.g., cha, iio, irp, pcu, and imc, are the same as Sapphire Rapids. Ignore the upi and b2upi units in the discovery table, which are broken for now.
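To illustrate the ignore mechanism, a sketch condensed from the hunks below: unit type ids listed in the platform's ignore array are skipped while the discovery table is parsed, and the array is terminated with UNCORE_IGNORE_END (the full GNR array also lists three reserved type ids omitted here).

/* Discovery-table unit types to skip on GNR (abbreviated). */
int gnr_uncore_units_ignore[] = {
	UNCORE_SPR_UPI,		/* upi: broken in the table for now */
	UNCORE_GNR_B2UPI,	/* b2upi: broken in the table for now */
	UNCORE_IGNORE_END	/* sentinel */
};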
Intel-SIG: commit 632c4bf6d007 perf/x86/intel/uncore: Support Granite Rapids Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore.c | 10 ++++ arch/x86/events/intel/uncore.h | 4 ++ arch/x86/events/intel/uncore_snbep.c | 87 ++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 69043e02e8a7..01c01cae82ef 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1814,6 +1814,14 @@ static const struct intel_uncore_init_fun spr_uncore_init __initconst = { .uncore_units_ignore = spr_uncore_units_ignore, }; +static const struct intel_uncore_init_fun gnr_uncore_init __initconst = { + .cpu_init = gnr_uncore_cpu_init, + .pci_init = gnr_uncore_pci_init, + .mmio_init = gnr_uncore_mmio_init, + .use_discovery = true, + .uncore_units_ignore = gnr_uncore_units_ignore, +}; + static const struct intel_uncore_init_fun generic_uncore_init __initconst = { .cpu_init = intel_uncore_generic_uncore_cpu_init, .pci_init = intel_uncore_generic_uncore_pci_init, @@ -1865,6 +1873,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), {}, diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 7428ecaddf72..4838502d89ae 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -593,6 +593,7 @@ extern struct list_head pci2phy_map_head; extern struct pci_extra_dev *uncore_extra_pci_dev; extern struct event_constraint uncore_constraint_empty; extern int spr_uncore_units_ignore[]; +extern int gnr_uncore_units_ignore[]; /* uncore_snb.c */ int snb_uncore_pci_init(void); @@ -634,6 +635,9 @@ void icx_uncore_mmio_init(void); int spr_uncore_pci_init(void); void spr_uncore_cpu_init(void); void spr_uncore_mmio_init(void); +int gnr_uncore_pci_init(void); +void gnr_uncore_cpu_init(void); +void gnr_uncore_mmio_init(void); /* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index e1e6ab6bf244..31ad5841de43 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6590,3 +6590,90 @@ void spr_uncore_mmio_init(void) } /* end of SPR uncore support */ + +/* GNR uncore support */ + +#define UNCORE_GNR_NUM_UNCORE_TYPES 23 +#define UNCORE_GNR_TYPE_15 15 +#define UNCORE_GNR_B2UPI 18 +#define UNCORE_GNR_TYPE_21 21 +#define UNCORE_GNR_TYPE_22 22 + +int gnr_uncore_units_ignore[] = { + UNCORE_SPR_UPI, + UNCORE_GNR_TYPE_15, + UNCORE_GNR_B2UPI, + UNCORE_GNR_TYPE_21, + UNCORE_GNR_TYPE_22, + UNCORE_IGNORE_END +}; + +static struct intel_uncore_type gnr_uncore_ubox = { + .name = "ubox", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type gnr_uncore_b2cmi = 
{ + SPR_UNCORE_PCI_COMMON_FORMAT(), + .name = "b2cmi", +}; + +static struct intel_uncore_type gnr_uncore_b2cxl = { + SPR_UNCORE_MMIO_COMMON_FORMAT(), + .name = "b2cxl", +}; + +static struct intel_uncore_type gnr_uncore_mdf_sbo = { + .name = "mdf_sbo", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = { + &spr_uncore_chabox, + &spr_uncore_iio, + &spr_uncore_irp, + NULL, + &spr_uncore_pcu, + &gnr_uncore_ubox, + &spr_uncore_imc, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + &gnr_uncore_b2cmi, + &gnr_uncore_b2cxl, + NULL, + NULL, + &gnr_uncore_mdf_sbo, + NULL, + NULL, +}; + +void gnr_uncore_cpu_init(void) +{ + uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); +} + +int gnr_uncore_pci_init(void) +{ + uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); + return 0; +} + +void gnr_uncore_mmio_init(void) +{ + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); +} + +/* end of GNR uncore support */ -- Gitee From 8461fc5d947919ee27153e2161f2f414647cf93c Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:38 -0800 Subject: [PATCH 044/953] perf/x86/intel/uncore: Support IIO free-running counters on GNR ANBZ: #8008 commit 388d76175bd9bbad52bbff25c88361d9e5c6615e upstream. The free-running counters for IIO uncore blocks on Granite Rapids are similar to Sapphire Rapids. The key difference is the offset of the registers. The number of the IIO uncore blocks can also be retrieved from the discovery table. Intel-SIG: commit 388d76175bd9 perf/x86/intel/uncore: Support IIO free-running counters on GNR Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore_snbep.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 31ad5841de43..1efbacfff47d 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6654,11 +6654,21 @@ static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = { NULL, }; +static struct freerunning_counters gnr_iio_freerunning[] = { + [SPR_IIO_MSR_IOCLK] = { 0x290e, 0x01, 0x10, 1, 48 }, + [SPR_IIO_MSR_BW_IN] = { 0x360e, 0x10, 0x80, 8, 48 }, + [SPR_IIO_MSR_BW_OUT] = { 0x2e0e, 0x10, 0x80, 8, 48 }, +}; + void gnr_uncore_cpu_init(void) { - uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, 0, NULL, + uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, + UNCORE_SPR_MSR_EXTRA_UNCORES, + spr_msr_uncores, UNCORE_GNR_NUM_UNCORE_TYPES, gnr_uncores); + spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO); + spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning; } int gnr_uncore_pci_init(void) -- Gitee From 2a8e2728aaa6d881e0cd75e61775d65bd35cbf99 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:39 -0800 Subject: [PATCH 045/953] perf/x86/intel/uncore: Support Sierra Forest and Grand Ridge ANBZ: #8008 commit 
cb4a6ccf35839895da63fcf6134d6fbd13224805 upstream. The same as Granite Rapids, the Sierra Forest and Grand Ridge also support the discovery table feature and the same types of uncore units. The differences in the available units and counters can be retrieved from the discovery table automatically. Just add the CPU model ID. Intel-SIG: commit cb4a6ccf3583 perf/x86/intel/uncore: Support Sierra Forest and Grand Ridge Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-5-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 01c01cae82ef..4e26a28536de 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1877,6 +1877,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init), {}, }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match); -- Gitee From 832a2ac0ee7890198dd8a43ca27af93a17f06ad8 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:29 -0700 Subject: [PATCH 046/953] platform/x86/intel/ifs: Store IFS generation number MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 97a5e801b3045c1e800f76bc0fb544972538089d upstream. Intel-SIG: commit 97a5e801b304 platform/x86/intel/ifs: Store IFS generation number Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF The IFS generation number is reported via MSR_INTEGRITY_CAPS. As IFS support gets added to newer CPUs, some differences are expected during IFS image loading and test flows. Define MSR bitmasks to extract and store the generation in driver data, so that the driver can modify its MSR interaction appropriately.
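To make the extraction concrete, a minimal sketch of the mechanism described above, using the mask added by this patch (bits 10:9 of MSR_INTEGRITY_CAPS); the helper name is illustrative, the driver performs the same FIELD_GET() inline in ifs_init():

#include <linux/bitfield.h>

#define MSR_INTEGRITY_CAPS_SAF_GEN_MASK	GENMASK_ULL(10, 9)

/* Extract the IFS (SAF) generation from the integrity-caps MSR value. */
static u32 ifs_saf_generation(u64 msrval)
{
	return FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK, msrval);
}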
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-2-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- arch/x86/include/asm/msr-index.h | 1 + drivers/platform/x86/intel/ifs/core.c | 3 +++ drivers/platform/x86/intel/ifs/ifs.h | 2 ++ 3 files changed, 6 insertions(+) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 246f6d3d9afa..0e72ec1d6e25 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -230,6 +230,7 @@ #define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT) #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT) +#define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9) #define MSR_LBR_NHM_FROM 0x00000680 #define MSR_LBR_NHM_TO 0x000006c0 diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 306f886b52d2..4ff2aa4b484b 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation. */ +#include #include #include #include @@ -94,6 +95,8 @@ static int __init ifs_init(void) for (i = 0; i < IFS_NUMTESTS; i++) { if (!(msrval & BIT(ifs_devices[i].test_caps->integrity_cap_bit))) continue; + ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK, + msrval); ret = misc_register(&ifs_devices[i].misc); if (ret) goto err_exit; diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 93191855890f..d666aeed20fc 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -229,6 +229,7 @@ struct ifs_test_caps { * @status: it holds simple status pass/fail/untested * @scan_details: opaque scan status code from h/w * @cur_batch: number indicating the currently loaded test file + * @generation: IFS test generation enumerated by hardware */ struct ifs_data { int loaded_version; @@ -238,6 +239,7 @@ struct ifs_data { int status; u64 scan_details; u32 cur_batch; + u32 generation; }; struct ifs_work { -- Gitee From dbcafb7f58e461694bfdf76861ae0d49f3b8952a Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:30 -0700 Subject: [PATCH 047/953] platform/x86/intel/ifs: Refactor image loading code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit a138ac2656d1329c3994a227769b7ba3926818a7 upstream. Intel-SIG: commit a138ac2656d1 platform/x86/intel/ifs: Refactor image loading code Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF IFS image loading flow is slightly different for newer IFS generations. In preparation for adding support for newer IFS generations, refactor portions of existing image loading code for reuse. 
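The shape of the refactor, condensed from the hunks below: each open-coded error-reporting branch becomes a small helper that bounds-checks the hardware error code before indexing the human-readable table, so the gen2 load path added next can report failures through the same code.

static void auth_err_message(struct device *dev, u32 err_code)
{
	/* Guard against codes the driver has no string for. */
	if (err_code >= ARRAY_SIZE(scan_authentication_status))
		dev_err(dev, "invalid error code 0x%x for authentication\n", err_code);
	else
		dev_err(dev, "Chunk authentication error : %s\n",
			scan_authentication_status[err_code]);
}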
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-3-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/load.c | 31 ++++++++++++++++----------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index cefd0d886cfd..851c97cc6a6b 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -80,6 +80,23 @@ static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_typ return NULL; } +static void hashcopy_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_hash_status)) + dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); + else + dev_err(dev, "Hash copy error : %s\n", scan_hash_status[err_code]); +} + +static void auth_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_authentication_status)) + dev_err(dev, "invalid error code 0x%x for authentication\n", err_code); + else + dev_err(dev, "Chunk authentication error : %s\n", + scan_authentication_status[err_code]); +} + /* * To copy scan hashes and authenticate test chunks, the initiating cpu must point * to the EDX:EAX to the test image in linear address. @@ -109,11 +126,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) if (!hashes_status.valid) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_hash_status)) { - dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); - goto done; - } - dev_err(dev, "Hash copy error : %s", scan_hash_status[err_code]); + hashcopy_err_message(dev, err_code); goto done; } @@ -133,13 +146,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) if (err_code) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_authentication_status)) { - dev_err(dev, - "invalid error code 0x%x for authentication\n", err_code); - goto done; - } - dev_err(dev, "Chunk authentication error %s\n", - scan_authentication_status[err_code]); + auth_err_message(dev, err_code); goto done; } } -- Gitee From d49af99d290fe5ce82c855fdc347fe6c27d3d46e Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:31 -0700 Subject: [PATCH 048/953] platform/x86/intel/ifs: Gen2 scan image loading MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 07f47c01b3bc2a42c4d4da35831edab10aa60449 upstream. Intel-SIG: commit 07f47c01b3bc platform/x86/intel/ifs: Gen2 scan image loading Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF The scan image loading flow for newer IFS generations is slightly different from that of the current generation. In newer schemes, loading need not be done once for each socket as was done in gen0. Also, the width of the NUM_CHUNKS bitfield in the SCAN_HASHES_STATUS MSR has increased from 8 to 16 bits. Similarly, there are width differences for CHUNK_AUTHENTICATION_STATUS too. Further, the parameter to AUTHENTICATE_AND_COPY_CHUNK is passed differently in newer generations.
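The parameter-passing difference called out above, as a sketch of the gen2 flow (condensed from the copy_hashes_authenticate_chunks_gen2() hunk added later in this patch): instead of pointing EDX:EAX at the chunk itself, gen2 writes the MSR with the linear address of a two-entry table holding the chunk number and the chunk address, retrying if authentication is interrupted.

u64 chunk_table[2];

chunk_table[0] = starting_chunk_nr + i;		/* chunk number */
chunk_table[1] = base + i * chunk_size;		/* chunk linear address */
do {
	wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table);
	rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data);
	err_code = chunk_status.error_code;
} while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count);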
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-4-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/ifs.h | 27 +++++++ drivers/platform/x86/intel/ifs/load.c | 112 +++++++++++++++++++++++++- 2 files changed, 137 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index d666aeed20fc..4824316b3acd 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -137,6 +137,8 @@ #define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5 #define MSR_ACTIVATE_SCAN 0x000002c6 #define MSR_SCAN_STATUS 0x000002c7 +#define MSR_SAF_CTRL 0x000004f0 + #define SCAN_NOT_TESTED 0 #define SCAN_TEST_PASS 1 #define SCAN_TEST_FAIL 2 @@ -158,6 +160,19 @@ union ifs_scan_hashes_status { }; }; +union ifs_scan_hashes_status_gen2 { + u64 data; + struct { + u16 chunk_size; + u16 num_chunks; + u32 error_code :8; + u32 chunks_in_stride :9; + u32 rsvd :2; + u32 max_core_limit :12; + u32 valid :1; + }; +}; + /* MSR_CHUNKS_AUTH_STATUS bit fields */ union ifs_chunks_auth_status { u64 data; @@ -170,6 +185,16 @@ union ifs_chunks_auth_status { }; }; +union ifs_chunks_auth_status_gen2 { + u64 data; + struct { + u16 valid_chunks; + u16 total_chunks; + u32 error_code :8; + u32 rsvd2 :24; + }; +}; + /* MSR_ACTIVATE_SCAN bit fields */ union ifs_scan { u64 data; @@ -230,6 +255,7 @@ struct ifs_test_caps { * @scan_details: opaque scan status code from h/w * @cur_batch: number indicating the currently loaded test file * @generation: IFS test generation enumerated by hardware + * @chunk_size: size of a test chunk */ struct ifs_data { int loaded_version; @@ -240,6 +266,7 @@ struct ifs_data { u64 scan_details; u32 cur_batch; u32 generation; + u32 chunk_size; }; struct ifs_work { diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 851c97cc6a6b..6b827247945b 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -2,6 +2,7 @@ /* Copyright(c) 2022 Intel Corporation. */ #include +#include #include #include @@ -26,6 +27,11 @@ union meta_data { #define IFS_HEADER_SIZE (sizeof(struct microcode_header_intel)) #define META_TYPE_IFS 1 +#define INVALIDATE_STRIDE 0x1UL +#define IFS_GEN_STRIDE_AWARE 2 +#define AUTH_INTERRUPTED_ERROR 5 +#define IFS_AUTH_RETRY_CT 10 + static struct microcode_header_intel *ifs_header_ptr; /* pointer to the ifs image header */ static u64 ifs_hash_ptr; /* Address of ifs metadata (hash) */ static u64 ifs_test_image_ptr; /* 256B aligned address of test pattern */ @@ -44,7 +50,10 @@ static const char * const scan_hash_status[] = { static const char * const scan_authentication_status[] = { [0] = "No error reported", [1] = "Attempt to authenticate a chunk which is already marked as authentic", - [2] = "Chunk authentication error. The hash of chunk did not match expected value" + [2] = "Chunk authentication error. 
The hash of chunk did not match expected value", + [3] = "Reserved", + [4] = "Chunk outside the current stride", + [5] = "Authentication flow interrupted", }; #define MC_HEADER_META_TYPE_END (0) @@ -154,6 +163,102 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) complete(&ifs_done); } +static int get_num_chunks(int gen, union ifs_scan_hashes_status_gen2 status) +{ + return gen >= IFS_GEN_STRIDE_AWARE ? status.chunks_in_stride : status.num_chunks; +} + +static bool need_copy_scan_hashes(struct ifs_data *ifsd) +{ + return !ifsd->loaded || + ifsd->generation < IFS_GEN_STRIDE_AWARE || + ifsd->loaded_version != ifs_header_ptr->rev; +} + +static int copy_hashes_authenticate_chunks_gen2(struct device *dev) +{ + union ifs_scan_hashes_status_gen2 hashes_status; + union ifs_chunks_auth_status_gen2 chunk_status; + u32 err_code, valid_chunks, total_chunks; + int i, num_chunks, chunk_size; + union meta_data *ifs_meta; + int starting_chunk_nr; + struct ifs_data *ifsd; + u64 linear_addr, base; + u64 chunk_table[2]; + int retry_count; + + ifsd = ifs_get_data(dev); + + if (need_copy_scan_hashes(ifsd)) { + wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); + rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + + /* enumerate the scan image information */ + chunk_size = hashes_status.chunk_size * SZ_1K; + err_code = hashes_status.error_code; + + num_chunks = get_num_chunks(ifsd->generation, hashes_status); + + if (!hashes_status.valid) { + hashcopy_err_message(dev, err_code); + return -EIO; + } + ifsd->loaded_version = ifs_header_ptr->rev; + ifsd->chunk_size = chunk_size; + } else { + num_chunks = ifsd->valid_chunks; + chunk_size = ifsd->chunk_size; + } + + if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { + wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE); + rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + if (chunk_status.valid_chunks != 0) { + dev_err(dev, "Couldn't invalidate installed stride - %d\n", + chunk_status.valid_chunks); + return -EIO; + } + } + + base = ifs_test_image_ptr; + ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); + starting_chunk_nr = ifs_meta->starting_chunk; + + /* scan data authentication and copy chunks to secured memory */ + for (i = 0; i < num_chunks; i++) { + retry_count = IFS_AUTH_RETRY_CT; + linear_addr = base + i * chunk_size; + + chunk_table[0] = starting_chunk_nr + i; + chunk_table[1] = linear_addr; + do { + wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table); + rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + err_code = chunk_status.error_code; + } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); + + if (err_code) { + ifsd->loading_error = true; + auth_err_message(dev, err_code); + return -EIO; + } + } + + valid_chunks = chunk_status.valid_chunks; + total_chunks = chunk_status.total_chunks; + + if (valid_chunks != total_chunks) { + ifsd->loading_error = true; + dev_err(dev, "Couldn't authenticate all the chunks. 
Authenticated %d total %d.\n", + valid_chunks, total_chunks); + return -EIO; + } + ifsd->valid_chunks = valid_chunks; + return 0; +} + static int validate_ifs_metadata(struct device *dev) { struct ifs_data *ifsd = ifs_get_data(dev); @@ -206,7 +311,9 @@ static int scan_chunks_sanity_check(struct device *dev) return ret; ifsd->loading_error = false; - ifsd->loaded_version = ifs_header_ptr->rev; + + if (ifsd->generation > 0) + return copy_hashes_authenticate_chunks_gen2(dev); /* copy the scan hash and authenticate per package */ cpus_read_lock(); @@ -226,6 +333,7 @@ static int scan_chunks_sanity_check(struct device *dev) ifs_pkg_auth[curr_pkg] = 1; } ret = 0; + ifsd->loaded_version = ifs_header_ptr->rev; out: cpus_read_unlock(); -- Gitee From a4be615d1bafa5de8b86d2ab041370723c3e856d Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:32 -0700 Subject: [PATCH 049/953] platform/x86/intel/ifs: Gen2 Scan test support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 72b96ee29ed6f7670bbb180ba694816e33d361d1 upstream. Intel-SIG: commit 72b96ee29ed6 platform/x86/intel/ifs: Gen2 Scan test support Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF The width of the chunk-related bitfields in the ACTIVATE_SCAN and SCAN_STATUS MSRs is different in newer IFS generations compared to gen0. Make changes to the scan test flow such that the MSRs are populated appropriately based on the generation supported by the hardware. Account for the 8/16 bit MSR bitfield width differences between gen0 and newer generations for the scan test trace event too. Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-5-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/ifs.h | 28 ++++++++++++++++----- drivers/platform/x86/intel/ifs/runtest.c | 29 ++++++++++++++++------ include/trace/events/intel_ifs.h | 16 ++++++------- 3 files changed, 52 insertions(+), 21 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 4824316b3acd..f0dd849b3400 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -199,9 +199,17 @@ union ifs_chunks_auth_status_gen2 { union ifs_scan { u64 data; struct { - u32 start :8; - u32 stop :8; - u32 rsvd :16; + union { + struct { + u8 start; + u8 stop; + u16 rsvd; + } gen0; + struct { + u16 start; + u16 stop; + } gen2; + }; u32 delay :31; u32 sigmce :1; }; @@ -211,9 +219,17 @@ union ifs_scan { union ifs_status { u64 data; struct { - u32 chunk_num :8; - u32 chunk_stop_index :8; - u32 rsvd1 :16; + union { + struct { + u8 chunk_num; + u8 chunk_stop_index; + u16 rsvd1; + } gen0; + struct { + u16 chunk_num; + u16 chunk_stop_index; + } gen2; + }; u32 error_code :8; u32 rsvd2 :22; u32 control_error :1; diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 43c864add778..fd6a9e3799a3 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -171,21 +171,31 @@ static void ifs_test_core(int cpu, struct device *dev) union ifs_status status; unsigned long timeout; struct ifs_data *ifsd; + int to_start, to_stop; + int status_chunk; u64 msrvals[2]; int retries; ifsd = ifs_get_data(dev); -
activate.rsvd = 0; + activate.gen0.rsvd = 0; activate.delay = IFS_THREAD_WAIT; activate.sigmce = 0; - activate.start = 0; - activate.stop = ifsd->valid_chunks - 1; + to_start = 0; + to_stop = ifsd->valid_chunks - 1; + + if (ifsd->generation) { + activate.gen2.start = to_start; + activate.gen2.stop = to_stop; + } else { + activate.gen0.start = to_start; + activate.gen0.stop = to_stop; + } timeout = jiffies + HZ / 2; retries = MAX_IFS_RETRIES; - while (activate.start <= activate.stop) { + while (to_start <= to_stop) { if (time_after(jiffies, timeout)) { status.error_code = IFS_SW_TIMEOUT; break; @@ -196,13 +206,14 @@ static void ifs_test_core(int cpu, struct device *dev) status.data = msrvals[1]; - trace_ifs_status(cpu, activate, status); + trace_ifs_status(cpu, to_start, to_stop, status.data); /* Some cases can be retried, give up for others */ if (!can_restart(status)) break; - if (status.chunk_num == activate.start) { + status_chunk = ifsd->generation ? status.gen2.chunk_num : status.gen0.chunk_num; + if (status_chunk == to_start) { /* Check for forward progress */ if (--retries == 0) { if (status.error_code == IFS_NO_ERROR) @@ -211,7 +222,11 @@ static void ifs_test_core(int cpu, struct device *dev) } } else { retries = MAX_IFS_RETRIES; - activate.start = status.chunk_num; + if (ifsd->generation) + activate.gen2.start = status_chunk; + else + activate.gen0.start = status_chunk; + to_start = status_chunk; } } diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index d7353024016c..af0af3f1d9b7 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -10,25 +10,25 @@ TRACE_EVENT(ifs_status, - TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status), + TP_PROTO(int cpu, int start, int stop, u64 status), - TP_ARGS(cpu, activate, status), + TP_ARGS(cpu, start, stop, status), TP_STRUCT__entry( __field( u64, status ) __field( int, cpu ) - __field( u8, start ) - __field( u8, stop ) + __field( u16, start ) + __field( u16, stop ) ), TP_fast_assign( __entry->cpu = cpu; - __entry->start = activate.start; - __entry->stop = activate.stop; - __entry->status = status.data; + __entry->start = start; + __entry->stop = stop; + __entry->status = status; ), - TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx", + TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx", __entry->cpu, __entry->start, __entry->stop, -- Gitee From e08788a6f6028d457d845243da4026205cae3898 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:33 -0700 Subject: [PATCH 050/953] platform/x86/intel/ifs: Validate image size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 25a76dbb36dd58ad4df7f6a4dc43061a10b0d817 upstream. Intel-SIG: commit 25a76dbb36dd platform/x86/intel/ifs: Validate image size Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Perform additional validation prior to loading IFS image. Error out if the size of the file being loaded doesn't match the size specified in the header. 
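The added check in essence (condensed from the hunk below): the microcode-style header at the start of the image records the total size, and the file is rejected when the two disagree. Note that the bare return in this version leaks the firmware reference; a later patch in this series, "platform/x86/intel/ifs: Call release_firmware() when handling errors.", converts it into a goto to the release path.

expected_size = ((struct microcode_header_intel *)fw->data)->totalsize;
if (fw->size != expected_size) {
	dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
		expected_size, fw->size);
	return -EINVAL;		/* leaks fw; fixed later in this series */
}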
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-6-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/load.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 6b827247945b..582f1801aaaa 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -375,6 +375,7 @@ int ifs_load_firmware(struct device *dev) { const struct ifs_test_caps *test = ifs_get_test_caps(dev); struct ifs_data *ifsd = ifs_get_data(dev); + unsigned int expected_size; const struct firmware *fw; char scan_path[64]; int ret = -EINVAL; @@ -389,6 +390,13 @@ int ifs_load_firmware(struct device *dev) goto done; } + expected_size = ((struct microcode_header_intel *)fw->data)->totalsize; + if (fw->size != expected_size) { + dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n", + expected_size, fw->size); + return -EINVAL; + } + ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data); if (ret) goto release; -- Gitee From b89c433bcb53d04df981ca6c2f1dc9f83e8ee462 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:34 -0700 Subject: [PATCH 051/953] platform/x86/intel/ifs: Metadata validation for start_chunk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 60d2e1b37d530d6b1f8b7773cebaf8bbc1536b28 upstream. Intel-SIG: commit 60d2e1b37d53 platform/x86/intel/ifs: Metadata validation for start_chunk Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Add an additional check to validate IFS image metadata field prior to loading the test image. If start_chunk is not a multiple of chunks_per_stride error out. Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-7-jithu.joseph@intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/load.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 582f1801aaaa..959b1878cae6 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -291,6 +291,13 @@ static int validate_ifs_metadata(struct device *dev) return ret; } + if (ifs_meta->chunks_per_stride && + (ifs_meta->starting_chunk % ifs_meta->chunks_per_stride != 0)) { + dev_warn(dev, "Starting chunk num %u not a multiple of chunks_per_stride %u\n", + ifs_meta->starting_chunk, ifs_meta->chunks_per_stride); + return ret; + } + return 0; } -- Gitee From dbbc9f1db3ea2539b048e361ff146b77745e2e3e Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:35 -0700 Subject: [PATCH 052/953] platform/x86/intel/ifs: Add new CPU support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit e6483a0b59026ded36a6f5eba1425a6b0965984a upstream. 
Intel-SIG: commit e6483a0b5902 platform/x86/intel/ifs: Add new CPU support Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Add Granite Rapids(GNR) and Sierra Forest(SRF) cpuids to the x86 match table so that the IFS driver can be loaded for them. Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-8-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 4ff2aa4b484b..0c8927916373 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -18,6 +18,9 @@ static const struct x86_cpu_id ifs_cpu_ids[] __initconst = { X86_MATCH(SAPPHIRERAPIDS_X), X86_MATCH(EMERALDRAPIDS_X), + X86_MATCH(GRANITERAPIDS_X), + X86_MATCH(GRANITERAPIDS_D), + X86_MATCH(ATOM_CRESTMONT_X), {} }; MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids); -- Gitee From 6c2b7b6a817cf4f48ae11239c7ea55e65a05700d Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:36 -0700 Subject: [PATCH 053/953] platform/x86/intel/ifs: Add new error code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit b9aa9e4c8b4e52b6f2f5986b27e97f4b6163f0bf upstream. Intel-SIG: commit b9aa9e4c8b4e platform/x86/intel/ifs: Add new error code Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Make the driver aware of a newly added error code so that it can provide a more appropriate error message.
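The pattern, condensed from the hunk below: the scan status error byte indexes a designated-initializer string table, so a newly documented hardware code only needs an entry at its numeric slot plus a matching case in the retry logic.

static const char * const scan_test_status[] = {
	/* ...existing entries... */
	[IFS_UNASSIGNED_ERROR_CODE_0xA] = "Unassigned error code 0xA",
	[IFS_CORRUPTED_CHUNK] =
		"Scan operation aborted due to corrupted image. Try reloading",
};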
Try reloading", }; static void message_not_tested(struct device *dev, int cpu, union ifs_status status) @@ -123,6 +127,8 @@ static bool can_restart(union ifs_status status) case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS: case IFS_CORE_NOT_CAPABLE_CURRENTLY: case IFS_UNASSIGNED_ERROR_CODE: + case IFS_UNASSIGNED_ERROR_CODE_0xA: + case IFS_CORRUPTED_CHUNK: break; } return false; -- Gitee From 2e8dec6a2a29fea9b0d8d46239e9460632b0d3e2 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:37 -0700 Subject: [PATCH 054/953] platform/x86/intel/ifs: ARRAY BIST for Sierra Forest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 06d65b2bc532fc9af1c55aa7a18cfd237ce46588 upstream. Intel-SIG: commit 06d65b2bc532 platform/x86/intel/ifs: ARRAY BIST for Sierra Forest Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Array BIST MSR addresses, bit definition and semantics are different for Sierra Forest. Branch into a separate Array BIST flow on Sierra Forest when user invokes Array Test. Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-10-jithu.joseph@intel.com [ij: ARRAY_GEN_* -> ARRAY_GEN* for consistency] Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/core.c | 15 +++++----- drivers/platform/x86/intel/ifs/ifs.h | 7 +++++ drivers/platform/x86/intel/ifs/runtest.c | 37 +++++++++++++++++++++++- 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 0c8927916373..7b11198d85a1 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -11,16 +11,16 @@ #include "ifs.h" -#define X86_MATCH(model) \ +#define X86_MATCH(model, array_gen) \ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ - INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL) + INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen) static const struct x86_cpu_id ifs_cpu_ids[] __initconst = { - X86_MATCH(SAPPHIRERAPIDS_X), - X86_MATCH(EMERALDRAPIDS_X), - X86_MATCH(GRANITERAPIDS_X), - X86_MATCH(GRANITERAPIDS_D), - X86_MATCH(ATOM_CRESTMONT_X), + X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0), + X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0), + X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1), {} }; MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids); @@ -100,6 +100,7 @@ static int __init ifs_init(void) continue; ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK, msrval); + ifs_devices[i].rw_data.array_gen = (u32)m->driver_data; ret = misc_register(&ifs_devices[i].misc); if (ret) goto err_exit; diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index f0dd849b3400..56b9f3e3cf76 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -137,6 +137,8 @@ #define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5 #define MSR_ACTIVATE_SCAN 0x000002c6 #define MSR_SCAN_STATUS 0x000002c7 +#define MSR_ARRAY_TRIGGER 0x000002d6 +#define MSR_ARRAY_STATUS 0x000002d7 #define MSR_SAF_CTRL 0x000004f0 #define SCAN_NOT_TESTED 0 @@ -146,6 +148,9 @@ #define IFS_TYPE_SAF 0 #define IFS_TYPE_ARRAY_BIST 1 +#define ARRAY_GEN0 0 +#define ARRAY_GEN1 1 + 
/* MSR_SCAN_HASHES_STATUS bit fields */ union ifs_scan_hashes_status { u64 data; @@ -272,6 +277,7 @@ struct ifs_test_caps { * @cur_batch: number indicating the currently loaded test file * @generation: IFS test generation enumerated by hardware * @chunk_size: size of a test chunk + * @array_gen: test generation of array test */ struct ifs_data { int loaded_version; @@ -283,6 +289,7 @@ struct ifs_data { u32 cur_batch; u32 generation; u32 chunk_size; + u32 array_gen; }; struct ifs_work { diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index a25fabea35e6..13ecd55c6668 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -329,6 +329,38 @@ static void ifs_array_test_core(int cpu, struct device *dev) ifsd->status = SCAN_TEST_PASS; } +#define ARRAY_GEN1_TEST_ALL_ARRAYS 0x0ULL +#define ARRAY_GEN1_STATUS_FAIL 0x1ULL + +static int do_array_test_gen1(void *status) +{ + int cpu = smp_processor_id(); + int first; + + first = cpumask_first(cpu_smt_mask(cpu)); + + if (cpu == first) { + wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); + rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status)); + } + + return 0; +} + +static void ifs_array_test_gen1(int cpu, struct device *dev) +{ + struct ifs_data *ifsd = ifs_get_data(dev); + u64 status = 0; + + stop_core_cpuslocked(cpu, do_array_test_gen1, &status); + ifsd->scan_details = status; + + if (status & ARRAY_GEN1_STATUS_FAIL) + ifsd->status = SCAN_TEST_FAIL; + else + ifsd->status = SCAN_TEST_PASS; +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. Once all sibling threads wake up, the scan test gets executed and @@ -357,7 +389,10 @@ int do_core_test(int cpu, struct device *dev) ifs_test_core(cpu, dev); break; case IFS_TYPE_ARRAY_BIST: - ifs_array_test_core(cpu, dev); + if (ifsd->array_gen == ARRAY_GEN0) + ifs_array_test_core(cpu, dev); + else + ifs_array_test_gen1(cpu, dev); break; default: ret = -EINVAL; -- Gitee From b296a6ce287e7cbc2e9acabeab64da0240d117c0 Mon Sep 17 00:00:00 2001 From: Aichun Shi Date: Wed, 24 Jan 2024 16:10:22 +0800 Subject: [PATCH 055/953] x86: configs: Add Intel In Field Scan(IFS) kernel config ANBZ: #8013 Intel-SIG: no upstream x86: configs: Add Intel In Field Scan(IFS) kernel config Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- arch/x86/configs/anolis-debug_defconfig | 2 +- arch/x86/configs/anolis_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 714c90743cba..a61c59967e02 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -5695,7 +5695,7 @@ CONFIG_THINKPAD_ACPI_VIDEO=y CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y # CONFIG_THINKPAD_LMI is not set # CONFIG_INTEL_ATOMISP2_PM is not set -# CONFIG_INTEL_IFS is not set +CONFIG_INTEL_IFS=m # CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 4fb6c9d9d8b7..937a54d025e9 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -5688,7 +5688,7 @@ CONFIG_THINKPAD_ACPI_VIDEO=y CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y # CONFIG_THINKPAD_LMI is not set # CONFIG_INTEL_ATOMISP2_PM is not set -# CONFIG_INTEL_IFS is not set +CONFIG_INTEL_IFS=m # 
CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m -- Gitee From 625589f2209b298893932fd59cb160498c3d4751 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 25 Jan 2024 00:22:50 -0800 Subject: [PATCH 056/953] platform/x86/intel/ifs: Call release_firmware() when handling errors. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit e8931ebb0143dc4a3ad302be6f963ae375548bc0 upstream. Intel-SIG: commit e8931ebb0143 platform/x86/intel/ifs: Call release_firmware() when handling errors. Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF [ Upstream commit 8c898ec07a2fc1d4694e81097a48e94a3816308d ] A missing release_firmware() in the error path blocked any future image loading. Fix the return code and call release_firmware() to release the bad image. Fixes: 25a76dbb36dd ("platform/x86/intel/ifs: Validate image size") Reported-by: Pengfei Xu Signed-off-by: Jithu Joseph Signed-off-by: Ashok Raj Tested-by: Pengfei Xu Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240125082254.424859-2-ashok.raj@intel.com Signed-off-by: Hans de Goede Signed-off-by: Sasha Levin [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/load.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 959b1878cae6..cf156c4a8024 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -401,7 +401,8 @@ int ifs_load_firmware(struct device *dev) if (fw->size != expected_size) { dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n", expected_size, fw->size); - return -EINVAL; + ret = -EINVAL; + goto release; } ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data); -- Gitee From d0bd89aa71d70d74dbae54debdcd7f381153d6eb Mon Sep 17 00:00:00 2001 From: Serge Hallyn Date: Tue, 5 Jan 2016 20:12:21 +0000 Subject: [PATCH 057/953] anolis: userns: add a sysctl to disable unprivileged user namespace unsharing ANBZ: #8322 commit 5758824b20fa2308ebb5c460874d0ffd73d0d8e4 Ubuntu groovy. The sysctl is turned on by default, but can be turned off if admins prefer or, more importantly, if a security vulnerability is found. The intent is to use this as mitigation so long as Ubuntu is on the cutting edge of enablement for things like unprivileged filesystem mounting.
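As a usage illustration (a hypothetical stand-alone demo, not part of the patch): once an admin runs "sysctl -w kernel.unprivileged_userns_clone=0", an unprivileged unshare fails with EPERM, which the following program makes visible:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		/* With kernel.unprivileged_userns_clone=0 and no
		 * CAP_SYS_ADMIN, the check added to ksys_unshare() below
		 * returns -EPERM; with the default value of 1 this
		 * succeeds as before. */
		if (unshare(CLONE_NEWUSER) != 0) {
			perror("unshare(CLONE_NEWUSER)");
			return 1;
		}
		puts("entered a new user namespace");
		return 0;
	}
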
(This patch is tweaked from the one currently still in Debian sid, which in turn came from the patch we had in saucy) Signed-off-by: Serge Hallyn [bwh: Remove unneeded binary sysctl bits] [ saf: move extern unprivileged_userns_clone declaration to include/linux/user_namespace.h to conform with 2374c09b1c8a "sysctl: remove all extern declaration from sysctl.c" ] Signed-off-by: Tim Gardner [jingbo: add documentation for the sysctl] Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2781 --- Documentation/admin-guide/sysctl/kernel.rst | 10 ++++++++ kernel/fork.c | 15 +++++++++++++++ kernel/sysctl.c | 12 ++++++++++++ kernel/user_namespace.c | 6 ++++++ 4 files changed, 43 insertions(+) diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index cf33de56da27..4408fe2f97e7 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1603,6 +1603,16 @@ entry will default to 2 instead of 0. = ============================================================= +unprivileged_userns_clone +========================= + +This value controls whether unprivileged users may unshare a new user +namespace. When the value is zero, unprivileged users are not allowed +to unshare a new user namespace. Privileged users (with CAP_SYS_ADMIN) +are not affected and are always capable of unsharing a new user +namespace. + + warn_limit ========== diff --git a/kernel/fork.c b/kernel/fork.c index 177ce7438db6..71b1c8ee328e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -110,6 +110,11 @@ #define CREATE_TRACE_POINTS #include +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +#else +#define unprivileged_userns_clone 0 +#endif /* * Minimum number of threads to boot the kernel @@ -2260,6 +2265,10 @@ __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); + if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group.
@@ -3413,6 +3422,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; + if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { + err = -EPERM; + if (!capable(CAP_SYS_ADMIN)) + goto bad_unshare_out; + } + err = check_unshare_flags(unshare_flags); if (err) goto bad_unshare_out; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 354a2d294f52..ecb22a814e16 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -96,6 +96,9 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); static const int six_hundred_forty_kb = 640 * 1024; #endif +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +#endif static const int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; @@ -2042,6 +2045,15 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, +#endif +#ifdef CONFIG_USER_NS + { + .procname = "unprivileged_userns_clone", + .data = &unprivileged_userns_clone, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #endif { } }; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 1d8e47bed3f1..a470898ff62b 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -22,6 +22,12 @@ #include #include +/* + * sysctl determining whether unprivileged users may unshare a new + * userns. Allowed by default + */ +int unprivileged_userns_clone = 1; + static struct kmem_cache *user_ns_cachep __read_mostly; static DEFINE_MUTEX(userns_state_mutex); -- Gitee From 5eb560eefc6c60cf4b1dabd0b9835826886a48c4 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 24 Apr 2022 09:54:17 +0800 Subject: [PATCH 058/953] anolis: userns: add a sysctl to control the max depth ANBZ: #8322 Add "sysctl.kernel.userns_max_level" to control the maximum nesting level of user namespaces. The valid configuration values are 0-33. When configured to zero, user namespace is effectively disabled. Originally the check was "if (parent_ns->level > 32)" and init_user_ns.level is zero, so the actual maximum level is 33 instead of 32. Signed-off-by: Jiang Liu Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2781 --- Documentation/admin-guide/sysctl/kernel.rst | 8 ++++++++ kernel/sysctl.c | 11 +++++++++++ kernel/user_namespace.c | 9 ++++++++- 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 4408fe2f97e7..af3fe24e938e 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1613,6 +1613,14 @@ are not affected and are always capable of unsharing a new user namespace. +userns_max_level +================ + +This value indicates the maximum nesting level of user namespaces. The +valid configuration values are 0-33. When configured to zero, user +namespace is effectively disabled.
+ + warn_limit ========== diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ecb22a814e16..0c11d319fa01 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -98,6 +98,8 @@ static const int six_hundred_forty_kb = 640 * 1024; #ifdef CONFIG_USER_NS extern int unprivileged_userns_clone; +extern int userns_max_level; +extern int userns_max_level_max; #endif static const int ngroups_max = NGROUPS_MAX; @@ -2054,6 +2056,15 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "userns_max_level", + .data = &userns_max_level, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &userns_max_level_max, + }, #endif { } }; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index a470898ff62b..8846049c8fa3 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -28,6 +28,13 @@ */ int unprivileged_userns_clone = 1; +/* + * sysctl determining the maximum of nested level. + * Default to 33 to keep compatible with upstream. + */ +int userns_max_level = 33; +int userns_max_level_max = 33; + static struct kmem_cache *user_ns_cachep __read_mostly; static DEFINE_MUTEX(userns_state_mutex); @@ -94,7 +101,7 @@ int create_user_ns(struct cred *new) int ret, i; ret = -ENOSPC; - if (parent_ns->level > 32) + if (parent_ns->level >= userns_max_level) goto fail; ucounts = inc_user_namespaces(parent_ns, owner); -- Gitee From 99340dc0f004c15dc022b36915b437d5c27ae84b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 22 Jan 2024 12:28:50 +0800 Subject: [PATCH 059/953] x86/microcode/amd: Fix snprintf() format string warning in W=1 build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8003 commit 2e9064faccd1a5b9de8c6f4b23d9f4948901cbe9 upstream. Building with GCC 11.x results in the following warning: arch/x86/kernel/cpu/microcode/amd.c: In function ‘find_blobs_in_containers’: arch/x86/kernel/cpu/microcode/amd.c:504:58: error: ‘h.bin’ directive output may be truncated writing 5 bytes into a region of size between 1 and 7 [-Werror=format-truncation=] arch/x86/kernel/cpu/microcode/amd.c:503:17: note: ‘snprintf’ output between 35 and 41 bytes into a destination of size 36 The issue is that GCC does not know that the family can only be a byte (it ultimately comes from CPUID). Suggest the right size to the compiler by marking the argument as char-size ("hh"). While at it, instead of using the slightly more obscure precision specifier use the width with zero padding (over 23000 occurrences in kernel sources, vs 500 for the idiom using the precision). Intel-SIG: commit 2e9064faccd1 x86/microcode/amd: Fix snprintf() format string warning in W=1 build. Microcode restructuring backport. 
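The warning and the fix can be reproduced outside the kernel; a minimal hypothetical sketch (the buffer size mirrors the 36 bytes from the warning quoted above):

	#include <stdio.h>

	int main(void)
	{
		char fw_name[36];
		unsigned int family = 0x19;

		/* Old idiom: the compiler cannot prove 'family' fits in one
		 * byte, so it assumes up to 8 hex digits may be printed and
		 * warns (-Wformat-truncation) that "h.bin" could be cut off. */
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

		/* New idiom: "hh" marks the argument as char-sized, so at
		 * most two digits are emitted and the warning goes away. */
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);

		puts(fw_name);
		return 0;
	}
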
Reported-by: kernel test robot Signed-off-by: Paolo Bonzini Signed-off-by: Ingo Molnar Signed-off-by: Borislav Petkov (AMD) Closes: https://lore.kernel.org/oe-kbuild-all/202308252255.2HPJ6x5Q-lkp@intel.com/ Link: https://lore.kernel.org/r/20231016224858.2829248-1-pbonzini@redhat.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 296b1f327d24..e884a7894be8 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -501,10 +501,10 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), - "amd-ucode/microcode_amd_fam%.2xh.bin", family); + "amd-ucode/microcode_amd_fam%02hhxh.bin", family); else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) snprintf(fw_name, sizeof(fw_name), - "hygon-ucode/microcode_hygon_fam%.2xh.bin", family); + "hygon-ucode/microcode_hygon_fam%02hhxh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; -- Gitee From 53813f48b48b90af57bb4f553758834138cd163c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:36 +0200 Subject: [PATCH 060/953] x86/boot/32: Disable stackprotector and tracing for mk_early_pgtbl_32() ANBZ: #8003 commit 242db7589460ca94e28c51ffbddd621756f97e11 upstream. Stackprotector cannot work before paging is enabled. The read from the per CPU variable __stack_chk_guard is always accessing the virtual address either directly on UP or via FS on SMP. In physical address mode this results in an access to memory above 3GB. So this works by chance as the hardware returns the same value when there is no RAM at this physical address. When there is RAM populated above 3G then the read is by chance the same as nothing changes that memory during the very early boot stage. Stop relying on pure luck and disable the stack protector for the only C function which is called during early boot before paging is enabled. Remove function tracing from the whole source file as there is no way to trace this at all, but in case of CONFIG_DYNAMIC_FTRACE=n mk_early_pgtbl_32() would access global function tracer variables in physical address mode which again might work by chance. Intel-SIG: commit 242db7589460 x86/boot/32: Disable stackprotector and tracing for mk_early_pgtbl_32(). Microcode restructuring backport. 
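To make the failure mode concrete, a rough C rendering of what -fstack-protector conceptually emits (hypothetical; the real canary check is generated by the compiler, not written by hand):

	extern unsigned long __stack_chk_guard;	/* per-CPU, via %fs on SMP */
	extern void __stack_chk_fail(void);

	void some_protected_function(void)
	{
		/* The load resolves to a virtual address above PAGE_OFFSET... */
		unsigned long canary = __stack_chk_guard;

		/* ... function body, here still in physical address mode ... */

		/* ... and is re-read on exit; before paging is on, both reads
		 * hit physical memory above 3GB and only match by luck. */
		if (canary != __stack_chk_guard)
			__stack_chk_fail();
	}
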
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.156063939@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/Makefile | 1 + arch/x86/kernel/head32.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 3269a0e23d3a..0000325ab98f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg +CFLAGS_REMOVE_head32.o = -pg CFLAGS_REMOVE_sev.o = -pg CFLAGS_REMOVE_rethook.o = -pg endif diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 246a609f889b..bf678d6f4359 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -70,7 +70,8 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) * always zero at this stage. */ void __init mk_early_pgtbl_32(void); -void __init mk_early_pgtbl_32(void) + +void __init __no_stack_protector mk_early_pgtbl_32(void) { #ifdef __pa #undef __pa -- Gitee From 3e55e100cb512eb6c9b3c98bdf1a6a1f8b3ec83a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:25 +0200 Subject: [PATCH 061/953] x86/boot: Use __pa_nodebug() in mk_early_pgtbl_32() ANBZ: #8003 commit 1e2dd572d2b773b5b8882aae66e5f0328d562aa9 upstream. Use the existing macro instead of undefining and redefining __pa(). No functional change. Intel-SIG: commit 1e2dd572d2b7 x86/boot: Use __pa_nodebug() in mk_early_pgtbl_32(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.051625827@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/head32.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index bf678d6f4359..8fe0dd38fff0 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -73,25 +73,21 @@ void __init mk_early_pgtbl_32(void); void __init __no_stack_protector mk_early_pgtbl_32(void) { -#ifdef __pa -#undef __pa -#endif -#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) pte_t pte, *ptep; int i; unsigned long *ptr; /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa(_end) + + const unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); #ifdef CONFIG_X86_PAE - pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd); + pmd_t pl2, *pl2p = (pmd_t *)__pa_nodebug(initial_pg_pmd); #define SET_PL2(pl2, val) { (pl2).pmd = (val); } #else - pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table); + pgd_t pl2, *pl2p = (pgd_t *)__pa_nodebug(initial_page_table); #define SET_PL2(pl2, val) { (pl2).pgd = (val); } #endif - ptep = (pte_t *)__pa(__brk_base); + ptep = (pte_t *)__pa_nodebug(__brk_base); pte.pte = PTE_IDENT_ATTR; while ((pte.pte & PTE_PFN_MASK) < limit) { @@ -111,11 +107,11 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) pl2p++; } - ptr = (unsigned long *)__pa(&max_pfn_mapped); + ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped); /* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */ *ptr = 
(pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT; - ptr = (unsigned long *)__pa(&_brk_end); + ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; } -- Gitee From 3d08d1a707fac258d9e2021a19e671fc3f96a0c3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:26 +0200 Subject: [PATCH 062/953] x86/boot/32: De-uglify the 2/3 level paging difference in mk_early_pgtbl_32() ANBZ: #8003 commit a62f4ca106fd250e9247decd100f3905131fc1fe upstream. Move the ifdeffery out of the function and use proper typedefs to make it work for both 2 and 3 level paging. No functional change. [ bp: Move mk_early_pgtbl_32() declaration into a header. ] Intel-SIG: commit a62f4ca106fd x86/boot/32: De-uglify the 2/3 level paging difference in mk_early_pgtbl_32(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.111059491@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/setup.h | 1 + arch/x86/kernel/head32.c | 38 +++++++++++++++++++----------------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index f3495623ac99..bf483fcb4e57 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -126,6 +126,7 @@ void clear_bss(void); #ifdef __i386__ asmlinkage void __init __noreturn i386_start_kernel(void); +void __init mk_early_pgtbl_32(void); #else asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode); diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 8fe0dd38fff0..2b6599807026 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -69,41 +69,43 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) * to the first kernel PMD. Note the upper half of each PMD or PTE are * always zero at this stage. 
*/ -void __init mk_early_pgtbl_32(void); +#ifdef CONFIG_X86_PAE +typedef pmd_t pl2_t; +#define pl2_base initial_pg_pmd +#define SET_PL2(val) { .pmd = (val), } +#else +typedef pgd_t pl2_t; +#define pl2_base initial_page_table +#define SET_PL2(val) { .pgd = (val), } +#endif void __init __no_stack_protector mk_early_pgtbl_32(void) { - pte_t pte, *ptep; - int i; - unsigned long *ptr; /* Enough space to fit pagetables for the low memory linear map */ const unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); -#ifdef CONFIG_X86_PAE - pmd_t pl2, *pl2p = (pmd_t *)__pa_nodebug(initial_pg_pmd); -#define SET_PL2(pl2, val) { (pl2).pmd = (val); } -#else - pgd_t pl2, *pl2p = (pgd_t *)__pa_nodebug(initial_page_table); -#define SET_PL2(pl2, val) { (pl2).pgd = (val); } -#endif + pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); + pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); + unsigned long *ptr; + int i; - ptep = (pte_t *)__pa_nodebug(__brk_base); pte.pte = PTE_IDENT_ATTR; while ((pte.pte & PTE_PFN_MASK) < limit) { + pl2_t pl2 = SET_PL2((unsigned long)ptep | PDE_IDENT_ATTR); - SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR); *pl2p = pl2; -#ifndef CONFIG_X86_PAE - /* Kernel PDE entry */ - *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; -#endif + + if (!IS_ENABLED(CONFIG_X86_PAE)) { + /* Kernel PDE entry */ + *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; + } + for (i = 0; i < PTRS_PER_PTE; i++) { *ptep = pte; pte.pte += PAGE_SIZE; ptep++; } - pl2p++; } -- Gitee From 31eb342939cb0cdc2451b957f16fe44444f8de83 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:28 +0200 Subject: [PATCH 063/953] x86/boot/32: Restructure mk_early_pgtbl_32() ANBZ: #8003 commit 69ba866db281c768d5ecca909361ea4c4e71d57e upstream. Prepare it for adding a temporary initrd mapping by splitting out the actual map loop. No functional change. Intel-SIG: commit 69ba866db281 x86/boot/32: Restructure mk_early_pgtbl_32(). Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.175910753@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/head32.c | 42 ++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 2b6599807026..bdce6321fabd 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -79,35 +79,40 @@ typedef pgd_t pl2_t; #define SET_PL2(val) { .pgd = (val), } #endif -void __init __no_stack_protector mk_early_pgtbl_32(void) +static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p, + const unsigned long limit) { - /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa_nodebug(_end) + - (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); - pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); - pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); - unsigned long *ptr; - int i; - - pte.pte = PTE_IDENT_ATTR; - while ((pte.pte & PTE_PFN_MASK) < limit) { - pl2_t pl2 = SET_PL2((unsigned long)ptep | PDE_IDENT_ATTR); - - *pl2p = pl2; + pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR); + int i; + **pl2p = pl2; if (!IS_ENABLED(CONFIG_X86_PAE)) { /* Kernel PDE entry */ - *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; + *(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; } for (i = 0; i < PTRS_PER_PTE; i++) { - *ptep = pte; + **ptep = pte; pte.pte += PAGE_SIZE; - ptep++; + (*ptep)++; } - pl2p++; + (*pl2p)++; } + return pte; +} + +void __init __no_stack_protector mk_early_pgtbl_32(void) +{ + /* Enough space to fit pagetables for the low memory linear map */ + const unsigned long limit = __pa_nodebug(_end) + + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); + pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); + pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); + unsigned long *ptr; + + pte.pte = PTE_IDENT_ATTR; + pte = init_map(pte, &ptep, &pl2p, limit); ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped); /* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */ @@ -116,4 +121,3 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; } - -- Gitee From 7458af9ff760266d9f33fffbc1d40321cf1ecdc5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 12:31:50 +0800 Subject: [PATCH 064/953] x86/microcode: Provide CONFIG_MICROCODE_INITRD32 ANBZ: #8003 commit fdbd43819400e74c1c20a646969ea8f71706eb2b upstream. Create an aggregate config switch which covers X86_32, MICROCODE and BLK_DEV_INITRD to avoid lengthy #ifdeffery in upcoming code. Intel-SIG: commit fdbd43819400 x86/microcode: Provide CONFIG_MICROCODE_INITRD32. Microcode restructuring backport. 
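The effect on users of the symbol, sketched (the "before" shape is hypothetical; the "after" shape matches what the following patches actually do):

	/* Before: each site spells out the full dependency chain. */
	#if defined(CONFIG_MICROCODE) && defined(CONFIG_X86_32) && \
	    defined(CONFIG_BLK_DEV_INITRD)
	/* ... 32-bit early initrd mapping helpers ... */
	#endif

	/* After: one aggregate symbol carries all three conditions. */
	#ifdef CONFIG_MICROCODE_INITRD32
	/* ... 32-bit early initrd mapping helpers ... */
	#endif
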
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.236208250@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c148b13ef777..8d48aa9ad287 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1313,6 +1313,10 @@ config MICROCODE def_bool y depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON +config MICROCODE_INITRD32 + def_bool y + depends on MICROCODE && X86_32 && BLK_DEV_INITRD + config MICROCODE_LATE_LOADING bool "Late microcode loading (DANGEROUS)" default n -- Gitee From a7f42fc3ee3851f7173b1a4b2a52bcb8524bdfbf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:31 +0200 Subject: [PATCH 065/953] x86/boot/32: Temporarily map initrd for microcode loading ANBZ: #8003 commit 4c585af7180c147062c636a927a2fc2b6a7072f5 upstream. Early microcode loading on 32-bit runs in physical address mode because the initrd is not covered by the initial page tables. That results in a horrible mess all over the microcode loader code. Provide a temporary mapping for the initrd in the initial page tables by appending it to the actual initial mapping starting with a new PGD or PMD depending on the configured page table levels ([non-]PAE). The page table entries are located after _brk_end so they are not permanently using memory space. The mapping is invalidated right away in i386_start_kernel() after the early microcode loader has run. This prepares for removing the physical address mode oddities from all over the microcode loader code, which in turn allows further cleanups. Provide the map and unmap code and document the place where the microcode loader needs to be invoked with a comment. Intel-SIG: commit 4c585af7180c x86/boot/32: Temporarily map initrd for microcode loading. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.292291436@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 2 ++ arch/x86/kernel/head32.c | 54 ++++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index bbbe9d744977..5216bf1acc3b 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { } static inline void microcode_bsp_resume(void) { } #endif +extern unsigned long initrd_start_early; + #ifdef CONFIG_CPU_SUP_INTEL /* Intel specific microcode defines. 
Public for IFS */ struct microcode_header_intel { diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index bdce6321fabd..abdbfd335e13 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -29,11 +29,33 @@ static void __init i386_default_early_setup(void) x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; } +#ifdef CONFIG_MICROCODE_INITRD32 +unsigned long __initdata initrd_start_early; +static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end; + +static void zap_early_initrd_mapping(void) +{ + pte_t *pl2p = initrd_pl2p_start; + + for (; pl2p < initrd_pl2p_end; pl2p++) { + *pl2p = (pte_t){ .pte = 0 }; + + if (!IS_ENABLED(CONFIG_X86_PAE)) + *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0}; + } +} +#else +static inline void zap_early_initrd_mapping(void) { } +#endif + asmlinkage __visible void __init __noreturn i386_start_kernel(void) { /* Make sure IDT is set up before any exception happens */ idt_setup_early_handler(); + /* load_ucode_bsp() */ + zap_early_initrd_mapping(); + cr4_init_shadow(); sanitize_boot_params(&boot_params); @@ -105,9 +127,9 @@ static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t void __init __no_stack_protector mk_early_pgtbl_32(void) { /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa_nodebug(_end) + - (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); + unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); + struct boot_params __maybe_unused *params; pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); unsigned long *ptr; @@ -120,4 +142,32 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; + +#ifdef CONFIG_MICROCODE_INITRD32 + /* Running on a hypervisor? */ + if (native_cpuid_ecx(1) & BIT(31)) + return; + + params = (struct boot_params *)__pa_nodebug(&boot_params); + if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image) + return; + + /* Save the virtual start address */ + ptr = (unsigned long *)__pa_nodebug(&initrd_start_early); + *ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET; + *ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK; + + /* Save PLP2 for cleanup */ + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; + + limit = (unsigned long)params->hdr.ramdisk_image; + pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit); + limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size; + + init_map(pte, &ptep, &pl2p, limit); + + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; +#endif } -- Gitee From 52c47e9e8ca7f5ed491c2eceeedb3cdcacbde790 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 12:42:27 +0800 Subject: [PATCH 066/953] x86/microcode/32: Move early loading after paging enable ANBZ: #8003 commit 0b62f6cb07738d7211d926c39f6946b87f72e792 upstream. 32-bit loads microcode before paging is enabled. The commit which introduced that has zero justification in the changelog. The cover letter has slightly more content, but it does not give any technical justification either: "The problem in current microcode loading method is that we load a microcode way, way too late; ideally we should load it before turning paging on. 
This may only be practical on 32 bits since we can't get to 64-bit mode without paging on, but we should still do it as early as at all possible." Handwaving word salad with zero technical content. Someone claimed in an offlist conversation that this is required for curing the ATOM erratum AAE44/AAF40/AAG38/AAH41. That erratum requires an microcode update in order to make the usage of PSE safe. But during early boot, PSE is completely irrelevant and it is evaluated way later. Neither is it relevant for the AP on single core HT enabled CPUs as the microcode loading on the AP is not doing anything. On dual core CPUs there is a theoretical problem if a split of an executable large page between enabling paging including PSE and loading the microcode happens. But that's only theoretical, it's practically irrelevant because the affected dual core CPUs are 64bit enabled and therefore have paging and PSE enabled before loading the microcode on the second core. So why would it work on 64-bit but not on 32-bit? The erratum: "AAG38 Code Fetch May Occur to Incorrect Address After a Large Page is Split Into 4-Kbyte Pages Problem: If software clears the PS (page size) bit in a present PDE (page directory entry), that will cause linear addresses mapped through this PDE to use 4-KByte pages instead of using a large page after old TLB entries are invalidated. Due to this erratum, if a code fetch uses this PDE before the TLB entry for the large page is invalidated then it may fetch from a different physical address than specified by either the old large page translation or the new 4-KByte page translation. This erratum may also cause speculative code fetches from incorrect addresses." The practical relevance for this is exactly zero because there is no splitting of large text pages during early boot-time, i.e. between paging enable and microcode loading, and neither during CPU hotplug. IOW, this load microcode before paging enable is yet another voodoo programming solution in search of a problem. What's worse is that it causes at least two serious problems: 1) When stackprotector is enabled, the microcode loader code has the stackprotector mechanics enabled. The read from the per CPU variable __stack_chk_guard is always accessing the virtual address either directly on UP or via %fs on SMP. In physical address mode this results in an access to memory above 3GB. So this works by chance as the hardware returns the same value when there is no RAM at this physical address. When there is RAM populated above 3G then the read is by chance the same as nothing changes that memory during the very early boot stage. That's not necessarily true during runtime CPU hotplug. 2) When function tracing is enabled, the relevant microcode loader functions and the functions invoked from there will call into the tracing code and evaluate global and per CPU variables in physical address mode. What could potentially go wrong? Cure this and move the microcode loading after the early paging enable, use the new temporary initrd mapping and remove the gunk in the microcode loader which is required to handle physical address mode. Intel-SIG: commit 0b62f6cb0773 x86/microcode/32: Move early loading after paging enable. Microcode restructuring backport. 
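The "gunk" being removed follows a single pattern throughout the loader; a condensed hypothetical illustration of why it existed:

	static int delay_ucode_info;	/* linked at its virtual address */

	void early_loader_path(void)
	{
		/* Before paging is enabled a plain store uses the virtual
		 * address as a physical one, i.e. it lands above 3GB and
		 * works only by accident: */
		delay_ucode_info = 1;

		/* So the old code had to fix up every global by hand: */
		*(int *)__pa_nodebug(&delay_ucode_info) = 1;
	}

Loading after paging enable makes the plain store correct, which is what allows all of the __pa_nodebug() fixups in the hunks below to be deleted.
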
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.348298216@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 5 - arch/x86/kernel/cpu/common.c | 12 --- arch/x86/kernel/cpu/microcode/amd.c | 110 +++++++-------------- arch/x86/kernel/cpu/microcode/core.c | 78 ++++----------- arch/x86/kernel/cpu/microcode/intel.c | 116 ++++------------------- arch/x86/kernel/cpu/microcode/internal.h | 2 +- arch/x86/kernel/head32.c | 3 +- arch/x86/kernel/head_32.S | 10 -- arch/x86/kernel/smpboot.c | 12 +-- 9 files changed, 71 insertions(+), 277 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 5216bf1acc3b..78f1eb2532dc 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -70,11 +70,6 @@ static inline u32 intel_get_microcode_revision(void) return rev; } - -void show_ucode_info_early(void); - -#else /* CONFIG_CPU_SUP_INTEL */ -static inline void show_ucode_info_early(void) { } #endif /* !CONFIG_CPU_SUP_INTEL */ #endif /* _ASM_X86_MICROCODE_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 73cfac3fc9c4..a906d39c8c96 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -2205,8 +2205,6 @@ static inline void setup_getcpu(int cpu) } #ifdef CONFIG_X86_64 -static inline void ucode_cpu_init(int cpu) { } - static inline void tss_setup_ist(struct tss_struct *tss) { /* Set up the per-CPU TSS IST stacks */ @@ -2217,16 +2215,8 @@ static inline void tss_setup_ist(struct tss_struct *tss) /* Only mapped when SEV-ES is active */ tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); } - #else /* CONFIG_X86_64 */ - -static inline void ucode_cpu_init(int cpu) -{ - show_ucode_info_early(); -} - static inline void tss_setup_ist(struct tss_struct *tss) { } - #endif /* !CONFIG_X86_64 */ static inline void tss_setup_io_bitmap(struct tss_struct *tss) @@ -2282,8 +2272,6 @@ void cpu_init(void) struct task_struct *cur = current; int cpu = raw_smp_processor_id(); - ucode_cpu_init(cpu); - #ifdef CONFIG_NUMA if (this_cpu_read(numa_node) == 0 && early_cpu_to_node(cpu) != NUMA_NO_NODE) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index e884a7894be8..3a5b64d19f76 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -121,24 +121,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) /* * Check whether there is a valid microcode container file at the beginning - * of @buf of size @buf_size. Set @early to use this function in the early path. + * of @buf of size @buf_size. 
*/ -static bool verify_container(const u8 *buf, size_t buf_size, bool early) +static bool verify_container(const u8 *buf, size_t buf_size) { u32 cont_magic; if (buf_size <= CONTAINER_HDR_SZ) { - if (!early) - pr_debug("Truncated microcode container header.\n"); - + pr_debug("Truncated microcode container header.\n"); return false; } cont_magic = *(const u32 *)buf; if (cont_magic != UCODE_MAGIC) { - if (!early) - pr_debug("Invalid magic value (0x%08x).\n", cont_magic); - + pr_debug("Invalid magic value (0x%08x).\n", cont_magic); return false; } @@ -147,23 +143,20 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated CPU equivalence table at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. */ -static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) +static bool verify_equivalence_table(const u8 *buf, size_t buf_size) { const u32 *hdr = (const u32 *)buf; u32 cont_type, equiv_tbl_len; - if (!verify_container(buf, buf_size, early)) + if (!verify_container(buf, buf_size)) return false; cont_type = hdr[1]; if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { - if (!early) - pr_debug("Wrong microcode container equivalence table type: %u.\n", - cont_type); - + pr_debug("Wrong microcode container equivalence table type: %u.\n", + cont_type); return false; } @@ -172,9 +165,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) equiv_tbl_len = hdr[2]; if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) || buf_size < equiv_tbl_len) { - if (!early) - pr_debug("Truncated equivalence table.\n"); - + pr_debug("Truncated equivalence table.\n"); return false; } @@ -183,22 +174,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated microcode patch section at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. * * On success, @sh_psize returns the patch size according to the section header, * to the caller. 
*/ static bool -__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early) +__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) { u32 p_type, p_size; const u32 *hdr; if (buf_size < SECTION_HDR_SIZE) { - if (!early) - pr_debug("Truncated patch section.\n"); - + pr_debug("Truncated patch section.\n"); return false; } @@ -207,17 +195,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early p_size = hdr[1]; if (p_type != UCODE_UCODE_TYPE) { - if (!early) - pr_debug("Invalid type field (0x%x) in container file section header.\n", - p_type); - + pr_debug("Invalid type field (0x%x) in container file section header.\n", + p_type); return false; } if (p_size < sizeof(struct microcode_header_amd)) { - if (!early) - pr_debug("Patch of size %u too short.\n", p_size); - + pr_debug("Patch of size %u too short.\n", p_size); return false; } @@ -269,7 +253,7 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size * 0: success */ static int -verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early) +verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size) { struct microcode_header_amd *mc_hdr; unsigned int ret; @@ -277,7 +261,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea u16 proc_id; u8 patch_fam; - if (!__verify_patch_section(buf, buf_size, &sh_psize, early)) + if (!__verify_patch_section(buf, buf_size, &sh_psize)) return -1; /* @@ -292,16 +276,13 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea * size sh_psize, as the section claims. */ if (buf_size < sh_psize) { - if (!early) - pr_debug("Patch of size %u truncated.\n", sh_psize); - + pr_debug("Patch of size %u truncated.\n", sh_psize); return -1; } ret = __verify_patch_size(family, sh_psize, buf_size); if (!ret) { - if (!early) - pr_debug("Per-family patch size mismatch.\n"); + pr_debug("Per-family patch size mismatch.\n"); return -1; } @@ -309,8 +290,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE); if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { - if (!early) - pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); + pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); return -1; } @@ -337,7 +317,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u16 eq_id; u8 *buf; - if (!verify_equivalence_table(ucode, size, true)) + if (!verify_equivalence_table(ucode, size)) return 0; buf = ucode; @@ -364,7 +344,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u32 patch_size; int ret; - ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true); + ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size); if (ret < 0) { /* * Patch verification failed, skip to the next container, if @@ -456,14 +436,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) { struct cont_desc desc = { 0 }; struct microcode_amd *mc; - u32 rev, dummy, *new_rev; bool ret = false; - -#ifdef CONFIG_X86_32 - new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); -#else - new_rev = &ucode_new_rev; -#endif + u32 rev, dummy; desc.cpuid_1_eax = cpuid_1_eax; @@ -484,8 +458,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) return ret; if (!__apply_microcode_amd(mc)) { - *new_rev = 
mc->hdr.patch_id; - ret = true; + ucode_new_rev = mc->hdr.patch_id; + ret = true; } return ret; @@ -517,33 +491,13 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) { - struct ucode_cpu_info *uci; struct cpio_data cp; - const char *path; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - if (x86_cpuid_vendor() == X86_VENDOR_HYGON) - path = (const char *)__pa_nodebug( - "kernel/x86/microcode/HygonGenuine.bin"); - else - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - uci = ucode_cpu_info; - if (x86_cpuid_vendor() == X86_VENDOR_HYGON) - path = "kernel/x86/microcode/HygonGenuine.bin"; - else - path = ucode_path; - use_pa = false; - } if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) - cp = find_microcode_in_initrd(path, use_pa); + cp = find_microcode_in_initrd(ucode_path); /* Needed in load_microcode_amd() */ - uci->cpu_sig.sig = cpuid_1_eax; + ucode_cpu_info->cpu_sig.sig = cpuid_1_eax; *ret = cp; } @@ -578,7 +532,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) else path = ucode_path; - cp = find_microcode_in_initrd(path, false); + cp = find_microcode_in_initrd(path); if (!(cp.data && cp.size)) return -EINVAL; @@ -754,7 +708,7 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) u32 equiv_tbl_len; const u32 *hdr; - if (!verify_equivalence_table(buf, buf_size, false)) + if (!verify_equivalence_table(buf, buf_size)) return 0; hdr = (const u32 *)buf; @@ -800,7 +754,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, u16 proc_id; int ret; - ret = verify_patch(family, fw, leftover, patch_size, false); + ret = verify_patch(family, fw, leftover, patch_size); if (ret) return ret; @@ -938,7 +892,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device) } ret = UCODE_ERROR; - if (!verify_container(fw->data, fw->size, false)) + if (!verify_container(fw->data, fw->size)) goto fw_release; ret = load_microcode_amd(c->x86, fw->data, fw->size); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 98245c19a90d..d755684c2580 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -94,10 +94,7 @@ static bool amd_check_current_patch_level(void) native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); - if (IS_ENABLED(CONFIG_X86_32)) - levels = (u32 *)__pa_nodebug(&final_levels); - else - levels = final_levels; + levels = final_levels; for (i = 0; levels[i]; i++) { if (lvl == levels[i]) @@ -109,17 +106,8 @@ static bool amd_check_current_patch_level(void) static bool __init check_loader_disabled_bsp(void) { static const char *__dis_opt_str = "dis_ucode_ldr"; - -#ifdef CONFIG_X86_32 - const char *cmdline = (const char *)__pa_nodebug(boot_command_line); - const char *option = (const char *)__pa_nodebug(__dis_opt_str); - bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); - -#else /* CONFIG_X86_64 */ const char *cmdline = boot_command_line; const char *option = __dis_opt_str; - bool *res = &dis_ucode_ldr; -#endif /* * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not @@ -127,18 +115,18 @@ static bool __init check_loader_disabled_bsp(void) * that's good enough as they don't land on the BSP path anyway. 
*/ if (native_cpuid_ecx(1) & BIT(31)) - return *res; + return true; if (x86_cpuid_vendor() == X86_VENDOR_AMD || x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) - return *res; + return true; } if (cmdline_find_option_bool(cmdline, option) <= 0) - *res = false; + dis_ucode_ldr = false; - return *res; + return dis_ucode_ldr; } void __init load_ucode_bsp(void) @@ -180,20 +168,11 @@ void __init load_ucode_bsp(void) load_ucode_amd_early(cpuid_1_eax); } -static bool check_loader_disabled_ap(void) -{ -#ifdef CONFIG_X86_32 - return *((bool *)__pa_nodebug(&dis_ucode_ldr)); -#else - return dis_ucode_ldr; -#endif -} - void load_ucode_ap(void) { unsigned int cpuid_1_eax; - if (check_loader_disabled_ap()) + if (dis_ucode_ldr) return; cpuid_1_eax = native_cpuid_eax(1); @@ -247,40 +226,28 @@ static int __init save_microcode_in_initrd(void) return ret; } -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) +struct cpio_data find_microcode_in_initrd(const char *path) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; size_t size; #ifdef CONFIG_X86_32 - struct boot_params *params; - - if (use_pa) - params = (struct boot_params *)__pa_nodebug(&boot_params); - else - params = &boot_params; - - size = params->hdr.ramdisk_size; - - /* - * Set start only if we have an initrd image. We cannot use initrd_start - * because it is not set that early yet. - */ + size = boot_params.hdr.ramdisk_size; + /* Early load on BSP has a temporary mapping. */ if (size) - start = params->hdr.ramdisk_image; + start = initrd_start_early; -# else /* CONFIG_X86_64 */ +#else /* CONFIG_X86_64 */ size = (unsigned long)boot_params.ext_ramdisk_size << 32; size |= boot_params.hdr.ramdisk_size; if (size) { start = (unsigned long)boot_params.ext_ramdisk_image << 32; start |= boot_params.hdr.ramdisk_image; - start += PAGE_OFFSET; } -# endif +#endif /* * Fixup the start address: after reserve_initrd() runs, initrd_start @@ -291,23 +258,10 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) * initrd_gone is for the hotplug case where we've thrown out initrd * already. */ - if (!use_pa) { - if (initrd_gone) - return (struct cpio_data){ NULL, 0, "" }; - if (initrd_start) - start = initrd_start; - } else { - /* - * The picture with physical addresses is a bit different: we - * need to get the *physical* address to which the ramdisk was - * relocated, i.e., relocated_ramdisk (not initrd_start) and - * since we're running from physical addresses, we need to access - * relocated_ramdisk through its *physical* address too. - */ - u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk); - if (*rr) - start = *rr; - } + if (initrd_gone) + return (struct cpio_data){ NULL, 0, "" }; + if (initrd_start) + start = initrd_start; return find_cpio_data(path, (void *)start, size, NULL); #else /* !CONFIG_BLK_DEV_INITRD */ diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 94dd6af9c963..24a5c8b594c6 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -319,15 +319,8 @@ static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigne if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) return; - /* - * Save for early loading. On 32-bit, that needs to be a physical - * address as the APs are running from physical addresses, before - * paging has been enabled. 
- */ - if (IS_ENABLED(CONFIG_X86_32)) - intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data); - else - intel_ucode_patch = p->data; + /* Save for early loading */ + intel_ucode_patch = p->data; } /* @@ -420,66 +413,10 @@ static bool load_builtin_intel_microcode(struct cpio_data *cp) return false; } -static void print_ucode_info(int old_rev, int new_rev, unsigned int date) -{ - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", - old_rev, - new_rev, - date & 0xffff, - date >> 24, - (date >> 16) & 0xff); -} - -#ifdef CONFIG_X86_32 - -static int delay_ucode_info; -static int current_mc_date; -static int early_old_rev; - -/* - * Print early updated ucode info after printk works. This is delayed info dump. - */ -void show_ucode_info_early(void) -{ - struct ucode_cpu_info uci; - - if (delay_ucode_info) { - intel_cpu_collect_info(&uci); - print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date); - delay_ucode_info = 0; - } -} - -/* - * At this point, we can not call printk() yet. Delay printing microcode info in - * show_ucode_info_early() until printk() works. - */ -static void print_ucode(int old_rev, int new_rev, int date) -{ - int *delay_ucode_info_p; - int *current_mc_date_p; - int *early_old_rev_p; - - delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); - current_mc_date_p = (int *)__pa_nodebug(¤t_mc_date); - early_old_rev_p = (int *)__pa_nodebug(&early_old_rev); - - *delay_ucode_info_p = 1; - *current_mc_date_p = date; - *early_old_rev_p = old_rev; -} -#else - -static inline void print_ucode(int old_rev, int new_rev, int date) -{ - print_ucode_info(old_rev, new_rev, date); -} -#endif - -static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) +static int apply_microcode_early(struct ucode_cpu_info *uci) { struct microcode_intel *mc; - u32 rev, old_rev; + u32 rev, old_rev, date; mc = uci->mc; if (!mc) @@ -513,11 +450,9 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) uci->cpu_sig.rev = rev; - if (early) - print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date); - else - print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date); - + date = mc->hdr.date; + pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", + old_rev, rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); return 0; } @@ -535,7 +470,7 @@ int __init save_microcode_in_initrd_intel(void) intel_ucode_patch = NULL; if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(ucode_path, false); + cp = find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) return 0; @@ -551,21 +486,11 @@ int __init save_microcode_in_initrd_intel(void) */ static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci) { - static const char *path; struct cpio_data cp; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - path = ucode_path; - use_pa = false; - } /* try built-in microcode first */ if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(path, use_pa); + cp = find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) return NULL; @@ -586,30 +511,21 @@ void __init load_ucode_intel_bsp(void) uci.mc = patch; - apply_microcode_early(&uci, true); + apply_microcode_early(&uci); } void load_ucode_intel_ap(void) { - struct microcode_intel *patch, **iup; struct ucode_cpu_info uci; - if (IS_ENABLED(CONFIG_X86_32)) - iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch); - else - iup = 
&intel_ucode_patch; - - if (!*iup) { - patch = __load_ucode_intel(&uci); - if (!patch) + if (!intel_ucode_patch) { + intel_ucode_patch = __load_ucode_intel(&uci); + if (!intel_ucode_patch) return; - - *iup = patch; } - uci.mc = *iup; - - apply_microcode_early(&uci, true); + uci.mc = intel_ucode_patch; + apply_microcode_early(&uci); } static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) @@ -647,7 +563,7 @@ void reload_ucode_intel(void) uci.mc = p; - apply_microcode_early(&uci, false); + apply_microcode_early(&uci); } static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 9e76fe430812..96df3da32346 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -44,7 +44,7 @@ struct microcode_ops { }; extern struct ucode_cpu_info ucode_cpu_info[]; -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa); +struct cpio_data find_microcode_in_initrd(const char *path); #define MAX_UCODE_COUNT 128 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index abdbfd335e13..de001b2146ab 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -53,7 +54,7 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) /* Make sure IDT is set up before any exception happens */ idt_setup_early_handler(); - /* load_ucode_bsp() */ + load_ucode_bsp(); zap_early_initrd_mapping(); cr4_init_shadow(); diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index c9318993f959..63f6ff4b28eb 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -118,11 +118,6 @@ SYM_CODE_START(startup_32) movl %eax, pa(olpc_ofw_pgd) #endif -#ifdef CONFIG_MICROCODE - /* Early load ucode on BSP. */ - call load_ucode_bsp -#endif - /* Create early pagetables. */ call mk_early_pgtbl_32 @@ -157,11 +152,6 @@ SYM_FUNC_START(startup_32_smp) movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp -#ifdef CONFIG_MICROCODE - /* Early load ucode on AP. */ - call load_ucode_ap -#endif - .Ldefault_entry: movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 54125b4cc46a..fac3b8a523ac 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -258,12 +258,9 @@ static void notrace start_secondary(void *unused) cpu_init_exception_handling(); /* - * 32-bit systems load the microcode from the ASM startup code for - * historical reasons. - * - * On 64-bit systems load it before reaching the AP alive - * synchronization point below so it is not part of the full per - * CPU serialized bringup part when "parallel" bringup is enabled. + * Load the microcode before reaching the AP alive synchronization + * point below so it is not part of the full per CPU serialized + * bringup part when "parallel" bringup is enabled. * * That's even safe when hyperthreading is enabled in the CPU as * the core code starts the primary threads first and leaves the @@ -276,8 +273,7 @@ static void notrace start_secondary(void *unused) * CPUID, MSRs etc. must be strictly serialized to maintain * software state correctness. */ - if (IS_ENABLED(CONFIG_X86_64)) - load_ucode_ap(); + load_ucode_ap(); /* * Synchronization point with the hotplug core. 
Sets this CPUs -- Gitee From 7c82cb5a702e677ab269fc44d1ff2dabc69e7464 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Tue, 17 Oct 2023 23:23:33 +0200 Subject: [PATCH 067/953] x86/microcode/intel: Rip out mixed stepping support for Intel CPUs ANBZ: #8003 commit ae76d951f6537001bdf77894d19cd4a446de337e upstream. Mixed steppings aren't supported on Intel CPUs. Only one microcode patch is required for the entire system. The caching of microcode blobs which match the family and model is therefore pointless and in fact is dysfunctional as CPU hotplug updates use only a single microcode blob, i.e. the one where *intel_ucode_patch points to. Remove the microcode cache and make it an AMD local feature. [ tglx: - save only at the end. Otherwise random microcode ends up in the pointer for early loading - free the ucode patch pointer in save_microcode_patch() only after kmemdup() has succeeded, as reported by Andrew Cooper ] Intel-SIG: commit ae76d951f653 x86/microcode/intel: Rip out mixed stepping support for Intel CPUs. Microcode restructuring backport. Originally-by: Thomas Gleixner Signed-off-by: Ashok Raj Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.404362809@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 10 ++ arch/x86/kernel/cpu/microcode/core.c | 2 - arch/x86/kernel/cpu/microcode/intel.c | 133 ++++------------------- arch/x86/kernel/cpu/microcode/internal.h | 10 -- 4 files changed, 34 insertions(+), 121 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 3a5b64d19f76..93156848df12 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -37,6 +37,16 @@ #include "internal.h" +struct ucode_patch { + struct list_head plist; + void *data; + unsigned int size; + u32 patch_id; + u16 equiv_cpu; +}; + +static LIST_HEAD(microcode_cache); + #define UCODE_MAGIC 0x00414d44 #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 #define UCODE_UCODE_TYPE 0x00000001 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index d755684c2580..95bd2b43d720 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -50,8 +50,6 @@ static bool dis_ucode_ldr = true; bool initrd_gone; -LIST_HEAD(microcode_cache); - /* * Synchronization. * diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 24a5c8b594c6..03a55bfa88c5 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -33,10 +33,10 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. 
*/ -static struct microcode_intel *intel_ucode_patch; +static struct microcode_intel *intel_ucode_patch __read_mostly; /* last level cache size per core */ -static int llc_size_per_core; +static int llc_size_per_core __ro_after_init; /* microcode format is extended from prescott processors */ struct extended_signature { @@ -253,74 +253,17 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev return intel_find_matching_signature(mc, csig, cpf); } -static struct ucode_patch *memdup_patch(void *data, unsigned int size) +static void save_microcode_patch(void *data, unsigned int size) { - struct ucode_patch *p; - - p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); - if (!p) - return NULL; - - p->data = kmemdup(data, size, GFP_KERNEL); - if (!p->data) { - kfree(p); - return NULL; - } - - return p; -} - -static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size) -{ - struct microcode_header_intel *mc_hdr, *mc_saved_hdr; - struct ucode_patch *iter, *tmp, *p = NULL; - bool prev_found = false; - unsigned int sig, pf; - - mc_hdr = (struct microcode_header_intel *)data; - - list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { - mc_saved_hdr = (struct microcode_header_intel *)iter->data; - sig = mc_saved_hdr->sig; - pf = mc_saved_hdr->pf; - - if (intel_find_matching_signature(data, sig, pf)) { - prev_found = true; - - if (mc_hdr->rev <= mc_saved_hdr->rev) - continue; - - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer %p\n", data); - else { - list_replace(&iter->plist, &p->plist); - kfree(iter->data); - kfree(iter); - } - } - } - - /* - * There weren't any previous patches found in the list cache; save the - * newly found. - */ - if (!prev_found) { - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer for %p\n", data); - else - list_add_tail(&p->plist, &microcode_cache); - } + struct microcode_header_intel *p; + p = kmemdup(data, size, GFP_KERNEL); if (!p) return; - if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) - return; - + kfree(intel_ucode_patch); /* Save for early loading */ - intel_ucode_patch = p->data; + intel_ucode_patch = (struct microcode_intel *)p; } /* @@ -332,6 +275,7 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; + u32 cur_rev = uci->cpu_sig.rev; unsigned int mc_size; while (size) { @@ -341,8 +285,7 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) mc_header = (struct microcode_header_intel *)data; mc_size = get_totalsize(mc_header); - if (!mc_size || - mc_size > size || + if (!mc_size || mc_size > size || intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; @@ -354,31 +297,16 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) continue; } - if (save) { - save_microcode_patch(uci, data, mc_size); + /* BSP scan: Check whether there is newer microcode */ + if (!save && cur_rev >= mc_header->rev) goto next; - } - - - if (!patch) { - if (!has_newer_microcode(data, - uci->cpu_sig.sig, - uci->cpu_sig.pf, - uci->cpu_sig.rev)) - goto next; - } else { - struct microcode_header_intel *phdr = &patch->hdr; - - if (!has_newer_microcode(data, - phdr->sig, - phdr->pf, - phdr->rev)) - goto next; - } + /* Save scan: Check whether there is newer or matching microcode */ + if (save && cur_rev != mc_header->rev) + goto next; - /* We have a newer patch, save it. 
*/ patch = data; + cur_rev = mc_header->rev; next: data += mc_size; @@ -387,6 +315,9 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) if (size) return NULL; + if (save && patch) + save_microcode_patch(patch, mc_size); + return patch; } @@ -528,26 +459,10 @@ void load_ucode_intel_ap(void) apply_microcode_early(&uci); } -static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) +/* Accessor for microcode pointer */ +static struct microcode_intel *ucode_get_patch(void) { - struct microcode_header_intel *phdr; - struct ucode_patch *iter, *tmp; - - list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { - - phdr = (struct microcode_header_intel *)iter->data; - - if (phdr->rev <= uci->cpu_sig.rev) - continue; - - if (!intel_find_matching_signature(phdr, - uci->cpu_sig.sig, - uci->cpu_sig.pf)) - continue; - - return iter->data; - } - return NULL; + return intel_ucode_patch; } void reload_ucode_intel(void) @@ -557,7 +472,7 @@ void reload_ucode_intel(void) intel_cpu_collect_info(&uci); - p = find_patch(&uci); + p = ucode_get_patch(); if (!p) return; @@ -601,7 +516,7 @@ static enum ucode_state apply_microcode_intel(int cpu) return UCODE_ERROR; /* Look for a newer patch in our cache: */ - mc = find_patch(uci); + mc = ucode_get_patch(); if (!mc) { mc = uci->mc; if (!mc) @@ -730,7 +645,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) uci->mc = (struct microcode_intel *)new_mc; /* Save for CPU hotplug */ - save_microcode_patch(uci, new_mc, new_mc_size); + save_microcode_patch(new_mc, new_mc_size); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 96df3da32346..12eb95557bdf 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -8,16 +8,6 @@ #include #include -struct ucode_patch { - struct list_head plist; - void *data; /* Intel uses only this one */ - unsigned int size; - u32 patch_id; - u16 equiv_cpu; -}; - -extern struct list_head microcode_cache; - struct device; enum ucode_state { -- Gitee From 88e555e2b1d460179097cf480569c385e75da1d0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:39 +0200 Subject: [PATCH 068/953] x86/microcode/intel: Simplify scan_microcode() ANBZ: #8003 commit b0f0bf5eef5fac6ba30b7cac15ca4cb01f8a6ca9 upstream. Make it readable and comprehensible. Intel-SIG: commit b0f0bf5eef5f x86/microcode/intel: Simplify scan_microcode(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.271940980@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 28 +++++++-------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 03a55bfa88c5..26f759b7b7fa 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -266,22 +266,16 @@ static void save_microcode_patch(void *data, unsigned int size) intel_ucode_patch = (struct microcode_intel *)p; } -/* - * Get microcode matching with BSP's model. Only CPUs with the same model as - * BSP can stay in the platform. 
- */ -static struct microcode_intel * -scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) +/* Scan CPIO for microcode matching the boot CPU's family, model, stepping */ +static struct microcode_intel *scan_microcode(void *data, size_t size, + struct ucode_cpu_info *uci, bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; u32 cur_rev = uci->cpu_sig.rev; unsigned int mc_size; - while (size) { - if (size < sizeof(struct microcode_header_intel)) - break; - + for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) { mc_header = (struct microcode_header_intel *)data; mc_size = get_totalsize(mc_header); @@ -289,27 +283,19 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; - size -= mc_size; - - if (!intel_find_matching_signature(data, uci->cpu_sig.sig, - uci->cpu_sig.pf)) { - data += mc_size; + if (!intel_find_matching_signature(data, uci->cpu_sig.sig, uci->cpu_sig.pf)) continue; - } /* BSP scan: Check whether there is newer microcode */ if (!save && cur_rev >= mc_header->rev) - goto next; + continue; /* Save scan: Check whether there is newer or matching microcode */ if (save && cur_rev != mc_header->rev) - goto next; + continue; patch = data; cur_rev = mc_header->rev; - -next: - data += mc_size; } if (size) -- Gitee From 56211bca0f37d5f0a27793f1c78438474ab13bac Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:40 +0200 Subject: [PATCH 069/953] x86/microcode/intel: Simplify and rename generic_load_microcode() ANBZ: #8003 commit 6b072022ab2e1e83b7588144ee0080f7197b71da upstream. so it becomes less obfuscated and rename it because there is nothing generic about it. Intel-SIG: commit 6b072022ab2e x86/microcode/intel: Simplify and rename generic_load_microcode(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.330295409@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 47 ++++++++++----------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 26f759b7b7fa..9463b148e1c6 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -240,19 +240,6 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); -/* - * Returns 1 if update has been found, 0 otherwise. 
- */ -static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) -{ - struct microcode_header_intel *mc_hdr = mc; - - if (mc_hdr->rev <= new_rev) - return 0; - - return intel_find_matching_signature(mc, csig, cpf); -} - static void save_microcode_patch(void *data, unsigned int size) { struct microcode_header_intel *p; @@ -559,14 +546,12 @@ static enum ucode_state apply_microcode_intel(int cpu) return ret; } -static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) +static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; unsigned int curr_mc_size = 0, new_mc_size = 0; - enum ucode_state ret = UCODE_OK; - int new_rev = uci->cpu_sig.rev; + int cur_rev = uci->cpu_sig.rev; u8 *new_mc = NULL, *mc = NULL; - unsigned int csig, cpf; while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; @@ -583,6 +568,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) pr_err("error! Bad data in microcode data file (totalsize too small)\n"); break; } + data_size = mc_size - sizeof(mc_header); if (data_size > iov_iter_count(iter)) { pr_err("error! Bad data in microcode data file (truncated file?)\n"); @@ -605,16 +591,17 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) break; } - csig = uci->cpu_sig.sig; - cpf = uci->cpu_sig.pf; - if (has_newer_microcode(mc, csig, cpf, new_rev)) { - vfree(new_mc); - new_rev = mc_header.rev; - new_mc = mc; - new_mc_size = mc_size; - mc = NULL; /* trigger new vmalloc */ - ret = UCODE_NEW; - } + if (cur_rev >= mc_header.rev) + continue; + + if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf)) + continue; + + vfree(new_mc); + cur_rev = mc_header.rev; + new_mc = mc; + new_mc_size = mc_size; + mc = NULL; } vfree(mc); @@ -634,9 +621,9 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) save_microcode_patch(new_mc, new_mc_size); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, new_rev, uci->cpu_sig.rev); + cpu, cur_rev, uci->cpu_sig.rev); - return ret; + return UCODE_NEW; } static bool is_blacklisted(unsigned int cpu) @@ -685,7 +672,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) kvec.iov_base = (void *)firmware->data; kvec.iov_len = firmware->size; iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size); - ret = generic_load_microcode(cpu, &iter); + ret = parse_microcode_blobs(cpu, &iter); release_firmware(firmware); -- Gitee From 8cbbd6988d104ae11afd20e02ffb7d93243af2f0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:41 +0200 Subject: [PATCH 070/953] x86/microcode/intel: Cleanup code further ANBZ: #8003 commit 0177669ee61de4dc641f9ad86a3df6f22327cf6c upstream. Sanitize the microcode scan loop, fixup printks and move the loading function for builtin microcode next to the place where it is used and mark it __init. Intel-SIG: commit 0177669ee61d x86/microcode/intel: Cleanup code further. Microcode restructuring backport. 
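As background for the printk fixups in this series: the Intel microcode header stores hdr.date as packed BCD fields (month in bits 31-24, day in bits 23-16, year in bits 15-0), which is why the loader can print it with plain hex conversions. A minimal user-space sketch of the same decoding; the date value is a made-up example, not taken from a real blob:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint32_t date = 0x07182023;  /* hypothetical hdr.date: month 07, day 18, year 2023 */

          /* same field extraction as the loader's "updated early" message */
          printf("date = %04x-%02x-%02x\n",
                 date & 0xffff,        /* year  */
                 date >> 24,           /* month */
                 (date >> 16) & 0xff); /* day   */
          return 0;
  }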
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.389400871@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 76 +++++++++++---------------- 1 file changed, 32 insertions(+), 44 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 9463b148e1c6..d6ff6ebc624b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -36,7 +36,7 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; static struct microcode_intel *intel_ucode_patch __read_mostly; /* last level cache size per core */ -static int llc_size_per_core __ro_after_init; +static unsigned int llc_size_per_core __ro_after_init; /* microcode format is extended from prescott processors */ struct extended_signature { @@ -294,29 +294,6 @@ static struct microcode_intel *scan_microcode(void *data, size_t size, return patch; } -static bool load_builtin_intel_microcode(struct cpio_data *cp) -{ - unsigned int eax = 1, ebx, ecx = 0, edx; - struct firmware fw; - char name[30]; - - if (IS_ENABLED(CONFIG_X86_32)) - return false; - - native_cpuid(&eax, &ebx, &ecx, &edx); - - sprintf(name, "intel-ucode/%02x-%02x-%02x", - x86_family(eax), x86_model(eax), x86_stepping(eax)); - - if (firmware_request_builtin(&fw, name)) { - cp->size = fw.size; - cp->data = (void *)fw.data; - return true; - } - - return false; -} - static int apply_microcode_early(struct ucode_cpu_info *uci) { struct microcode_intel *mc; @@ -360,6 +337,28 @@ static int apply_microcode_early(struct ucode_cpu_info *uci) return 0; } +static bool load_builtin_intel_microcode(struct cpio_data *cp) +{ + unsigned int eax = 1, ebx, ecx = 0, edx; + struct firmware fw; + char name[30]; + + if (IS_ENABLED(CONFIG_X86_32)) + return false; + + native_cpuid(&eax, &ebx, &ecx, &edx); + + sprintf(name, "intel-ucode/%02x-%02x-%02x", + x86_family(eax), x86_model(eax), x86_stepping(eax)); + + if (firmware_request_builtin(&fw, name)) { + cp->size = fw.size; + cp->data = (void *)fw.data; + return true; + } + return false; +} + int __init save_microcode_in_initrd_intel(void) { struct ucode_cpu_info uci; @@ -432,25 +431,16 @@ void load_ucode_intel_ap(void) apply_microcode_early(&uci); } -/* Accessor for microcode pointer */ -static struct microcode_intel *ucode_get_patch(void) -{ - return intel_ucode_patch; -} - void reload_ucode_intel(void) { - struct microcode_intel *p; struct ucode_cpu_info uci; intel_cpu_collect_info(&uci); - p = ucode_get_patch(); - if (!p) + uci.mc = intel_ucode_patch; + if (!uci.mc) return; - uci.mc = p; - apply_microcode_early(&uci); } @@ -488,8 +478,7 @@ static enum ucode_state apply_microcode_intel(int cpu) if (WARN_ON(raw_smp_processor_id() != cpu)) return UCODE_ERROR; - /* Look for a newer patch in our cache: */ - mc = ucode_get_patch(); + mc = intel_ucode_patch; if (!mc) { mc = uci->mc; if (!mc) @@ -680,18 +669,17 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) } static struct microcode_ops microcode_intel_ops = { - .request_microcode_fw = request_microcode_fw, - .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode_intel, + .request_microcode_fw = request_microcode_fw, + .collect_cpu_info = collect_cpu_info, + .apply_microcode = apply_microcode_intel, }; -static int __init 
calc_llc_size_per_core(struct cpuinfo_x86 *c) +static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) { u64 llc_size = c->x86_cache_size * 1024ULL; do_div(llc_size, c->x86_max_cores); - - return (int)llc_size; + llc_size_per_core = (unsigned int)llc_size; } struct microcode_ops * __init init_intel_microcode(void) @@ -704,7 +692,7 @@ struct microcode_ops * __init init_intel_microcode(void) return NULL; } - llc_size_per_core = calc_llc_size_per_core(c); + calc_llc_size_per_core(c); return &microcode_intel_ops; } -- Gitee From 2a7c2be0e0bcf36158777a0264aa83d35c647c01 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 12:50:13 +0800 Subject: [PATCH 071/953] x86/microcode/intel: Simplify early loading ANBZ: #8003 commit dd5e3e3ca6ac011582a9f3f987493bf6741568c0 upstream. The early loading code is overly complicated: - It scans the builtin/initrd for microcode not only on the BSP, but also on all APs during early boot and then later in the boot process it scans again to duplicate and save the microcode before initrd goes away. That's a pointless exercise because this can be simply done before bringing up the APs when the memory allocator is up and running. - Saving the microcode from within the scan loop is completely non-obvious and a left over of the microcode cache. This can be done at the call site now which makes it obvious. Rework the code so that only the BSP scans the builtin/initrd microcode once during early boot and save it away in an early initcall for later use. [ bp: Test and fold in a fix from tglx ontop which handles the need to distinguish what save_microcode() does depending on when it is called: - when on the BSP during early load, it needs to find a newer revision than the one currently loaded on the BSP - later, before SMP init, it still runs on the BSP and gets the BSP revision just loaded and uses that revision to know which patch to save for the APs. For that it needs to find the exact one as on the BSP. ] Intel-SIG: commit dd5e3e3ca6ac x86/microcode/intel: Simplify early loading. Microcode restructuring backport. 
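The hand-off introduced below rests on a sentinel pointer value, UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL), meaning "the BSP applied microcode early, but no heap copy exists yet". A freestanding sketch of that idea with hypothetical names; the diff that follows is the authoritative version:

  #include <stdio.h>

  #define BSP_LOADED ((void *)0x1UL)   /* sentinel: applied early, not yet cached */

  static void *patch_va;

  static void bsp_early_load(void)     /* runs before the allocator exists */
  {
          /* ...apply the blob straight out of builtin/initrd... */
          patch_va = BSP_LOADED;       /* record only that it worked */
  }

  static void save_initcall(void)      /* runs once, after mm is usable */
  {
          if (patch_va != BSP_LOADED)
                  return;              /* nothing was applied early */
          patch_va = NULL;
          /* ...re-scan, duplicate the matching blob, store the real pointer... */
  }

  int main(void)
  {
          bsp_early_load();
          save_initcall();
          return 0;
  }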
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.629085215@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 6 +- arch/x86/kernel/cpu/microcode/intel.c | 163 +++++++++++------------ arch/x86/kernel/cpu/microcode/internal.h | 3 +- 3 files changed, 79 insertions(+), 93 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 95bd2b43d720..f6e28d033f6e 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -46,7 +46,7 @@ static const struct microcode_ops *microcode_ops; #else static struct microcode_ops *microcode_ops; #endif -static bool dis_ucode_ldr = true; +bool dis_ucode_ldr = true; bool initrd_gone; @@ -203,10 +203,6 @@ static int __init save_microcode_in_initrd(void) } switch (c->x86_vendor) { - case X86_VENDOR_INTEL: - if (c->x86 >= 6) - ret = save_microcode_in_initrd_intel(); - break; case X86_VENDOR_AMD: if (c->x86 >= 0x10) ret = save_microcode_in_initrd_amd(cpuid_eax(1)); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index d6ff6ebc624b..9b6614490113 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -32,8 +32,10 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; +#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL) + /* Current microcode patch used in early patching on the APs. */ -static struct microcode_intel *intel_ucode_patch __read_mostly; +static struct microcode_intel *ucode_patch_va __read_mostly; /* last level cache size per core */ static unsigned int llc_size_per_core __ro_after_init; @@ -240,22 +242,30 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); -static void save_microcode_patch(void *data, unsigned int size) +static void update_ucode_pointer(struct microcode_intel *mc) { - struct microcode_header_intel *p; + kfree(ucode_patch_va); + + /* + * Save the virtual address for early loading and for eventual free + * on late loading. 
+ */ + ucode_patch_va = mc; +} - p = kmemdup(data, size, GFP_KERNEL); - if (!p) - return; +static void save_microcode_patch(struct microcode_intel *patch) +{ + struct microcode_intel *mc; - kfree(intel_ucode_patch); - /* Save for early loading */ - intel_ucode_patch = (struct microcode_intel *)p; + mc = kmemdup(patch, get_totalsize(&patch->hdr), GFP_KERNEL); + if (mc) + update_ucode_pointer(mc); } -/* Scan CPIO for microcode matching the boot CPU's family, model, stepping */ -static struct microcode_intel *scan_microcode(void *data, size_t size, - struct ucode_cpu_info *uci, bool save) +/* Scan blob for microcode matching the boot CPUs family, model, stepping */ +static __init struct microcode_intel *scan_microcode(void *data, size_t size, + struct ucode_cpu_info *uci, + bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; @@ -273,35 +283,35 @@ static struct microcode_intel *scan_microcode(void *data, size_t size, if (!intel_find_matching_signature(data, uci->cpu_sig.sig, uci->cpu_sig.pf)) continue; - /* BSP scan: Check whether there is newer microcode */ - if (!save && cur_rev >= mc_header->rev) - continue; - - /* Save scan: Check whether there is newer or matching microcode */ - if (save && cur_rev != mc_header->rev) + /* + * For saving the early microcode, find the matching revision which + * was loaded on the BSP. + * + * On the BSP during early boot, find a newer revision than + * actually loaded in the CPU. + */ + if (save) { + if (cur_rev != mc_header->rev) + continue; + } else if (cur_rev >= mc_header->rev) { continue; + } patch = data; cur_rev = mc_header->rev; } - if (size) - return NULL; - - if (save && patch) - save_microcode_patch(patch, mc_size); - - return patch; + return size ? NULL : patch; } -static int apply_microcode_early(struct ucode_cpu_info *uci) +static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) { struct microcode_intel *mc; u32 rev, old_rev, date; mc = uci->mc; if (!mc) - return 0; + return UCODE_NFOUND; /* * Save us the MSR write below - which is a particular expensive @@ -327,17 +337,17 @@ static int apply_microcode_early(struct ucode_cpu_info *uci) rev = intel_get_microcode_revision(); if (rev != mc->hdr.rev) - return -1; + return UCODE_ERROR; uci->cpu_sig.rev = rev; date = mc->hdr.date; pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", old_rev, rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); - return 0; + return UCODE_UPDATED; } -static bool load_builtin_intel_microcode(struct cpio_data *cp) +static __init bool load_builtin_intel_microcode(struct cpio_data *cp) { unsigned int eax = 1, ebx, ecx = 0, edx; struct firmware fw; @@ -359,89 +369,71 @@ static bool load_builtin_intel_microcode(struct cpio_data *cp) return false; } -int __init save_microcode_in_initrd_intel(void) +static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save) { - struct ucode_cpu_info uci; struct cpio_data cp; - /* - * initrd is going away, clear patch ptr. We will scan the microcode one - * last time before jettisoning and save a patch, if found. Then we will - * update that pointer too, with a stable patch address to use when - * resuming the cores. 
- */ - intel_ucode_patch = NULL; - if (!load_builtin_intel_microcode(&cp)) cp = find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) - return 0; + return NULL; - intel_cpu_collect_info(&uci); + intel_cpu_collect_info(uci); - scan_microcode(cp.data, cp.size, &uci, true); - return 0; + return scan_microcode(cp.data, cp.size, uci, save); } /* - * @res_patch, output: a pointer to the patch we found. + * Invoked from an early init call to save the microcode blob which was + * selected during early boot when mm was not usable. The microcode must be + * saved because initrd is going away. It's an early init call so the APs + * just can use the pointer and do not have to scan initrd/builtin firmware + * again. */ -static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci) +static int __init save_builtin_microcode(void) { - struct cpio_data cp; - - /* try built-in microcode first */ - if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(ucode_path); + struct ucode_cpu_info uci; - if (!(cp.data && cp.size)) - return NULL; + if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED) + return 0; - intel_cpu_collect_info(uci); + if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return 0; - return scan_microcode(cp.data, cp.size, uci, false); + uci.mc = get_microcode_blob(&uci, true); + if (uci.mc) + save_microcode_patch(uci.mc); + return 0; } +early_initcall(save_builtin_microcode); +/* Load microcode on BSP from initrd or builtin blobs */ void __init load_ucode_intel_bsp(void) { - struct microcode_intel *patch; struct ucode_cpu_info uci; - patch = __load_ucode_intel(&uci); - if (!patch) - return; - - uci.mc = patch; - - apply_microcode_early(&uci); + uci.mc = get_microcode_blob(&uci, false); + if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) + ucode_patch_va = UCODE_BSP_LOADED; } void load_ucode_intel_ap(void) { struct ucode_cpu_info uci; - if (!intel_ucode_patch) { - intel_ucode_patch = __load_ucode_intel(&uci); - if (!intel_ucode_patch) - return; - } - - uci.mc = intel_ucode_patch; - apply_microcode_early(&uci); + uci.mc = ucode_patch_va; + if (uci.mc) + apply_microcode_early(&uci); } +/* Reload microcode on resume */ void reload_ucode_intel(void) { - struct ucode_cpu_info uci; - - intel_cpu_collect_info(&uci); - - uci.mc = intel_ucode_patch; - if (!uci.mc) - return; + struct ucode_cpu_info uci = { .mc = ucode_patch_va, }; - apply_microcode_early(&uci); + if (uci.mc) + apply_microcode_early(&uci); } static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) @@ -478,7 +470,7 @@ static enum ucode_state apply_microcode_intel(int cpu) if (WARN_ON(raw_smp_processor_id() != cpu)) return UCODE_ERROR; - mc = intel_ucode_patch; + mc = ucode_patch_va; if (!mc) { mc = uci->mc; if (!mc) @@ -538,8 +530,8 @@ static enum ucode_state apply_microcode_intel(int cpu) static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - unsigned int curr_mc_size = 0, new_mc_size = 0; int cur_rev = uci->cpu_sig.rev; + unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; while (iov_iter_count(iter)) { @@ -589,7 +581,6 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) vfree(new_mc); cur_rev = mc_header.rev; new_mc = mc; - new_mc_size = mc_size; mc = NULL; } @@ -603,11 +594,11 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!new_mc) return UCODE_NFOUND; - vfree(uci->mc); - uci->mc = (struct 
microcode_intel *)new_mc; - /* Save for CPU hotplug */ - save_microcode_patch(new_mc, new_mc_size); + save_microcode_patch((struct microcode_intel *)new_mc); + uci->mc = ucode_patch_va; + + vfree(new_mc); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, cur_rev, uci->cpu_sig.rev); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 12eb95557bdf..9428ffcd7d79 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -90,6 +90,7 @@ static inline unsigned int x86_cpuid_family(void) return x86_family(eax); } +extern bool dis_ucode_ldr; extern bool initrd_gone; #ifdef CONFIG_CPU_SUP_AMD @@ -119,13 +120,11 @@ static const inline struct microcode_ops *init_hygon_microcode(void) { return NU #ifdef CONFIG_CPU_SUP_INTEL void load_ucode_intel_bsp(void); void load_ucode_intel_ap(void); -int save_microcode_in_initrd_intel(void); void reload_ucode_intel(void); struct microcode_ops *init_intel_microcode(void); #else /* CONFIG_CPU_SUP_INTEL */ static inline void load_ucode_intel_bsp(void) { } static inline void load_ucode_intel_ap(void) { } -static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; } static inline void reload_ucode_intel(void) { } static inline struct microcode_ops *init_intel_microcode(void) { return NULL; } #endif /* !CONFIG_CPU_SUP_INTEL */ -- Gitee From 16652f3d8aee1b093c8adae8a3847b867ff0f454 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:44 +0200 Subject: [PATCH 072/953] x86/microcode/intel: Save the microcode only after a successful late-load ANBZ: #8003 commit 2a1dada3d1cf8f80a27663653a371d99dbf5d540 upstream. There are situations where the late microcode is loaded into memory but is not applied: 1) The rendezvous fails 2) The microcode is rejected by the CPUs If any of this happens then the pointer which was updated at firmware load time is stale and subsequent CPU hotplug operations either fail to update or create inconsistent microcode state. Save the loaded microcode in a separate pointer before the late load is attempted and when successful, update the hotplug pointer accordingly via a new microcode_ops callback. Remove the pointless fallback in the loader to a microcode pointer which is never populated. Intel-SIG: commit 2a1dada3d1cf x86/microcode/intel: Save the microcode only after a successful late-load. Microcode restructuring backport. 
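Reduced to its core, the fix below is a stage-then-commit pattern: the file loader only fills a staging pointer, and the new finalize callback promotes it to the pointer which early load and hotplug consume. A plain-C sketch under those assumptions, with free() standing in for the kernel allocator calls:

  #include <stdlib.h>

  static void *live;    /* what early load / hotplug uses (ucode_patch_va)   */
  static void *staged;  /* candidate parsed from the file (ucode_patch_late) */

  static void finalize_late_load(int result)
  {
          if (!result) {          /* all CPUs accepted the update */
                  free(live);
                  live = staged;  /* commit: hotplug now sees the new blob */
          } else {
                  free(staged);   /* rendezvous failed or CPUs rejected it */
          }
          staged = NULL;
  }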
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.505491309@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 4 ++++ arch/x86/kernel/cpu/microcode/intel.c | 30 ++++++++++++------------ arch/x86/kernel/cpu/microcode/internal.h | 1 + 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index f6e28d033f6e..a0751b0491b1 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -421,6 +421,10 @@ static int microcode_reload_late(void) store_cpu_caps(&prev_info); ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); + + if (microcode_ops->finalize_late_load) + microcode_ops->finalize_late_load(ret); + if (!ret) { pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", old, boot_cpu_data.microcode); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 9b6614490113..076133b09cc7 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -36,6 +36,7 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. */ static struct microcode_intel *ucode_patch_va __read_mostly; +static struct microcode_intel *ucode_patch_late __read_mostly; /* last level cache size per core */ static unsigned int llc_size_per_core __ro_after_init; @@ -470,12 +471,9 @@ static enum ucode_state apply_microcode_intel(int cpu) if (WARN_ON(raw_smp_processor_id() != cpu)) return UCODE_ERROR; - mc = ucode_patch_va; - if (!mc) { - mc = uci->mc; - if (!mc) - return UCODE_NFOUND; - } + mc = ucode_patch_late; + if (!mc) + return UCODE_NFOUND; /* * Save us the MSR write below - which is a particular expensive @@ -594,15 +592,7 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!new_mc) return UCODE_NFOUND; - /* Save for CPU hotplug */ - save_microcode_patch((struct microcode_intel *)new_mc); - uci->mc = ucode_patch_va; - - vfree(new_mc); - - pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, cur_rev, uci->cpu_sig.rev); - + ucode_patch_late = (struct microcode_intel *)new_mc; return UCODE_NEW; } @@ -659,10 +649,20 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) return ret; } +static void finalize_late_load(int result) +{ + if (!result) + save_microcode_patch(ucode_patch_late); + + vfree(ucode_patch_late); + ucode_patch_late = NULL; +} + static struct microcode_ops microcode_intel_ops = { .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, .apply_microcode = apply_microcode_intel, + .finalize_late_load = finalize_late_load, }; static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 9428ffcd7d79..86a249f69bef 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -31,6 +31,7 @@ struct microcode_ops { */ enum ucode_state (*apply_microcode)(int cpu); int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + void (*finalize_late_load)(int result); }; extern struct ucode_cpu_info ucode_cpu_info[]; -- Gitee From 
7ed60a4de66402fcc1c5cc8f26e3b2ae96a85575 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:45 +0200 Subject: [PATCH 073/953] x86/microcode/intel: Switch to kvmalloc() ANBZ: #8003 commit f24f204405f9875bc539c6e88553fd5ac913c867 upstream. Microcode blobs are getting larger and might soon reach the kmalloc() limit. Switch over kvmalloc(). Intel-SIG: commit f24f204405f9 x86/microcode/intel: Switch to kvmalloc(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.564323243@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 48 ++++++++++++++------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 076133b09cc7..dd2d3fde8d06 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -245,7 +244,7 @@ EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); static void update_ucode_pointer(struct microcode_intel *mc) { - kfree(ucode_patch_va); + kvfree(ucode_patch_va); /* * Save the virtual address for early loading and for eventual free @@ -256,11 +255,14 @@ static void update_ucode_pointer(struct microcode_intel *mc) static void save_microcode_patch(struct microcode_intel *patch) { + unsigned int size = get_totalsize(&patch->hdr); struct microcode_intel *mc; - mc = kmemdup(patch, get_totalsize(&patch->hdr), GFP_KERNEL); + mc = kvmemdup(patch, size, GFP_KERNEL); if (mc) update_ucode_pointer(mc); + else + pr_err("Unable to allocate microcode memory size: %u\n", size); } /* Scan blob for microcode matching the boot CPUs family, model, stepping */ @@ -539,36 +541,34 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) { pr_err("error! Truncated or inaccessible header in microcode data file\n"); - break; + goto fail; } mc_size = get_totalsize(&mc_header); if (mc_size < sizeof(mc_header)) { pr_err("error! Bad data in microcode data file (totalsize too small)\n"); - break; + goto fail; } - data_size = mc_size - sizeof(mc_header); if (data_size > iov_iter_count(iter)) { pr_err("error! 
Bad data in microcode data file (truncated file?)\n"); - break; + goto fail; } /* For performance reasons, reuse mc area when possible */ if (!mc || mc_size > curr_mc_size) { - vfree(mc); - mc = vmalloc(mc_size); + kvfree(mc); + mc = kvmalloc(mc_size, GFP_KERNEL); if (!mc) - break; + goto fail; curr_mc_size = mc_size; } memcpy(mc, &mc_header, sizeof(mc_header)); data = mc + sizeof(mc_header); if (!copy_from_iter_full(data, data_size, iter) || - intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) { - break; - } + intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) + goto fail; if (cur_rev >= mc_header.rev) continue; @@ -576,24 +576,26 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf)) continue; - vfree(new_mc); + kvfree(new_mc); cur_rev = mc_header.rev; new_mc = mc; mc = NULL; } - vfree(mc); - - if (iov_iter_count(iter)) { - vfree(new_mc); - return UCODE_ERROR; - } + if (iov_iter_count(iter)) + goto fail; + kvfree(mc); if (!new_mc) return UCODE_NFOUND; ucode_patch_late = (struct microcode_intel *)new_mc; return UCODE_NEW; + +fail: + kvfree(mc); + kvfree(new_mc); + return UCODE_ERROR; } static bool is_blacklisted(unsigned int cpu) @@ -652,9 +654,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) static void finalize_late_load(int result) { if (!result) - save_microcode_patch(ucode_patch_late); - - vfree(ucode_patch_late); + update_ucode_pointer(ucode_patch_late); + else + kvfree(ucode_patch_late); ucode_patch_late = NULL; } -- Gitee From 96d18e7f662e036b7e1e4188e7c375b335c17331 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:44 +0200 Subject: [PATCH 074/953] x86/microcode/intel: Unify microcode apply() functions ANBZ: #8003 commit 3973718cff1e3a5d88ea78ec28ecca2afa60b30b upstream. Deduplicate the early and late apply() functions. [ bp: Rename the function which does the actual application to __apply_microcode() to differentiate it from microcode_ops.apply_microcode(). ] Intel-SIG: commit 3973718cff1e x86/microcode/intel: Unify microcode apply() functions. Microcode restructuring backport. Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20231017211722.795508212@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 104 +++++++++----------------- 1 file changed, 36 insertions(+), 68 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index dd2d3fde8d06..4235c95f17cf 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -307,12 +307,12 @@ static __init struct microcode_intel *scan_microcode(void *data, size_t size, return size ? NULL : patch; } -static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) +static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci, + struct microcode_intel *mc, + u32 *cur_rev) { - struct microcode_intel *mc; - u32 rev, old_rev, date; + u32 rev; - mc = uci->mc; if (!mc) return UCODE_NFOUND; @@ -321,14 +321,12 @@ static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) * operation - when the other hyperthread has updated the microcode * already. 
*/ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - uci->cpu_sig.rev = rev; + *cur_rev = intel_get_microcode_revision(); + if (*cur_rev >= mc->hdr.rev) { + uci->cpu_sig.rev = *cur_rev; return UCODE_OK; } - old_rev = rev; - /* * Writeback and invalidate caches before updating microcode to avoid * internal issues depending on what the microcode is updating. @@ -343,13 +341,24 @@ static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) return UCODE_ERROR; uci->cpu_sig.rev = rev; - - date = mc->hdr.date; - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", - old_rev, rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); return UCODE_UPDATED; } +static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) +{ + struct microcode_intel *mc = uci->mc; + enum ucode_state ret; + u32 cur_rev, date; + + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret == UCODE_UPDATED) { + date = mc->hdr.date; + pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", + cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); + } + return ret; +} + static __init bool load_builtin_intel_microcode(struct cpio_data *cp) { unsigned int eax = 1, ebx, ecx = 0, edx; @@ -459,70 +468,29 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) return 0; } -static enum ucode_state apply_microcode_intel(int cpu) +static enum ucode_state apply_microcode_late(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - struct cpuinfo_x86 *c = &cpu_data(cpu); - bool bsp = c->cpu_index == boot_cpu_data.cpu_index; - struct microcode_intel *mc; + struct microcode_intel *mc = ucode_patch_late; enum ucode_state ret; - static int prev_rev; - u32 rev; + u32 cur_rev; - /* We should bind the task to the CPU */ - if (WARN_ON(raw_smp_processor_id() != cpu)) + if (WARN_ON_ONCE(smp_processor_id() != cpu)) return UCODE_ERROR; - mc = ucode_patch_late; - if (!mc) - return UCODE_NFOUND; + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret != UCODE_UPDATED && ret != UCODE_OK) + return ret; - /* - * Save us the MSR write below - which is a particular expensive - * operation - when the other hyperthread has updated the microcode - * already. - */ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - ret = UCODE_OK; - goto out; - } - - /* - * Writeback and invalidate caches before updating microcode to avoid - * internal issues depending on what the microcode is updating. 
- */ - native_wbinvd(); - - /* write microcode via MSR 0x79 */ - wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); - - rev = intel_get_microcode_revision(); - - if (rev != mc->hdr.rev) { - pr_err("CPU%d update to revision 0x%x failed\n", - cpu, mc->hdr.rev); - return UCODE_ERROR; - } - - if (bsp && rev != prev_rev) { - pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n", - rev, - mc->hdr.date & 0xffff, - mc->hdr.date >> 24, + if (!cpu && uci->cpu_sig.rev != cur_rev) { + pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n", + uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24, (mc->hdr.date >> 16) & 0xff); - prev_rev = rev; } - ret = UCODE_UPDATED; - -out: - uci->cpu_sig.rev = rev; - c->microcode = rev; - - /* Update boot_cpu_data's revision too, if we're on the BSP: */ - if (bsp) - boot_cpu_data.microcode = rev; + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; return ret; } @@ -663,7 +631,7 @@ static void finalize_late_load(int result) static struct microcode_ops microcode_intel_ops = { .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode_intel, + .apply_microcode = apply_microcode_late, .finalize_late_load = finalize_late_load, }; -- Gitee From 178b1039486a2f183ad6fa2af2b1c359e9796fd2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:45 +0200 Subject: [PATCH 075/953] x86/microcode/intel: Rework intel_cpu_collect_info() ANBZ: #8003 commit 164aa1ca537238c46923ccacd8995b4265aee47b upstream. Nothing needs struct ucode_cpu_info. Make it take struct cpu_signature, let it return a boolean and simplify the implementation. Rename it now that the silly name clash with collect_cpu_info() is gone. Intel-SIG: commit 164aa1ca5372 x86/microcode/intel: Rework intel_cpu_collect_info(). Microcode restructuring backport. 
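The reworked helper keys everything off the raw CPUID(1).EAX value stored in cpu_signature, so the family/model test operates on packed bitfields. A user-space sketch of the decoding rules that x86_family()/x86_model() apply; 0x906ea is one published Intel signature (06-9e-0a):

  #include <stdio.h>
  #include <stdint.h>

  static unsigned int sig_family(uint32_t sig)
  {
          unsigned int fam = (sig >> 8) & 0xf;

          if (fam == 0xf)
                  fam += (sig >> 20) & 0xff;          /* extended family */
          return fam;
  }

  static unsigned int sig_model(uint32_t sig)
  {
          unsigned int model = (sig >> 4) & 0xf;

          if (((sig >> 8) & 0xf) >= 0x6)
                  model += ((sig >> 16) & 0xf) << 4;  /* extended model */
          return model;
  }

  int main(void)
  {
          uint32_t sig = 0x000906ea;

          printf("family 0x%x model 0x%x stepping 0x%x\n",
                 sig_family(sig), sig_model(sig), sig & 0xf);
          return 0;
  }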
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.851573238@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/cpu.h | 4 ++-- arch/x86/kernel/cpu/microcode/intel.c | 33 ++++++++------------------- drivers/platform/x86/intel/ifs/load.c | 8 +++---- 3 files changed, 14 insertions(+), 31 deletions(-) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 25050d953eee..068a07ed6165 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -71,9 +71,9 @@ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {} extern __noendbr void cet_disable(void); -struct ucode_cpu_info; +struct cpu_signature; -int intel_cpu_collect_info(struct ucode_cpu_info *uci); +void intel_collect_cpu_info(struct cpu_signature *sig); static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1, unsigned int s2, unsigned int p2) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 4235c95f17cf..5aa7f5efc440 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -68,36 +68,21 @@ static inline unsigned int exttable_size(struct extended_sigtable *et) return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE; } -int intel_cpu_collect_info(struct ucode_cpu_info *uci) +void intel_collect_cpu_info(struct cpu_signature *sig) { - unsigned int val[2]; - unsigned int family, model; - struct cpu_signature csig = { 0 }; - unsigned int eax, ebx, ecx, edx; - - memset(uci, 0, sizeof(*uci)); - - eax = 0x00000001; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - csig.sig = eax; + sig->sig = cpuid_eax(1); + sig->pf = 0; + sig->rev = intel_get_microcode_revision(); - family = x86_family(eax); - model = x86_model(eax); + if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) { + unsigned int val[2]; - if (model >= 5 || family > 6) { /* get processor flags from MSR 0x17 */ native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig.pf = 1 << ((val[1] >> 18) & 7); + sig->pf = 1 << ((val[1] >> 18) & 7); } - - csig.rev = intel_get_microcode_revision(); - - uci->cpu_sig = csig; - - return 0; } -EXPORT_SYMBOL_GPL(intel_cpu_collect_info); +EXPORT_SYMBOL_GPL(intel_collect_cpu_info); /* * Returns 1 if update has been found, 0 otherwise. 
@@ -391,7 +376,7 @@ static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info * if (!(cp.data && cp.size)) return NULL; - intel_cpu_collect_info(uci); + intel_collect_cpu_info(&uci->cpu_sig); return scan_microcode(cp.data, cp.size, uci, save); } diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index cf156c4a8024..d97c129e97ff 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -349,7 +349,7 @@ static int scan_chunks_sanity_check(struct device *dev) static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data) { - struct ucode_cpu_info uci; + struct cpu_signature sig; /* Provide a specific error message when loading an older/unsupported image */ if (data->hdrver != MC_HEADER_TYPE_IFS) { @@ -362,11 +362,9 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_ return -EINVAL; } - intel_cpu_collect_info(&uci); + intel_collect_cpu_info(&sig); - if (!intel_find_matching_signature((void *)data, - uci.cpu_sig.sig, - uci.cpu_sig.pf)) { + if (!intel_find_matching_signature((void *)data, sig.sig, sig.pf)) { dev_err(dev, "cpu signature, processor flags not matching\n"); return -EINVAL; } -- Gitee From 555337dcfdbc73eb14c40bd6882803accdcb904e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:49 +0200 Subject: [PATCH 076/953] x86/microcode/intel: Reuse intel_cpu_collect_info() ANBZ: #8003 commit 11f96ac4c21e701650c7d8349b252973185ac6ce upstream. No point for an almost duplicate function. Intel-SIG: commit 11f96ac4c21e x86/microcode/intel: Reuse intel_cpu_collect_info(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.741173606@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 5aa7f5efc440..47a96c0ef65f 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -435,21 +435,7 @@ void reload_ucode_intel(void) static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { - struct cpuinfo_x86 *c = &cpu_data(cpu_num); - unsigned int val[2]; - - memset(csig, 0, sizeof(*csig)); - - csig->sig = cpuid_eax(0x00000001); - - if ((c->x86_model >= 5) || (c->x86 > 6)) { - /* get processor flags from MSR 0x17 */ - rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig->pf = 1 << ((val[1] >> 18) & 7); - } - - csig->rev = c->microcode; - + intel_collect_cpu_info(csig); return 0; } -- Gitee From 3b629bbdf3769a2245cab2cfb389ff56e71d9115 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:50 +0200 Subject: [PATCH 077/953] x86/microcode/intel: Rework intel_find_matching_signature() ANBZ: #8003 commit b7fcd995b261c9976e05f47554529c98a0f1cbb0 upstream. Take a cpu_signature argument and work from there. Move the match() helper next to the callsite as there is no point for having it in a header. Intel-SIG: commit b7fcd995b261 x86/microcode/intel: Rework intel_find_matching_signature(). Microcode restructuring backport. 
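The one subtle rule in the reworked matcher is the processor-flags test: two signatures match when the platform-flag masks are either both empty or share at least one bit. A tiny self-contained sketch of that predicate; the flag values are arbitrary examples:

  #include <stdbool.h>
  #include <stdio.h>

  static bool pf_match(unsigned int p1, unsigned int p2)
  {
          /* either both sides advertise no platform flags ... */
          if (!p1 && !p2)
                  return true;
          /* ... or their masks intersect */
          return p1 & p2;
  }

  int main(void)
  {
          printf("%d %d %d\n",
                 pf_match(0x0, 0x0),   /* 1: both unset     */
                 pf_match(0x4, 0x6),   /* 1: bit 2 shared   */
                 pf_match(0x1, 0x2));  /* 0: disjoint masks */
          return 0;
  }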
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.797820205@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/cpu.h | 16 +------------- arch/x86/kernel/cpu/microcode/intel.c | 31 ++++++++++++++++----------- drivers/platform/x86/intel/ifs/load.c | 2 +- 3 files changed, 21 insertions(+), 28 deletions(-) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 068a07ed6165..fecc4fe1d68a 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -75,22 +75,8 @@ struct cpu_signature; void intel_collect_cpu_info(struct cpu_signature *sig); -static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1, - unsigned int s2, unsigned int p2) -{ - if (s1 != s2) - return false; - - /* Processor flags are either both 0 ... */ - if (!p1 && !p2) - return true; - - /* ... or they intersect. */ - return p1 & p2; -} - extern u64 x86_read_arch_cap_msr(void); -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf); +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig); int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type); extern struct cpumask cpus_stop_mask; diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 47a96c0ef65f..e5c5ddfd6831 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -84,29 +84,36 @@ void intel_collect_cpu_info(struct cpu_signature *sig) } EXPORT_SYMBOL_GPL(intel_collect_cpu_info); -/* - * Returns 1 if update has been found, 0 otherwise. - */ -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf) +static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2, + unsigned int pf2) +{ + if (s1->sig != sig2) + return false; + + /* Processor flags are either both 0 or they intersect. */ + return ((!s1->pf && !pf2) || (s1->pf & pf2)); +} + +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig) { struct microcode_header_intel *mc_hdr = mc; - struct extended_sigtable *ext_hdr; struct extended_signature *ext_sig; + struct extended_sigtable *ext_hdr; int i; - if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) - return 1; + if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf)) + return true; /* Look for ext. 
headers: */ if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE) - return 0; + return false; ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE; ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; for (i = 0; i < ext_hdr->count; i++) { - if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) - return 1; + if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf)) + return true; ext_sig++; } return 0; @@ -268,7 +275,7 @@ static __init struct microcode_intel *scan_microcode(void *data, size_t size, intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; - if (!intel_find_matching_signature(data, uci->cpu_sig.sig, uci->cpu_sig.pf)) + if (!intel_find_matching_signature(data, &uci->cpu_sig)) continue; /* @@ -512,7 +519,7 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (cur_rev >= mc_header.rev) continue; - if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf)) + if (!intel_find_matching_signature(mc, &uci->cpu_sig)) continue; kvfree(new_mc); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index d97c129e97ff..2cf3b4a8813f 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -364,7 +364,7 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_ intel_collect_cpu_info(&sig); - if (!intel_find_matching_signature((void *)data, sig.sig, sig.pf)) { + if (!intel_find_matching_signature((void *)data, &sig)) { dev_err(dev, "cpu signature, processor flags not matching\n"); return -EINVAL; } -- Gitee From cd7203517b98f345c051553d8beac6abe42fe08e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:49 +0200 Subject: [PATCH 078/953] x86/microcode: Remove pointless apply() invocation ANBZ: #8003 commit b48b26f992a3828b4ae274669f99ce68451d4904 upstream. Microcode is applied on the APs during early bringup. There is no point in trying to apply the microcode again during the hotplug operations and neither at the point where the microcode device is initialized. Collect CPU info and microcode revision in setup_online_cpu() for now. This will move to the CPU hotplug callback later. [ bp: Leave the starting notifier for the following scenario: - boot, late load, suspend to disk, resume without the starting notifier, only the last core manages to update the microcode upon resume: # rdmsr -a 0x8b 10000bf 10000bf 10000bf 10000bf 10000bf 10000dc <---- This is on an AMD F10h machine. For the future, one should check whether potential unification of the CPU init path could cover the resume path too so that this can be simplified even more. tglx: This is caused by the odd handling of APs which try to find the microcode blob in builtin or initrd instead of caching the microcode blob during early init before the APs are brought up. Will be cleaned up in a later step. ] Intel-SIG: commit b48b26f992a3 x86/microcode: Remove pointless apply() invocation. Microcode restructuring backport. 
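The mixed-revision state shown in the rdmsr output above can also be spotted through the microcode driver's sysfs attribute. A small sketch that walks the per-CPU nodes; the sysfs path is the mainline one, and the loop bound is an arbitrary example:

  #include <stdio.h>

  int main(void)
  {
          char path[64], buf[32];

          for (int cpu = 0; cpu < 64; cpu++) {
                  FILE *f;

                  snprintf(path, sizeof(path),
                           "/sys/devices/system/cpu/cpu%d/microcode/version", cpu);
                  f = fopen(path, "r");
                  if (!f)
                          break;       /* no such CPU (or driver not loaded) */
                  if (fgets(buf, sizeof(buf), f))
                          printf("cpu%d: %s", cpu, buf);
                  fclose(f);
          }
          return 0;
  }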
Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20231017211723.018821624@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a0751b0491b1..b7c0b462919a 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -511,17 +511,6 @@ static void microcode_fini_cpu(int cpu) microcode_ops->microcode_fini_cpu(cpu); } -static enum ucode_state microcode_init_cpu(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - memset(uci, 0, sizeof(*uci)); - - microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - - return microcode_ops->apply_microcode(cpu); -} - /** * microcode_bsp_resume - Update boot CPU microcode during resume. */ @@ -576,14 +565,14 @@ static int mc_cpu_down_prep(unsigned int cpu) static void setup_online_cpu(struct work_struct *work) { int cpu = smp_processor_id(); - enum ucode_state err; + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - err = microcode_init_cpu(cpu); - if (err == UCODE_ERROR) { - pr_err("Error applying microcode on CPU%d\n", cpu); - return; - } + memset(uci, 0, sizeof(*uci)); + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; mc_cpu_online(cpu); } -- Gitee From 62f73eb6cd842c33d40c77009a5948a052fca8a5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 10 Oct 2023 17:08:41 +0200 Subject: [PATCH 079/953] x86/microcode/amd: Use correct per CPU ucode_cpu_info ANBZ: #8003 commit ecfd41089348fa4cc767dc588367e9fdf8cb6b9d upstream. find_blobs_in_containers() is invoked on every CPU but overwrites unconditionally ucode_cpu_info of CPU0. Fix this by using the proper CPU data and move the assignment into the call site apply_ucode_from_containers() so that the function can be reused. Intel-SIG: commit ecfd41089348 x86/microcode/amd: Use correct per CPU ucode_cpu_info. Microcode restructuring backport. 
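[ Note: condensed illustration, not part of the applied diff. The bug and the fix side by side: ucode_cpu_info is an array, so dereferencing the array base always writes CPU0's slot, while the fix indexes with the CPU actually executing:

        /* Before: every CPU clobbered CPU0's entry */
        ucode_cpu_info->cpu_sig.sig = cpuid_1_eax;

        /* After: use the executing CPU's own entry */
        ucode_cpu_info[smp_processor_id()].cpu_sig.sig = cpuid_1_eax;
]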
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231010150702.433454320@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 93156848df12..2bc1a85ddd18 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -506,9 +506,6 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) cp = find_microcode_in_initrd(ucode_path); - /* Needed in load_microcode_amd() */ - ucode_cpu_info->cpu_sig.sig = cpuid_1_eax; - *ret = cp; } @@ -516,6 +513,9 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax) { struct cpio_data cp = { }; + /* Needed in load_microcode_amd() */ + ucode_cpu_info[smp_processor_id()].cpu_sig.sig = cpuid_1_eax; + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return; -- Gitee From afcb42c989ba3a15732d0ccdd02fd0a2ce7d0925 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:11:31 +0800 Subject: [PATCH 080/953] x86/microcode/amd: Cache builtin microcode too ANBZ: #8003 commit d419d28261e72e1c9ec418711b3da41df2265139 upstream. save_microcode_in_initrd_amd() fails to cache builtin microcode and only scans initrd. Use find_blobs_in_containers() instead which covers both. Intel-SIG: commit d419d28261e7 x86/microcode/amd: Cache builtin microcode too. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231010150702.495139089@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 2bc1a85ddd18..7253641e4184 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -535,14 +535,8 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; - const char *path; - if (x86_cpuid_vendor() == X86_VENDOR_HYGON) - path = "kernel/x86/microcode/HygonGenuine.bin"; - else - path = ucode_path; - - cp = find_microcode_in_initrd(path); + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return -EINVAL; -- Gitee From c6bc68517126f8785a7abab10e2af2159e7a4a94 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 16:15:42 +0800 Subject: [PATCH 081/953] x86/microcode/amd: Cache builtin/initrd microcode early ANBZ: #8003 commit a7939f01672034a58ad3fdbce69bb6c665ce0024 upstream. There is no reason to scan builtin/initrd microcode on each AP. Cache the builtin/initrd microcode in an early initcall so that the early AP loader can utilize the cache. The existing fs initcall which invoked save_microcode_in_initrd_amd() is still required to maintain the initrd_gone flag. Rename it accordingly. This will be removed once the AP loader code is converted to use the cache. 
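[ Note: condensed illustration, not part of the applied diff; the dis_ucode_ldr/vendor checks and the actual cache fill are elided. The point is the initcall level: early_initcall() runs from do_pre_smp_initcalls(), i.e. before the APs are brought up, while fs_initcall() runs later:

        static int __init save_microcode_in_initrd(void)
        {
                unsigned int cpuid_1_eax = native_cpuid_eax(1);
                struct cpio_data cp;

                /* Scan builtin/initrd once; the AP loader then uses the cache */
                find_blobs_in_containers(cpuid_1_eax, &cp);
                if (!(cp.data && cp.size))
                        return -EINVAL;

                /* ... load the blob into the per-family patch cache here ... */
                return 0;
        }
        early_initcall(save_microcode_in_initrd);
]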
Intel-SIG: commit a7939f016720 x86/microcode/amd: Cache builtin/initrd microcode early. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.187566507@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 9 ++++++++- arch/x86/kernel/cpu/microcode/core.c | 28 ++++------------------------ 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 7253641e4184..81dd20652dee 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -530,12 +530,18 @@ void load_ucode_amd_early(unsigned int cpuid_1_eax) static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); -int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) +static int __init save_microcode_in_initrd(void) { + unsigned int cpuid_1_eax = native_cpuid_eax(1); + struct cpuinfo_x86 *c = &boot_cpu_data; struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; + if (dis_ucode_ldr || ((c->x86_vendor != X86_VENDOR_AMD || + c->x86 < 0x10) && (c->x86_vendor != X86_VENDOR_HYGON))) + return 0; + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return -EINVAL; @@ -552,6 +558,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) return 0; } +early_initcall(save_microcode_in_initrd); /* * a small, trivial cache of per-family ucode patches diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b7c0b462919a..b0175ced6f1e 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -192,33 +192,14 @@ void load_ucode_ap(void) } } -static int __init save_microcode_in_initrd(void) +/* Temporary workaround until find_microcode_in_initrd() is __init */ +static int __init mark_initrd_gone(void) { - struct cpuinfo_x86 *c = &boot_cpu_data; - int ret = -EINVAL; - - if (dis_ucode_ldr) { - ret = 0; - goto out; - } - - switch (c->x86_vendor) { - case X86_VENDOR_AMD: - if (c->x86 >= 0x10) - ret = save_microcode_in_initrd_amd(cpuid_eax(1)); - break; - case X86_VENDOR_HYGON: - ret = save_microcode_in_initrd_amd(cpuid_eax(1)); - break; - default: - break; - } - -out: initrd_gone = true; - return ret; + return 0; } +fs_initcall(mark_initrd_gone); struct cpio_data find_microcode_in_initrd(const char *path) { @@ -641,5 +622,4 @@ static int __init microcode_init(void) return error; } -fs_initcall(save_microcode_in_initrd); late_initcall(microcode_init); -- Gitee From bd8f9ab13ad9187509310f4a65c71eb88d879659 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:39:30 +0800 Subject: [PATCH 082/953] x86/microcode/amd: Use cached microcode for AP load ANBZ: #8003 commit 5af05b8d51a8e3ff5905663655c0f46d1aaae44a upstream. Now that the microcode cache is initialized before the APs are brought up, there is no point in scanning builtin/initrd microcode during AP loading. Convert the AP loader to utilize the cache, which in turn makes the CPU hotplug callback which applies the microcode after initrd/builtin is gone, obsolete as the early loading during late hotplug operations including the resume path depends now only on the cache. Intel-SIG: commit 5af05b8d51a8 x86/microcode/amd: Use cached microcode for AP load. 
Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.243426023@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 20 +++++++++++--------- arch/x86/kernel/cpu/microcode/core.c | 17 +++-------------- arch/x86/kernel/cpu/microcode/internal.h | 2 -- 3 files changed, 14 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 81dd20652dee..cd157a8d43eb 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -499,7 +499,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) return false; } -static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) +static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) { struct cpio_data cp; @@ -509,12 +509,12 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret = cp; } -static void apply_ucode_from_containers(unsigned int cpuid_1_eax) +void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax) { struct cpio_data cp = { }; /* Needed in load_microcode_amd() */ - ucode_cpu_info[smp_processor_id()].cpu_sig.sig = cpuid_1_eax; + ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) @@ -523,11 +523,6 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax) early_apply_microcode(cpuid_1_eax, cp.data, cp.size); } -void load_ucode_amd_early(unsigned int cpuid_1_eax) -{ - return apply_ucode_from_containers(cpuid_1_eax); -} - static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); static int __init save_microcode_in_initrd(void) @@ -612,7 +607,6 @@ static struct ucode_patch *find_patch(unsigned int cpu) struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u16 equiv_id; - equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); if (!equiv_id) return NULL; @@ -714,6 +708,14 @@ static enum ucode_state apply_microcode_amd(int cpu) return ret; } +void load_ucode_amd_ap(unsigned int cpuid_1_eax) +{ + unsigned int cpu = smp_processor_id(); + + ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax; + apply_microcode_amd(cpu); +} + static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) { u32 equiv_tbl_len; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b0175ced6f1e..2b6e93cb9b73 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -163,7 +163,7 @@ void __init load_ucode_bsp(void) if (intel) load_ucode_intel_bsp(); else - load_ucode_amd_early(cpuid_1_eax); + load_ucode_amd_bsp(cpuid_1_eax); } void load_ucode_ap(void) @@ -182,10 +182,10 @@ void load_ucode_ap(void) break; case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) >= 0x10) - load_ucode_amd_early(cpuid_1_eax); + load_ucode_amd_ap(cpuid_1_eax); break; case X86_VENDOR_HYGON: - load_ucode_amd_early(cpuid_1_eax); + load_ucode_amd_ap(cpuid_1_eax); break; default: break; @@ -510,15 +510,6 @@ static struct syscore_ops mc_syscore_ops = { .resume = microcode_bsp_resume, }; -static int mc_cpu_starting(unsigned int cpu) -{ - enum ucode_state err = microcode_ops->apply_microcode(cpu); - - pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err); - - return err == 
UCODE_ERROR; -} - static int mc_cpu_online(unsigned int cpu) { struct device *dev = get_cpu_device(cpu); @@ -608,8 +599,6 @@ static int __init microcode_init(void) schedule_on_each_cpu(setup_online_cpu); register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", - mc_cpu_starting, NULL); cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", mc_cpu_online, mc_cpu_down_prep); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 86a249f69bef..1a3a26ea5a3e 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -97,7 +97,6 @@ extern bool initrd_gone; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); void load_ucode_amd_ap(unsigned int family); -void load_ucode_amd_early(unsigned int cpuid_1_eax); int save_microcode_in_initrd_amd(unsigned int family); void reload_ucode_amd(unsigned int cpu); struct microcode_ops *init_amd_microcode(void); @@ -105,7 +104,6 @@ void exit_amd_microcode(void); #else /* CONFIG_CPU_SUP_AMD */ static inline void load_ucode_amd_bsp(unsigned int family) { } static inline void load_ucode_amd_ap(unsigned int family) { } -static inline void load_ucode_amd_early(unsigned int family) { } static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } static inline void reload_ucode_amd(unsigned int cpu) { } static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } -- Gitee From a58cd37647df7364aaf5cccc62816a4819551f8b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:23:57 +0800 Subject: [PATCH 083/953] x86/microcode: Mop up early loading leftovers ANBZ: #8003 commit 8529e8ab6c6fab8ebf06ead98e77d7646b42fc48 upstream. Get rid of the initrd_gone hack which was required to keep find_microcode_in_initrd() functional after init. As find_microcode_in_initrd() is now only used during init, mark it accordingly. Intel-SIG: commit 8529e8ab6c6f x86/microcode: Mop up early loading leftovers. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.298854846@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 18 +----------------- arch/x86/kernel/cpu/microcode/internal.h | 1 - 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 2b6e93cb9b73..10e822e88c0e 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -48,8 +48,6 @@ static struct microcode_ops *microcode_ops; #endif bool dis_ucode_ldr = true; -bool initrd_gone; - /* * Synchronization. * @@ -192,16 +190,7 @@ void load_ucode_ap(void) } } -/* Temporary workaround until find_microcode_in_initrd() is __init */ -static int __init mark_initrd_gone(void) -{ - initrd_gone = true; - - return 0; -} -fs_initcall(mark_initrd_gone); - -struct cpio_data find_microcode_in_initrd(const char *path) +struct cpio_data __init find_microcode_in_initrd(const char *path) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; @@ -229,12 +218,7 @@ struct cpio_data find_microcode_in_initrd(const char *path) * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. 
In either case, initrd_start contains * the updated address so use that instead. - * - * initrd_gone is for the hotplug case where we've thrown out initrd - * already. */ - if (initrd_gone) - return (struct cpio_data){ NULL, 0, "" }; if (initrd_start) start = initrd_start; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 1a3a26ea5a3e..6ed522d5b942 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -92,7 +92,6 @@ static inline unsigned int x86_cpuid_family(void) } extern bool dis_ucode_ldr; -extern bool initrd_gone; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); -- Gitee From c3142d1697f060e69e246ea1285cf6e043f70fd5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:58 +0200 Subject: [PATCH 084/953] x86/microcode: Get rid of the schedule work indirection ANBZ: #8003 commit 2e1997335ceb6fc819862804f51d4fe83593c138 upstream. Scheduling work on all CPUs to collect the microcode information is just another extra step for no value. Let the CPU hotplug callback registration do it. Intel-SIG: commit 2e1997335ceb x86/microcode: Get rid of the schedule work indirection. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.354748138@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 29 ++++++++++------------------ 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 10e822e88c0e..17372282bb92 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -496,8 +496,16 @@ static struct syscore_ops mc_syscore_ops = { static int mc_cpu_online(unsigned int cpu) { + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct device *dev = get_cpu_device(cpu); + memset(uci, 0, sizeof(*uci)); + + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; + if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); return 0; @@ -518,20 +526,6 @@ static int mc_cpu_down_prep(unsigned int cpu) return 0; } -static void setup_online_cpu(struct work_struct *work) -{ - int cpu = smp_processor_id(); - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - memset(uci, 0, sizeof(*uci)); - - microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - cpu_data(cpu).microcode = uci->cpu_sig.rev; - if (!cpu) - boot_cpu_data.microcode = uci->cpu_sig.rev; - mc_cpu_online(cpu); -} - static struct attribute *cpu_root_microcode_attrs[] = { #ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, @@ -579,12 +573,9 @@ static int __init microcode_init(void) } } - /* Do per-CPU setup */ - schedule_on_each_cpu(setup_online_cpu); - register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", - mc_cpu_online, mc_cpu_down_prep); + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", + mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); -- Gitee From 008f93b0aee1f34879007ff235bb0daede76e9bd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:55 
+0200 Subject: [PATCH 085/953] x86/microcode: Clean up mc_cpu_down_prep() ANBZ: #8003 commit ba48aa32388ac652256baa8d0a6092d350160da0 upstream. This function has nothing to do with suspend. It's a hotplug callback. Remove the bogus comment. Drop the pointless debug printk. The hotplug core provides tracepoints which track the invocation of those callbacks. Intel-SIG: commit ba48aa32388a x86/microcode: Clean up mc_cpu_down_prep(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.028651784@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 17372282bb92..3b4987510650 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -513,16 +513,10 @@ static int mc_cpu_online(unsigned int cpu) static int mc_cpu_down_prep(unsigned int cpu) { - struct device *dev; - - dev = get_cpu_device(cpu); + struct device *dev = get_cpu_device(cpu); microcode_fini_cpu(cpu); - - /* Suspend is in progress, only remove the interface */ sysfs_remove_group(&dev->kobj, &mc_attr_group); - pr_debug("%s: CPU%d\n", __func__, cpu); - return 0; } -- Gitee From 4f3953b8ba17ee8af52cf9a372cf5c911f655af7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:56 +0200 Subject: [PATCH 086/953] x86/microcode: Handle "nosmt" correctly ANBZ: #8003 commit 634ac23ad609b3ddd9e0e478bd5afbf49d3a2556 upstream. On CPUs where microcode loading is not NMI-safe the SMT siblings which are parked in one of the play_dead() variants still react to NMIs. So if an NMI hits while the primary thread updates the microcode the resulting behaviour is undefined. The default play_dead() implementation on modern CPUs is using MWAIT which is not guaranteed to be safe against a microcode update which affects MWAIT. Take the cpus_booted_once_mask into account to detect this case and refuse to load late if the vendor specific driver does not advertise that late loading is NMI safe. AMD stated that this is safe, so mark the AMD driver accordingly. This requirement will be partially lifted in later changes. Intel-SIG: commit 634ac23ad609 x86/microcode: Handle "nosmt" correctly. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.087472735@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/microcode/amd.c | 9 +++-- arch/x86/kernel/cpu/microcode/core.c | 51 +++++++++++++++--------- arch/x86/kernel/cpu/microcode/internal.h | 13 +++--- 4 files changed, 44 insertions(+), 31 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8d48aa9ad287..1fbb7ae81da3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1320,7 +1320,7 @@ config MICROCODE_INITRD32 config MICROCODE_LATE_LOADING bool "Late microcode loading (DANGEROUS)" default n - depends on MICROCODE + depends on MICROCODE && SMP help Loading microcode late, when the system is up and executing instructions is a tricky business and should be avoided if possible. 
Just the sequence diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index cd157a8d43eb..f0b246eda09c 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -925,10 +925,11 @@ static void microcode_fini_cpu_amd(int cpu) } static struct microcode_ops microcode_amd_ops = { - .request_microcode_fw = request_microcode_amd, - .collect_cpu_info = collect_cpu_info_amd, - .apply_microcode = apply_microcode_amd, - .microcode_fini_cpu = microcode_fini_cpu_amd, + .request_microcode_fw = request_microcode_amd, + .collect_cpu_info = collect_cpu_info_amd, + .apply_microcode = apply_microcode_amd, + .microcode_fini_cpu = microcode_fini_cpu_amd, + .nmi_safe = true, }; struct microcode_ops * __init init_amd_microcode(void) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 3b4987510650..03a0c6af1033 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -269,23 +269,6 @@ static struct platform_device *microcode_pdev; */ #define SPINUNIT 100 /* 100 nsec */ -static int check_online_cpus(void) -{ - unsigned int cpu; - - /* - * Make sure all CPUs are online. It's fine for SMT to be disabled if - * all the primary threads are still online. - */ - for_each_present_cpu(cpu) { - if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { - pr_err("Not all CPUs online, aborting microcode update.\n"); - return -EINVAL; - } - } - - return 0; -} static atomic_t late_cpus_in; static atomic_t late_cpus_out; @@ -402,6 +385,35 @@ static int microcode_reload_late(void) return ret; } +/* + * Ensure that all required CPUs which are present and have been booted + * once are online. + * + * To pass this check, all primary threads must be online. + * + * If the microcode load is not safe against NMI then all SMT threads + * must be online as well because they still react to NMIs when they are + * soft-offlined and parked in one of the play_dead() variants. So if a + * NMI hits while the primary thread updates the microcode the resulting + * behaviour is undefined. The default play_dead() implementation on + * modern CPUs uses MWAIT, which is also not guaranteed to be safe + * against a microcode update which affects MWAIT. + */ +static bool ensure_cpus_are_online(void) +{ + unsigned int cpu; + + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + if (!cpu_online(cpu)) { + if (topology_is_primary_thread(cpu) || !microcode_ops->nmi_safe) { + pr_err("CPU %u not online\n", cpu); + return false; + } + } + } + return true; +} + static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) @@ -417,9 +429,10 @@ static ssize_t reload_store(struct device *dev, cpus_read_lock(); - ret = check_online_cpus(); - if (ret) + if (!ensure_cpus_are_online()) { + ret = -EBUSY; goto put; + } tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev); if (tmp_ret != UCODE_NEW) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 6ed522d5b942..cec418225e75 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -20,18 +20,17 @@ enum ucode_state { struct microcode_ops { enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev); - void (*microcode_fini_cpu)(int cpu); /* - * The generic 'microcode_core' part guarantees that - * the callbacks below run on a target cpu when they - * are being called. 
+ * The generic 'microcode_core' part guarantees that the callbacks + * below run on a target CPU when they are being called. * See also the "Synchronization" section in microcode_core.c. */ - enum ucode_state (*apply_microcode)(int cpu); - int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); - void (*finalize_late_load)(int result); + enum ucode_state (*apply_microcode)(int cpu); + int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + void (*finalize_late_load)(int result); + unsigned int nmi_safe : 1; }; extern struct ucode_cpu_info ucode_cpu_info[]; -- Gitee From cd862aa72594eec86209717908264bdb6c41fcd1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:57 +0200 Subject: [PATCH 087/953] x86/microcode: Clarify the late load logic ANBZ: #8003 commit 6f059e634dcd0d725854514c94c114bbdd83950d upstream. reload_store() is way too complicated. Split the inner workings out and make the following enhancements: - Taint the kernel only when the microcode was actually updated. If, e.g., the rendezvous fails, then nothing happened and there is no reason for tainting. - Return useful error codes Intel-SIG: commit 6f059e634dcd x86/microcode: Clarify the late load logic. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: https://lore.kernel.org/r/20231002115903.145048840@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 41 +++++++++++++--------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 03a0c6af1033..acced35aa200 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -377,11 +377,11 @@ static int microcode_reload_late(void) pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", old, boot_cpu_data.microcode); microcode_check(&prev_info); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); } else { pr_info("Reload failed, current microcode revision: 0x%x\n", boot_cpu_data.microcode); } - return ret; } @@ -414,40 +414,37 @@ static bool ensure_cpus_are_online(void) return true; } +static int ucode_load_late_locked(void) +{ + if (!ensure_cpus_are_online()) + return -EBUSY; + + switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) { + case UCODE_NEW: + return microcode_reload_late(); + case UCODE_NFOUND: + return -ENOENT; + default: + return -EBADFD; + } +} + static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - enum ucode_state tmp_ret = UCODE_OK; - int bsp = boot_cpu_data.cpu_index; unsigned long val; - ssize_t ret = 0; + ssize_t ret; ret = kstrtoul(buf, 0, &val); if (ret || val != 1) return -EINVAL; cpus_read_lock(); - - if (!ensure_cpus_are_online()) { - ret = -EBUSY; - goto put; - } - - tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev); - if (tmp_ret != UCODE_NEW) - goto put; - - ret = microcode_reload_late(); -put: + ret = ucode_load_late_locked(); cpus_read_unlock(); - if (ret == 0) - ret = size; - - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); - - return ret; + return ret ?
: size; } static DEVICE_ATTR_WO(reload); -- Gitee From 5a265bf70ab886cbcf25e68368df8e14d654cad9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:59 +0200 Subject: [PATCH 088/953] x86/microcode: Sanitize __wait_for_cpus() ANBZ: #8003 commit 0772b9aa1a8f7322dce8588c231cff8b57298a53 upstream. The code is too complicated for no reason: - The return value is pointless as this is a strict boolean. - It's way simpler to count down from num_online_cpus() and check for zero. - The timeout argument is pointless as this is always one second. - Touching the NMI watchdog every 100ns does not make any sense, neither does checking every 100ns. This is really not a hotpath operation. Preload the atomic counter with the number of online CPUs and simplify the whole timeout logic. Delay for one microsecond and touch the NMI watchdog once per millisecond. Intel-SIG: commit 0772b9aa1a8f x86/microcode: Sanitize __wait_for_cpus(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.204251527@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 39 ++++++++++++---------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index acced35aa200..c9b25f180d01 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -267,31 +267,26 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. */ -#define SPINUNIT 100 /* 100 nsec */ +static atomic_t late_cpus_in, late_cpus_out; - -static atomic_t late_cpus_in; -static atomic_t late_cpus_out; - -static int __wait_for_cpus(atomic_t *t, long long timeout) +static bool wait_for_cpus(atomic_t *cnt) { - int all_cpus = num_online_cpus(); + unsigned int timeout; - atomic_inc(t); + WARN_ON_ONCE(atomic_dec_return(cnt) < 0); - while (atomic_read(t) < all_cpus) { - if (timeout < SPINUNIT) { - pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", - all_cpus - atomic_read(t)); - return 1; - } + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (!atomic_read(cnt)) + return true; - ndelay(SPINUNIT); - timeout -= SPINUNIT; + udelay(1); - touch_nmi_watchdog(); + if (!(timeout % USEC_PER_MSEC)) + touch_nmi_watchdog(); } - return 0; + /* Prevent the late comers from making progress and let them time out */ + atomic_inc(cnt); + return false; } /* @@ -309,7 +304,7 @@ static int __reload_late(void *info) * Wait for all CPUs to arrive. A load will not be attempted unless all * CPUs show up. 
* */ - if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) + if (!wait_for_cpus(&late_cpus_in)) return -1; /* @@ -332,7 +327,7 @@ static int __reload_late(void *info) } wait_for_siblings: - if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC)) + if (!wait_for_cpus(&late_cpus_out)) panic("Timeout during microcode update!\n"); /* @@ -359,8 +354,8 @@ static int microcode_reload_late(void) pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); pr_err("You should switch to early loading, if possible.\n"); - atomic_set(&late_cpus_in, 0); - atomic_set(&late_cpus_out, 0); + atomic_set(&late_cpus_in, num_online_cpus()); + atomic_set(&late_cpus_out, num_online_cpus()); /* * Take a snapshot before the microcode update in order to compare and -- Gitee From be7fda2541b4ca4ed3321f269bbd9a1fc05019f6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:24:05 +0200 Subject: [PATCH 089/953] x86/microcode: Add per CPU result state ANBZ: #8003 commit 4b753955e9151ad2f722137a7bcbafda756186b3 upstream. The microcode rendezvous is purely acting on global state, which does not allow to analyze fails in a coherent way. Introduce per CPU state where the results are written into, which allows to analyze the return codes of the individual CPUs. Initialize the state when walking the cpu_present_mask in the online check to avoid another for_each_cpu() loop. Enhance the result print out with that. The structure is intentionally named ucode_ctrl as it will gain control fields in subsequent changes. Intel-SIG: commit 4b753955e915 x86/microcode: Add per CPU result state. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.632681010@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 114 +++++++++++++---------- arch/x86/kernel/cpu/microcode/internal.h | 1 + 2 files changed, 68 insertions(+), 47 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index c9b25f180d01..a82e825ff7d6 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -267,6 +267,11 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. */ +struct microcode_ctrl { + enum ucode_state result; +}; + +static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); static atomic_t late_cpus_in, late_cpus_out; static bool wait_for_cpus(atomic_t *cnt) @@ -289,23 +294,19 @@ static bool wait_for_cpus(atomic_t *cnt) return false; } -/* - * Returns: - * < 0 - on error - * 0 - success (no update done or microcode was updated) - */ -static int __reload_late(void *info) +static int load_cpus_stopped(void *unused) { int cpu = smp_processor_id(); - enum ucode_state err; - int ret = 0; + enum ucode_state ret; /* * Wait for all CPUs to arrive. A load will not be attempted unless all * CPUs show up. * */ - if (!wait_for_cpus(&late_cpus_in)) - return -1; + if (!wait_for_cpus(&late_cpus_in)) { + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return 0; + } /* * On an SMT system, it suffices to load the microcode on one sibling of @@ -314,17 +315,11 @@ static int __reload_late(void *info) * loading attempts happen on multiple threads of an SMT core. See * below. 
*/ - if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - err = microcode_ops->apply_microcode(cpu); - else + if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) goto wait_for_siblings; - if (err >= UCODE_NFOUND) { - if (err == UCODE_ERROR) { - pr_warn("Error reloading microcode on CPU %d\n", cpu); - ret = -1; - } - } + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); wait_for_siblings: if (!wait_for_cpus(&late_cpus_out)) @@ -336,19 +331,18 @@ static int __reload_late(void *info) * per-cpu cpuinfo can be updated with right microcode * revision. */ - if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - err = microcode_ops->apply_microcode(cpu); + if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) + return 0; - return ret; + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); + return 0; } -/* - * Reload microcode late on all CPUs. Wait for a sec until they - * all gather together. - */ -static int microcode_reload_late(void) +static int load_late_stop_cpus(void) { - int old = boot_cpu_data.microcode, ret; + unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; + int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); @@ -363,26 +357,47 @@ static int microcode_reload_late(void) */ store_cpu_caps(&prev_info); - ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); + stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask); + + /* Analyze the results */ + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + switch (per_cpu(ucode_ctrl.result, cpu)) { + case UCODE_UPDATED: updated++; break; + case UCODE_TIMEOUT: timedout++; break; + case UCODE_OK: siblings++; break; + default: failed++; break; + } + } if (microcode_ops->finalize_late_load) - microcode_ops->finalize_late_load(ret); - - if (!ret) { - pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", - old, boot_cpu_data.microcode); - microcode_check(&prev_info); - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); - } else { - pr_info("Reload failed, current microcode revision: 0x%x\n", - boot_cpu_data.microcode); + microcode_ops->finalize_late_load(!updated); + + if (!updated) { + /* Nothing changed. */ + if (!failed && !timedout) + return 0; + pr_err("update failed: %u CPUs failed %u CPUs timed out\n", + failed, timedout); + return -EIO; + } + + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); + if (failed || timedout) { + pr_err("load incomplete. %u CPUs timed out or failed\n", + num_online_cpus() - (updated + siblings)); } - return ret; + pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode); + microcode_check(&prev_info); + + return updated + siblings == num_online_cpus() ? 0 : -EIO; } /* - * Ensure that all required CPUs which are present and have been booted - * once are online. + * This function does two things: + * + * 1) Ensure that all required CPUs which are present and have been booted + * once are online. * * To pass this check, all primary threads must be online. * @@ -393,9 +408,12 @@ static int microcode_reload_late(void) * behaviour is undefined. The default play_dead() implementation on * modern CPUs uses MWAIT, which is also not guaranteed to be safe * against a microcode update which affects MWAIT. 
+ * + * 2) Initialize the per CPU control structure */ -static bool ensure_cpus_are_online(void) +static bool setup_cpus(void) { + struct microcode_ctrl ctrl = { .result = -1, }; unsigned int cpu; for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { @@ -405,18 +423,20 @@ static bool ensure_cpus_are_online(void) return false; } } + /* Initialize the per CPU state */ + per_cpu(ucode_ctrl, cpu) = ctrl; } return true; } -static int ucode_load_late_locked(void) +static int load_late_locked(void) { - if (!ensure_cpus_are_online()) + if (!setup_cpus()) return -EBUSY; switch (microcode_ops->request_microcode_fw(0, µcode_pdev->dev)) { case UCODE_NEW: - return microcode_reload_late(); + return load_late_stop_cpus(); case UCODE_NFOUND: return -ENOENT; default: @@ -436,7 +456,7 @@ static ssize_t reload_store(struct device *dev, return -EINVAL; cpus_read_lock(); - ret = ucode_load_late_locked(); + ret = load_late_locked(); cpus_read_unlock(); return ret ? : size; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index cec418225e75..2db13aeb707b 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -16,6 +16,7 @@ enum ucode_state { UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR, + UCODE_TIMEOUT, }; struct microcode_ops { -- Gitee From 1983f2f6fd6d0cc1c7ca48b992a86f2d7c7d21a0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:01 +0200 Subject: [PATCH 090/953] x86/microcode: Add per CPU control field ANBZ: #8003 commit ba3aeb97cb2c53025356f31c5a0a294385194115 upstream. Add a per CPU control field to ucode_ctrl and define constants for it which are going to be used to control the loading state machine. In theory this could be a global control field, but a global control does not cover the following case: 15 primary CPUs load microcode successfully 1 primary CPU fails and returns with an error code With global control the sibling of the failed CPU would either try again or the whole operation would be aborted with the consequence that the 15 siblings do not invoke the apply path and end up with inconsistent software state. The result in dmesg would be inconsistent too. There are two additional fields added and initialized: ctrl_cpu and secondaries. ctrl_cpu is the CPU number of the primary thread for now, but with the upcoming uniform loading at package or system scope this will be one CPU per package or just one CPU. Secondaries hands the control CPU a CPU mask which will be required to release the secondary CPUs out of the wait loop. Preparatory change for implementing a properly split control flow for primary and secondary CPUs. Intel-SIG: commit ba3aeb97cb2c x86/microcode: Add per CPU control field. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.319959519@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a82e825ff7d6..f27f78c274d7 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -267,8 +267,19 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. 
Right now, this is conservative * and good. */ +enum sibling_ctrl { + /* Spinwait with timeout */ + SCTRL_WAIT, + /* Invoke the microcode_apply() callback */ + SCTRL_APPLY, + /* Proceed without invoking the microcode_apply() callback */ + SCTRL_DONE, +}; + struct microcode_ctrl { + enum sibling_ctrl ctrl; enum ucode_state result; + unsigned int ctrl_cpu; }; static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); @@ -413,7 +424,7 @@ static int load_late_stop_cpus(void) */ static bool setup_cpus(void) { - struct microcode_ctrl ctrl = { .result = -1, }; + struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, }; unsigned int cpu; for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { @@ -423,7 +434,12 @@ static bool setup_cpus(void) return false; } } - /* Initialize the per CPU state */ + + /* + * Initialize the per CPU state. This is core scope for now, + * but prepared to take package or system scope into account. + */ + ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu)); per_cpu(ucode_ctrl, cpu) = ctrl; } return true; } -- Gitee From 152c067fa12c83ca0da4d07409579bbb583fb614 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:02 +0200 Subject: [PATCH 091/953] x86/microcode: Provide new control functions ANBZ: #8003 commit 6067788f04b1020b316344fe34746f96d594a042 upstream. The current all in one code is unreadable and really not suited for adding future features like uniform loading with package or system scope. Provide a set of new control functions which split the handling of the primary and secondary CPUs. These will replace the current rendezvous all in one function in the next step. This is intentionally a separate change because diff makes a complete unreadable mess otherwise. So the flow separates the primary and the secondary CPUs into their own functions which use the control field in the per CPU ucode_ctrl struct:

    primary()                   secondary()
     wait_for_all()              wait_for_all()
     apply_ucode()               wait_for_release()
     release()                   apply_ucode()

Intel-SIG: commit 6067788f04b1 x86/microcode: Provide new control functions. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.377922731@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 84 ++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index f27f78c274d7..0e43d2e97a56 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -305,6 +305,90 @@ static bool wait_for_cpus(atomic_t *cnt) return false; } +static bool wait_for_ctrl(void) +{ + unsigned int timeout; + + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) + return true; + udelay(1); + if (!(timeout % 1000)) + touch_nmi_watchdog(); + } + return false; +} + +static __maybe_unused void load_secondary(unsigned int cpu) +{ + unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu); + enum ucode_state ret; + + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return; + } + + /* + * Wait for primary threads to complete. If one of them hangs due
If one of them hangs due + * to the update, there is no way out. This is non-recoverable + * because the CPU might hold locks or resources and confuse the + * scheduler, watchdogs etc. There is no way to safely evacuate the + * machine. + */ + if (!wait_for_ctrl()) + panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + + /* + * If the primary succeeded then invoke the apply() callback, + * otherwise copy the state from the primary thread. + */ + if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY) + ret = microcode_ops->apply_microcode(cpu); + else + ret = per_cpu(ucode_ctrl.result, ctrl_cpu); + + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); +} + +static __maybe_unused void load_primary(unsigned int cpu) +{ + struct cpumask *secondaries = topology_sibling_cpumask(cpu); + enum sibling_ctrl ctrl; + enum ucode_state ret; + unsigned int sibling; + + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); + return; + } + + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); + + /* + * If the update was successful, let the siblings run the apply() + * callback. If not, tell them it's done. This also covers the + * case where the CPU has uniform loading at package or system + * scope implemented but does not advertise it. + */ + if (ret == UCODE_UPDATED || ret == UCODE_OK) + ctrl = SCTRL_APPLY; + else + ctrl = SCTRL_DONE; + + for_each_cpu(sibling, secondaries) { + if (sibling != cpu) + per_cpu(ucode_ctrl.ctrl, sibling) = ctrl; + } +} + static int load_cpus_stopped(void *unused) { int cpu = smp_processor_id(); -- Gitee From 4b17d8d4bb34ab6e055b361341bcbad348b4333a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:03 +0200 Subject: [PATCH 092/953] x86/microcode: Replace the all-in-one rendevous handler ANBZ: #8003 commit 0bf871651211b58c7b19f40b746b646d5311e2ec upstream. with a new handler which just separates the control flow of primary and secondary CPUs. Intel-SIG: commit 0bf871651211 x86/microcode: Replace the all-in-one rendevous handler. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.433704135@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 51 +++++----------------------- 1 file changed, 9 insertions(+), 42 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 0e43d2e97a56..f717b2440186 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -283,7 +283,7 @@ struct microcode_ctrl { }; static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); -static atomic_t late_cpus_in, late_cpus_out; +static atomic_t late_cpus_in; static bool wait_for_cpus(atomic_t *cnt) { @@ -319,7 +319,7 @@ static bool wait_for_ctrl(void) return false; } -static __maybe_unused void load_secondary(unsigned int cpu) +static void load_secondary(unsigned int cpu) { unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu); enum ucode_state ret; @@ -354,7 +354,7 @@ static __maybe_unused void load_secondary(unsigned int cpu) this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); } -static __maybe_unused void load_primary(unsigned int cpu) +static void load_primary(unsigned int cpu) { struct cpumask *secondaries = topology_sibling_cpumask(cpu); enum sibling_ctrl ctrl; @@ -391,46 +391,14 @@ static __maybe_unused void load_primary(unsigned int cpu) static int load_cpus_stopped(void *unused) { - int cpu = smp_processor_id(); - enum ucode_state ret; - - /* - * Wait for all CPUs to arrive. A load will not be attempted unless all - * CPUs show up. - * */ - if (!wait_for_cpus(&late_cpus_in)) { - this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); - return 0; - } - - /* - * On an SMT system, it suffices to load the microcode on one sibling of - * the core because the microcode engine is shared between the threads. - * Synchronization still needs to take place so that no concurrent - * loading attempts happen on multiple threads of an SMT core. See - * below. - */ - if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - goto wait_for_siblings; + unsigned int cpu = smp_processor_id(); - ret = microcode_ops->apply_microcode(cpu); - this_cpu_write(ucode_ctrl.result, ret); - -wait_for_siblings: - if (!wait_for_cpus(&late_cpus_out)) - panic("Timeout during microcode update!\n"); - - /* - * At least one thread has completed update on each core. - * For others, simply call the update to make sure the - * per-cpu cpuinfo can be updated with right microcode - * revision. - */ - if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - return 0; + if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) + load_primary(cpu); + else + load_secondary(cpu); - ret = microcode_ops->apply_microcode(cpu); - this_cpu_write(ucode_ctrl.result, ret); + /* No point to wait here. The CPUs will all wait in stop_machine(). 
*/ return 0; } @@ -444,7 +412,6 @@ static int load_late_stop_cpus(void) pr_err("You should switch to early loading, if possible.\n"); atomic_set(&late_cpus_in, num_online_cpus()); - atomic_set(&late_cpus_out, num_online_cpus()); /* * Take a snapshot before the microcode update in order to compare and -- Gitee From d5d70dd8cd3e63debc159d7d295d4e76097f31e3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:05 +0200 Subject: [PATCH 093/953] x86/microcode: Rendezvous and load in NMI ANBZ: #8003 commit 7eb314a22800457396f541c655697dabd71e44a7 upstream. stop_machine() does not prevent the spin-waiting sibling from handling an NMI, which is obviously violating the whole concept of rendezvous. Implement a static branch right in the beginning of the NMI handler which is nopped out except when enabled by the late loading mechanism. The late loader enables the static branch before stop_machine() is invoked. Each CPU has an nmi_enable in its control structure which indicates whether the CPU should go into the update routine. This is required to bridge the gap between enabling the branch and actually being at the point where it is required to enter the loader wait loop. Each CPU which arrives in the stopper thread function sets that flag and issues a self NMI right after that. If the NMI function sees the flag clear, it returns. If it's set it clears the flag and enters the rendezvous. This is safe against a real NMI which hits in between setting the flag and sending the NMI to itself. The real NMI will be swallowed by the microcode update and the self NMI will then let stuff continue. Otherwise this would end up with a spurious NMI. Intel-SIG: commit 7eb314a22800 x86/microcode: Rendezvous and load in NMI. Microcode restructuring backport. 
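[ Note: condensed illustration, not part of the applied diff. The NMI entry side is a per-CPU gate: a real NMI arriving between setting the flag and the self-IPI clears the flag and is absorbed by the update, and the later self-NMI finds the flag clear and falls through to normal NMI processing:

        bool microcode_nmi_handler(void)
        {
                /* Not armed for the update? Then this NMI is not ours. */
                if (!this_cpu_read(ucode_ctrl.nmi_enabled))
                        return false;

                this_cpu_write(ucode_ctrl.nmi_enabled, false);
                return microcode_update_handler();
        }
]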
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.489900814@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 12 +++++++ arch/x86/kernel/cpu/microcode/core.c | 42 +++++++++++++++++++++--- arch/x86/kernel/cpu/microcode/intel.c | 1 + arch/x86/kernel/cpu/microcode/internal.h | 3 +- arch/x86/kernel/nmi.c | 4 +++ 5 files changed, 57 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 78f1eb2532dc..82924828a94b 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -72,4 +72,16 @@ static inline u32 intel_get_microcode_revision(void) } #endif /* !CONFIG_CPU_SUP_INTEL */ +bool microcode_nmi_handler(void); + +#ifdef CONFIG_MICROCODE_LATE_LOADING +DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); +static __always_inline bool microcode_nmi_handler_enabled(void) +{ + return static_branch_unlikely(&microcode_nmi_handler_enable); +} +#else +static __always_inline bool microcode_nmi_handler_enabled(void) { return false; } +#endif + #endif /* _ASM_X86_MICROCODE_H */ diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index f717b2440186..0437cb115952 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include +#include #include #include #include @@ -280,8 +282,10 @@ struct microcode_ctrl { enum sibling_ctrl ctrl; enum ucode_state result; unsigned int ctrl_cpu; + bool nmi_enabled; }; +DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); static atomic_t late_cpus_in; @@ -297,7 +301,8 @@ static bool wait_for_cpus(atomic_t *cnt) udelay(1); - if (!(timeout % USEC_PER_MSEC)) + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) touch_nmi_watchdog(); } /* Prevent the late comers from making progress and let them time out */ @@ -313,7 +318,8 @@ static bool wait_for_ctrl(void) if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) return true; udelay(1); - if (!(timeout % 1000)) + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % 1000)) touch_nmi_watchdog(); } return false; @@ -389,7 +395,7 @@ static void load_primary(unsigned int cpu) } } -static int load_cpus_stopped(void *unused) +static bool microcode_update_handler(void) { unsigned int cpu = smp_processor_id(); @@ -398,7 +404,29 @@ static int load_cpus_stopped(void *unused) else load_secondary(cpu); - /* No point to wait here. The CPUs will all wait in stop_machine().
*/ + touch_nmi_watchdog(); + return true; +} + +bool microcode_nmi_handler(void) +{ + if (!this_cpu_read(ucode_ctrl.nmi_enabled)) + return false; + + this_cpu_write(ucode_ctrl.nmi_enabled, false); + return microcode_update_handler(); +} + +static int load_cpus_stopped(void *unused) +{ + if (microcode_ops->use_nmi) { + /* Enable the NMI handler and raise NMI */ + this_cpu_write(ucode_ctrl.nmi_enabled, true); + apic->send_IPI(smp_processor_id(), NMI_VECTOR); + } else { + /* Just invoke the handler directly */ + microcode_update_handler(); + } return 0; } @@ -419,8 +447,14 @@ static int load_late_stop_cpus(void) */ store_cpu_caps(&prev_info); + if (microcode_ops->use_nmi) + static_branch_enable_cpuslocked(&microcode_nmi_handler_enable); + stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask); + if (microcode_ops->use_nmi) + static_branch_disable_cpuslocked(&microcode_nmi_handler_enable); + /* Analyze the results */ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { switch (per_cpu(ucode_ctrl.result, cpu)) { diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index e5c5ddfd6831..905ed3b557fb 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -611,6 +611,7 @@ static struct microcode_ops microcode_intel_ops = { .collect_cpu_info = collect_cpu_info, .apply_microcode = apply_microcode_late, .finalize_late_load = finalize_late_load, + .use_nmi = IS_ENABLED(CONFIG_X86_64), }; static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 2db13aeb707b..a1fdfb6bd015 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -31,7 +31,8 @@ struct microcode_ops { enum ucode_state (*apply_microcode)(int cpu); int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); void (*finalize_late_load)(int result); - unsigned int nmi_safe : 1; + unsigned int nmi_safe : 1, + use_nmi : 1; }; extern struct ucode_cpu_info ucode_cpu_info[]; diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 87aee638e1a5..cdca650af532 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs) instrumentation_begin(); + if (microcode_nmi_handler_enabled() && microcode_nmi_handler()) + goto out; + handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { -- Gitee From 83ddc72b7aa305867b0058a8e24fd82ae3b025a1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:06 +0200 Subject: [PATCH 094/953] x86/microcode: Protect against instrumentation ANBZ: #8003 commit 1582c0f4a21303792f523fe2839dd8433ee630c0 upstream. The wait for control loop in which the siblings are waiting for the microcode update on the primary thread must be protected against instrumentation as instrumentation can end up in #INT3, #DB or #PF, which then returns with IRET. That IRET reenables NMI which is the opposite of what the NMI rendezvous is trying to achieve. Intel-SIG: commit 1582c0f4a213 x86/microcode: Protect against instrumentation. Microcode restructuring backport.
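The recurring pattern in the hunks below is a noinstr function that opens an explicit instrumentation window only around the few calls which may legitimately be instrumented. A simplified sketch (the real loop also replaces udelay() with a cpu_relax() spin calibrated via loops_per_usec):

static noinstr bool wait_for_ctrl(void)
{
	unsigned int timeout;

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		/* raw_ accessors keep the wait loop free of instrumentation */
		if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
			return true;
		cpu_relax();
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
			instrumentation_begin();
			touch_nmi_watchdog();
			instrumentation_end();
		}
	}
	return false;
}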
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.545969323@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 111 ++++++++++++++++++++------- 1 file changed, 83 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 0437cb115952..48f8c3c29f1f 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -287,54 +287,65 @@ struct microcode_ctrl { DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); +static unsigned int loops_per_usec; static atomic_t late_cpus_in; -static bool wait_for_cpus(atomic_t *cnt) +static noinstr bool wait_for_cpus(atomic_t *cnt) { - unsigned int timeout; + unsigned int timeout, loops; - WARN_ON_ONCE(atomic_dec_return(cnt) < 0); + WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0); for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { - if (!atomic_read(cnt)) + if (!raw_atomic_read(cnt)) return true; - udelay(1); + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); /* If invoked directly, tickle the NMI watchdog */ - if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); touch_nmi_watchdog(); + instrumentation_end(); + } } /* Prevent the late comers from making progress and let them time out */ - atomic_inc(cnt); + raw_atomic_inc(cnt); return false; } -static bool wait_for_ctrl(void) +static noinstr bool wait_for_ctrl(void) { - unsigned int timeout; + unsigned int timeout, loops; for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { - if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) + if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) return true; - udelay(1); + + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); + /* If invoked directly, tickle the NMI watchdog */ - if (!microcode_ops->use_nmi && !(timeout % 1000)) + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); touch_nmi_watchdog(); + instrumentation_end(); + } } return false; } -static void load_secondary(unsigned int cpu) +/* + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. + */ +static noinstr bool load_secondary_wait(unsigned int ctrl_cpu) { - unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu); - enum ucode_state ret; - /* Initial rendezvous to ensure that all CPUs have arrived */ if (!wait_for_cpus(&late_cpus_in)) { - pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); - this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); - return; + raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return false; } /* @@ -344,9 +355,33 @@ static void load_secondary(unsigned int cpu) * scheduler, watchdogs etc. There is no way to safely evacuate the * machine. */ - if (!wait_for_ctrl()) - panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + if (wait_for_ctrl()) + return true; + + instrumentation_begin(); + panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + instrumentation_end(); +} +/* + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. 
+ */ +static noinstr void load_secondary(unsigned int cpu) +{ + unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu); + enum ucode_state ret; + + if (!load_secondary_wait(ctrl_cpu)) { + instrumentation_begin(); + pr_err_once("load: %d CPUs timed out\n", + atomic_read(&late_cpus_in) - 1); + instrumentation_end(); + return; + } + + /* Primary thread completed. Allow to invoke instrumentable code */ + instrumentation_begin(); /* * If the primary succeeded then invoke the apply() callback, * otherwise copy the state from the primary thread. @@ -358,6 +393,7 @@ static void load_secondary(unsigned int cpu) this_cpu_write(ucode_ctrl.result, ret); this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); + instrumentation_end(); } static void load_primary(unsigned int cpu) @@ -395,25 +431,43 @@ static void load_primary(unsigned int cpu) } } -static bool microcode_update_handler(void) +static noinstr bool microcode_update_handler(void) { - unsigned int cpu = smp_processor_id(); + unsigned int cpu = raw_smp_processor_id(); - if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) + if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) { + instrumentation_begin(); load_primary(cpu); - else + instrumentation_end(); + } else { load_secondary(cpu); + } + instrumentation_begin(); touch_nmi_watchdog(); + instrumentation_end(); + return true; } -bool microcode_nmi_handler(void) +/* + * Protection against instrumentation is required for CPUs which are not + * safe against an NMI which is delivered to the secondary SMT sibling + * while the primary thread updates the microcode. Instrumentation can end + * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI + * which is the opposite of what the NMI rendezvous is trying to achieve. + * + * The primary thread is safe versus instrumentation as the actual + * microcode update handles this correctly. It's only the sibling code + * path which must be NMI safe until the primary thread completed the + * update. + */ +bool noinstr microcode_nmi_handler(void) { - if (!this_cpu_read(ucode_ctrl.nmi_enabled)) + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) return false; - this_cpu_write(ucode_ctrl.nmi_enabled, false); + raw_cpu_write(ucode_ctrl.nmi_enabled, false); return microcode_update_handler(); } @@ -440,6 +494,7 @@ static int load_late_stop_cpus(void) pr_err("You should switch to early loading, if possible.\n"); atomic_set(&late_cpus_in, num_online_cpus()); + loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000); /* * Take a snapshot before the microcode update in order to compare and -- Gitee From df71e78094aa7e1fe0ab87f08d3d659a8447a901 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:07 +0200 Subject: [PATCH 095/953] x86/apic: Provide apic_force_nmi_on_cpu() ANBZ: #8003 commit 9cab5fb776d4367e26950cf759211e948335288e upstream. When SMT siblings are soft-offlined and parked in one of the play_dead() variants they still react on NMI, which is problematic on affected Intel CPUs. The default play_dead() variant uses MWAIT on modern CPUs, which is not guaranteed to be safe when the microcode is updated concurrently. Right now late loading is prevented when not all SMT siblings are online, but as they still react on NMI, it is possible to bring them out of their park position into a trivial rendezvous handler. Provide a function which allows to do that. It does sanity checks whether the target is in the cpus_booted_once_mask and whether the APIC driver supports it.
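A later patch in this series ("x86/microcode: Handle "offline" CPUs correctly") uses the new helper roughly as follows (simplified sketch of its kick_offline_cpus() loop):

	for_each_cpu(cpu, &cpu_offline_mask) {
		/* Open the per CPU NMI gate, then wake the parked sibling */
		per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
		apic_send_nmi_to_offline_cpu(cpu);
	}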
Mark X2APIC and XAPIC as capable, but exclude 32bit and the UV and NUMACHIP variants as that needs feedback from the relevant experts. Intel-SIG: commit 9cab5fb776d4 x86/apic: Provide apic_force_nmi_on_cpu(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.603100036@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/apic.h | 5 ++++- arch/x86/kernel/apic/apic_flat_64.c | 2 ++ arch/x86/kernel/apic/ipi.c | 8 ++++++++ arch/x86/kernel/apic/x2apic_cluster.c | 1 + arch/x86/kernel/apic/x2apic_phys.c | 1 + 5 files changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5af4ec1a0f71..17f2f28a495e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -276,7 +276,8 @@ struct apic { u32 disable_esr : 1, dest_mode_logical : 1, - x2apic_set_max_apicid : 1; + x2apic_set_max_apicid : 1, + nmi_to_offline_cpu : 1; u32 (*calc_dest_apicid)(unsigned int cpu); @@ -542,6 +543,8 @@ extern bool default_check_apicid_used(physid_mask_t *map, int apicid); extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap); extern int default_cpu_present_to_apicid(int mps_cpu); +void apic_send_nmi_to_offline_cpu(unsigned int cpu); + #else /* CONFIG_X86_LOCAL_APIC */ static inline unsigned int read_apic_id(void) { return 0; } diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 032a84e2c3cc..cd16228611ce 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -103,6 +103,7 @@ static struct apic apic_flat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, @@ -175,6 +176,7 @@ static struct apic apic_physflat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index a44ba7209ef3..edad86f32e38 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -97,6 +97,14 @@ void native_send_call_func_ipi(const struct cpumask *mask) __apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR); } +void apic_send_nmi_to_offline_cpu(unsigned int cpu) +{ + if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu)) + return; + if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask))) + return; + apic->send_IPI(cpu, NMI_VECTOR); +} #endif /* CONFIG_SMP */ static inline int __prepare_ICR2(unsigned int mask) diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index affbff65e497..a8306089c91b 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -251,6 +251,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, diff --git a/arch/x86/kernel/apic/x2apic_phys.c 
b/arch/x86/kernel/apic/x2apic_phys.c index 788cdb4ee394..c8ac1b12b8ac 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -166,6 +166,7 @@ static struct apic apic_x2apic_phys __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, -- Gitee From c03318a71d619a3825aef27e8f30608104a6a7ed Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:08 +0200 Subject: [PATCH 096/953] x86/microcode: Handle "offline" CPUs correctly ANBZ: #8003 commit 8f849ff63bcbc77670da03cb8f2b78b06257f455 upstream. Offline CPUs need to be parked in a safe loop when a microcode update is in progress on the primary CPU. Currently, offline CPUs are parked in mwait_play_dead(), and for Intel CPUs, it's not a safe instruction, because the MWAIT instruction can be patched by the new microcode update, which can cause instability. - Add a new microcode state 'UCODE_OFFLINE' to report status on a per-CPU basis. - Force NMI on the offline CPUs. Wake up offline CPUs while the update is in progress and then return them to mwait_play_dead() after the microcode update is complete. Intel-SIG: commit 8f849ff63bcb x86/microcode: Handle "offline" CPUs correctly. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.660850472@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 1 + arch/x86/kernel/cpu/microcode/core.c | 112 ++++++++++++++++++++++- arch/x86/kernel/cpu/microcode/internal.h | 1 + arch/x86/kernel/nmi.c | 5 +- 4 files changed, 113 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 82924828a94b..0ee6ed0ff2bf 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -73,6 +73,7 @@ static inline u32 intel_get_microcode_revision(void) #endif /* !CONFIG_CPU_SUP_INTEL */ bool microcode_nmi_handler(void); +void microcode_offline_nmi_handler(void); #ifdef CONFIG_MICROCODE_LATE_LOADING DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 48f8c3c29f1f..bd8f7ffab96c 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -287,8 +287,9 @@ struct microcode_ctrl { DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); +static atomic_t late_cpus_in, offline_in_nmi; static unsigned int loops_per_usec; -static atomic_t late_cpus_in; +static cpumask_t cpu_offline_mask; static noinstr bool wait_for_cpus(atomic_t *cnt) { @@ -396,7 +397,7 @@ static noinstr void load_secondary(unsigned int cpu) instrumentation_end(); } -static void load_primary(unsigned int cpu) +static void __load_primary(unsigned int cpu) { struct cpumask *secondaries = topology_sibling_cpumask(cpu); enum sibling_ctrl ctrl; @@ -431,6 +432,67 @@ static void load_primary(unsigned int cpu) } } +static bool kick_offline_cpus(unsigned int nr_offl) +{ + unsigned int cpu, timeout; + + for_each_cpu(cpu, &cpu_offline_mask) { + /* Enable the rendezvous handler and send NMI */ + per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
+ apic_send_nmi_to_offline_cpu(cpu); + } + + /* Wait for them to arrive */ + for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) { + if (atomic_read(&offline_in_nmi) == nr_offl) + return true; + udelay(1); + } + /* Let the others time out */ + return false; +} + +static void release_offline_cpus(void) +{ + unsigned int cpu; + + for_each_cpu(cpu, &cpu_offline_mask) + per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE; +} + +static void load_primary(unsigned int cpu) +{ + unsigned int nr_offl = cpumask_weight(&cpu_offline_mask); + bool proceed = true; + + /* Kick soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + proceed = kick_offline_cpus(nr_offl); + + /* If the soft-offlined CPUs did not respond, abort */ + if (proceed) + __load_primary(cpu); + + /* Unconditionally release soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + release_offline_cpus(); +} + +/* + * Minimal stub rendezvous handler for soft-offlined CPUs which participate + * in the NMI rendezvous to protect against a concurrent NMI on affected + * CPUs. + */ +void noinstr microcode_offline_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return; + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE); + raw_atomic_inc(&offline_in_nmi); + wait_for_ctrl(); +} + static noinstr bool microcode_update_handler(void) { unsigned int cpu = raw_smp_processor_id(); @@ -487,6 +549,7 @@ static int load_cpus_stopped(void *unused) static int load_late_stop_cpus(void) { unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; + unsigned int nr_offl, offline = 0; int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; @@ -494,6 +557,7 @@ static int load_late_stop_cpus(void) pr_err("You should switch to early loading, if possible.\n"); atomic_set(&late_cpus_in, num_online_cpus()); + atomic_set(&offline_in_nmi, 0); loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000); /* @@ -516,6 +580,7 @@ static int load_late_stop_cpus(void) case UCODE_UPDATED: updated++; break; case UCODE_TIMEOUT: timedout++; break; case UCODE_OK: siblings++; break; + case UCODE_OFFLINE: offline++; break; default: failed++; break; } } @@ -527,6 +592,13 @@ static int load_late_stop_cpus(void) /* Nothing changed. */ if (!failed && !timedout) return 0; + + nr_offl = cpumask_weight(&cpu_offline_mask); + if (offline < nr_offl) { + pr_warn("%u offline siblings did not respond.\n", + nr_offl - atomic_read(&offline_in_nmi)); + return -EIO; + } pr_err("update failed: %u CPUs failed %u CPUs timed out\n", failed, timedout); return -EIO; @@ -560,19 +632,49 @@ static int load_late_stop_cpus(void) * modern CPUs uses MWAIT, which is also not guaranteed to be safe * against a microcode update which affects MWAIT. * - * 2) Initialize the per CPU control structure + * As soft-offlined CPUs still react on NMIs, the SMT sibling + * restriction can be lifted when the vendor driver signals to use NMI + * for rendezvous and the APIC provides a mechanism to send an NMI to a + * soft-offlined CPU. The soft-offlined CPUs are then able to + * participate in the rendezvous in a trivial stub handler. + * + * 2) Initialize the per CPU control structure and create a cpumask + * which contains "offline"; secondary threads, so they can be handled + * correctly by a control CPU. 
*/ static bool setup_cpus(void) { struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, }; + bool allow_smt_offline; unsigned int cpu; + allow_smt_offline = microcode_ops->nmi_safe || + (microcode_ops->use_nmi && apic->nmi_to_offline_cpu); + + cpumask_clear(&cpu_offline_mask); + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + /* + * Offline CPUs sit in one of the play_dead() functions + * with interrupts disabled, but they still react on NMIs + * and execute arbitrary code. Also MWAIT being updated + * while the offline CPU sits there is not necessarily safe + * on all CPU variants. + * + * Mark them in the offline_cpus mask which will be handled + * by CPU0 later in the update process. + * + * Ensure that the primary thread is online so that it is + * guaranteed that all cores are updated. + */ if (!cpu_online(cpu)) { - if (topology_is_primary_thread(cpu) || !microcode_ops->nmi_safe) { - pr_err("CPU %u not online\n", cpu); + if (topology_is_primary_thread(cpu) || !allow_smt_offline) { + pr_err("CPU %u not online, loading aborted\n", cpu); return false; } + cpumask_set_cpu(cpu, &cpu_offline_mask); + per_cpu(ucode_ctrl, cpu) = ctrl; + continue; } /* diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index a1fdfb6bd015..aaebbe7ef126 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -17,6 +17,7 @@ enum ucode_state { UCODE_NFOUND, UCODE_ERROR, UCODE_TIMEOUT, + UCODE_OFFLINE, }; struct microcode_ops { diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index cdca650af532..6da2cfa23c29 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -502,8 +502,11 @@ DEFINE_IDTENTRY_RAW(exc_nmi) if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) raw_atomic_long_inc(&nsp->idt_calls); - if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) { + if (microcode_nmi_handler_enabled()) + microcode_offline_nmi_handler(); return; + } if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); -- Gitee From b5cfd3822108344044509de3d28ccb56a4bbef13 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:27:11 +0800 Subject: [PATCH 097/953] x86/microcode: Prepare for minimal revision check ANBZ: #8003 commit 9407bda845dd19756e276d4f3abc15a20777ba45 upstream. Applying microcode late can be fatal for the running kernel when the update changes functionality which is in use already in a non-compatible way, e.g. by removing a CPUID bit. There is no way for admins who do not have access to the vendor's deep technical support to decide whether late loading of such a microcode is safe or not. Intel has added a new field to the microcode header which tells the minimal microcode revision which is required to be active in the CPU in order to be safe. Provide infrastructure for handling this in the core code and a command line switch which allows enforcing it. If the update is considered safe, the kernel is not tainted and the annoying warning message is not emitted. If it's enforced and the currently loaded microcode revision is not safe for late loading, then the load is aborted. Intel-SIG: commit 9407bda845dd x86/microcode: Prepare for minimal revision check. Microcode restructuring backport.
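The resulting late-load entry point boils down to the following dispatch (a sketch of the load_late_locked() hunk below; the remaining error handling is elided):

	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
	case UCODE_NEW:		/* no usable minrev information: load, but taint */
		return load_late_stop_cpus(false);
	case UCODE_NEW_SAFE:	/* minrev check satisfied: load without tainting */
		return load_late_stop_cpus(true);
	case UCODE_NFOUND:	/* also returned by the parsers when force_minrev rejects a blob */
		return -ENOENT;
	/* ... */
	}

Since force_minrev is a writable module_param of the built-in microcode loader, enforcement can also be toggled at runtime, typically via /sys/module/microcode/parameters/force_minrev.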
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211724.079611170@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- .../admin-guide/kernel-parameters.txt | 5 ++++ arch/x86/Kconfig | 23 ++++++++++++++++++- arch/x86/kernel/cpu/microcode/amd.c | 3 +++ arch/x86/kernel/cpu/microcode/core.c | 19 +++++++++++---- arch/x86/kernel/cpu/microcode/intel.c | 3 +++ arch/x86/kernel/cpu/microcode/internal.h | 2 ++ 6 files changed, 49 insertions(+), 6 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 7a36124dde5e..dfce4604cc9d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3293,6 +3293,11 @@ mga= [HW,DRM] + microcode.force_minrev= [X86] + Format: + Enable or disable the microcode minimal revision + enforcement for the runtime microcode loader. + min_addr=nn[KMG] [KNL,BOOT,IA-64] All physical memory below this physical address is ignored. diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1fbb7ae81da3..4c75a7968948 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1326,7 +1326,28 @@ config MICROCODE_LATE_LOADING is a tricky business and should be avoided if possible. Just the sequence of synchronizing all cores and SMT threads is one fragile dance which does not guarantee that cores might not softlock after the loading. Therefore, - use this at your own risk. Late loading taints the kernel too. + use this at your own risk. Late loading taints the kernel unless the + microcode header indicates that it is safe for late loading via the + minimal revision check. This minimal revision check can be enforced on + the kernel command line with "microcode.force_minrev=Y". + +config MICROCODE_LATE_FORCE_MINREV + bool "Enforce late microcode loading minimal revision check" + default n + depends on MICROCODE_LATE_LOADING + help + To prevent users from loading microcode late that modifies already + in-use features, newer microcode patches have a minimum revision field + in the microcode header, which tells the kernel which minimum + revision must be active in the CPU to safely load that new microcode + late into the running system. If disabled, the check will not + be enforced but the kernel will be tainted when the minimal + revision check fails. + + This minimal revision check can also be controlled via the + "microcode.force_minrev" parameter on the kernel command line. + + If unsure, say Y.
config X86_MSR tristate "/dev/cpu/*/msr - Model-specific register support" diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index f0b246eda09c..2ba4f7dd445a 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -892,6 +892,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device) enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; + if (force_minrev) + return UCODE_NFOUND; + if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index bd8f7ffab96c..7196ad323c4b 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -50,6 +50,9 @@ static struct microcode_ops *microcode_ops; #endif bool dis_ucode_ldr = true; +bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); +module_param(force_minrev, bool, S_IRUSR | S_IWUSR); + /* * Synchronization. * @@ -546,15 +549,17 @@ static int load_cpus_stopped(void *unused) return 0; } -static int load_late_stop_cpus(void) +static int load_late_stop_cpus(bool is_safe) { unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; unsigned int nr_offl, offline = 0; int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; - pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); - pr_err("You should switch to early loading, if possible.\n"); + if (!is_safe) { + pr_err("Late microcode loading without minimal revision check.\n"); + pr_err("You should switch to early loading, if possible.\n"); + } atomic_set(&late_cpus_in, num_online_cpus()); atomic_set(&offline_in_nmi, 0); @@ -604,7 +609,9 @@ static int load_late_stop_cpus(void) return -EIO; } - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + if (!is_safe || failed || timedout) + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); if (failed || timedout) { pr_err("load incomplete. 
%u CPUs timed out or failed\n", @@ -694,7 +701,9 @@ static int load_late_locked(void) switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) { case UCODE_NEW: - return load_late_stop_cpus(); + return load_late_stop_cpus(false); + case UCODE_NEW_SAFE: + return load_late_stop_cpus(true); case UCODE_NFOUND: return -ENOENT; default: diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 905ed3b557fb..14aa4c6d4c14 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -480,6 +480,9 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; + if (force_minrev) + return UCODE_NFOUND; + while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; unsigned int mc_size, data_size; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index aaebbe7ef126..980ef806b377 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -13,6 +13,7 @@ struct device; enum ucode_state { UCODE_OK = 0, UCODE_NEW, + UCODE_NEW_SAFE, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR, @@ -94,6 +95,7 @@ static inline unsigned int x86_cpuid_family(void) } extern bool dis_ucode_ldr; +extern bool force_minrev; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); -- Gitee From c15fcf7f92d29767a905c60741db76c4b02ec0df Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Mon, 2 Oct 2023 14:00:11 +0200 Subject: [PATCH 098/953] x86/microcode/intel: Add a minimum required revision for late loading ANBZ: #8003 commit cf5ab01c87030a085e211a0a327535932ec6f719 upstream. In general, users don't have the necessary information to determine whether late loading of a new microcode version is safe and does not modify anything which the currently running kernel uses already, e.g. removal of CPUID bits or behavioural changes of MSRs. To address this issue, Intel has added a "minimum required version" field to a previously reserved field in the microcode header. Microcode updates should only be applied if the current microcode version is equal to or greater than this minimum required version. Thomas made some suggestions on how meta-data in the microcode file could provide Linux with information to decide if the new microcode is a suitable candidate for late loading. But even the "simpler" option requires a lot of metadata and corresponding kernel code to parse it, so the final suggestion was to add the 'minimum required version' field in the header. When microcode changes visible features, microcode will set the minimum required version to its own revision which prevents late loading. Old microcode blobs have the minimum revision field always set to 0, which indicates that there is no information and the kernel considers it unsafe. This is a pure OS software mechanism. The hardware/firmware ignores this header field. For early loading there is no restriction because OS visible features are enumerated after the early load and therefore a change has no effect. The check is always enabled, but by default not enforced. It can be enforced via Kconfig or kernel command line. If enforced, the kernel refuses to late load microcode with a minimum required version field which is zero or when the currently loaded microcode revision is smaller than the minimum required revision.
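For illustration only, the new field can be inspected from userspace with a trivial reader. This sketch is not part of the patch; it assumes the conventional 48-byte Intel microcode header whose tail is shown in the hunk below, with min_req_ver as the eleventh 32-bit word:

#include <stdio.h>
#include <stdint.h>

int main(int argc, char **argv)
{
	/* hdrver, rev, date, sig, cksum, ldrver, pf, datasize,
	 * totalsize, metasize, min_req_ver, reserved */
	uint32_t hdr[12];
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(hdr, sizeof(hdr), 1, f) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* min_req_ver == 0 means the blob carries no safety information */
	printf("rev 0x%x, min_req_ver 0x%x\n", hdr[1], hdr[10]);
	return 0;
}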
If not enforced, the load happens independently of the revision check to stay compatible with the existing behaviour, but it influences the decision whether the kernel is tainted or not. If the check signals that the late load is safe, then the kernel is not tainted. Early loading is not affected by this. [ tglx: Massaged changelog and fixed up the implementation ] Intel-SIG: commit cf5ab01c8703 x86/microcode/intel: Add a minimum required revision for late loading. Microcode restructuring backport. Suggested-by: Thomas Gleixner Signed-off-by: Ashok Raj Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.776467264@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 3 ++- arch/x86/kernel/cpu/microcode/intel.c | 37 ++++++++++++++++++++++++--- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 0ee6ed0ff2bf..695e569159c1 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -38,7 +38,8 @@ struct microcode_header_intel { unsigned int datasize; unsigned int totalsize; unsigned int metasize; - unsigned int reserved[2]; + unsigned int min_req_ver; + unsigned int reserved; }; struct microcode_intel { diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 14aa4c6d4c14..6024feb98d29 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -473,16 +473,40 @@ static enum ucode_state apply_microcode_late(int cpu) return ret; } +static bool ucode_validate_minrev(struct microcode_header_intel *mc_header) +{ + int cur_rev = boot_cpu_data.microcode; + + /* + * When late-loading, ensure the header declares a minimum revision + * required to perform a late-load. The previously reserved field + * is 0 in older microcode blobs. + */ + if (!mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n"); + return false; + } + + /* + * Check whether the current revision is either greater than or equal + * to the minimum revision specified in the header. + */ + if (cur_rev < mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev); + pr_info("Current should be at 0x%x or higher.
Use early loading instead\n", mc_header->min_req_ver); + return false; + } + return true; +} + static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + bool is_safe, new_is_safe = false; int cur_rev = uci->cpu_sig.rev; unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; - if (force_minrev) - return UCODE_NFOUND; - while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; unsigned int mc_size, data_size; @@ -525,9 +549,14 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!intel_find_matching_signature(mc, &uci->cpu_sig)) continue; + is_safe = ucode_validate_minrev(&mc_header); + if (force_minrev && !is_safe) + continue; + kvfree(new_mc); cur_rev = mc_header.rev; new_mc = mc; + new_is_safe = is_safe; mc = NULL; } @@ -539,7 +568,7 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) return UCODE_NFOUND; ucode_patch_late = (struct microcode_intel *)new_mc; - return UCODE_NEW; + return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW; fail: kvfree(mc); -- Gitee From f5e8442f3cc94ea5365a17fd5e569e6b24874242 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Mon, 13 Nov 2023 11:40:26 +0800 Subject: [PATCH 099/953] x86/setup: Make relocated_ramdisk a local variable of relocate_initrd() ANBZ: #8003 commit f7a25cf1d4707da39b80df96a3be8a8abd07c35b upstream. After 0b62f6cb0773 ("x86/microcode/32: Move early loading after paging enable"), the global variable relocated_ramdisk is no longer used anywhere except for the relocate_initrd() function. Make it a local variable of that function. Intel-SIG: commit f7a25cf1d470 Make relocated_ramdisk a local variable of relocate_initrd(). Microcode restructuring backport. 
Signed-off-by: Yuntao Wang Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Baoquan He Link: https://lore.kernel.org/r/20231113034026.130679-1-ytcoode@gmail.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/setup.h | 2 -- arch/x86/kernel/setup.c | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index bf483fcb4e57..5c83729c8e71 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -31,8 +31,6 @@ #include #include -extern u64 relocated_ramdisk; - /* Interrupt control for vSMPowered x86_64 systems */ #ifdef CONFIG_X86_64 void vsmp_init(void); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index e63a8d05ce29..09c3ea904ba5 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -225,8 +225,6 @@ static void __init reserve_brk(void) _brk_start = 0; } -u64 relocated_ramdisk; - #ifdef CONFIG_BLK_DEV_INITRD static u64 __init get_ramdisk_image(void) @@ -260,7 +258,7 @@ static void __init relocate_initrd(void) u64 area_size = PAGE_ALIGN(ramdisk_size); /* We need to move the initrd down into directly mapped mem */ - relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, + u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, PFN_PHYS(max_pfn_mapped)); if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", -- Gitee From 6fa1816f85a3e77a6a2709d2a02faddee40e3ec6 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 24 Dec 2018 16:18:53 +0800 Subject: [PATCH 100/953] anolis: drivers/virtio: add vring_force_dma_api boot param ANBZ: #8326 Prior to the xdragon platform 20181230 release (e.g. the 0930 release), vring_use_dma_api() is required to return 'true' unconditionally. Introduce a new kernel boot parameter called "vring_force_dma_api" to control the behavior: boot the xdragon host with the "vring_force_dma_api" command line option to make ENI hotplug work, while normal ECS hosts keep the original behavior. "virtio_ring.vring_force_dma_api=1/0" is also supported when virtio_ring is built as a module. Signed-off-by: Eryu Guan Signed-off-by: Shannon Zhao Signed-off-by: Joseph Qi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2783 --- .../admin-guide/kernel-parameters.txt | 5 ++++ drivers/virtio/virtio_ring.c | 23 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index dfce4604cc9d..28cda809d805 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -7025,6 +7025,11 @@ vmpoff= [KNL,S390] Perform z/VM CP command after power off. Format: + vring_force_dma_api + Force virtio vring to use dma api. This is only needed + on xdragon platform (prior to 20181230 release, e.g. + 0930 release). + vsyscall= [X86-64] Controls the behavior of vsyscalls (i.e.
calls to fixed addresses of 0xffffffffff600x00 from legacy diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 6f7e5010a673..e0fe768b8443 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #ifdef DEBUG @@ -251,6 +252,21 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, return (vq->indirect && total_sg > 1 && vq->vq.num_free); } +static bool vring_force_dma_api; + +#ifdef MODULE +module_param(vring_force_dma_api, bool, 0640); +#else +static int __init vring_dma_api_setup(char *str) +{ + vring_force_dma_api = true; + printk(KERN_INFO "Force vring dma api enabled\n"); + + return 0; +} +__setup("vring_force_dma_api", vring_dma_api_setup); +#endif + /* * Modern virtio devices have feature bits to specify whether they need a * quirk and bypass the IOMMU. If not there, just use the DMA API. @@ -279,6 +295,13 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, static bool vring_use_dma_api(const struct virtio_device *vdev) { + /* + * Prior to xdragon platform 20181230 release (e.g. 0930 release), we + * need this hack to get ENI hotplug to work. + */ + if (vring_force_dma_api) + return true; + if (!virtio_has_dma_quirk(vdev)) return true; -- Gitee From 6fa1816f85a3e77a6a2709d2a02faddee40e3ec6 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Tue, 27 Feb 2024 14:51:39 +0800 Subject: [PATCH 101/953] anolis: kabi: Introduce kabi macro for anolis cloud-kernel ANBZ: #3879 Add a header file with a generic series of kABI macros. These macros should be used to keep the size of the structs they are applied to unchanged. Also introduce the CONFIG_CK_KABI_RESERVE and CONFIG_CK_KABI_SIZE_ALIGN_CHECKS config options. Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2792 --- arch/arm64/configs/anolis-debug_defconfig | 2 + arch/arm64/configs/anolis_defconfig | 2 + arch/x86/configs/anolis-debug_defconfig | 2 + arch/x86/configs/anolis_defconfig | 2 + include/linux/ck_kabi.h | 532 ++++++++++++++++++++++ init/Kconfig | 18 + 6 files changed, 558 insertions(+) create mode 100644 include/linux/ck_kabi.h diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 247a3d434dab..b3c3c3da6168 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -957,6 +957,8 @@ CONFIG_ARCH_USE_QUEUED_RWLOCKS=y CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y
CONFIG_FREEZER=y # diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 937a54d025e9..be00c2ce6add 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -1011,6 +1011,8 @@ CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y CONFIG_FREEZER=y # diff --git a/include/linux/ck_kabi.h b/include/linux/ck_kabi.h new file mode 100644 index 000000000000..a2ecc950c93a --- /dev/null +++ b/include/linux/ck_kabi.h @@ -0,0 +1,532 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ck_kabi.h - Anolis Cloud-Kernel kABI abstraction header + * + * Copyright (c) 2014 Don Zickus + * Copyright (c) 2015-2018 Jiri Benc + * Copyright (c) 2015 Sabrina Dubroca, Hannes Frederic Sowa + * Copyright (c) 2016-2018 Prarit Bhargava + * Copyright (c) 2017 Paolo Abeni, Larry Woodman + * Copyright (c) 2023 Guixin Liu + * + * This file is released under the GPLv2. + * See the file COPYING for more details. + * + * These kabi macros hide the changes from the kabi checker and from the + * process that computes the exported symbols' checksums. + * They have 2 variants: one (defined under __GENKSYMS__) used when + * generating the checksums, and the other used when building the kernel's + * binaries. + * + * The use of these macros does not guarantee that the usage and modification + * of code is correct. As with all Anolis only changes, an engineer must + * explain why the use of the macro is valid in the patch containing the + * changes. + * + */ + +#ifndef _LINUX_CK_KABI_H +#define _LINUX_CK_KABI_H + +#include +#include +#include + +/* + * NOTE + * Unless indicated otherwise, don't use ';' after these macros as it + * messes up the kABI checker by changing what the resulting token string + * looks like. Instead let the macros add the ';' so it can be properly + * hidden from the kABI checker (mainly for CK_KABI_EXTEND, but applied to + * most macros for uniformity). + * + * + * CK_KABI_CONST + * Adds a new const modifier to a function parameter preserving the old + * checksum. + * + * CK_KABI_ADD_MODIFIER + * Adds a new modifier to a function parameter or a typedef, preserving + * the old checksum. Useful e.g. for adding rcu annotations or changing + * int to unsigned. Beware that this may change the semantics; if you're + * sure this is safe, always explain why binary compatibility with 3rd + * party modules is retained. + * + * CK_KABI_DEPRECATE + * Marks the element as deprecated and make it unusable by modules while + * keeping a hole in its place to preserve binary compatibility. + * + * CK_KABI_DEPRECATE_FN + * Marks the function pointer as deprecated and make it unusable by modules + * while keeping a hole in its place to preserve binary compatibility. + * + * CK_KABI_EXTEND + * Adds a new field to a struct. This must always be added to the end of + * the struct. Before using this macro, make sure this is actually safe + * to do - there is a number of conditions under which it is *not* safe. + * In particular (but not limited to), this macro cannot be used: + * - if the struct in question is embedded in another struct, or + * - if the struct is allocated by drivers either statically or + * dynamically, or + * - if the struct is allocated together with driver data (an example of + * such behavior is struct net_device or struct request). 
+ * + * CK_KABI_EXTEND_WITH_SIZE + * Adds a new element (usually a struct) to a struct and reserves extra + * space for the new element. The provided 'size' is the total space to + * be added in longs (i.e. it's 8 * 'size' bytes), including the size of + * the added element. It is automatically checked that the new element + * does not overflow the reserved space, neither now nor in the future. However, + * no attempt is made to check the content of the added element (struct) + * for kABI conformance - kABI checking inside the added element is + * effectively switched off. + * For any struct being added by CK_KABI_EXTEND_WITH_SIZE, it is + * recommended that its content be documented as not covered by the kABI + * guarantee. + * + * CK_KABI_FILL_HOLE + * Fills a hole in a struct. + * + * Warning: only use if a hole exists for _all_ arches. Use pahole to verify. + * + * CK_KABI_RENAME + * Renames an element without changing its type. This macro can be used in + * bitfields, for example. + * + * NOTE: this macro does not add the final ';' + * + * CK_KABI_REPLACE + * Replaces the _orig field by the _new field. The size of the occupied + * space is preserved; it's fine if the _new field is smaller than the + * _orig field. If a _new field is larger or has a different alignment, + * compilation will abort. + * + * CK_KABI_REPLACE_SPLIT + * Works the same as CK_KABI_REPLACE but replaces a single _orig field by + * multiple new fields. The checks for size and alignment done by + * CK_KABI_REPLACE are still applied. + * + * CK_KABI_HIDE_INCLUDE + * Hides the given include file from kABI checksum computations. This is + * used when a newly added #include makes a previously opaque struct + * visible. + * + * Example usage: + * #include CK_KABI_HIDE_INCLUDE() + * + * CK_KABI_FAKE_INCLUDE + * Pretends inclusion of the given file for kABI checksum computations. + * This is used when upstream removed a particular #include but that made + * some structures opaque that were previously visible and is causing kABI + * checker failures. + * + * Example usage: + * #include CK_KABI_FAKE_INCLUDE() + * + * CK_KABI_RESERVE + * Adds a reserved field to a struct. This is done prior to kABI freeze + * for structs that cannot be expanded later using CK_KABI_EXTEND (for + * example because they are embedded in another struct or because they are + * allocated by drivers or because they use unusual memory layout). The + * size of the reserved field is 'unsigned long' and is assumed to be + * 8 bytes. + * + * The argument is a number unique for the given struct; usually, multiple + * CK_KABI_RESERVE macros are added to a struct with numbers starting from + * one. + * + * Example usage: + * struct foo { + * int a; + * CK_KABI_RESERVE(1) + * CK_KABI_RESERVE(2) + * CK_KABI_RESERVE(3) + * CK_KABI_RESERVE(4) + * }; + * + * CK_KABI_USE + * Uses a previously reserved field or multiple fields. The arguments are + * one or more numbers assigned to CK_KABI_RESERVE, followed by a field to + * be put in their place. The compiler ensures that the new field is not + * larger than the reserved area. + * + * Example usage: + * struct foo { + * int a; + * CK_KABI_USE(1, int b) + * CK_KABI_USE(2, 3, int c[3]) + * CK_KABI_RESERVE(4) + * }; + * + * CK_KABI_USE_SPLIT + * Works the same as CK_KABI_USE but replaces a single reserved field by + * multiple new fields. + * + * CK_KABI_AUX_EMBED + * CK_KABI_AUX_PTR + * Adds an extension of a struct in the form of an "auxiliary structure". + * This is done prior to kABI freeze for structs that cannot be expanded
+ * This is done prior to kABI freeze for structs that cannot be expanded + * later using CK_KABI_EXTEND. See also CK_KABI_RESERVED, these two + * approaches can (and often are) combined. + * + * To use this for 'struct foo' (the "base structure"), define a new + * structure called 'struct foo_ck_reserved'; this new struct is called "auxiliary + * structure". Then add CK_KABI_AUX_EMBED or CK_KABI_AUX_PTR to the end + * of the base structure. The argument is the name of the base structure, + * without the 'struct' keyword. + * + * CK_KABI_AUX_PTR stores a pointer to the aux structure in the base + * struct. The lifecycle of the aux struct needs to be properly taken + * care of. + * + * CK_KABI_AUX_EMBED embeds the aux struct into the base struct. This + * cannot be used when the base struct is itself embedded into another + * struct, allocated in an array, etc. + * + * Both approaches (ptr and embed) work correctly even when the aux struct + * is allocated by modules. To ensure this, the code responsible for + * allocation/assignment of the aux struct has to properly set the size of + * the aux struct; see the CK_KABI_AUX_SET_SIZE and CK_KABI_AUX_INIT_SIZE + * macros. + * + * New fields can be later added to the auxiliary structure, always to its + * end. Note the auxiliary structure cannot be shrunk in size later (i.e., + * fields cannot be removed, only deprecated). Any code accessing fields + * from the aux struct must guard the access using the CK_KABI_AUX macro. + * The access itself is then done via a '_ck_reserved' field in the base struct. + * + * The auxiliary structure is not guaranteed for access by modules unless + * explicitly commented as such in the declaration of the aux struct + * itself or some of its elements. + * + * Example: + * + * struct foo_ck_reserved { + * int newly_added; + * }; + * + * struct foo { + * bool big_hammer; + * CK_KABI_AUX_PTR(foo) + * }; + * + * void use(struct foo *f) + * { + * if (CK_KABI_AUX(f, foo, newly_added)) + * f->_ck_reserved->newly_added = 123; + * else + * // the field 'newly_added' is not present in the passed + * // struct, fall back to old behavior + * f->big_hammer = true; + * } + * + * static struct foo_ck_reserved my_foo_ck_reserved { + * .newly_added = 0; + * } + * + * static struct foo my_foo = { + * .big_hammer = false, + * ._ck_reserved = &my_foo_ck_reserved, + * CK_KABI_AUX_INIT_SIZE(foo) + * }; + * + * CK_KABI_USE_AUX_PTR + * Creates an auxiliary structure post kABI freeze. This works by using + * two reserved fields (thus there has to be two reserved fields still + * available) and converting them to CK_KABI_AUX_PTR. + * + * Example: + * + * struct foo_ck_reserved { + * }; + * + * struct foo { + * int a; + * CK_KABI_RESERVE(1) + * CK_KABI_USE_AUX_PTR(2, 3, foo) + * }; + * + * CK_KABI_AUX_SET_SIZE + * CK_KABI_AUX_INIT_SIZE + * Calculates and stores the size of the auxiliary structure. + * + * CK_KABI_AUX_SET_SIZE is for dynamically allocated base structs, + * CK_KABI_AUX_INIT_SIZE is for statically allocated case structs. + * + * These macros must be called from the allocation (CK_KABI_AUX_SET_SIZE) + * or declaration (CK_KABI_AUX_INIT_SIZE) site, regardless of whether + * that happens in the kernel or in a module. Without calling one of + * these macros, the aux struct will appear to have no fields to the + * kernel. + * + * Note: since CK_KABI_AUX_SET_SIZE is intended to be invoked outside of + * a struct definition, it does not add the semicolon and must be + * terminated by semicolon by the caller. 
+ * + * CK_KABI_AUX + * Verifies that the given field exists in the given auxiliary structure. + * This MUST be called prior to accessing that field; failing to do that + * may lead to invalid memory access. + * + * The first argument is a pointer to the base struct, the second argument + * is the name of the base struct (without the 'struct' keyword), the + * third argument is the field name. + * + * This macro works for structs extended by either of CK_KABI_AUX_EMBED, + * CK_KABI_AUX_PTR and CK_KABI_USE_AUX_PTR. + * + * CK_KABI_FORCE_CHANGE + * Force change of the symbol checksum. The argument of the macro is a + * version for cases we need to do this more than once. + * + * This macro does the opposite: it changes the symbol checksum without + * actually changing anything about the exported symbol. It is useful for + * symbols that are not whitelisted when we're changing them in an + * incompatible way and want to prevent 3rd party modules from silently + * corrupting memory. Instead, by changing the symbol checksum, such modules + * won't be loaded by the kernel. This macro should only be used as a + * last resort when all other KABI workarounds have failed. + * + * CK_KABI_EXCLUDE + * !!! WARNING: DANGEROUS, DO NOT USE unless you are aware of all the !!! + * !!! implications. This should be used ONLY EXCEPTIONALLY and only !!! + * !!! under specific circumstances. Very likely, this macro does not !!! + * !!! do what you expect it to do. Note that any usage of this macro !!! + * !!! MUST be paired with a CK_KABI_FORCE_CHANGE annotation of !!! + * !!! a suitable symbol (or an equivalent safeguard) and the commit !!! + * !!! log MUST explain why the chosen solution is appropriate. !!! + * + * Exclude the element from checksum generation. Any such element is + * considered not to be part of the kABI whitelist and may be changed at + * will. Note however that it's the responsibility of the developer + * changing the element to ensure 3rd party drivers using this element + * won't panic, for example by not allowing them to be loaded. That can + * be achieved by changing another, non-whitelisted symbol they use, + * either by nature of the change or by using CK_KABI_FORCE_CHANGE. + * + * Also note that any change to the element must preserve its size. Change + * of the size is not allowed and would constitute a silent kABI breakage. + * Beware that the CK_KABI_EXCLUDE macro does not do any size checks. + * + * CK_KABI_BROKEN_INSERT + * CK_KABI_BROKEN_REMOVE + * Insert a field to the middle of a struct / delete a field from a struct. + * Note that this breaks kABI! It can be done only when it's certain that + * no 3rd party driver can validly reach into the struct. A typical + * example is a struct that is both (a) referenced only through a long + * chain of pointers from another struct that is part of a whitelisted + * symbol and (b) kernel internal only; it should never have been visible + * to genksyms in the first place. + * + * Another example is structs that are explicitly exempt from the kABI + * guarantee but we did not have enough foresight to use CK_KABI_EXCLUDE. + * In this case, the warning for CK_KABI_EXCLUDE applies. + * + * A detailed explanation of correctness of every CK_KABI_BROKEN_* macro + * use is especially important. + * + * CK_KABI_BROKEN_INSERT_BLOCK + * CK_KABI_BROKEN_REMOVE_BLOCK + * A version of CK_KABI_BROKEN_INSERT / REMOVE that allows multiple fields + * to be inserted or removed together. All fields need to be terminated + * by ';' inside(!) the macro parameter.
The macro itself must not be + * terminated by ';'. + * + * CK_KABI_BROKEN_REPLACE + * Replace a field by a different one without doing any checking. This + * allows replacing a field by another with a different size. Similarly + * to other CK_KABI_BROKEN macros, use of this indicates a kABI breakage. + * + * CK_KABI_BROKEN_INSERT_ENUM + * CK_KABI_BROKEN_REMOVE_ENUM + * Insert a field to the middle of an enumaration type / delete a field from + * an enumaration type. Note that this can break kABI especially if the + * number of enum fields is used in an array within a structure. It can be + * done only when it is certain that no 3rd party driver will use the + * enumeration type or a structure that embeds an array with size determined + * by an enumeration type. + * + * CK_KABI_EXTEND_ENUM + * Adds a new field to an enumeration type. This must always be added to + * the end of the enum. Before using this macro, make sure this is actually + * safe to do. + */ + +#ifdef __GENKSYMS__ + +# define CK_KABI_CONST +# define CK_KABI_ADD_MODIFIER(_new) +# define CK_KABI_EXTEND(_new) +# define CK_KABI_FILL_HOLE(_new) +# define CK_KABI_FORCE_CHANGE(ver) __attribute__((ck_kabi_change ## ver)) +# define CK_KABI_RENAME(_orig, _new) _orig +# define CK_KABI_HIDE_INCLUDE(_file) +# define CK_KABI_FAKE_INCLUDE(_file) _file +# define CK_KABI_BROKEN_INSERT(_new) +# define CK_KABI_BROKEN_REMOVE(_orig) _orig; +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) _orig +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _orig; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) _orig, +# define CK_KABI_EXTEND_ENUM(_new) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type _orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) _type (*_orig)(_args) +# define _CK_KABI_REPLACE(_orig, _new) _orig +# define _CK_KABI_EXCLUDE(_elem) + +#else + +# define CK_KABI_ALIGN_WARNING ". Disable CONFIG_CK_KABI_SIZE_ALIGN_CHECKS if debugging." 
+ +# define CK_KABI_CONST const +# define CK_KABI_ADD_MODIFIER(_new) _new +# define CK_KABI_EXTEND(_new) _new; +# define CK_KABI_FILL_HOLE(_new) _new; +# define CK_KABI_FORCE_CHANGE(ver) +# define CK_KABI_RENAME(_orig, _new) _new +# define CK_KABI_HIDE_INCLUDE(_file) _file +# define CK_KABI_FAKE_INCLUDE(_file) +# define CK_KABI_BROKEN_INSERT(_new) _new; +# define CK_KABI_BROKEN_REMOVE(_orig) +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) _new +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _new; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) _new, +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) +# define CK_KABI_EXTEND_ENUM(_new) _new, + +#if IS_BUILTIN(CONFIG_CK_KABI_SIZE_ALIGN_CHECKS) +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) \ + union { \ + _Static_assert(sizeof(struct{_new;}) <= sizeof(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_new) " is larger than " __stringify(_orig) CK_KABI_ALIGN_WARNING); \ + _Static_assert(__alignof__(struct{_new;}) <= __alignof__(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_orig) " is not aligned the same as " __stringify(_new) CK_KABI_ALIGN_WARNING); \ + } +# define __CK_KABI_CHECK_SIZE(_item, _size) \ + _Static_assert(sizeof(struct{_item;}) <= _size, \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_item) " is larger than the reserved size (" __stringify(_size) " bytes)" CK_KABI_ALIGN_WARNING) +#else +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) +# define __CK_KABI_CHECK_SIZE(_item, _size) +#endif + +#define CK_KABI_UNIQUE_ID __PASTE(ck_kabi_hidden_, __LINE__) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type ck_reserved_##_orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) \ + _type (* ck_reserved_##_orig)(_args) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_REPLACE(_orig, _new) \ + union { \ + _new; \ + struct { \ + _orig; \ + } CK_KABI_UNIQUE_ID; \ + __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new); \ + } +#else +# define _CK_KABI_REPLACE(_orig, _new) CK_KABI_BROKEN_REPLACE(_orig, _new) +#endif + +# define _CK_KABI_EXCLUDE(_elem) _elem + +#endif /* __GENKSYMS__ */ + +# define CK_KABI_DEPRECATE(_type, _orig) _CK_KABI_DEPRECATE(_type, _orig); +# define CK_KABI_DEPRECATE_FN(_type, _orig, _args...) \ + _CK_KABI_DEPRECATE_FN(_type, _orig, _args); +# define CK_KABI_REPLACE(_orig, _new) _CK_KABI_REPLACE(_orig, _new); + +#define _CK_KABI_REPLACE1(_new) _new; +#define _CK_KABI_REPLACE2(_new, ...) _new; _CK_KABI_REPLACE1(__VA_ARGS__) +#define _CK_KABI_REPLACE3(_new, ...) _new; _CK_KABI_REPLACE2(__VA_ARGS__) +#define _CK_KABI_REPLACE4(_new, ...) _new; _CK_KABI_REPLACE3(__VA_ARGS__) +#define _CK_KABI_REPLACE5(_new, ...) _new; _CK_KABI_REPLACE4(__VA_ARGS__) +#define _CK_KABI_REPLACE6(_new, ...) _new; _CK_KABI_REPLACE5(__VA_ARGS__) +#define _CK_KABI_REPLACE7(_new, ...) _new; _CK_KABI_REPLACE6(__VA_ARGS__) +#define _CK_KABI_REPLACE8(_new, ...) _new; _CK_KABI_REPLACE7(__VA_ARGS__) +#define _CK_KABI_REPLACE9(_new, ...) _new; _CK_KABI_REPLACE8(__VA_ARGS__) +#define _CK_KABI_REPLACE10(_new, ...) _new; _CK_KABI_REPLACE9(__VA_ARGS__) +#define _CK_KABI_REPLACE11(_new, ...) _new; _CK_KABI_REPLACE10(__VA_ARGS__) +#define _CK_KABI_REPLACE12(_new, ...) _new; _CK_KABI_REPLACE11(__VA_ARGS__) + +#define CK_KABI_REPLACE_SPLIT(_orig, ...) 
_CK_KABI_REPLACE(_orig, \ + struct { __PASTE(_CK_KABI_REPLACE, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) }); + +# define CK_KABI_RESERVE(n) _CK_KABI_RESERVE(n); + +#define _CK_KABI_USE1(n, _new) _CK_KABI_RESERVE(n), _new +#define _CK_KABI_USE2(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE1(__VA_ARGS__) +#define _CK_KABI_USE3(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE2(__VA_ARGS__) +#define _CK_KABI_USE4(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE3(__VA_ARGS__) +#define _CK_KABI_USE5(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE4(__VA_ARGS__) +#define _CK_KABI_USE6(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE5(__VA_ARGS__) +#define _CK_KABI_USE7(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE6(__VA_ARGS__) +#define _CK_KABI_USE8(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE7(__VA_ARGS__) +#define _CK_KABI_USE9(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE8(__VA_ARGS__) +#define _CK_KABI_USE10(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE9(__VA_ARGS__) +#define _CK_KABI_USE11(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE10(__VA_ARGS__) +#define _CK_KABI_USE12(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE11(__VA_ARGS__) + +#define _CK_KABI_USE(...) _CK_KABI_REPLACE(__VA_ARGS__) +#define CK_KABI_USE(n, ...) _CK_KABI_USE(__PASTE(_CK_KABI_USE, COUNT_ARGS(__VA_ARGS__))(n, __VA_ARGS__)); + +# define CK_KABI_USE_SPLIT(n, ...) CK_KABI_REPLACE_SPLIT(_CK_KABI_RESERVE(n), __VA_ARGS__) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_RESERVE(n) unsigned long ck_reserved##n +#else +# define _CK_KABI_RESERVE(n) +#endif + +#define CK_KABI_EXCLUDE(_elem) _CK_KABI_EXCLUDE(_elem); + +#define CK_KABI_EXTEND_WITH_SIZE(_new, _size) \ + CK_KABI_EXTEND(union { \ + _new; \ + unsigned long CK_KABI_UNIQUE_ID[_size]; \ + __CK_KABI_CHECK_SIZE(_new, 8 * (_size)); \ + }) + +#ifdef CONFIG_CK_KABI_RESERVE +#define _CK_KABI_AUX_PTR(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved *_ck_reserved) +#define CK_KABI_AUX_PTR(_struct) \ + _CK_KABI_AUX_PTR(_struct); + +#define _CK_KABI_AUX_EMBED(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved _ck_reserved) +#define CK_KABI_AUX_EMBED(_struct) \ + _CK_KABI_AUX_EMBED(_struct); + +#define CK_KABI_USE_AUX_PTR(n1, n2, _struct) \ + CK_KABI_USE(n1, n2, \ + struct { CK_KABI_AUX_PTR(_struct) }) + +#define CK_KABI_AUX_SET_SIZE(_name, _struct) ({ \ + (_name)->_struct##_size_ck_reserved = sizeof(struct _struct##_ck_reserved); \ +}) + +#define CK_KABI_AUX_INIT_SIZE(_struct) \ + ._struct##_size_ck_reserved = sizeof(struct _struct##_ck_reserved), + +#define CK_KABI_AUX(_ptr, _struct, _field) ({ \ + size_t __off = offsetof(struct _struct##_ck_reserved, _field); \ + (_ptr)->_struct##_size_ck_reserved > __off ? true : false; \ +}) +#else +#define CK_KABI_AUX_PTR(_struct) +#define CK_KABI_AUX_EMBED(_struct) +#define CK_KABI_USE_AUX_PTR(n1, n2, _struct) +#define CK_KABI_AUX_SET_SIZE(_name, _struct) +#define CK_KABI_AUX_INIT_SIZE(_struct) +#define CK_KABI_AUX(_ptr, _struct, _field) (false) +#endif /* CONFIG_CK_KABI_RESERVE */ + +#endif /* _LINUX_CK_KABI_H */ diff --git a/init/Kconfig b/init/Kconfig index 0f700e8f01bb..d1904381ffc8 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1993,3 +1993,21 @@ config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE # . config ARCH_HAS_SYSCALL_WRAPPER def_bool n + +config CK_KABI_RESERVE + bool "Enables KABI and hotfix RESERVE" + default y + help + This option enables KABI and hotfix reserve. + For Anolis Cloud Kernel, the KABI reserve macros and hotfix reserve + macros are the same. 
+	  For some embedded systems, KABI and hotfix reserve may not be
+	  necessary. Disable it on demand.
+
+config CK_KABI_SIZE_ALIGN_CHECKS
+	bool "Enables more stringent kabi checks in the macros"
+	default y
+	depends on CK_KABI_RESERVE
+	help
+	  This option enables more stringent kabi checks. These checks must
+	  be disabled for debug builds, because debugging options can change
+	  struct sizes.
\ No newline at end of file
-- 
Gitee

From 1caa4b78fc5dd7f00ca969e365410132f9655cdd Mon Sep 17 00:00:00 2001
From: Jingbo Xu
Date: Sun, 26 Mar 2023 11:42:50 +0800
Subject: [PATCH 102/953] anolis: fs: export mount_lock

ANBZ: #8323

Export mount_lock so that [un]lock_mount_hash() could be called from
modules.

Signed-off-by: Jingbo Xu
Acked-by: Joseph Qi
Link: https://gitee.com/anolis/cloud-kernel/pulls/2788
---
 fs/namespace.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/namespace.c b/fs/namespace.c
index e6c61d4997cc..7fb51c1747bd 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -98,6 +98,7 @@ EXPORT_SYMBOL_GPL(fs_kobj);
  * tree or hash is modified or when a vfsmount structure is modified.
  */
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
+EXPORT_SYMBOL_GPL(mount_lock);
 
 static inline void lock_mount_hash(void)
 {
-- 
Gitee

From 451a586def00c0b7e340590d28cce15be2adb38f Mon Sep 17 00:00:00 2001
From: Jingbo Xu
Date: Mon, 27 Mar 2023 11:54:52 +0800
Subject: [PATCH 103/953] anolis: fs: export fc_drop_locked

ANBZ: #8323

Export fc_drop_locked.

Signed-off-by: Jingbo Xu
Acked-by: Joseph Qi
Link: https://gitee.com/anolis/cloud-kernel/pulls/2788
---
 fs/fs_context.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/fs_context.c b/fs/fs_context.c
index 98589aae5208..8cc839a46f45 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -378,6 +378,7 @@ void fc_drop_locked(struct fs_context *fc)
 	fc->root = NULL;
 	deactivate_locked_super(sb);
 }
+EXPORT_SYMBOL_GPL(fc_drop_locked);
 
 static void legacy_fs_context_free(struct fs_context *fc);
-- 
Gitee

From daeab0b2d6cf268ca8d678d9f8c819bd75a64af2 Mon Sep 17 00:00:00 2001
From: Jingbo Xu
Date: Thu, 7 Apr 2022 14:58:02 +0800
Subject: [PATCH 104/953] anolis: fuse: bind sb to init_user_ns for virtfuse

ANBZ: #8323

Bind the superblock to init_user_ns even when it's mounted from a user
namespace other than init_user_ns.

Co-developed-by: Jiang Liu
Signed-off-by: Jiang Liu
[ jingbo: bind vfuse's sb to init_user_ns through fsc->global ]
Signed-off-by: Jingbo Xu
Acked-by: Joseph Qi
Link: https://gitee.com/anolis/cloud-kernel/pulls/2788
---
 fs/fuse/fuse_i.h |  2 ++
 fs/fuse/inode.c  | 15 +++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 3e65cdc94631..bbd58ccf51bb 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -1354,4 +1354,6 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
 void fuse_file_release(struct inode *inode, struct fuse_file *ff,
 		       unsigned int open_flags, fl_owner_t id, bool isdir);
 
+static inline bool is_virtfuse_device(struct file *file) { return false; }
+
 #endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 23ab31b967a1..90ba280ce36e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1730,10 +1730,12 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
 
 	/*
 	 * Require mount to happen from the same user namespace which
-	 * opened /dev/fuse to prevent potential attacks.
+	 * opened /dev/fuse to prevent potential attacks. While for
+	 * virtual fuse, the mount is always bound to init_user_ns.
 	 */
-	if ((ctx->file->f_op != &fuse_dev_operations) ||
-	    (ctx->file->f_cred->user_ns != sb->s_user_ns))
+	if (!is_virtfuse_device(ctx->file) &&
+	    ((ctx->file->f_op != &fuse_dev_operations) ||
+	     (ctx->file->f_cred->user_ns != sb->s_user_ns)))
 		return -EINVAL;
 
 	ctx->fudptr = &ctx->file->private_data;
@@ -1768,6 +1770,7 @@ static int fuse_get_tree(struct fs_context *fsc)
 	struct fuse_conn *fc;
 	struct fuse_mount *fm;
 	struct super_block *sb;
+	bool is_virtfuse;
 	int err;
 
 	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
@@ -1804,14 +1807,18 @@ static int fuse_get_tree(struct fs_context *fsc)
 	 * Allow creating a fuse mount with an already initialized fuse
 	 * connection
 	 */
+	is_virtfuse = is_virtfuse_device(ctx->file);
 	fud = READ_ONCE(ctx->file->private_data);
-	if (ctx->file->f_op == &fuse_dev_operations && fud) {
+	if ((ctx->file->f_op == &fuse_dev_operations || is_virtfuse) && fud) {
 		fsc->sget_key = fud->fc;
 		sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super);
 		err = PTR_ERR_OR_ZERO(sb);
 		if (!IS_ERR(sb))
 			fsc->root = dget(sb->s_root);
 	} else {
+		/* bind sb to init_user_ns for virtfuse */
+		if (is_virtfuse)
+			fsc->global = true;
 		err = get_tree_nodev(fsc, fuse_fill_super);
 	}
 out:
-- 
Gitee

From a528c86dcd24648f2a5d39418747d38852a32f41 Mon Sep 17 00:00:00 2001
From: Jingbo Xu
Date: Sun, 26 Mar 2023 12:02:51 +0800
Subject: [PATCH 105/953] anolis: fuse: add fuse_mount_callback hook

ANBZ: #8323

Add a hook which can be called when mounting fuse.

Signed-off-by: Jingbo Xu
Acked-by: Joseph Qi
Link: https://gitee.com/anolis/cloud-kernel/pulls/2788
---
 fs/fuse/fuse_i.h |  3 +++
 fs/fuse/inode.c  | 12 ++++++++++++
 2 files changed, 15 insertions(+)

diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index bbd58ccf51bb..96c9bfa8c981 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -1354,6 +1354,9 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
 void fuse_file_release(struct inode *inode, struct fuse_file *ff,
 		       unsigned int open_flags, fl_owner_t id, bool isdir);
 
+typedef int (*fuse_mount_cb_t)(struct file *file);
+extern fuse_mount_cb_t fuse_mount_callback;
+
 static inline bool is_virtfuse_device(struct file *file) { return false; }
 
 #endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 90ba280ce36e..8e5fba985097 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -63,6 +63,9 @@ MODULE_PARM_DESC(max_user_congthresh,
 static struct file_system_type fuseblk_fs_type;
 #endif
 
+fuse_mount_cb_t fuse_mount_callback;
+EXPORT_SYMBOL_GPL(fuse_mount_callback);
+
 struct fuse_forget_link *fuse_alloc_forget(void)
 {
 	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
@@ -1821,6 +1824,15 @@ static int fuse_get_tree(struct fs_context *fsc)
 			fsc->global = true;
 		err = get_tree_nodev(fsc, fuse_fill_super);
 	}
+
+	if (is_virtfuse && !err) {
+		if (WARN_ON(!fuse_mount_callback))
+			err = -EINVAL;
+		else
+			err = fuse_mount_callback(ctx->file);
+		if (err)
+			fc_drop_locked(fsc);
+	}
 out:
 	if (fsc->s_fs_info)
 		fuse_mount_destroy(fm);
-- 
Gitee

From 13ea374bfcf49ece3b14fde399ed50afc9d1e9bc Mon Sep 17 00:00:00 2001
From: Jingbo Xu
Date: Fri, 3 Mar 2023 11:59:11 +0800
Subject: [PATCH 106/953] anolis: virtfuse: add a driver to support FUSE
 device virtualization

ANBZ: #8323

For container workloads, there are use cases for storage sidecar
containers to provide FUSE filesystems for other containers. With the
default FUSE driver, we are facing several issues:
1) The protection mechanism in FUSE is designed for generic use cases;
it's too restrictive for container use cases.
2) Multiple FUSE filesystems may be created from the FUSE char device.

So introduce the virtfuse driver, which provides:
1) a mechanism to run a FUSE server inside containers without the
CAP_SYS_ADMIN capability.
2) a working mode of one FUSE filesystem instance per virtfuse device.
3) a mechanism to mount the same FUSE filesystem instance in different
user namespaces.
4) a communication protocol for container orchestrators to cooperate
with FUSE servers running in containers.

The above goals are achieved by relaxing the protection constraints and
by relying on the container orchestrators to manage access permissions.

Co-developed-by: Jiang Liu
Signed-off-by: Jingbo Xu
Acked-by: Joseph Qi
Link: https://gitee.com/anolis/cloud-kernel/pulls/2788
---
 fs/fuse/Kconfig               |  11 ++
 fs/fuse/Makefile              |   1 +
 fs/fuse/fuse_i.h              |   8 +
 fs/fuse/virtfuse.c            | 283 ++++++++++++++++++++++++++++++++++
 include/uapi/linux/virtfuse.h |  20 +++
 5 files changed, 323 insertions(+)
 create mode 100644 fs/fuse/virtfuse.c
 create mode 100644 include/uapi/linux/virtfuse.h

diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 038ed0b9aaa5..ad36e8915364 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -52,3 +52,14 @@ config FUSE_DAX
 
 	  If you want to allow mounting a Virtio Filesystem with the "dax"
 	  option, answer Y.
+
+config VIRT_FUSE
+	tristate "FUSE device virtualization extension"
+	depends on FUSE_FS
+	help
+	  This FUSE extension provides virtualized FUSE devices for container
+	  workloads. Each virtualized FUSE device only supports one instance
+	  of FUSE filesystem with special treatments for user namespace.
+
+	  If you want to support FUSE device virtualization for containers,
+	  answer Y or M.
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 0c48b35c058d..8dfb7f9c1f58 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
 obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
+obj-$(CONFIG_VIRT_FUSE) += virtfuse.o
 
 fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
 fuse-$(CONFIG_FUSE_DAX) += dax.o
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 96c9bfa8c981..a1bdc9662512 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 
 /** Default max number of pages that can be used in a single read request */
 #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32
@@ -1357,6 +1358,13 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
 typedef int (*fuse_mount_cb_t)(struct file *file);
 extern fuse_mount_cb_t fuse_mount_callback;
 
+#if IS_ENABLED(CONFIG_VIRT_FUSE)
+static inline bool is_virtfuse_device(struct file *file)
+{
+	return iminor(file_inode(file)) != FUSE_MINOR;
+}
+#else
 static inline bool is_virtfuse_device(struct file *file) { return false; }
+#endif
 
 #endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c
new file mode 100644
index 000000000000..6764945f705b
--- /dev/null
+++ b/fs/fuse/virtfuse.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2022, Alibaba Cloud
+ *
+ * Virtual FUSE Device
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "fuse_i.h"
+
+static uint virtfuse_dev_count = 64;
+module_param_named(max_devices, virtfuse_dev_count, uint, 0644);
+MODULE_PARM_DESC(max_devices, "Maximum number of devices supported");
+
+struct virtfuse_dev {
+	char name[16];	/* adequate space for "virtfuse%d" */
+	struct miscdevice dev;
+	atomic_t refcount;
+	spinlock_t lock;
+	struct fuse_conn *fc;
+};
+
+static struct virtfuse_dev *virtfuse_devices;
+static struct file_operations virtfuse_fops;
+
+static inline struct virtfuse_dev *virtfuse_dev_get(struct file *file)
+{
+	dev_t devt = file_inode(file)->i_rdev;
+	struct virtfuse_dev *vfud;
+	int i;
+
+	for (i = 0; i < virtfuse_dev_count; i++) {
+		vfud = &virtfuse_devices[i];
+		if (vfud->dev.this_device->devt == devt)
+			return vfud;
+	}
+
+	pr_err("virtfuse: failed to find virtfuse for minor %d\n", MINOR(devt));
+	return NULL;
+}
+
+static int virtfuse_dev_release(struct inode *inode, struct file *file)
+{
+	struct fuse_dev *fud = READ_ONCE(file->private_data);
+	struct virtfuse_dev *vfud;
+
+	if (!fud)
+		return 0;
+
+	vfud = virtfuse_dev_get(file);
+	if (!vfud)
+		return -EUCLEAN;
+
+	/*
+	 * 1. For the initial fuse mount after RESET, the mount may fail
+	 * halfway and thus virtfuse_dev_alloc() is not called yet.
+	 *
+	 * 2. When the old fuse daemon has exited and RESET has not been
+	 * done yet, refcount is zero while vfud->fc is still there. In
+	 * this case, if a new fuse daemon tries to mount, the mount
+	 * will fail and virtfuse_dev_release() will be called then.
+	 */
+	spin_lock(&vfud->lock);
+	if (vfud->fc && vfud->fc == fud->fc)
+		WARN_ON(atomic_dec_if_positive(&vfud->refcount) < 0);
+	spin_unlock(&vfud->lock);
+
+	return fuse_dev_release(inode, file);
+}
+
+static int virtfuse_dev_alloc(struct file *file)
+{
+	struct virtfuse_dev *vfud = virtfuse_dev_get(file);
+	struct fuse_dev *fud = READ_ONCE(file->private_data);
+	int ret = 0;
+
+	if (!vfud)
+		return -EUCLEAN;
+
+	spin_lock(&vfud->lock);
+	if (!vfud->fc) {
+		/* the initial fuse mount after RESET */
+		WARN_ON(atomic_read(&vfud->refcount) != 0);
+		atomic_set(&vfud->refcount, 1);
+		vfud->fc = fuse_conn_get(fud->fc);
+	} else if (atomic_read(&vfud->refcount) == 0) {
+		pr_err_ratelimited("%s: please reset before mount\n", vfud->dev.name);
+		ret = -EBUSY;
+	} else if (fud->fc != vfud->fc) {
+		pr_err_ratelimited("%s: can't be mounted multiple times\n", vfud->dev.name);
+		ret = -EBUSY;
+	}
+	spin_unlock(&vfud->lock);
+	return ret;
+}
+
+static int virtfuse_dev_clone(struct file *file, unsigned long arg)
+{
+	int fd, ret;
+	struct file *old;
+
+	if (get_user(fd, (__u32 __user *)arg))
+		return -EFAULT;
+
+	old = fget(fd);
+	if (!old)
+		return -EINVAL;
+	/*
+	 * Don't clone fuse_conn between a normal fuse device and virtfuse,
+	 * or between different virtfuse devices.
+ */ + if (file_inode(old)->i_rdev != file_inode(file)->i_rdev) { + fput(old); + return -EINVAL; + } + + ret = fuse_dev_operations.unlocked_ioctl(file, FUSE_DEV_IOC_CLONE, arg); + if (!ret) + atomic_inc(&virtfuse_dev_get(file)->refcount); + fput(old); + return ret; +} + +static int virtfuse_clone(struct file *file) +{ + struct virtfuse_dev *vfud; + struct fuse_conn *fc; + struct fuse_dev *fud; + int err; + + if (file->private_data) + return -EEXIST; + + vfud = virtfuse_dev_get(file); + if (!vfud) + return -EUCLEAN; + + spin_lock(&vfud->lock); + if (!vfud->fc) { + spin_unlock(&vfud->lock); + return -ENODATA; + } + + /* acquire temporary refcount */ + fc = fuse_conn_get(vfud->fc); + atomic_inc(&vfud->refcount); + spin_unlock(&vfud->lock); + + /* follow fuse_device_clone() to clone the connection */ + fud = fuse_dev_alloc_install(fc); + if (fud) { + atomic_inc(&vfud->refcount); + file->private_data = fud; + atomic_inc(&fc->dev_count); + err = 0; + } else { + err = -ENOMEM; + } + + /* drop temporary refcount */ + atomic_dec(&vfud->refcount); + fuse_conn_put(fc); + return err; +} + +static int virtfuse_reset(struct file *file) +{ + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + + if (!vfud) + return -EUCLEAN; + + if (atomic_read(&vfud->refcount)) + return -EBUSY; + + spin_lock(&vfud->lock); + if (vfud->fc) { + fc = vfud->fc; + vfud->fc = NULL; + } + spin_unlock(&vfud->lock); + + if (fc) + fuse_conn_put(fc); + return 0; +} + +static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case FUSE_DEV_IOC_CLONE: + return virtfuse_dev_clone(file, arg); + case VIRTFUSE_IOC_CLONE: + return virtfuse_clone(file); + case VIRTFUSE_IOC_RESET: + return virtfuse_reset(file); + default: + return fuse_dev_operations.unlocked_ioctl(file, cmd, arg); + } +} + +static void virtfuse_free_devices(void) +{ + struct virtfuse_dev *vfud; + int i; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + if (vfud->dev.this_device) + misc_deregister(&vfud->dev); + WARN_ON(atomic_read(&vfud->refcount) != 0); + } + kfree(virtfuse_devices); + virtfuse_devices = NULL; +} + +static int __init virtfuse_init(void) +{ + struct virtfuse_dev *vfud; + int i, ret; + + if (virtfuse_dev_count == 0) { + pr_err("virtfuse: max_devices is zero\n"); + return -EINVAL; + } else if (virtfuse_dev_count > VIRT_FUSE_MAX_DEVICES) { + pr_err("virtfuse: max_devices is too big, max %d\n", + VIRT_FUSE_MAX_DEVICES); + return -EINVAL; + } + + virtfuse_fops = fuse_dev_operations; + virtfuse_fops.owner = THIS_MODULE; + virtfuse_fops.compat_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.unlocked_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.release = virtfuse_dev_release; + + virtfuse_devices = kcalloc(virtfuse_dev_count, + sizeof(struct virtfuse_dev), GFP_KERNEL); + if (virtfuse_devices == NULL) + return -ENOMEM; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + spin_lock_init(&vfud->lock); + snprintf(vfud->name, sizeof(vfud->name), "virtfuse%d", i); + + vfud->dev.name = vfud->name; + vfud->dev.minor = MISC_DYNAMIC_MINOR; + vfud->dev.fops = &virtfuse_fops; + + ret = misc_register(&vfud->dev); + if (ret) { + pr_err("virtfuse: failed to create virtfuse%d\n", i); + vfud->dev.this_device = NULL; + virtfuse_free_devices(); + return ret; + } + } + + fuse_mount_callback = virtfuse_dev_alloc; + return 0; +} + +static void __exit virtfuse_exit(void) +{ + fuse_mount_callback = NULL; + virtfuse_free_devices(); +} + 
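+/*
+ * Illustrative userspace flow (a sketch for orientation only, not part
+ * of the driver; the device path and the second daemon are hypothetical):
+ *
+ *	int fd = open("/dev/virtfuse0", O_RDWR);
+ *	// mount(2) with the "fd=<fd>" option creates the connection and
+ *	// triggers fuse_mount_callback -> virtfuse_dev_alloc()
+ *	int fd2 = open("/dev/virtfuse0", O_RDWR);
+ *	ioctl(fd2, VIRTFUSE_IOC_CLONE);	// serve the same connection
+ *	...
+ *	ioctl(fd, VIRTFUSE_IOC_RESET);	// only after refcount drops to 0
+ */
+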
+module_init(virtfuse_init); +module_exit(virtfuse_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Virtual FUSE Device"); +MODULE_AUTHOR("Jingbo Xu "); +MODULE_AUTHOR("Jiang Liu "); diff --git a/include/uapi/linux/virtfuse.h b/include/uapi/linux/virtfuse.h new file mode 100644 index 000000000000..00c3c883f2c8 --- /dev/null +++ b/include/uapi/linux/virtfuse.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_VIRTFUSE_H +#define _LINUX_VIRTFUSE_H + +#include +#include + +/* Maximum number of devices supported. */ +#define VIRT_FUSE_MAX_DEVICES 1024 + +/* + * Clone a fuse device sharing the fuse connection bound to the specified + * virtual device. + */ +#define VIRTFUSE_IOC_CLONE _IO(0x99, 1) + +/* Reset the specified virtual device */ +#define VIRTFUSE_IOC_RESET _IO(0x99, 2) + +#endif -- Gitee From 22dbe305d3c2aa6064ada2b2f1010f9690070f81 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Tue, 28 Jun 2022 17:24:36 +0800 Subject: [PATCH 107/953] anolis: virtfuse: add VIRTFUSE_IOC_GET_MOUNTS ioctl ANBZ: #8323 Add VIRTFUSE_IOC_GET_MOUNTS ioctl to print all mountinfo of the specified virtual device. [jingbo's backport notes to ANCK 6.6] Open coded [un]lock_mount_hash() as they are moved into fs/namespace.c rather than fs/mount.h in v6.6. Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/fuse/virtfuse.c | 145 ++++++++++++++++++++++++++++++++++ include/uapi/linux/virtfuse.h | 13 +++ 2 files changed, 158 insertions(+) diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c index 6764945f705b..cc7f25701dd4 100644 --- a/fs/fuse/virtfuse.c +++ b/fs/fuse/virtfuse.c @@ -11,7 +11,9 @@ #include #include #include +#include #include "fuse_i.h" +#include "../mount.h" static uint virtfuse_dev_count = 64; module_param_named(max_devices, virtfuse_dev_count, uint, 0644); @@ -191,6 +193,147 @@ static int virtfuse_reset(struct file *file) return 0; } +static int fillbuf(char *buf, unsigned int len, unsigned int *pcount, + const char *fmt, ...) 
+{ + va_list args; + unsigned int count = *pcount; + int step; + + va_start(args, fmt); + step = vsnprintf(buf + count, len - count, fmt, args); + va_end(args); + if (step >= len - count) + return -EMSGSIZE; + + *pcount += step; + return 0; +} + +static int virtfuse_get_mounts(struct file *file, unsigned long arg) +{ + struct virtfuse_mounts_buf vbuf, __user *u_vbuf; + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + struct fuse_mount *fm; + struct super_block *sb; + struct mount *mnt; + unsigned int count = 0, len; + int order, step, ret = 0; + char *buf, *name, *p; + void __user *u_buf; + + if (!vfud) + return -EUCLEAN; + + u_vbuf = (struct virtfuse_mounts_buf __user *)arg; + u_buf = (void __user *)u_vbuf->buf; + if (copy_from_user(&vbuf, u_vbuf, sizeof(vbuf)) != 0) + return -EFAULT; + + len = vbuf.len; + if (len <= 1) + return -EMSGSIZE; + + /* init the user buffer as an empty string */ + if (clear_user(u_buf, 1) != 0) + return -EFAULT; + + spin_lock(&vfud->lock); + if (vfud->fc) + fc = fuse_conn_get(vfud->fc); + spin_unlock(&vfud->lock); + if (!fc) + return 0; + + down_read(&fc->killsb); + fm = list_first_entry_or_null(&fc->mounts, struct fuse_mount, fc_entry); + if (!fm || !fm->sb) + goto out_up_killsb; + sb = fm->sb; + + name = __getname(); + if (!name) { + ret = -ENOMEM; + goto out_up_killsb; + } + + order = get_order(len); + buf = (void *)__get_free_pages(GFP_KERNEL, order); + if (!buf) { + ret = -ENOMEM; + goto out_putname; + } + + /* connection state */ + ret = fillbuf(buf, len, &count, "%s\n", + fc->connected ? "Connected" : "Aborted"); + if (ret) + goto out_free_pages; + + /* open coded lock_mount_hash() */ + write_seqlock(&mount_lock); + + list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { + /* skip slave mounts */ + if (mnt->mnt_master) + continue; + + /* skip private mounts, e.g. from clone_private_mount() */ + if (!mnt->mnt_ns) + continue; + + /* mountpoint */ + p = dentry_path_raw(mnt->mnt_mountpoint, name, PATH_MAX); + if (IS_ERR(p)) { + ret = PTR_ERR(p); + break; + } + ret = fillbuf(buf, len, &count, "%s %s", + mnt->mnt_devname ? : "none", p); + if (ret) + break; + + /* fstype */ + if (sb->s_subtype && sb->s_subtype[0]) + sprintf(name, "%s.%s", sb->s_type->name, sb->s_subtype); + else + sprintf(name, "%s", sb->s_type->name); + ret = fillbuf(buf, len, &count, " %s", name); + if (ret) + break; + + /* mount options */ + step = sprintf(name, "%s,user_id=%u,group_id=%u", + __mnt_is_readonly(&mnt->mnt) ? 
"ro" : "rw", + from_kuid_munged(fc->user_ns, fc->user_id), + from_kgid_munged(fc->user_ns, fc->group_id)); + if (fc->default_permissions) + step += sprintf(name + step, ",default_permissions"); + if (fc->allow_other) + step += sprintf(name + step, ",allow_other"); + ret = fillbuf(buf, len, &count, " %s\n", name); + if (ret) + break; + } + + /* open coded unlock_mount_hash() */ + write_sequnlock(&mount_lock); + + /* also copy the trailing null (ensured by vsnprintf) */ + if (!ret && (copy_to_user(u_buf, buf, count + 1) != 0)) + ret = -EFAULT; + +out_free_pages: + free_pages((unsigned long)buf, order); +out_putname: + __putname(name); +out_up_killsb: + up_read(&fc->killsb); + fuse_conn_put(fc); + return ret; +} + static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -201,6 +344,8 @@ static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, return virtfuse_clone(file); case VIRTFUSE_IOC_RESET: return virtfuse_reset(file); + case VIRTFUSE_IOC_GET_MOUNTS: + return virtfuse_get_mounts(file, arg); default: return fuse_dev_operations.unlocked_ioctl(file, cmd, arg); } diff --git a/include/uapi/linux/virtfuse.h b/include/uapi/linux/virtfuse.h index 00c3c883f2c8..93b7ab200b32 100644 --- a/include/uapi/linux/virtfuse.h +++ b/include/uapi/linux/virtfuse.h @@ -17,4 +17,17 @@ /* Reset the specified virtual device */ #define VIRTFUSE_IOC_RESET _IO(0x99, 2) +/* Print all mountinfo of the specified virtual device. */ +#define VIRTFUSE_IOC_GET_MOUNTS _IO(0x99, 3) + +/* + * @len indicates the size of the buffer indicated by @buf + * @buf indicates a buffer to contain the output mountinfo of the specified + * virtual device. + */ +struct virtfuse_mounts_buf { + __u32 len; + __u8 buf[]; +}; + #endif -- Gitee From 139a92bfe6b8a920a2325d551682de88d0b42dfa Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Wed, 20 Dec 2023 11:21:25 +0800 Subject: [PATCH 108/953] anolis: virtfuse: improve mntpoint printing ANBZ: #8323 Introduce d_absolute_path_locked() to improve the mntpoint printing: 1. print the full path of the mntpoint 1) mount ext4 on /mnt 2) mount fuse on /mnt/dir through virtfuse 3) prior this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the mntpoint as "/dir", as dentry_path_raw() only prints the relative path of the dentry in which mount it resides in. 4) with this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the absolute path of the mntpoint as "/mnt/dir" 2. print the right path of the mntpoint if it resides in a bind mount 1) bind mount /foo/bar to /mnt 2) mount fuse on /mnt/dir through virtfuse 3) prior this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the mntpoint as "/foo/bar/dir" 4) with this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the right path as "/mnt/dir" [jingbo's backport notes to ANCK 6.6] Replace prepend_path_locked() with d_absolute_path_locked(), as we have __prepend_path() helper now. 
Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/d_path.c | 52 ++++++++++++++++++++++++++++++++++++++++++ fs/fuse/virtfuse.c | 7 +++++- include/linux/dcache.h | 1 + 3 files changed, 59 insertions(+), 1 deletion(-) diff --git a/fs/d_path.c b/fs/d_path.c index 5f4da5c8d5db..df50090b6a0f 100644 --- a/fs/d_path.c +++ b/fs/d_path.c @@ -196,6 +196,58 @@ static int prepend_path(const struct path *path, return error; } +static int prepend_path_locked(const struct path *path, + const struct path *root, + struct prepend_buffer *p) +{ + struct prepend_buffer b; + unsigned seq = 0; + int error; + + rcu_read_lock(); +restart: + b = *p; + read_seqbegin_or_lock(&rename_lock, &seq); + error = __prepend_path(path->dentry, real_mount(path->mnt), root, &b); + if (!(seq & 1)) + rcu_read_unlock(); + if (need_seqretry(&rename_lock, seq)) { + seq = 1; + goto restart; + } + done_seqretry(&rename_lock, seq); + + if (unlikely(error == 3)) + b = *p; + + if (b.len == p->len) + prepend_char(&b, '/'); + + *p = b; + return error; +} + +/* + * d_absolute_path_locked - return the absolute path of a dentry + * + * @path: path to report + * @buf: buffer to return value in + * @buflen: buffer length + * + * Write absolute pathname like d_absolute_path() except with mount_lock held. + */ +char *d_absolute_path_locked(const struct path *path, char *buf, int buflen) +{ + struct path root = {}; + DECLARE_BUFFER(b, buf, buflen); + + prepend_char(&b, 0); + if (unlikely(prepend_path_locked(path, &root, &b) > 1)) + return ERR_PTR(-EINVAL); + return extract_string(&b); +} +EXPORT_SYMBOL(d_absolute_path_locked); + /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c index cc7f25701dd4..b6ef6fbf4490 100644 --- a/fs/fuse/virtfuse.c +++ b/fs/fuse/virtfuse.c @@ -275,6 +275,11 @@ static int virtfuse_get_mounts(struct file *file, unsigned long arg) write_seqlock(&mount_lock); list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { + struct path path = { + .dentry = mnt->mnt.mnt_root, + .mnt = &mnt->mnt + }; + /* skip slave mounts */ if (mnt->mnt_master) continue; @@ -284,7 +289,7 @@ static int virtfuse_get_mounts(struct file *file, unsigned long arg) continue; /* mountpoint */ - p = dentry_path_raw(mnt->mnt_mountpoint, name, PATH_MAX); + p = d_absolute_path_locked(&path, name, PATH_MAX); if (IS_ERR(p)) { ret = PTR_ERR(p); break; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 6b351e009f59..344f41a3e052 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -296,6 +296,7 @@ extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); extern char *dentry_path_raw(const struct dentry *, char *, int); extern char *dentry_path(const struct dentry *, char *, int); +extern char *d_absolute_path_locked(const struct path *, char *, int); /* Allocation counts.. */ -- Gitee From 1b1e068f9e149e1497ff6209c53b376f4d47005d Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Fri, 5 May 2023 13:36:27 +0800 Subject: [PATCH 109/953] anolis: configs: x86_64, arm64: enable virtfuse as module ANBZ: #8323 Enable fuse device virtualization as module in x86_64 and arm64. 
Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + 4 files changed, 4 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index b3c3c3da6168..419072dae8e2 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -5889,6 +5889,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index dadd27949f52..27ba33178302 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -5908,6 +5908,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index b5adf870d839..2931e96d0ba8 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -6484,6 +6484,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index be00c2ce6add..b031bb4585c3 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -6473,6 +6473,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -- Gitee From c316f7e570c8ae9878196022e5f6f3d7ac38fba8 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 14:44:51 +0800 Subject: [PATCH 110/953] anolis: iocost: add legacy interface file ANBZ: #8329 To support cgroup v1. Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2785 --- block/blk-iocost.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 7ee8d85c2c68..7f77928a6feb 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3519,8 +3519,31 @@ static struct cftype ioc_files[] = { {} }; +static struct cftype ioc_legacy_files[] = { + { + .name = "cost.weight", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ioc_weight_show, + .write = ioc_weight_write, + }, + { + .name = "cost.qos", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_qos_show, + .write = ioc_qos_write, + }, + { + .name = "cost.model", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_cost_model_show, + .write = ioc_cost_model_write, + }, + {} +}; + static struct blkcg_policy blkcg_policy_iocost = { .dfl_cftypes = ioc_files, + .legacy_cftypes = ioc_legacy_files, .cpd_alloc_fn = ioc_cpd_alloc, .cpd_free_fn = ioc_cpd_free, .pd_alloc_fn = ioc_pd_alloc, -- Gitee From 8f621d15cb49fc80a519779822475aea5b4d60c8 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 14:45:10 +0800 Subject: [PATCH 111/953] anolis: iocost: add ioc_gq stat ANBZ: #8329 Add a stat file to monitor the ioc_gq stat. 
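
With this applied, reading the stat file (presumably surfaced as
blkio.cost.stat on the cgroup v1 hierarchy, given the legacy cftype
added here) is expected to emit one line per active device. A sketch of
the output, with a made-up device and numbers, following the
ioc_stat_prfill() format below:

  8:16 is_active=1 active=10000 inuse=10000 hweight_active=65536 hweight_inuse=65536 vrate=137438953472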
Signed-off-by: Jiufei Xue
Signed-off-by: Joseph Qi
Reviewed-by: Gao Xiang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2785
---
 block/blk-iocost.c | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 7f77928a6feb..2a6a360a58c2 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -3497,6 +3497,36 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
 	return ret;
 }
 
+static u64 ioc_stat_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
+			   int off)
+{
+	struct blkcg_gq *blkg = pd->blkg;
+	const char *dname = blkg_dev_name(blkg);
+	struct ioc_gq *iocg = blkg_to_iocg(blkg);
+	struct ioc *ioc = iocg->ioc;
+
+	if (!dname)
+		return 0;
+
+	seq_printf(sf, "%s is_active=%d active=%u inuse=%u "
+		   "hweight_active=%u hweight_inuse=%u vrate=%llu\n",
+		   dname, !list_empty(&iocg->active_list),
+		   iocg->active, iocg->inuse,
+		   iocg->hweight_active, iocg->hweight_inuse,
+		   (unsigned long long)atomic64_read(&ioc->vtime_rate));
+
+	return 0;
+}
+
+static int ioc_cost_print_stat(struct seq_file *sf, void *v)
+{
+	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+
+	blkcg_print_blkgs(sf, blkcg, ioc_stat_prfill,
+			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
+	return 0;
+}
+
 static struct cftype ioc_files[] = {
 	{
 		.name = "weight",
@@ -3538,6 +3568,11 @@ static struct cftype ioc_legacy_files[] = {
 		.seq_show = ioc_cost_model_show,
 		.write = ioc_cost_model_write,
 	},
+	{
+		.name = "cost.stat",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = ioc_cost_print_stat,
+	},
 	{}
 };
-- 
Gitee

From 9d779500c7e2f99c9fb03c118ff6a4618fbaf197 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Fri, 23 Feb 2024 14:51:36 +0800
Subject: [PATCH 112/953] anolis: mm: add proc interface to control context
 readahead

ANBZ: #8333

For some workloads whose io activities are mostly random, the context
readahead feature can introduce unnecessary io read operations, which
will impact the app's performance. Context readahead's algorithm is
straightforward and not that smart.

This patch adds "/proc/sys/vm/enable_context_readahead" to control
whether to disable or enable this feature. Currently we enable context
readahead by default; users can echo 0 to
/proc/sys/vm/enable_context_readahead to disable context readahead.

We also tested mongodb's performance in the 'random point select' case:

With context readahead enabled: mongodb eps 12409
With context readahead disabled: mongodb eps 14443

About 16% performance improvement.

Signed-off-by: Xiaoguang Wang
Signed-off-by: Joseph Qi
Acked-by: Jingbo Xu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2786
---
 Documentation/admin-guide/sysctl/vm.rst | 17 +++++++++++++++++
 kernel/sysctl.c                         | 11 +++++++++++
 mm/readahead.c                          |  9 +++++++--
 3 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 45ba1f4dc004..beabacb0fcba 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -75,6 +75,7 @@ Currently, these files are in /proc/sys/vm:
 - watermark_boost_factor
 - watermark_scale_factor
 - zone_reclaim_mode
+- enable_context_readahead
 
 
 admin_reserve_kbytes
@@ -1044,3 +1045,19 @@ of other processes running on other nodes will not be affected.
 
 Allowing regular swap effectively restricts allocations to the local
 node unless explicitly overridden by memory policies or cpuset
 configurations.
+
+
+enable_context_readahead
+========================
+
+For workloads whose io activities are mostly random, the context
+readahead feature may introduce unnecessary io read operations, which
+will impact the app's performance.
+
+It is enabled by default.
+
+To disable context readahead:
+	echo 0 > /proc/sys/vm/enable_context_readahead
+
+To enable context readahead again:
+	echo 1 > /proc/sys/vm/enable_context_readahead
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0c11d319fa01..204528a81b43 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -135,6 +135,8 @@ enum sysctl_writes_mode {
 static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT;
 #endif /* CONFIG_PROC_SYSCTL */
 
+extern int sysctl_enable_context_readahead;
+
 #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
     defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
 int sysctl_legacy_va_layout;
@@ -2272,6 +2274,15 @@ static struct ctl_table vm_table[] = {
 		.extra2 = (void *)&mmap_rnd_compat_bits_max,
 	},
 #endif
+	{
+		.procname = "enable_context_readahead",
+		.data = &sysctl_enable_context_readahead,
+		.maxlen = sizeof(sysctl_enable_context_readahead),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
 	{ }
 };
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 1d1a84deb5bc..ac936f9bdaea 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -131,6 +131,9 @@
 
 #include "internal.h"
 
+/* context readahead is enabled by default */
+int sysctl_enable_context_readahead = 1;
+
 /*
  * Initialise a struct file's readahead state.  Assumes that the caller has
  * memset *ra to zero.
@@ -630,9 +633,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	 * Query the page cache and look for the traces(cached history pages)
 	 * that a sequential stream would leave behind.
	 */
-	if (try_context_readahead(ractl->mapping, ra, index, req_size,
-				  max_pages))
+	if (sysctl_enable_context_readahead &&
+	    try_context_readahead(ractl->mapping, ra, index, req_size,
+				  max_pages)) {
 		goto readit;
+	}
 
 	/*
 	 * standalone, small random read
-- 
Gitee

From c92126a089cf75721dbaa3bed1711a6b80ed3f8a Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Wed, 6 Dec 2017 17:40:35 +0800
Subject: [PATCH 113/953] anolis: writeback: add memcg_blkcg_link tree

ANBZ: #8327

Add a global radix tree linking the memcg and blkcg that the user
attaches tasks to when using cgroup v1; the link is used for cgroup
writeback.
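
Conceptually, the tree maps a memcg css id to the blkcg css the same
task set was attached to, so writeback can later recover the blkcg from
an inode's memcg. A rough sketch of the lookup, mirroring
find_blkcg_css() in the diff below:

	link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id);
	blkcg_css = link ? link->blkcg_css : blkcg_root_css;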
Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- include/linux/backing-dev.h | 31 +++++- kernel/cgroup/cgroup-internal.h | 2 + kernel/cgroup/cgroup.c | 16 +++- mm/backing-dev.c | 162 +++++++++++++++++++++++++++++++- mm/memcontrol.c | 2 +- 5 files changed, 206 insertions(+), 7 deletions(-) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 1a97277f99b1..a28d2248ed82 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -174,9 +174,7 @@ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return cgroup_subsys_on_dfl(memory_cgrp_subsys) && - cgroup_subsys_on_dfl(io_cgrp_subsys) && - (bdi->capabilities & BDI_CAP_WRITEBACK) && + return (bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } @@ -318,6 +316,13 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, rcu_read_unlock(); } +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset); +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links); +void free_memcg_blkcg_links(struct list_head *links_to_free); +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css); #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -368,6 +373,26 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } +static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ +} + +static inline int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + return 0; +} + +static inline void free_memcg_blkcg_links(struct list_head *links_to_free) +{ +} + +static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ +} + #endif /* CONFIG_CGROUP_WRITEBACK */ const char *bdi_dev_name(struct backing_dev_info *bdi); diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index c56071f150f2..82159299c35b 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -108,6 +108,7 @@ struct cgroup_taskset { /* the src and dst cset list running through cset->mg_node */ struct list_head src_csets; struct list_head dst_csets; + int dst_count; /* the number of tasks in the set */ int nr_tasks; @@ -152,6 +153,7 @@ struct cgroup_mgctx { .src_csets = LIST_HEAD_INIT(tset.src_csets), \ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ .csets = &tset.src_csets, \ + .dst_count = 0, \ } #define CGROUP_MGCTX_INIT(name) \ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 518725b57200..e4cfe8f49fe5 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -2429,9 +2430,11 @@ static void cgroup_migrate_add_task(struct task_struct *task, if (list_empty(&cset->mg_node)) list_add_tail(&cset->mg_node, &mgctx->tset.src_csets); - if (list_empty(&cset->mg_dst_cset->mg_node)) + if (list_empty(&cset->mg_dst_cset->mg_node)) { list_add_tail(&cset->mg_dst_cset->mg_node, &mgctx->tset.dst_csets); + mgctx->tset.dst_count++; + } } /** @@ -2512,9 +2515,14 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) struct task_struct *task, *tmp_task; struct css_set *cset, *tmp_cset; int ssid, failed_ssid, ret; + 
LIST_HEAD(tmp_links); /* check that we can legitimately attach to the cgroup */ if (tset->nr_tasks) { + ret = allocate_memcg_blkcg_links(tset->dst_count*2, &tmp_links); + if (ret) + goto out_release_tset; + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { if (ss->can_attach) { tset->ssid = ssid; @@ -2567,6 +2575,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) tset->ssid = ssid; ss->attach(tset); } + list_for_each_entry(cset, &tset->dst_csets, mg_node) + insert_memcg_blkcg_link(ss, &tmp_links, cset); } while_each_subsys_mask(); } @@ -2593,6 +2603,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) } spin_unlock_irq(&css_set_lock); + free_memcg_blkcg_links(&tmp_links); + /* * Re-initialize the cgroup_taskset structure in case it is reused * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() @@ -5343,6 +5355,8 @@ static void css_free_rwork_fn(struct work_struct *work) struct cgroup_subsys_state *parent = css->parent; int id = css->id; + delete_memcg_blkcg_link(ss, css); + ss->css_free(css); cgroup_idr_remove(&ss->css_idr, id); cgroup_put(cgrp); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index e039d05304dd..2fed2a533ed7 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -499,6 +499,158 @@ static void wb_exit(struct bdi_writeback *wb) #include +struct memcg_blkcg_link { + struct list_head list; + struct rcu_head rcu; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; +}; + +static RADIX_TREE(memcg_blkcg_tree, GFP_ATOMIC); +static DEFINE_SPINLOCK(memcg_blkcg_tree_lock); + +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + struct memcg_blkcg_link *link; + int i; + + for (i = 0; i < count; i++) { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + free_memcg_blkcg_links(tmp_links); + return -ENOMEM; + } + list_add(&link->list, tmp_links); + } + return 0; +} + +static void link_free(struct rcu_head *head) +{ + struct memcg_blkcg_link *link = container_of(head, + struct memcg_blkcg_link, rcu); + kfree(link); +} + +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; + int err; + + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + WARN_ON(list_empty(tmp_links)); + + memcg_css = cset->subsys[memory_cgrp_id]; + blkcg_css = cset->subsys[io_cgrp_id]; + + if ((memcg_css == &root_mem_cgroup->css) || + (blkcg_css == blkcg_root_css)) + return; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link && ((link->blkcg_css == blkcg_css) || + (link->blkcg_css == blkcg_root_css))) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + spin_lock(&memcg_blkcg_tree_lock); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + blkcg_css = blkcg_root_css; + } + + link = list_first_entry(tmp_links, struct memcg_blkcg_link, list); + list_del_init(&link->list); + + link->memcg_css = memcg_css; + link->blkcg_css = blkcg_css; + err = radix_tree_insert(&memcg_blkcg_tree, memcg_css->id, link); + WARN_ON(err); + + spin_unlock(&memcg_blkcg_tree_lock); +} + +void free_memcg_blkcg_links(struct list_head *links_to_free) +{ + struct memcg_blkcg_link *link, *tmp_link; + + list_for_each_entry_safe(link, tmp_link, links_to_free, list) { + list_del(&link->list); + kfree(link); + } +} + +static void delete_memcg_link(struct 
cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + + spin_lock(&memcg_blkcg_tree_lock); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + } + spin_unlock(&memcg_blkcg_tree_lock); +} + +static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css) +{ + struct memcg_blkcg_link *link; + struct radix_tree_iter iter; + void **slot; + + spin_lock(&memcg_blkcg_tree_lock); + radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) { + link = *slot; + if (link->blkcg_css == blkcg_css) { + radix_tree_delete(&memcg_blkcg_tree, link->memcg_css->id); + call_rcu(&link->rcu, link_free); + } + } + spin_unlock(&memcg_blkcg_tree_lock); +} + +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + if (ss->id == io_cgrp_id) + delete_blkcg_link(css); + if (ss->id == memory_cgrp_id) + delete_memcg_link(css); +} + +static struct cgroup_subsys_state *find_blkcg_css(struct cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) + blkcg_css = link->blkcg_css; + else + blkcg_css = blkcg_root_css; + + css_get(blkcg_css); + + rcu_read_unlock(); + + return blkcg_css; +} + /* * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected. @@ -583,7 +735,10 @@ static int cgwb_create(struct backing_dev_info *bdi, int ret = 0; memcg = mem_cgroup_from_css(memcg_css); - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); memcg_cgwb_list = &memcg->cgwb_list; blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css); @@ -704,7 +859,10 @@ struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *blkcg_css; /* see whether the blkcg association has changed */ - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb))) wb = NULL; css_put(blkcg_css); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index dd854cc65fd9..1666f62841e5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -371,7 +371,7 @@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); - if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) + if (!memcg) memcg = root_mem_cgroup; return &memcg->css; -- Gitee From 614ba561559a5d0eae6b094679dac921fb45f10e Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 10:29:14 +0800 Subject: [PATCH 114/953] anolis: writeback: add debug info for memcg-blkcg link ANBZ: #8327 Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- include/trace/events/writeback.h | 21 ++++++++++++++ mm/backing-dev.c | 50 ++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 54e353c9f919..acd558657cf3 100644 
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -441,6 +441,27 @@ TRACE_EVENT(writeback_bdi_register,
 	)
 );
 
+TRACE_EVENT(insert_memcg_blkcg_link,
+	TP_PROTO(struct cgroup_subsys_state *memcg_css,
+		 struct cgroup_subsys_state *blkcg_css,
+		 struct cgroup_subsys_state *old_blkcg_css),
+	TP_ARGS(memcg_css, blkcg_css, old_blkcg_css),
+	TP_STRUCT__entry(
+		__field(unsigned int, memcg_ino)
+		__field(unsigned int, blkcg_ino)
+		__field(unsigned int, old_blkcg_ino)
+	),
+	TP_fast_assign(
+		__entry->memcg_ino = kernfs_ino(memcg_css->cgroup->kn);
+		__entry->blkcg_ino = kernfs_ino(blkcg_css->cgroup->kn);
+		__entry->old_blkcg_ino = old_blkcg_css ?
+					 kernfs_ino(old_blkcg_css->cgroup->kn) : 0;
+	),
+	TP_printk("memcg_ino=%u blkcg_ino=%u old_blkcg_ino=%u",
+		__entry->memcg_ino, __entry->blkcg_ino, __entry->old_blkcg_ino
+	)
+);
+
 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
 	TP_ARGS(wbc, bdi),
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 2fed2a533ed7..6fff90c68c77 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -41,9 +41,22 @@ struct workqueue_struct *bdi_wq;
 
 static struct dentry *bdi_debug_root;
 
+#ifdef CONFIG_CGROUP_WRITEBACK
+static struct dentry *memcg_blkcg_file;
+static const struct file_operations memcg_blkcg_debug_fops;
+#endif
+
 static void bdi_debug_init(void)
 {
 	bdi_debug_root = debugfs_create_dir("bdi", NULL);
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+	if (!bdi_debug_root)
+		return;
+
+	memcg_blkcg_file = debugfs_create_file("bdi_wb_link", 0444, bdi_debug_root,
+					       NULL, &memcg_blkcg_debug_fops);
+#endif
 }
 
 static int bdi_debug_stats_show(struct seq_file *m, void *v)
@@ -509,6 +522,40 @@ struct memcg_blkcg_link {
 static RADIX_TREE(memcg_blkcg_tree, GFP_ATOMIC);
 static DEFINE_SPINLOCK(memcg_blkcg_tree_lock);
 
+static int memcg_blkcg_link_show(struct seq_file *m, void *v)
+{
+	struct memcg_blkcg_link *link;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	seq_puts(m, "memory <---> blkio\n");
+	rcu_read_lock();
+	radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) {
+		link = *slot;
+		seq_printf(m, "%s:%5lu <---> %s:%5lu\n",
+			   link->memcg_css->cgroup->kn->name,
+			   kernfs_ino(link->memcg_css->cgroup->kn),
+			   (link->blkcg_css == blkcg_root_css) ?
+			   "root" : link->blkcg_css->cgroup->kn->name,
+			   kernfs_ino(link->blkcg_css->cgroup->kn));
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static int memcg_blkcg_link_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, memcg_blkcg_link_show, inode->i_private);
+}
+
+static const struct file_operations memcg_blkcg_debug_fops = {
+	.open = memcg_blkcg_link_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links)
 {
 	struct memcg_blkcg_link *link;
@@ -562,6 +609,9 @@ void insert_memcg_blkcg_link(struct cgroup_subsys *ss,
 	}
 	rcu_read_unlock();
 
+	trace_insert_memcg_blkcg_link(memcg_css, blkcg_css,
+				      link ? link->blkcg_css : NULL);
+
 	spin_lock(&memcg_blkcg_tree_lock);
 	if (link) {
 		radix_tree_delete(&memcg_blkcg_tree, memcg_css->id);
-- 
Gitee

From 0109f56a954fae808f7e916d49e411f148cc20f8 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Fri, 23 Feb 2024 10:30:11 +0800
Subject: [PATCH 115/953] anolis: fs/writeback: fix double free of blkcg_css
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

ANBZ: #8327

We have gotten a WARNING when releasing blkcg_css:

[332489.681635] WARNING: CPU: 55 PID: 14859 at lib/list_debug.c:56 __list_del_entry+0x81/0xc0
[332489.682191] list_del corruption, ffff883e6b94d450->prev is LIST_POISON2 (dead000000000200)
......
[332489.683895] CPU: 55 PID: 14859 Comm: kworker/55:2 Tainted: G
[332489.684477] Hardware name: Inspur SA5248M4/X10DRT-PS, BIOS 4.05A 10/11/2016
[332489.685061] Workqueue: cgroup_destroy css_release_work_fn
[332489.685654]  ffffc9001d92bd28 ffffffff81380042 ffffc9001d92bd78 0000000000000000
[332489.686269]  ffffc9001d92bd68 ffffffff81088f8b 0000003800000000 ffff883e6b94d4a0
[332489.686867]  ffff883e6b94d400 ffffffff81ce8fe0 ffff88375b24f400 ffff883e6b94d4a0
[332489.687479] Call Trace:
[332489.688078]  [] dump_stack+0x63/0x81
[332489.688681]  [] __warn+0xcb/0xf0
[332489.689276]  [] warn_slowpath_fmt+0x5f/0x80
[332489.689877]  [] __list_del_entry+0x81/0xc0
[332489.690481]  [] css_release_work_fn+0x42/0x140
[332489.691090]  [] process_one_work+0x189/0x420
[332489.691693]  [] worker_thread+0x4e/0x4b0
[332489.692293]  [] ? process_one_work+0x420/0x420
[332489.692905]  [] kthread+0xe6/0x100
[332489.693504]  [] ? kthread_park+0x60/0x60
[332489.694099]  [] ret_from_fork+0x41/0x50
[332489.694722] ---[ end trace 0cf869c4a5cfba87 ]---
......

This is caused by calling css_get after the css has been killed by
another thread, as described below:

Thread 1                          Thread 2
cgroup_rmdir
-> kill_css
   -> percpu_ref_kill_and_confirm
      -> css_killed_ref_fn
css_killed_work_fn
-> css_put
   -> css_release
                                  wb_get_create
                                  -> find_blkcg_css
                                     -> css_get
                                     -> css_put
                                        -> css_release (double free)
-> css_release_workfn
   -> css_free_work_fn
      -> blkcg_css_free

When a double free happens, it may free memory that is still used by
other threads and cause a kernel panic.

Fix this by using css_tryget_online() in find_blkcg_css(), which will
return false if the css has been killed.

Signed-off-by: Jiufei Xue
Signed-off-by: Joseph Qi
Acked-by: Jingbo Xu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2784
---
 mm/backing-dev.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6fff90c68c77..6c269c011181 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -689,15 +689,21 @@ static struct cgroup_subsys_state *find_blkcg_css(struct cgroup_subsys_state *me
 
 	rcu_read_lock();
 	link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id);
-	if (link)
+	if (link) {
 		blkcg_css = link->blkcg_css;
-	else
-		blkcg_css = blkcg_root_css;
+		if (css_tryget_online(blkcg_css))
+			goto out;
+	}
 
+	/*
+	 * If not blkcg_root_css and tryget failed,
+	 * get a reference of blkcg_root_css and return.
+	 */
+	blkcg_css = blkcg_root_css;
 	css_get(blkcg_css);
 
+out:
 	rcu_read_unlock();
-
 	return blkcg_css;
 }
-- 
Gitee

From 573f40510bb9bc287bbbbd6af65c8dc97be53e31 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Fri, 23 Feb 2024 10:30:53 +0800
Subject: [PATCH 116/953] anolis: fs/writeback: Attach inode's wb to root if
 needed

ANBZ: #8327

There might be tons of files queued in the writeback, awaiting
write-back. Unfortunately, the writeback's cgroup may already be dead.
In this case, we try to reassociate the inode with another writeback,
but we may fail to do so because the writeback associated with the dead
cgroup is the only valid one. The new writeback is then allocated,
initialized and associated with the inode over and over again, without
ever stopping until all data resident in the inode's page cache have
been flushed to disk. This causes unnecessarily high system load.

This fixes the issue by forcibly moving the inode to the root cgroup
when the cgroup it was previously bound to becomes dead. With it, no
more unnecessary writebacks are created and populated, and the system
load decreased by about 6x in the test case we carried out:

Without the patch: 30% system load
With the patch:    5% system load

Signed-off-by: luanshi
Signed-off-by: Jiufei Xue
Signed-off-by: Joseph Qi
Acked-by: Jingbo Xu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2784
---
 fs/fs-writeback.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1767493dffda..42383a5c8e95 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -858,6 +858,16 @@ void wbc_detach_inode(struct writeback_control *wbc)
 	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 	inode->i_wb_frn_history = history;
 
+	/*
+	 * Without wb list lock i_wb can switch at any point, so it can
+	 * judge on the wrong wb anyway.
+	 *
+	 * The wb is switched to the root memcg unconditionally. We expect
+	 * the correct wb (best candidate) is picked up in next round.
+	 */
+	if (wb == inode->i_wb && wb_dying(wb) && !(inode->i_state & I_DIRTY_ALL))
+		inode_switch_wbs(inode, root_mem_cgroup->css.id);
+
 	wb_put(wbc->wb);
 	wbc->wb = NULL;
 }
-- 
Gitee

From 44f84625ded5586ba3e993dbb24228c4a4fc5476 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Fri, 23 Feb 2024 10:34:36 +0800
Subject: [PATCH 117/953] anolis: writeback: introduce cgwb_v1 boot param

ANBZ: #8327

Writeback control is now supported for the cgroup v1 interface.
However, it also has some restrictions, so introduce a new kernel boot
parameter to control the behavior, which is disabled by default. Users
can enable writeback control for cgroup v1 with the command line option
"cgwb_v1".

Signed-off-by: Jiufei Xue
Signed-off-by: Joseph Qi
Acked-by: Jingbo Xu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2784
---
 .../admin-guide/kernel-parameters.txt |  3 +++
 include/linux/backing-dev.h           | 19 ++++++++++++++++++-
 mm/backing-dev.c                      | 12 ++++++++++++
 mm/memcontrol.c                       | 13 ++++++++++++-
 4 files changed, 45 insertions(+), 2 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 28cda809d805..ee22d18ca8ab 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -586,6 +586,9 @@
 			nokmem -- Disable kernel memory accounting.
 			nobpf -- Disable BPF memory accounting.
 
+	cgwb_v1		Enable writeback control for the cgroup v1
+			interface.
+
 	checkreqprot=	[SELINUX] Set initial checkreqprot flag value.
 			Format: { "0" | "1" }
 			See security/selinux/Kconfig help text.
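The flag itself is consumed with the kernel's __setup() hook; the real
handler for "cgwb_v1" is added in the mm/memcontrol.c hunk further down
in this patch. As a minimal standalone sketch of the mechanism, with a
hypothetical flag name that is not part of this series:

    #include <linux/init.h>

    static bool my_flag_enabled; /* set iff "my_flag" is on the command line */

    static int __init enable_my_flag(char *s)
    {
            my_flag_enabled = true;
            return 0; /* 0 mirrors the cgwb_v1 handler below; returning
                         non-zero would mark the option as consumed */
    }
    __setup("my_flag", enable_my_flag);

Booting with "my_flag" on the kernel command line then flips the bool
early, before the initcalls that test it run.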
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index a28d2248ed82..fc826261b21f 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -159,6 +159,21 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 void wb_memcg_offline(struct mem_cgroup *memcg);
 void wb_blkcg_offline(struct cgroup_subsys_state *css);
 
+extern bool cgwb_v1;
+
+static inline bool memcg_blkcg_on_dfl(void)
+{
+	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+	       cgroup_subsys_on_dfl(io_cgrp_subsys);
+}
+
+static inline bool cgroup_writeback_support_v1(void)
+{
+	return cgwb_v1 &&
+	       !cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+	       !cgroup_subsys_on_dfl(io_cgrp_subsys);
+}
+
 /**
  * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
  * @inode: inode of interest
@@ -174,7 +189,9 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(inode);
 
-	return (bdi->capabilities & BDI_CAP_WRITEBACK) &&
+	return (memcg_blkcg_on_dfl() ||
+		cgroup_writeback_support_v1()) &&
+	       (bdi->capabilities & BDI_CAP_WRITEBACK) &&
 	       (inode->i_sb->s_iflags & SB_I_CGROUPWB);
 }
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6c269c011181..f032314fcbf2 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -54,6 +54,9 @@ static void bdi_debug_init(void)
 	if (!bdi_debug_root)
 		return;
 
+	if (!cgwb_v1)
+		return;
+
 	memcg_blkcg_file = debugfs_create_file("bdi_wb_link", 0444, bdi_debug_root,
 					       NULL, &memcg_blkcg_debug_fops);
 #endif
@@ -561,6 +564,9 @@ int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links)
 	struct memcg_blkcg_link *link;
 	int i;
 
+	if (!cgwb_v1)
+		return 0;
+
 	for (i = 0; i < count; i++) {
 		link = kzalloc(sizeof(*link), GFP_KERNEL);
 		if (!link) {
@@ -588,6 +594,9 @@ void insert_memcg_blkcg_link(struct cgroup_subsys *ss,
 	struct cgroup_subsys_state *memcg_css;
 	int err;
 
+	if (!cgwb_v1)
+		return;
+
 	if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id)
 		return;
 
@@ -673,6 +682,9 @@ static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css)
 void delete_memcg_blkcg_link(struct cgroup_subsys *ss,
 				struct cgroup_subsys_state *css)
 {
+	if (!cgwb_v1)
+		return;
+
 	if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id)
 		return;
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1666f62841e5..ce2cb1959db9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -371,7 +371,8 @@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
 {
 	struct mem_cgroup *memcg = folio_memcg(folio);
 
-	if (!memcg)
+	if (!memcg ||
+	    (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1))
 		memcg = root_mem_cgroup;
 
 	return &memcg->css;
@@ -7367,6 +7368,16 @@ static int __init cgroup_memory(char *s)
 }
 __setup("cgroup.memory=", cgroup_memory);
 
+bool cgwb_v1;
+
+static int __init enable_cgroup_writeback_v1(char *s)
+{
+	cgwb_v1 = true;
+
+	return 0;
+}
+__setup("cgwb_v1", enable_cgroup_writeback_v1);
+
 /*
  * subsys_initcall() for memory controller.
 *
-- 
Gitee

From 98de52ecb966aa0a1f6cc6d9f0470bb18fa0a065 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Fri, 23 Feb 2024 10:39:04 +0800
Subject: [PATCH 118/953] anolis: fs/writeback: wrap cgroup writeback v1 logic

ANBZ: #8327

Wrap cgroup writeback v1 logic to prevent build errors without
CONFIG_CGROUPS or CONFIG_CGROUP_WRITEBACK.
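The wrapping follows the usual kernel stub idiom: the real declaration
is visible when the feature is configured in, and an empty static
inline stands in otherwise, so call sites compile unchanged either way.
A minimal sketch using one of the functions touched here (guards
simplified; the actual patch nests an #ifdef CONFIG_CGROUPS inside the
!CONFIG_CGROUP_WRITEBACK branch, as the header hunk below shows):

    #ifdef CONFIG_CGROUP_WRITEBACK
    void insert_memcg_blkcg_link(struct cgroup_subsys *ss,
                                 struct list_head *tmp_links,
                                 struct css_set *cset);
    #else
    static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss,
                                               struct list_head *tmp_links,
                                               struct css_set *cset)
    {
            /* no-op when cgroup writeback is not built in */
    }
    #endif

A static inline stub rather than an empty macro keeps argument type
checking in both configurations.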
Signed-off-by: Hao Xu
Signed-off-by: Joseph Qi
Acked-by: Jingbo Xu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2784
---
 include/linux/backing-dev.h      | 2 ++
 include/trace/events/writeback.h | 2 ++
 mm/memcontrol.c                  | 8 +++++++-
 3 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index fc826261b21f..506f89a99a6c 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -390,6 +390,7 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
 {
 }
 
+#ifdef CONFIG_CGROUPS
 static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss,
 					   struct list_head *tmp_links,
 					   struct css_set *cset)
@@ -409,6 +410,7 @@ static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss,
 					   struct cgroup_subsys_state *css)
 {
 }
+#endif
 
 #endif /* CONFIG_CGROUP_WRITEBACK */
 
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index acd558657cf3..0e190e112dc4 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -441,6 +441,7 @@ TRACE_EVENT(writeback_bdi_register,
 	)
 );
 
+#ifdef CONFIG_CGROUP_WRITEBACK
 TRACE_EVENT(insert_memcg_blkcg_link,
 	TP_PROTO(struct cgroup_subsys_state *memcg_css,
 		 struct cgroup_subsys_state *blkcg_css,
@@ -461,6 +462,7 @@ TRACE_EVENT(insert_memcg_blkcg_link,
 		__entry->memcg_ino, __entry->blkcg_ino, __entry->old_blkcg_ino
 	)
 );
+#endif
 
 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ce2cb1959db9..b26eb7525886 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -371,8 +371,12 @@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
 {
 	struct mem_cgroup *memcg = folio_memcg(folio);
 
+#ifdef CONFIG_CGROUP_WRITEBACK
 	if (!memcg ||
-	    (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1))
+		(!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1))
+#else
+	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+#endif
 		memcg = root_mem_cgroup;
 
 	return &memcg->css;
@@ -7368,6 +7372,7 @@ static int __init cgroup_memory(char *s)
 }
 __setup("cgroup.memory=", cgroup_memory);
 
+#ifdef CONFIG_CGROUP_WRITEBACK
 bool cgwb_v1;
 
 static int __init enable_cgroup_writeback_v1(char *s)
@@ -7377,6 +7382,7 @@ static int __init enable_cgroup_writeback_v1(char *s)
 	return 0;
 }
 __setup("cgwb_v1", enable_cgroup_writeback_v1);
+#endif
 
 /*
  * subsys_initcall() for memory controller.
-- 
Gitee

From 3d4d0060c1d1fbbbe8a8546f51db256c43b6b4bd Mon Sep 17 00:00:00 2001
From: Jia Zhu
Date: Mon, 20 Nov 2023 12:14:18 +0800
Subject: [PATCH 119/953] cachefiles: introduce object ondemand state

ANBZ: #8342

commit 357a18d033143617e9c7d420c8f0dd4cbab5f34d upstream.

Previously, the @ondemand_id field was used not only to identify the
ondemand state of the object, but also to represent the index in the
xarray. This commit introduces a @state field to decouple the role of
@ondemand_id and adds helpers to access it.
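For readers new to the idiom, each CACHEFILES_OBJECT_STATE_FUNCS(_state,
_STATE) invocation in the header below stamps out an is_/set_ helper
pair. Expanded by hand for the open state, the generated code is
roughly:

    static inline bool
    cachefiles_ondemand_object_is_open(const struct cachefiles_object *object)
    {
            return object->state == CACHEFILES_ONDEMAND_OBJSTATE_OPEN;
    }

    static inline void
    cachefiles_ondemand_set_object_open(struct cachefiles_object *object)
    {
            object->state = CACHEFILES_ONDEMAND_OBJSTATE_OPEN;
    }

Callers then test and update the state through one named helper per
state instead of comparing object->state against the enum by hand.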
Signed-off-by: Jia Zhu Link: https://lore.kernel.org/r/20231120041422.75170-2-zhujia.zj@bytedance.com Reviewed-by: Jingbo Xu Reviewed-by: David Howells Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2791 --- fs/cachefiles/internal.h | 21 +++++++++++++++++++++ fs/cachefiles/ondemand.c | 21 +++++++++------------ 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 2ad58c465208..00beedeaec18 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -44,6 +44,11 @@ struct cachefiles_volume { struct dentry *fanout[256]; /* Fanout subdirs */ }; +enum cachefiles_object_state { + CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */ + CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */ +}; + /* * Backing file state. */ @@ -62,6 +67,7 @@ struct cachefiles_object { #define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */ #ifdef CONFIG_CACHEFILES_ONDEMAND int ondemand_id; + enum cachefiles_object_state state; #endif }; @@ -296,6 +302,21 @@ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); extern int cachefiles_ondemand_read(struct cachefiles_object *object, loff_t pos, size_t len); +#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE) \ +static inline bool \ +cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \ +{ \ + return object->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ +} \ + \ +static inline void \ +cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ +{ \ + object->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ +} + +CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN); +CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE); #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 0254ed39f68c..90456b8a4b3e 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -15,6 +15,7 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, xa_lock(&cache->reqs); object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + cachefiles_ondemand_set_object_close(object); /* * Flush all pending READ requests since their completion depends on @@ -176,6 +177,8 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); trace_cachefiles_ondemand_copen(req->object, id, size); + cachefiles_ondemand_set_object_open(req->object); + out: complete(&req->done); return ret; @@ -363,7 +366,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, /* coupled with the barrier in cachefiles_flush_reqs() */ smp_mb(); - if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) { + if (opcode != CACHEFILES_OP_OPEN && + !cachefiles_ondemand_object_is_open(object)) { WARN_ON_ONCE(object->ondemand_id == 0); xas_unlock(&xas); ret = -EIO; @@ -430,18 +434,11 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, void *private) { struct cachefiles_object *object = req->object; - int object_id = object->ondemand_id; - /* - * It's possible that object id is still 0 if the cookie looking up - * phase failed before OPEN request has ever been sent. 
Also avoid - * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means - * anon_fd has already been closed. - */ - if (object_id <= 0) + if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; - req->msg.object_id = object_id; + req->msg.object_id = object->ondemand_id; trace_cachefiles_ondemand_close(object, &req->msg); return 0; } @@ -460,7 +457,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, int object_id = object->ondemand_id; /* Stop enqueuing requests when daemon has closed anon_fd. */ - if (object_id <= 0) { + if (!cachefiles_ondemand_object_is_open(object)) { WARN_ON_ONCE(object_id == 0); pr_info_once("READ: anonymous fd closed prematurely.\n"); return -EIO; @@ -485,7 +482,7 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object) * creating a new tmpfile as the cache file. Reuse the previously * allocated object ID if any. */ - if (object->ondemand_id > 0) + if (cachefiles_ondemand_object_is_open(object)) return 0; volume_key_size = volume->key[0] + 1; -- Gitee From 3eacee6b5e270a33ffc5840d1b0fcf30cee6b86d Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Mon, 20 Nov 2023 12:14:19 +0800 Subject: [PATCH 120/953] cachefiles: extract ondemand info field from cachefiles_object ANBZ: #8342 commit 3c5ecfe16e7699011c12c2d44e55437415331fa3 upstream. We'll introduce a @work_struct field for @object in subsequent patches, it will enlarge the size of @object. As the result of that, this commit extracts ondemand info field from @object. Signed-off-by: Jia Zhu Link: https://lore.kernel.org/r/20231120041422.75170-3-zhujia.zj@bytedance.com Reviewed-by: Jingbo Xu Reviewed-by: David Howells Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2791 --- fs/cachefiles/interface.c | 7 ++++++- fs/cachefiles/internal.h | 26 ++++++++++++++++++++++---- fs/cachefiles/ondemand.c | 34 ++++++++++++++++++++++++++++------ 3 files changed, 56 insertions(+), 11 deletions(-) diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 40052bdb3365..35ba2117a6f6 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -31,6 +31,11 @@ struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie) if (!object) return NULL; + if (cachefiles_ondemand_init_obj_info(object, volume)) { + kmem_cache_free(cachefiles_object_jar, object); + return NULL; + } + refcount_set(&object->ref, 1); spin_lock_init(&object->lock); @@ -88,7 +93,7 @@ void cachefiles_put_object(struct cachefiles_object *object, ASSERTCMP(object->file, ==, NULL); kfree(object->d_name); - + cachefiles_ondemand_deinit_obj_info(object); cache = object->volume->cache->cache; fscache_put_cookie(object->cookie, fscache_cookie_put_object); object->cookie = NULL; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 00beedeaec18..b0fe76964bc0 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -49,6 +49,12 @@ enum cachefiles_object_state { CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */ }; +struct cachefiles_ondemand_info { + int ondemand_id; + enum cachefiles_object_state state; + struct cachefiles_object *object; +}; + /* * Backing file state. 
*/ @@ -66,8 +72,7 @@ struct cachefiles_object { unsigned long flags; #define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */ #ifdef CONFIG_CACHEFILES_ONDEMAND - int ondemand_id; - enum cachefiles_object_state state; + struct cachefiles_ondemand_info *ondemand; #endif }; @@ -302,17 +307,21 @@ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); extern int cachefiles_ondemand_read(struct cachefiles_object *object, loff_t pos, size_t len); +extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj, + struct cachefiles_volume *volume); +extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj); + #define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE) \ static inline bool \ cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \ { \ - return object->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ + return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ } \ \ static inline void \ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ { \ - object->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ + object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ } CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN); @@ -338,6 +347,15 @@ static inline int cachefiles_ondemand_read(struct cachefiles_object *object, { return -EOPNOTSUPP; } + +static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj, + struct cachefiles_volume *volume) +{ + return 0; +} +static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj) +{ +} #endif /* diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 90456b8a4b3e..deb7e3007aa1 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -9,12 +9,13 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, { struct cachefiles_object *object = file->private_data; struct cachefiles_cache *cache = object->volume->cache; - int object_id = object->ondemand_id; + struct cachefiles_ondemand_info *info = object->ondemand; + int object_id = info->ondemand_id; struct cachefiles_req *req; XA_STATE(xas, &cache->reqs, 0); xa_lock(&cache->reqs); - object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; cachefiles_ondemand_set_object_close(object); /* @@ -222,7 +223,7 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) load = (void *)req->msg.data; load->fd = fd; req->msg.object_id = object_id; - object->ondemand_id = object_id; + object->ondemand->ondemand_id = object_id; cachefiles_get_unbind_pincount(cache); trace_cachefiles_ondemand_open(object, &req->msg, load); @@ -368,7 +369,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, if (opcode != CACHEFILES_OP_OPEN && !cachefiles_ondemand_object_is_open(object)) { - WARN_ON_ONCE(object->ondemand_id == 0); + WARN_ON_ONCE(object->ondemand->ondemand_id == 0); xas_unlock(&xas); ret = -EIO; goto out; @@ -438,7 +439,7 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; - req->msg.object_id = object->ondemand_id; + req->msg.object_id = object->ondemand->ondemand_id; trace_cachefiles_ondemand_close(object, &req->msg); return 0; } @@ -454,7 +455,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, struct cachefiles_object *object = req->object; struct cachefiles_read *load = (void *)req->msg.data; struct 
cachefiles_read_ctx *read_ctx = private; - int object_id = object->ondemand_id; + int object_id = object->ondemand->ondemand_id; /* Stop enqueuing requests when daemon has closed anon_fd. */ if (!cachefiles_ondemand_object_is_open(object)) { @@ -500,6 +501,27 @@ void cachefiles_ondemand_clean_object(struct cachefiles_object *object) cachefiles_ondemand_init_close_req, NULL); } +int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object, + struct cachefiles_volume *volume) +{ + if (!cachefiles_in_ondemand_mode(volume->cache)) + return 0; + + object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info), + GFP_KERNEL); + if (!object->ondemand) + return -ENOMEM; + + object->ondemand->object = object; + return 0; +} + +void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object) +{ + kfree(object->ondemand); + object->ondemand = NULL; +} + int cachefiles_ondemand_read(struct cachefiles_object *object, loff_t pos, size_t len) { -- Gitee From b0eca13e27c0cd0d8a373d8aac6c32f9a427ccb3 Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Mon, 20 Nov 2023 12:14:20 +0800 Subject: [PATCH 121/953] cachefiles: resend an open request if the read request's object is closed ANBZ: #8342 commit 0a7e54c1959c0feb2de23397ec09c7692364313e upstream. When an anonymous fd is closed by user daemon, if there is a new read request for this file comes up, the anonymous fd should be re-opened to handle that read request rather than fail it directly. 1. Introduce reopening state for objects that are closed but have inflight/subsequent read requests. 2. No longer flush READ requests but only CLOSE requests when anonymous fd is closed. 3. Enqueue the reopen work to workqueue, thus user daemon could get rid of daemon_read context and handle that request smoothly. Otherwise, the user daemon will send a reopen request and wait for itself to process the request. Signed-off-by: Jia Zhu Link: https://lore.kernel.org/r/20231120041422.75170-4-zhujia.zj@bytedance.com Reviewed-by: Jingbo Xu Reviewed-by: David Howells Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2791 --- fs/cachefiles/internal.h | 3 ++ fs/cachefiles/ondemand.c | 98 ++++++++++++++++++++++++++++------------ 2 files changed, 72 insertions(+), 29 deletions(-) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index b0fe76964bc0..b9a90f1a0c01 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -47,9 +47,11 @@ struct cachefiles_volume { enum cachefiles_object_state { CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */ CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */ + CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. 
*/ }; struct cachefiles_ondemand_info { + struct work_struct ondemand_work; int ondemand_id; enum cachefiles_object_state state; struct cachefiles_object *object; @@ -326,6 +328,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN); CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE); +CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING); #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index deb7e3007aa1..8e130de952f7 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -18,14 +18,10 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; cachefiles_ondemand_set_object_close(object); - /* - * Flush all pending READ requests since their completion depends on - * anon_fd. - */ - xas_for_each(&xas, req, ULONG_MAX) { + /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */ + xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { if (req->msg.object_id == object_id && - req->msg.opcode == CACHEFILES_OP_READ) { - req->error = -EIO; + req->msg.opcode == CACHEFILES_OP_CLOSE) { complete(&req->done); xas_store(&xas, NULL); } @@ -179,6 +175,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) trace_cachefiles_ondemand_copen(req->object, id, size); cachefiles_ondemand_set_object_open(req->object); + wake_up_all(&cache->daemon_pollwq); out: complete(&req->done); @@ -222,7 +219,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) load = (void *)req->msg.data; load->fd = fd; - req->msg.object_id = object_id; object->ondemand->ondemand_id = object_id; cachefiles_get_unbind_pincount(cache); @@ -238,6 +234,43 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) return ret; } +static void ondemand_object_worker(struct work_struct *work) +{ + struct cachefiles_ondemand_info *info = + container_of(work, struct cachefiles_ondemand_info, ondemand_work); + + cachefiles_ondemand_init_object(info->object); +} + +/* + * If there are any inflight or subsequent READ requests on the + * closed object, reopen it. + * Skip read requests whose related object is reopening. + */ +static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas, + unsigned long xa_max) +{ + struct cachefiles_req *req; + struct cachefiles_object *object; + struct cachefiles_ondemand_info *info; + + xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) { + if (req->msg.opcode != CACHEFILES_OP_READ) + return req; + object = req->object; + info = object->ondemand; + if (cachefiles_ondemand_object_is_close(object)) { + cachefiles_ondemand_set_object_reopening(object); + queue_work(fscache_wq, &info->ondemand_work); + continue; + } + if (cachefiles_ondemand_object_is_reopening(object)) + continue; + return req; + } + return NULL; +} + ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) { @@ -248,16 +281,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, int ret = 0; XA_STATE(xas, &cache->reqs, cache->req_id_next); + xa_lock(&cache->reqs); /* * Cyclically search for a request that has not ever been processed, * to prevent requests from being processed repeatedly, and make * request distribution fair. 
*/ - xa_lock(&cache->reqs); - req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW); + req = cachefiles_ondemand_select_req(&xas, ULONG_MAX); if (!req && cache->req_id_next > 0) { xas_set(&xas, 0); - req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW); + req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1); } if (!req) { xa_unlock(&cache->reqs); @@ -277,14 +310,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, xa_unlock(&cache->reqs); id = xas.xa_index; - msg->msg_id = id; if (msg->opcode == CACHEFILES_OP_OPEN) { ret = cachefiles_ondemand_get_fd(req); - if (ret) + if (ret) { + cachefiles_ondemand_set_object_close(req->object); goto error; + } } + msg->msg_id = id; + msg->object_id = req->object->ondemand->ondemand_id; + if (copy_to_user(_buffer, msg, n) != 0) { ret = -EFAULT; goto err_put_fd; @@ -317,19 +354,23 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, void *private) { struct cachefiles_cache *cache = object->volume->cache; - struct cachefiles_req *req; + struct cachefiles_req *req = NULL; XA_STATE(xas, &cache->reqs, 0); int ret; if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) return 0; - if (test_bit(CACHEFILES_DEAD, &cache->flags)) - return -EIO; + if (test_bit(CACHEFILES_DEAD, &cache->flags)) { + ret = -EIO; + goto out; + } req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL); - if (!req) - return -ENOMEM; + if (!req) { + ret = -ENOMEM; + goto out; + } req->object = object; init_completion(&req->done); @@ -367,7 +408,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, /* coupled with the barrier in cachefiles_flush_reqs() */ smp_mb(); - if (opcode != CACHEFILES_OP_OPEN && + if (opcode == CACHEFILES_OP_CLOSE && !cachefiles_ondemand_object_is_open(object)) { WARN_ON_ONCE(object->ondemand->ondemand_id == 0); xas_unlock(&xas); @@ -392,7 +433,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, wake_up_all(&cache->daemon_pollwq); wait_for_completion(&req->done); ret = req->error; + kfree(req); + return ret; out: + /* Reset the object to close state in error handling path. + * If error occurs after creating the anonymous fd, + * cachefiles_ondemand_fd_release() will set object to close. + */ + if (opcode == CACHEFILES_OP_OPEN) + cachefiles_ondemand_set_object_close(object); kfree(req); return ret; } @@ -439,7 +488,6 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; - req->msg.object_id = object->ondemand->ondemand_id; trace_cachefiles_ondemand_close(object, &req->msg); return 0; } @@ -455,16 +503,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, struct cachefiles_object *object = req->object; struct cachefiles_read *load = (void *)req->msg.data; struct cachefiles_read_ctx *read_ctx = private; - int object_id = object->ondemand->ondemand_id; - - /* Stop enqueuing requests when daemon has closed anon_fd. 
*/ - if (!cachefiles_ondemand_object_is_open(object)) { - WARN_ON_ONCE(object_id == 0); - pr_info_once("READ: anonymous fd closed prematurely.\n"); - return -EIO; - } - req->msg.object_id = object_id; load->off = read_ctx->off; load->len = read_ctx->len; trace_cachefiles_ondemand_read(object, &req->msg, load); @@ -513,6 +552,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object, return -ENOMEM; object->ondemand->object = object; + INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker); return 0; } -- Gitee From d53932eae619f9e0bb1d8144d30c46c52319ed0d Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Mon, 20 Nov 2023 12:14:21 +0800 Subject: [PATCH 122/953] cachefiles: narrow the scope of triggering EPOLLIN events in ondemand mode ANBZ: #8342 commit b817e22b2e91257ace32a6768c3c003faeaa1c5c upstream. Don't trigger EPOLLIN when there are only reopening read requests in xarray. Suggested-by: Xin Yin Signed-off-by: Jia Zhu Link: https://lore.kernel.org/r/20231120041422.75170-5-zhujia.zj@bytedance.com Reviewed-by: Jingbo Xu Reviewed-by: David Howells Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2791 --- fs/cachefiles/daemon.c | 14 ++++++++++++-- fs/cachefiles/internal.h | 12 ++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 5f4df9588620..4611fcfb8182 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -355,14 +355,24 @@ static __poll_t cachefiles_daemon_poll(struct file *file, struct poll_table_struct *poll) { struct cachefiles_cache *cache = file->private_data; + XA_STATE(xas, &cache->reqs, 0); + struct cachefiles_req *req; __poll_t mask; poll_wait(file, &cache->daemon_pollwq, poll); mask = 0; if (cachefiles_in_ondemand_mode(cache)) { - if (!xa_empty(&cache->reqs)) - mask |= EPOLLIN; + if (!xa_empty(&cache->reqs)) { + rcu_read_lock(); + xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { + if (!cachefiles_ondemand_is_reopening_read(req)) { + mask |= EPOLLIN; + break; + } + } + rcu_read_unlock(); + } } else { if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) mask |= EPOLLIN; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index b9a90f1a0c01..26e5f8f123ef 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -329,6 +329,13 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN); CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE); CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING); + +static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) +{ + return cachefiles_ondemand_object_is_reopening(req->object) && + req->msg.opcode == CACHEFILES_OP_READ; +} + #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) @@ -359,6 +366,11 @@ static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *ob static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj) { } + +static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) +{ + return false; +} #endif /* -- Gitee From 6b8f04c90aa1e156beca549be1bc827719c99c92 Mon Sep 17 00:00:00 2001 From: Jia Zhu Date: Mon, 20 Nov 2023 12:14:22 +0800 Subject: [PATCH 123/953] cachefiles: add restore command to recover inflight ondemand read requests ANBZ: #8342 commit 
e73fa11a356ca0905c3cc648eaacc6f0f2d2c8b3 upstream. Previously, in ondemand read scenario, if the anonymous fd was closed by user daemon, inflight and subsequent read requests would return EIO. As long as the device connection is not released, user daemon can hold and restore inflight requests by setting the request flag to CACHEFILES_REQ_NEW. Suggested-by: Gao Xiang Signed-off-by: Jia Zhu Signed-off-by: Xin Yin Link: https://lore.kernel.org/r/20231120041422.75170-6-zhujia.zj@bytedance.com Reviewed-by: Jingbo Xu Reviewed-by: David Howells Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2791 --- fs/cachefiles/daemon.c | 1 + fs/cachefiles/internal.h | 3 +++ fs/cachefiles/ondemand.c | 23 +++++++++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 4611fcfb8182..6465e2574230 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -77,6 +77,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = { { "tag", cachefiles_daemon_tag }, #ifdef CONFIG_CACHEFILES_ONDEMAND { "copen", cachefiles_ondemand_copen }, + { "restore", cachefiles_ondemand_restore }, #endif { "", NULL } }; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 26e5f8f123ef..4a87c9d714a9 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -303,6 +303,9 @@ extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args); +extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache, + char *args); + extern int cachefiles_ondemand_init_object(struct cachefiles_object *object); extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 8e130de952f7..b8fbbb1961bb 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -182,6 +182,29 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) return ret; } +int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args) +{ + struct cachefiles_req *req; + + XA_STATE(xas, &cache->reqs, 0); + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return -EOPNOTSUPP; + + /* + * Reset the requests to CACHEFILES_REQ_NEW state, so that the + * requests have been processed halfway before the crash of the + * user daemon could be reprocessed after the recovery. + */ + xas_lock(&xas); + xas_for_each(&xas, req, ULONG_MAX) + xas_set_mark(&xas, CACHEFILES_REQ_NEW); + xas_unlock(&xas); + + wake_up_all(&cache->daemon_pollwq); + return 0; +} + static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) { struct cachefiles_object *object; -- Gitee From 66b08fa2a40c40ce9febe74c14a2c882d1a387ad Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 19 Jan 2024 20:49:34 +0000 Subject: [PATCH 124/953] cachefiles, erofs: Fix NULL deref in when cachefiles is not doing ondemand-mode ANBZ: #8342 commit c3d6569a43322f371e7ba0ad386112723757ac8f upstream. cachefiles_ondemand_init_object() as called from cachefiles_open_file() and cachefiles_create_tmpfile() does not check if object->ondemand is set before dereferencing it, leading to an oops something like: RIP: 0010:cachefiles_ondemand_init_object+0x9/0x41 ... 
Call Trace:
 cachefiles_open_file+0xc9/0x187
 cachefiles_lookup_cookie+0x122/0x2be
 fscache_cookie_state_machine+0xbe/0x32b
 fscache_cookie_worker+0x1f/0x2d
 process_one_work+0x136/0x208
 process_scheduled_works+0x3a/0x41
 worker_thread+0x1a2/0x1f6
 kthread+0xca/0xd2
 ret_from_fork+0x21/0x33

Fix this by making cachefiles_ondemand_init_object() return immediately
if cachefiles->ondemand is NULL.

Fixes: 3c5ecfe16e76 ("cachefiles: extract ondemand info field from cachefiles_object")
Reported-by: Marc Dionne
Signed-off-by: David Howells
cc: Gao Xiang
cc: Chao Yu
cc: Yue Hu
cc: Jeffle Xu
cc: linux-erofs@lists.ozlabs.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Jingbo Xu
Acked-by: Gao Xiang
Acked-by: Joseph Qi
Link: https://gitee.com/anolis/cloud-kernel/pulls/2791
---
 fs/cachefiles/ondemand.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index b8fbbb1961bb..c15f2547222e 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -539,6 +539,9 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
 	struct fscache_volume *volume = object->volume->vcookie;
 	size_t volume_key_size, cookie_key_size, data_len;
 
+	if (!object->ondemand)
+		return 0;
+
 	/*
 	 * CacheFiles will firstly check the cache file under the root cache
 	 * directory. If the coherency check failed, it will fallback to
-- 
Gitee

From 81009511031bd5f40b89cd78bd14a7dcf71ffc4a Mon Sep 17 00:00:00 2001
From: "Carrie.Cai"
Date: Tue, 6 Feb 2024 18:20:08 +0800
Subject: [PATCH 125/953] anolis: config: modify defconfig files for Mont-TSSE
 Driver

ANBZ: #8156

Signed-off-by: Carrie.Cai
Reviewed-by: Tianjia Zhang
Reviewed-by: Xuchun Shang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2746
---
 arch/x86/configs/anolis-debug_defconfig | 1 +
 arch/x86/configs/anolis_defconfig       | 1 +
 2 files changed, 2 insertions(+)

diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig
index 2931e96d0ba8..ecdf002bc8a9 100644
--- a/arch/x86/configs/anolis-debug_defconfig
+++ b/arch/x86/configs/anolis-debug_defconfig
@@ -7113,6 +7113,7 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m
 CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
 CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
 CONFIG_CRYPTO_DEV_QAT_C62XVF=m
+CONFIG_CRYPTO_DEV_TSSE=m
 CONFIG_CRYPTO_DEV_CHELSIO=m
 # CONFIG_CRYPTO_DEV_VIRTIO is not set
 # CONFIG_CRYPTO_DEV_SAFEXCEL is not set
diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index b031bb4585c3..3bafc6bbc1fe 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -7104,6 +7104,7 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m
 CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
 CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
 CONFIG_CRYPTO_DEV_QAT_C62XVF=m
+CONFIG_CRYPTO_DEV_TSSE=m
 CONFIG_CRYPTO_DEV_CHELSIO=m
 # CONFIG_CRYPTO_DEV_VIRTIO is not set
 # CONFIG_CRYPTO_DEV_SAFEXCEL is not set
-- 
Gitee

From 7953d8c3b87055a0c65d7627986f86cb8831a068 Mon Sep 17 00:00:00 2001
From: "Carrie.Cai"
Date: Tue, 6 Feb 2024 18:23:13 +0800
Subject: [PATCH 126/953] anolis: crypto: add support for Mont-TSSE driver

ANBZ: #8156

Mont-TSSE(TM) is a high speed crypto algorithm accelerator. It supports
the SM2/3/4, AES and SHA algorithms. Mont-TSSE(TM) has 32 symmetric and
6 asymmetric crypto acceleration engines. The interface of the
Mont-TSSE(TM) system is PCIe 5.0 x8. Total processing throughput is up
to 200Gbps.
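Structurally the driver is a stock PCI device driver: it matches the
device by vendor/device ID, enables it at probe time, and registers
through the PCI core. Reduced to a sketch (device IDs taken from the
pci_ids[] table added below; BAR mapping, IRQ/IPC bring-up, firmware
loading and SR-IOV handling all elided, and the _sketch names are
placeholders, not part of the driver):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id tsse_sketch_ids[] = {
            { PCI_DEVICE(0x1b00, 0xc011), },
            { PCI_DEVICE(0x1b00, 0xd011), },
            { 0 }
    };
    MODULE_DEVICE_TABLE(pci, tsse_sketch_ids);

    static int tsse_sketch_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *id)
    {
            int ret = pcim_enable_device(pdev); /* managed: undone on unbind */

            if (ret)
                    return ret;
            pci_set_master(pdev); /* the accelerator is a DMA master */
            return 0;
    }

    static struct pci_driver tsse_sketch_driver = {
            .name     = "tsse_sketch",
            .id_table = tsse_sketch_ids,
            .probe    = tsse_sketch_probe,
    };
    module_pci_driver(tsse_sketch_driver);
    MODULE_LICENSE("GPL");

The real driver layers BAR iomap, vuart, IPC and firmware bring-up on
top of this frame, as the files below show.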
Signed-off-by: Carrie.Cai Reviewed-by: Tianjia Zhang Reviewed-by: Xuchun Shang Link: https://gitee.com/anolis/cloud-kernel/pulls/2746 --- drivers/crypto/Kconfig | 1 + drivers/crypto/Makefile | 1 + drivers/crypto/montage/Kconfig | 3 + drivers/crypto/montage/Makefile | 3 + drivers/crypto/montage/tsse/Kconfig | 9 + drivers/crypto/montage/tsse/Makefile | 15 + drivers/crypto/montage/tsse/tsse_dev.h | 101 +++ drivers/crypto/montage/tsse/tsse_dev_drv.c | 328 ++++++++++ drivers/crypto/montage/tsse/tsse_dev_drv.h | 25 + drivers/crypto/montage/tsse/tsse_dev_mgr.c | 229 +++++++ drivers/crypto/montage/tsse/tsse_fw_service.c | 163 +++++ drivers/crypto/montage/tsse/tsse_fw_service.h | 17 + drivers/crypto/montage/tsse/tsse_ipc.c | 221 +++++++ drivers/crypto/montage/tsse/tsse_ipc.h | 112 ++++ drivers/crypto/montage/tsse/tsse_irq.c | 30 + drivers/crypto/montage/tsse/tsse_irq.h | 30 + drivers/crypto/montage/tsse/tsse_log.h | 24 + drivers/crypto/montage/tsse/tsse_service.c | 30 + drivers/crypto/montage/tsse/tsse_service.h | 16 + drivers/crypto/montage/tsse/tsse_vuart.c | 596 ++++++++++++++++++ drivers/crypto/montage/tsse/tsse_vuart.h | 23 + drivers/crypto/montage/tsse/tsse_vuart_regs.h | 72 +++ 22 files changed, 2049 insertions(+) create mode 100644 drivers/crypto/montage/Kconfig create mode 100644 drivers/crypto/montage/Makefile create mode 100644 drivers/crypto/montage/tsse/Kconfig create mode 100644 drivers/crypto/montage/tsse/Makefile create mode 100644 drivers/crypto/montage/tsse/tsse_dev.h create mode 100644 drivers/crypto/montage/tsse/tsse_dev_drv.c create mode 100644 drivers/crypto/montage/tsse/tsse_dev_drv.h create mode 100644 drivers/crypto/montage/tsse/tsse_dev_mgr.c create mode 100644 drivers/crypto/montage/tsse/tsse_fw_service.c create mode 100644 drivers/crypto/montage/tsse/tsse_fw_service.h create mode 100644 drivers/crypto/montage/tsse/tsse_ipc.c create mode 100644 drivers/crypto/montage/tsse/tsse_ipc.h create mode 100644 drivers/crypto/montage/tsse/tsse_irq.c create mode 100644 drivers/crypto/montage/tsse/tsse_irq.h create mode 100644 drivers/crypto/montage/tsse/tsse_log.h create mode 100644 drivers/crypto/montage/tsse/tsse_service.c create mode 100644 drivers/crypto/montage/tsse/tsse_service.h create mode 100644 drivers/crypto/montage/tsse/tsse_vuart.c create mode 100644 drivers/crypto/montage/tsse/tsse_vuart.h create mode 100644 drivers/crypto/montage/tsse/tsse_vuart_regs.h diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index c761952f0dc6..b03f7ed92793 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -796,5 +796,6 @@ config CRYPTO_DEV_SA2UL source "drivers/crypto/aspeed/Kconfig" source "drivers/crypto/starfive/Kconfig" +source "drivers/crypto/montage/Kconfig" endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index d859d6a5f3a4..94c8b187f739 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -51,3 +51,4 @@ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += intel/ obj-y += starfive/ +obj-y += montage/ diff --git a/drivers/crypto/montage/Kconfig b/drivers/crypto/montage/Kconfig new file mode 100644 index 000000000000..e8e4b287a792 --- /dev/null +++ b/drivers/crypto/montage/Kconfig @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +source "drivers/crypto/montage/tsse/Kconfig" diff --git a/drivers/crypto/montage/Makefile b/drivers/crypto/montage/Makefile new file mode 100644 index 000000000000..a50415fe10c7 --- /dev/null +++ b/drivers/crypto/montage/Makefile @@ -0,0 +1,3 @@ +# 
SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CRYPTO_DEV_TSSE) += tsse/ diff --git a/drivers/crypto/montage/tsse/Kconfig b/drivers/crypto/montage/tsse/Kconfig new file mode 100644 index 000000000000..5854f8e4525c --- /dev/null +++ b/drivers/crypto/montage/tsse/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CRYPTO_DEV_TSSE + tristate "Support for Montage(R) TSSE" + depends on X86 && PCI + select FW_LOADER + help + Support for Montage(R) TSSE for accelerating crypto workloads. + + To compile this as a module, choose M here. \ No newline at end of file diff --git a/drivers/crypto/montage/tsse/Makefile b/drivers/crypto/montage/tsse/Makefile new file mode 100644 index 000000000000..d67ffde3a5b0 --- /dev/null +++ b/drivers/crypto/montage/tsse/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# This file is part of tsse driver for Linux +# +# Copyright © 2023 Montage Technology. All rights reserved. + +obj-m += tsse.o + +tsse-objs := tsse_dev_mgr.o \ + tsse_ipc.o \ + tsse_fw_service.o \ + tsse_service.o \ + tsse_irq.o \ + tsse_dev_drv.o \ + tsse_vuart.o diff --git a/drivers/crypto/montage/tsse/tsse_dev.h b/drivers/crypto/montage/tsse/tsse_dev.h new file mode 100644 index 000000000000..d1dafee61300 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_DEV_H__ +#define __TSSE_DEV_H__ +#include +#include +#include +#include +#include +#include +#include +#include "tsse_ipc.h" + +#define TSSE_PCI_MAX_BARS 4 +#define TSSE_FW_VERSION_LEN 32 +struct tsse_bar { + void __iomem *virt_addr; + resource_size_t addr; + resource_size_t size; +}; +struct tsse_dev_pci { + struct pci_dev *pci_dev; + struct tsse_bar bars[TSSE_PCI_MAX_BARS]; + u8 revid; +}; +enum tsse_dev_status_bit { + TSSE_DEV_STATUS_STARTING = 0, + TSSE_DEV_STATUS_STARTED = 1 + +}; +struct tsse_qpairs_bank { + struct tsse_dev *tsse_dev; + void __iomem *reg_base; + + u32 num_qparis; + u32 irq_vec; +}; +struct tsse_dev { + struct module *owner; + struct dentry *debugfs_dir; + unsigned long status; + struct list_head list; + struct tsse_dev_pci tsse_pci_dev; + struct tsse_qpairs_bank qpairs_bank; + atomic_t ref_count; + bool is_vf; + int id; + u32 num_irqs; + u32 num_vfs; + struct uart_port *port; + struct tsse_ipc *ipc; + void *adi; + void *mbx_hw; + const struct firmware *fw; + char fw_version[TSSE_FW_VERSION_LEN]; +}; +#define TSSEDEV_TO_DEV(tssedev) (&((tssedev)->tsse_pci_dev.pci_dev->dev)) +#define TSSE_DEV_BARS(tssedev) ((tssedev)->tsse_pci_dev.bars) + +#include "tsse_log.h" + +struct list_head *tsse_devmgr_get_head(void); + +int tsse_dev_get(struct tsse_dev *tsse_dev); +void tsse_dev_put(struct tsse_dev *tsse_dev); +int tsse_devmgr_add_dev(struct tsse_dev *tsse_dev); +void tsse_devmgr_rm_dev(struct tsse_dev *tdev); +int tsse_prepare_restart_dev(struct tsse_dev *tdev); +int tsse_start_dev(struct tsse_dev *tdev); +struct tsse_dev *get_tssedev(int id); + +static inline struct tsse_dev *pci_to_tsse_dev(struct pci_dev *pci_dev) +{ + return (struct tsse_dev *)pci_get_drvdata(pci_dev); +} + +static inline int tsse_get_cur_node(void) +{ + int cpu, node; + + cpu = get_cpu(); + node = topology_physical_package_id(cpu); + put_cpu(); + + return node; +} + +static inline int tsse_dev_started(struct tsse_dev *tdev) +{ + return test_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); +} +static inline 
int tsse_dev_in_use(struct tsse_dev *tdev) +{ + return atomic_read(&tdev->ref_count) != 0; +} +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.c b/drivers/crypto/montage/tsse/tsse_dev_drv.c new file mode 100644 index 000000000000..9e914576a129 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "tsse_dev_drv.h" +#include "tsse_vuart.h" +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +static DEFINE_IDA(tsse_ida); + +static inline void tsse_qpair_enable_pf(struct tsse_dev *tdev, bool enable) +{ + writel(enable ? 1 : 0, + TSSE_DEV_BARS(tdev)[2].virt_addr + 0x5780000 + 0x50000); +} +static int tsse_sriov_disable(struct tsse_dev *tdev) +{ + pci_disable_sriov(tdev->tsse_pci_dev.pci_dev); + tsse_qpair_enable_pf(tdev, true); + + return 0; +} + +static int tsse_sriov_configure(struct pci_dev *pdev, int num_vfs_param) +{ + int totalvfs = pci_sriov_get_totalvfs(pdev); + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + int ret = 0; + + if ((!tdev) || (num_vfs_param < 0) || (totalvfs <= 0)) { + dev_err(&pdev->dev, + "%s %d: failed to config sriov, tdev=%p totalvfs=%d num_vfs_param=%d\n", + __func__, __LINE__, tdev, totalvfs, num_vfs_param); + return -EBADE; + } + + if (num_vfs_param > totalvfs) + num_vfs_param = totalvfs; + + dev_info(&pdev->dev, "%s %d: has total %d vfs, and enable %d vfs\n", + __func__, __LINE__, totalvfs, num_vfs_param); + + if ((num_vfs_param > TSSE_PF_MAX_IRQ_NUM) || + (num_vfs_param > TSSE_PF_MAX_QPAIR_NUM)) { + tsse_dev_err( + tdev, + "vfs number is greater than pf's \"max_irq_num=%d or max_qpairs_num=%d\"\n", + TSSE_PF_MAX_IRQ_NUM, TSSE_PF_MAX_QPAIR_NUM); + return -EBADE; + } + + if (!tsse_dev_started(tdev)) { + dev_err(&pdev->dev, "%s %d: device is not started\n", __func__, + __LINE__); + return -EBADE; + } + + if (tsse_dev_in_use(tdev)) { + dev_err(&pdev->dev, "%s %d: device is busy\n", __func__, + __LINE__); + return -EBUSY; + } + + tsse_sriov_disable(tdev); + + tsse_prepare_restart_dev(tdev); + + tdev->num_vfs = num_vfs_param; + + if (tdev->num_vfs > 0) { + tdev->num_irqs = TSSE_SRIOV_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_SRIOV_PF_MAX_QPAIR_NUM; + } else { + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + } + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + ret = tsse_start_dev(tdev); + if (ret) { + dev_err(&pdev->dev, "%s %d: failed to start the device\n", + __func__, __LINE__); + return ret; + } + + if (num_vfs_param > 0) { + tsse_qpair_enable_pf(tdev, false); + pci_enable_sriov(pdev, num_vfs_param); + } + + return num_vfs_param; +} + +static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int status = 0; + int bar; + u32 tmp_val; + struct tsse_dev *tdev; + + if (!pdev->is_physfn) { + dev_err(&pdev->dev, "%s %d: this is not Physical fn\n", + __func__, __LINE__); + return -EPERM; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + dev_err(&pdev->dev, + "%s %d: invalid numa configuration for tsse\n", + __func__, __LINE__); + return -EINVAL; + } + + tdev = kzalloc_node(sizeof(*tdev), GFP_KERNEL, 
dev_to_node(&pdev->dev)); + + if (!tdev) + return -ENOMEM; + + status = pcim_enable_device(pdev); + + if (status) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + goto out_err; + } + + pci_set_master(pdev); + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48))) { + if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, + "failed to set tsse dma address width\n"); + status = -EFAULT; + goto out_err; + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(48)); + } + + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + + status = pcim_iomap_regions(pdev, BIT(0) | BIT(2), TSSE_DEV_NAME); + if (status) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out_err; + } + + for (bar = 2; bar < 4;) { + TSSE_DEV_BARS(tdev)[bar].addr = pci_resource_start(pdev, bar); + TSSE_DEV_BARS(tdev)[bar].size = pci_resource_len(pdev, bar); + TSSE_DEV_BARS(tdev) + [bar].virt_addr = pcim_iomap_table(pdev)[bar]; + + dev_info(&pdev->dev, + "bar[%d]: addr=0x%llx, size=0x%llx, virt_addr=0x%lx\n", + bar, TSSE_DEV_BARS(tdev)[bar].addr, + TSSE_DEV_BARS(tdev)[bar].size, + (ulong)TSSE_DEV_BARS(tdev)[bar].virt_addr); + + bar += 2; + } + + tdev->owner = THIS_MODULE; + tdev->is_vf = false; + tdev->tsse_pci_dev.pci_dev = pdev; + tdev->id = ida_alloc(&tsse_ida, GFP_KERNEL); + if (tdev->id < 0) { + dev_err(&pdev->dev, "Unable to get id\n"); + status = tdev->id; + goto out_err; + } + + pci_set_drvdata(pdev, tdev); + + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + tdev->qpairs_bank.irq_vec = TSSE_PF_QPAIR_START_IRQ_VECTOR; + tdev->qpairs_bank.reg_base = + TSSE_DEV_BARS(tdev)[2].virt_addr + TSSE_PF_QPAIR_REG_BASE; + + tsse_qpair_enable_pf(tdev, true); + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + if (tsse_devmgr_add_dev(tdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_devmgr failed to add new device\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_ida_free; + } + + if (vuart_init_port(pdev)) { + dev_err(&pdev->dev, + "%s %d: vuart_init_port failed to init vuart.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_port_init; + } + /* Its result not break driver init process */ + if (!tsse_fw_load(pdev)) + get_firmware_version((char *)tdev->fw->data, tdev->fw->size, tdev->fw_version); + + if (tsse_ipc_init(pdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_ipc_init failed to tsse_ipc.\n", __func__, + __LINE__); + status = -EFAULT; + goto out_err_ipc; + } + + tsse_dev_info(tdev, "successful\n"); + + pci_read_config_dword(pdev, 0x720, &tmp_val); + tsse_dev_dbg(tdev, "the value of FILTER_MASK_2_REG is 0x%x\n", tmp_val); + + return 0; + +out_err_ipc: + vuart_uninit_port(pdev); +out_err_port_init: + tsse_devmgr_rm_dev(tdev); +out_err_ida_free: + ida_free(&tsse_ida, tdev->id); +out_err: + kfree(tdev); + return status; +} + +static void device_remove(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + + pr_info("%s %d: pci_dev 0x%lx tsse_dev 0x%lx\n", __func__, __LINE__, + (ulong)pdev, (ulong)tdev); + + tsse_sriov_disable(tdev); + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + } + tsse_ipc_deinit(tdev); + vuart_uninit_port(pdev); + tsse_devmgr_rm_dev(tdev); + ida_free(&tsse_ida, tdev->id); + kfree(tdev); + dev_info(&pdev->dev, "%s %d: 
successful\n", __func__, __LINE__); +} + +static const struct pci_device_id pci_ids[] = { + { + PCI_DEVICE(0x1b00, 0xc011), + }, + { + PCI_DEVICE(0x1b00, 0xd011), + }, + { 0 } +}; + +static struct pci_driver pci_driver = { + .name = TSSE_DEV_NAME, + .id_table = pci_ids, + .probe = device_probe, + .remove = device_remove, + .sriov_configure = tsse_sriov_configure, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +static int __init tsse_init(void) +{ + int status; + + status = vuart_register(); + if (status) { + pr_err("vuart_register failed[%d].\n", status); + return status; + } + + status = pci_register_driver(&pci_driver); + if (status) { + vuart_unregister(); + return status; + } + + pr_info(KBUILD_MODNAME ": loaded.\n"); + + return 0; +} + +static void __exit tsse_exit(void) +{ + pci_unregister_driver(&pci_driver); + vuart_unregister(); + + pr_info(KBUILD_MODNAME ": unloaded.\n"); +} + +module_init(tsse_init); +module_exit(tsse_exit); + +MODULE_AUTHOR("montage-tech.com"); +MODULE_DESCRIPTION("TSSE device driver"); +MODULE_VERSION("1.0.0"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(TSSE_FIRMWARE); diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.h b/drivers/crypto/montage/tsse/tsse_dev_drv.h new file mode 100644 index 000000000000..6a05572a3849 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_DEV_DRV_H__ +#define __TSSE_DEV_DRV_H__ +#define TSSE_DEV_NAME "tsse" + +// TODO: need to support full qpairs +#define TSSE_PF_MAX_QPAIR_NUM 16 + +#define TSSE_PF_MAX_IRQ_NUM 96 +#define TSSE_PF_QPAIR_START_IRQ_VECTOR 32 + +#define TSSE_SRIOV_PF_MAX_QPAIR_NUM 0 +#define TSSE_SRIOV_PF_MAX_IRQ_NUM 16 + +#define TSSE_PF_QPAIR_REG_BASE 0x5700000 + +#include "tsse_dev.h" + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_mgr.c b/drivers/crypto/montage/tsse/tsse_dev_mgr.c new file mode 100644 index 000000000000..159f75c8f46f --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_mgr.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
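+ *
+ * Device manager: keeps every probed TSSE device on the global
+ * tsse_dev_table list and implements start/stop, restart preparation
+ * and module reference counting (including the PF reference taken on
+ * behalf of each active VF).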
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" +static DEFINE_MUTEX(tsse_dev_table_lock); +static LIST_HEAD(tsse_dev_table); + +static DEFINE_MUTEX(algs_lock); + +static inline void tsse_list_del(struct list_head *entry) +{ + WRITE_ONCE(entry->next->prev, entry->prev); + WRITE_ONCE(entry->prev->next, entry->next); +} +static inline void tsse_list_add(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + WRITE_ONCE(new->next, next); + WRITE_ONCE(new->prev, prev); + mb(); /* Make sure new node updates first */ + WRITE_ONCE(next->prev, new); + WRITE_ONCE(prev->next, new); +} + +static inline void tsse_list_add_tail(struct list_head *new, + struct list_head *head) +{ + tsse_list_add(new, head->prev, head); +} + +static int tsse_dev_pf_get(struct tsse_dev *vf_tsse_dev) +{ + int ret = 0; + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return 0; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_add_return(1, &pf_tsse_dev->ref_count) == 1) { + if (!try_module_get(pf_tsse_dev->owner)) + ret = -EFAULT; + } + } + return ret; +} + +static void tsse_dev_pf_put(struct tsse_dev *vf_tsse_dev) +{ + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_sub_return(1, &pf_tsse_dev->ref_count) == 0) + module_put(pf_tsse_dev->owner); + } +} + +int tsse_dev_get(struct tsse_dev *tdev) +{ + int ref_count = atomic_add_return(1, &tdev->ref_count); + + if (!tsse_dev_started(tdev)) { + atomic_sub(1, &tdev->ref_count); + return -EAGAIN; + } + + if (ref_count == 1) { + if (!try_module_get(tdev->owner)) + return -EFAULT; + if (tdev->is_vf) + return tsse_dev_pf_get(tdev); + } + return 0; +} +void tsse_dev_put(struct tsse_dev *tdev) +{ + if (atomic_sub_return(1, &tdev->ref_count) == 0) { + module_put(tdev->owner); + if (tdev->is_vf) + tsse_dev_pf_put(tdev); + } +} + +int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) +{ + int times, max_retry = 150; + + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + + for (times = 0; times < max_retry; times++) { + if (!tsse_dev_in_use(tdev)) + break; + msleep(100); + } + + if (times >= max_retry) { + tsse_dev_err(tdev, "Failed to stop busy device\n"); + if (busy_exit) + return -EBUSY; + } + if (tdev->qpairs_bank.num_qparis != 0) { + mutex_lock(&tsse_dev_table_lock); + tsse_list_del(&tdev->list); + mutex_unlock(&tsse_dev_table_lock); + tsse_dev_info(tdev, "removed from active dev table list\n"); + } + + tsse_dev_info(tdev, "device stopped\n"); + + return 0; +} + +int tsse_start_dev(struct tsse_dev *tdev) +{ + struct tsse_dev *tmp_dev; + struct list_head *prev_node = &tsse_dev_table; + int ret = 0; + + if (tdev->qpairs_bank.num_qparis == 0) { + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_dev_info(tdev, "device started\n"); + return 0; + } + + set_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + + mutex_lock(&tsse_dev_table_lock); + + list_for_each_entry(tmp_dev, &tsse_dev_table, list) { + if (tmp_dev == tdev) { + ret = -EEXIST; + tsse_dev_err(tdev, + "The device cannot be added repeatedly\n"); + goto clear_status; + } + } + + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + 
tsse_list_add(&tdev->list, prev_node, prev_node->next); + + tsse_dev_info(tdev, "device started\n"); + mutex_unlock(&tsse_dev_table_lock); + + return 0; +clear_status: + mutex_unlock(&tsse_dev_table_lock); + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + return ret; +} +EXPORT_SYMBOL_GPL(tsse_start_dev); + +int tsse_prepare_restart_dev(struct tsse_dev *tdev) +{ + return tsse_stop_dev(tdev, false); +} +EXPORT_SYMBOL_GPL(tsse_prepare_restart_dev); + +void tsse_devmgr_rm_dev(struct tsse_dev *tdev) +{ + tsse_stop_dev(tdev, false); + tsse_dev_free_irq_vectors(tdev); + msleep(300); +} +EXPORT_SYMBOL_GPL(tsse_devmgr_rm_dev); + +int tsse_devmgr_add_dev(struct tsse_dev *tdev) +{ + int ret; + + ret = tsse_dev_alloc_irq_vectors(tdev); + if (ret == 0) { + atomic_set(&tdev->ref_count, 0); + tdev->status = 0; + ret = tsse_start_dev(tdev); + + if (ret != 0) + tsse_dev_free_irq_vectors(tdev); + } + return ret; +} +EXPORT_SYMBOL_GPL(tsse_devmgr_add_dev); + +struct list_head *tsse_devmgr_get_head(void) +{ + return &tsse_dev_table; +} + +struct tsse_dev *get_tssedev(int id) +{ + struct list_head *itr; + struct tsse_dev *ptr; + + mutex_lock(&tsse_dev_table_lock); + + list_for_each(itr, &tsse_dev_table) { + ptr = list_entry(itr, struct tsse_dev, list); + break; + } + + mutex_unlock(&tsse_dev_table_lock); + + return ptr; +} +EXPORT_SYMBOL_GPL(get_tssedev); diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.c b/drivers/crypto/montage/tsse/tsse_fw_service.c new file mode 100644 index 000000000000..fc3907a7c503 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
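+ *
+ * Firmware service: fetches the firmware image with request_firmware()
+ * and, when the device asks for it via an IPC_MESSAGE_BOOT request,
+ * copies the image into device memory behind BAR2 and acknowledges the
+ * request through the host-to-main mailbox.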
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev.h" +#include "tsse_service.h" + +#define SEARCH_PATTERN "MT_CFG_BUILD_VERSION_DETAIL" +#define SEARCH_PATTERN_LEN 28 + +int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + u8 *h2d; + u32 int_reg; + u32 rc; + + mutex_lock(&tsseipc->list_lock); + + int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + if ((int_reg & IPC_REGISTER_INT_SET) != 0) { + rc = -1; + mutex_unlock(&tsseipc->list_lock); + return rc; + } + h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); + memcpy_toio(h2d, msg, sizeof(struct ipc_header)); + memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + msg->header.i_len - sizeof(struct ipc_header)); + writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + + dev_info(tsseipc->dev, "notify device to get firmware\n"); + mutex_unlock(&tsseipc->list_lock); + return 0; +} + +void fw_free(void *msg_t) +{ + struct tsse_msg *tssemsg; + struct ipc_msg *payload; + + payload = (struct ipc_msg *)msg_t; + tssemsg = container_of(payload, struct tsse_msg, ipc_payload); + + kvfree(tssemsg); +} + +int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version) +{ + char *pattern; + char *space_ch = " "; + uint32_t pattern_i = 0, buffer_i = 0; + uint32_t pattern_len = SEARCH_PATTERN_LEN - 1; // Not include "\0" + uint32_t version_start = 0; + uint32_t version_len = 0; + + pattern = kzalloc(SEARCH_PATTERN_LEN, GFP_KERNEL); + if (!pattern) + return -1; + + snprintf(pattern, SEARCH_PATTERN_LEN, SEARCH_PATTERN); + + while (buffer_i < buffer_len) { + if (pattern[pattern_i] == fw_buffer[buffer_i]) { + buffer_i++; + pattern_i++; + } + if (pattern_i == pattern_len) { + break; // pattern found + } else if ((buffer_i < buffer_len) && + (pattern[pattern_i] != fw_buffer[buffer_i])) { + // mismatch after pattern_i matches + if (pattern_i != 0) { + // since the pattern has no common prefix, when mismatch, + // the next compare should start from pattern beginning + pattern_i = 0; + } else { + buffer_i++; + } + } + } + kfree(pattern); + if (pattern_i == pattern_len) { + buffer_i++; + version_start = buffer_i; + while (buffer_i < buffer_len) { + if (fw_buffer[buffer_i] == space_ch[0]) { + version_len = buffer_i - version_start; + strscpy(fw_version, fw_buffer + version_start, version_len + 1); + return 0; + } + buffer_i++; + } + } + return -1; +} + +void fw_service(void *tsseipc_t, void *msg_t) +{ + void __iomem *fw; + uint32_t size; + uint32_t task_offset; + struct fw_load *fw_task; + struct tsse_dev *tdev; + struct tsse_ipc *tsseipc = (struct tsse_ipc *)tsseipc_t; + struct ipc_msg *msg = (struct ipc_msg *)msg_t; + + task_offset = sizeof(struct msg_info); + fw_task = (struct fw_load *)(msg->i_data + + task_offset / sizeof(uint32_t)); + + tdev = pci_to_tsse_dev(tsseipc->pdev); + if (!tdev || !tdev->fw) { + fw_task->result = 1; + fw_task->size = 0; + dev_info(tsseipc->dev, "firmware loading failed\n"); + fw_send_msg(tsseipc, msg); + fw_free(msg); + return; + } + + fw_task->result = 0; + fw_task->size = tdev->fw->size; + size = tdev->fw->size; + fw = tsseipc->virt_addr + fw_task->offset + FW_BASE; + + memcpy_toio((u8 *)fw, tdev->fw->data, size); + dev_info(tsseipc->dev, "firmware loading done\n"); + fw_send_msg(tsseipc, msg); + fw_free(msg); + + dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); + + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + } +} + +int 
tsse_fw_load(struct pci_dev *pdev)
+{
+	int result;
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+
+	result = request_firmware(&tdev->fw, TSSE_FIRMWARE, &pdev->dev);
+	if (result)
+		dev_err(&pdev->dev, "%s failed\n", __func__);
+	return result;
+}
diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.h b/drivers/crypto/montage/tsse/tsse_fw_service.h
new file mode 100644
index 000000000000..973ca6a0bce9
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_fw_service.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */
+
+#ifndef __TSSE_FW_SERVICE_H__
+#define __TSSE_FW_SERVICE_H__
+
+#define FW_BASE 0x7000000
+#define TSSE_FIRMWARE "tsse_firmware.bin"
+
+void fw_service(void *tsseipc_t, void *msg_t);
+int tsse_fw_load(struct pci_dev *pdev);
+int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version);
+#endif
diff --git a/drivers/crypto/montage/tsse/tsse_ipc.c b/drivers/crypto/montage/tsse/tsse_ipc.c
new file mode 100644
index 000000000000..0f92c096f211
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_ipc.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+
+#include "tsse_ipc.h"
+#include "tsse_dev.h"
+#include "tsse_service.h"
+
+struct tsse_msg *get_msginf(void __iomem *d2h)
+{
+	uint32_t u_len;
+	struct tsse_msg *tssemsg;
+
+	struct ipc_header *ipc_info = (struct ipc_header *)d2h;
+
+	u_len = ipc_info->i_len - sizeof(struct ipc_header);
+
+	tssemsg = (struct tsse_msg *)(kzalloc(sizeof(struct tsse_msg) + u_len,
+					      GFP_ATOMIC));
+
+	if (!tssemsg) {
+		pr_err("%s(): tssemsg kzalloc failed\n", __func__);
+		return NULL;
+	}
+
+	tssemsg->ipc_payload.header.inst_id = ipc_info->inst_id;
+	tssemsg->ipc_payload.header.tgid = ipc_info->tgid;
+	tssemsg->ipc_payload.header.i_len = ipc_info->i_len;
+
+	return tssemsg;
+}
+
+void ipc_receive_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg)
+{
+	uint32_t u_len = msg->header.i_len - sizeof(struct ipc_header);
+	uint32_t *msg_data = NULL;
+	void __iomem *d2h = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET;
+
+	msg_data = (uint32_t *)(d2h + sizeof(struct ipc_header));
+	memcpy_fromio(msg->i_data, msg_data, u_len);
+	return;
+
+}
+
+int msg_rout(struct tsse_ipc *tsseipc, struct tsse_msg *tssemsg)
+{
+	int ret = 0;
+	struct ipc_msg *msg;
+	struct msg_info *info;
+	uint32_t msg_class;
+
+	msg = &tssemsg->ipc_payload;
+
+	ipc_receive_msg(tsseipc, msg);
+	info = (struct msg_info *)msg->i_data;
+	msg_class = info->msg_class;
+	if (msg_class == IPC_MESSAGE_BOOT) {
+		ret = service_rout(tsseipc, msg);
+		return ret;
+	}
+
+	return ret;
+}
+
+static irqreturn_t tsse_ipc_d2h_irqhandler(int irq, void *dev_id)
+{
+	struct tsse_ipc *tsseipc = (struct tsse_ipc *)dev_id;
+
+	writel(0x0, tsseipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET);
+	tasklet_hi_schedule(&tsseipc->ipc_handle);
+	dev_dbg(tsseipc->dev, "irq%d\n", irq);
+	return IRQ_HANDLED;
+}
+
+bool check_send_enbit(struct tsse_ipc *tsseipc)
+{
+	u32 int_reg;
+
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	if ((int_reg & IPC_REGISTER_INT_SET) == 0)
+		return true;
+	else
+		return false;
+}
+EXPORT_SYMBOL(check_send_enbit);
+
+void notify_device(struct tsse_ipc *tsseipc)
+{
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	return;
+
+}
+EXPORT_SYMBOL(notify_device); + +void ipc_send_msg(struct tsse_ipc *tsseipc, struct ipc_data *msg) +{ + u8 *h2d = NULL; + + h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); + memcpy_toio(h2d, msg, sizeof(struct ipc_header)); + memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_ptr, + msg->header.i_len - sizeof(struct ipc_header)); + return; + +} + +void ipc_hw_init(struct tsse_ipc *hw_ipc) +{ + writel(0x1, hw_ipc->virt_addr + MAIN2HOST_INTR_ENABLE_OFFSET); + writel(0x0, hw_ipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + writel(0x0, hw_ipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET); +} + +int ipc_init_msg(struct tsse_ipc *tsseipc) +{ + u8 *h2d; + u32 int_reg; + u32 rc; + u32 cmd_len; + struct ipc_msg *msg; + struct msg_info *info_msg; + + msg = (struct ipc_msg *)(kzalloc( + sizeof(struct ipc_msg) + sizeof(struct msg_info), GFP_ATOMIC)); + + if (!msg) { + pr_info("%s(): msg kzalloc failed\n", __func__); + return -1; + } + cmd_len = sizeof(uint32_t); + msg->header.i_len = + sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len; + info_msg = (struct msg_info *)msg->i_data; + info_msg->msg_class = IPC_MESSAGE_BASIC; + *(msg->i_data + sizeof(struct msg_info) / 4) = IPC_BASIC_CMD_HOST_INIT; + + mutex_lock(&tsseipc->list_lock); + int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + if ((int_reg & IPC_REGISTER_INT_SET) != 0) { + rc = -1; + mutex_unlock(&tsseipc->list_lock); + kfree(msg); + return rc; + } + h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); + + memcpy_toio(h2d, msg, sizeof(struct ipc_header)); + memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + sizeof(struct msg_info) + sizeof(uint32_t)); + + writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + mutex_unlock(&tsseipc->list_lock); + kfree(msg); + + return 0; +} + +static void tsse_ipc_bh_handler(unsigned long data) +{ + struct tsse_ipc *tsseipc = (struct tsse_ipc *)data; + + void __iomem *d2h_payload = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET; + struct tsse_msg *msg_tsse = get_msginf(d2h_payload); + + if (!msg_tsse) { + dev_err(tsseipc->dev, "get_msginf is NULL\n"); + return; + } + msg_rout(tsseipc, msg_tsse); +} + +int tsse_ipc_init(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + struct tsse_ipc *ipc; + int rc; + + ipc = devm_kzalloc(&pdev->dev, sizeof(*ipc), GFP_KERNEL); + if (ipc == NULL) + return -ENOMEM; + tdev->ipc = ipc; + ipc->pdev = pdev; + ipc->dev = &pdev->dev; + ipc->virt_addr = TSSE_DEV_BARS(tdev)[2].virt_addr; + + mutex_init(&ipc->list_lock); + tasklet_init(&(ipc->ipc_handle), tsse_ipc_bh_handler, + (ulong)(ipc)); + + rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL, + tsse_ipc_d2h_irqhandler, IRQF_SHARED, + "pf-ipc", ipc); + ipc_hw_init(ipc); + ipc_init_msg(ipc); + + return rc; +} +EXPORT_SYMBOL_GPL(tsse_ipc_init); + +void tsse_ipc_deinit(void *tdev_t) +{ + struct tsse_ipc *tsseipc; + struct pci_dev *pdev; + struct tsse_dev *tdev; + + tdev = tdev_t; + tsseipc = tdev->ipc; + pdev = tsseipc->pdev; + free_irq(pci_irq_vector(pdev, 0), tdev->ipc); + return; + +} +EXPORT_SYMBOL_GPL(tsse_ipc_deinit); diff --git a/drivers/crypto/montage/tsse/tsse_ipc.h b/drivers/crypto/montage/tsse/tsse_ipc.h new file mode 100644 index 000000000000..59dcbf6eafc4 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_ipc.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
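+ *
+ * Host/device IPC definitions: mailbox register offsets, doorbell bits
+ * and the on-wire message layout (ipc_header, msg_info and the
+ * class-specific payload) shared by the HOST2MAIN and MAIN2HOST
+ * directions.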
+ */ + +#ifndef __TM_HOST_IPC_H__ +#define __TM_HOST_IPC_H__ + +#include +#include +#include + +#define TSSE_PASID_SVA + +#define HOST2MAIN_INTR_SET_OFFSET 0x2000 +#define HOST2MAIN_INTR_ENABLE_OFFSET 0x2004 +#define HOST2MAIN_ACK_INTR_CLR_OFFSET 0x2008 +#define HOST2MAIN_ACK_INTR_ENABLE_OFFSET 0x200c +#define HOST2MAIN_VLD_INTR_STATUS_OFFSET 0x2010 +#define HOST2MAIN_ACK_INTR_STATUS_OFFSET 0x2014 +#define MSIX_MASK_EN_REG_OFFSET 0x2020 +#define INTR_MASK_BIT_OFFSET 0x2024 +#define INTR_PENDING_BIT_OFFSET 0x2028 +#define HOST2MAIN_IPC_OFFSET 0x2400 + +#define MAIN2HOST_INTR_SET_OFFSET 0x3000 +#define MAIN2HOST_INTR_ENABLE_OFFSET 0x3004 +#define MAIN2HOST_ACK_INTR_CLR_OFFSET 0x3008 +#define MAIN2HOST_ACK_INTR_ENABLE_OFFSET 0x300c +#define MAIN2HOST_VEN_MSI_FUNC_NUM_OFFSET 0x3010 +#define MAIN2HOST_VEN_MSI_VFUNC_ACTIVE_OFFSET 0x3014 +#define MAIN2HOST_IPC_OFFSET 0x3400 + +#define IPC_REGISTER_INT_SET BIT(0) +#define IPC_REGISTER_INT_MASK BIT(1) + +enum IPC_BASIC_CMD { + IPC_BASIC_CMD_HOST_INIT = 0x1, + IPC_BASIC_CMD_PING = 0x2, +}; + +enum IPC_BOOT_CMD { + IPC_BOOT_CMD_GET_FIRMWARE = 0x1, +}; + +enum IPC_MESSAGE_CLASS { + IPC_MESSAGE_BASIC = 1, + IPC_MESSAGE_BOOT, + IPC_MESSAGE_CLASS_NUM, +}; + +struct ipc_header { + uint32_t inst_id; + pid_t tgid; + uint32_t i_len; + uint32_t pasid : 20; + uint32_t reserved_1 : 4; + uint32_t pasid_en : 8; + + uint32_t reserved[2]; +}; + +struct ipc_data { + struct ipc_header header; + void *i_ptr; +}; + +struct ipc_msg { + struct ipc_header header; + uint32_t i_data[]; +}; + +struct fw_load { + uint32_t command; + uint32_t result; + uint8_t name[32]; + uint32_t offset; + uint32_t size; +}; + +struct msg_info { + uint32_t host_id; + uint32_t msg_class; + uint32_t flags; + uint32_t reserved[3]; +}; + +struct ipc_layout { + struct ipc_header header; + struct msg_info info; +}; + +struct tsse_msg { + struct list_head list; + struct ipc_msg ipc_payload; +}; + +struct tsse_ipc { + struct device *dev; + struct pci_dev *pdev; + void __iomem *virt_addr; + struct mutex list_lock; + struct tasklet_struct ipc_handle; +}; + +int tsse_ipc_init(struct pci_dev *pdev); +void tsse_ipc_deinit(void *tdev); +bool check_send_enbit(struct tsse_ipc *tsseipc); +void notify_device(struct tsse_ipc *tsseipc); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_irq.c b/drivers/crypto/montage/tsse/tsse_irq.c new file mode 100644 index 000000000000..8cb94fea3da4 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" + +#undef TSSE_IRQ_DBG + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev) +{ + int request_num = tdev->num_irqs; + int irq_num = pci_alloc_irq_vectors(tdev->tsse_pci_dev.pci_dev, + request_num, request_num, + PCI_IRQ_MSIX); + + if (irq_num < 0) { + dev_err(TSSEDEV_TO_DEV(tdev), + "%s %d :failed to alloc MSIX interrupt vectors\n", + __func__, __LINE__); + return irq_num; + } + + return 0; +} diff --git a/drivers/crypto/montage/tsse/tsse_irq.h b/drivers/crypto/montage/tsse/tsse_irq.h new file mode 100644 index 000000000000..09bed4e6d58a --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
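+ *
+ * Helpers for allocating and freeing the device's MSI-X interrupt
+ * vectors.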
+ */
+
+#ifndef __TSSE_IRQ_H__
+#define __TSSE_IRQ_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include "tsse_dev.h"
+
+static inline void tsse_dev_free_irq_vectors(struct tsse_dev *tdev)
+{
+	pci_free_irq_vectors(tdev->tsse_pci_dev.pci_dev);
+}
+
+int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev);
+
+#endif
diff --git a/drivers/crypto/montage/tsse/tsse_log.h b/drivers/crypto/montage/tsse/tsse_log.h
new file mode 100644
index 000000000000..153cbe16374e
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_log.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */
+
+#ifndef __TSSE_LOG_H__
+#define __TSSE_LOG_H__
+
+#define tsse_dev_err(tssedev, fmt, ...) \
+	dev_err(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \
+		##__VA_ARGS__)
+#define tsse_dev_warn(tssedev, fmt, ...) \
+	dev_warn(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \
+		 ##__VA_ARGS__)
+#define tsse_dev_info(tssedev, fmt, ...) \
+	dev_info(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \
+		 ##__VA_ARGS__)
+#define tsse_dev_dbg(tssedev, fmt, ...) \
+	dev_dbg(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \
+		##__VA_ARGS__)
+
+#endif
diff --git a/drivers/crypto/montage/tsse/tsse_service.c b/drivers/crypto/montage/tsse/tsse_service.c
new file mode 100644
index 000000000000..64121a655803
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_service.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */
+#include
+#include "tsse_ipc.h"
+#include "tsse_fw_service.h"
+
+int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg)
+{
+	struct msg_info *info;
+	uint32_t msg_class;
+	int ret = 0;
+
+	info = (struct msg_info *)msg->i_data;
+	msg_class = info->msg_class;
+	switch (msg_class) {
+	case IPC_MESSAGE_BOOT:
+		fw_service(tsseipc, msg);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+
+}
diff --git a/drivers/crypto/montage/tsse/tsse_service.h b/drivers/crypto/montage/tsse/tsse_service.h
new file mode 100644
index 000000000000..d5fd87ee7dce
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_service.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */
+
+#ifndef __TSSE_SERVICE_H__
+#define __TSSE_SERVICE_H__
+
+#include "tsse_ipc.h"
+#include "tsse_fw_service.h"
+
+int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg);
+
+#endif
diff --git a/drivers/crypto/montage/tsse/tsse_vuart.c b/drivers/crypto/montage/tsse/tsse_vuart.c
new file mode 100644
index 000000000000..f49d4ffc9f3c
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_vuart.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "tsse_dev.h"
+#include "tsse_vuart_regs.h"
+#include "tsse_vuart.h"
+
+#ifdef DEBUG
+#define VUART_PRINT(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
+#else
+#define VUART_PRINT(fmt, ...)
+#endif + +#define TSSE_VUART_BAUD (38400) +#define TSSE_VUART_MAX_RX_COUNT (256) +#define BOTH_EMPTY (VUART_FSR_TXFIFOE | VUART_FSR_RXFIFO) +struct tsse_vuart { + struct uart_port port; + unsigned int tx_threshold; + unsigned int rx_threshold; + unsigned int tx_loadsz; + unsigned char shutdown; + unsigned char confige_done; +}; + +#define SERIAL_LSR_NAME "tsse_vuart" + +static struct uart_driver g_vuart_reg = { + .owner = THIS_MODULE, + .driver_name = SERIAL_LSR_NAME, + .dev_name = "ttyTSSE", + .nr = TSSE_VUART_MAX_DEV, +}; + +static unsigned int g_trigger_level[4] = { 0, 31, 63, 111 }; +static unsigned long g_line[TSSE_VUART_BITMAP_SIZE]; + +static unsigned int vuart_serial_in(struct uart_port *port, int offset) +{ + unsigned int ret = le32_to_cpu(readl(port->membase + offset)); +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, ret); +#endif + return ret; +} + +static void vuart_serial_out(struct uart_port *port, int offset, int value) +{ +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, value); +#endif + value = cpu_to_le32(value); + writel(value, port->membase + offset); +} + +static void vuart_wait_for_xmitr(struct uart_port *port) +{ + unsigned int status, tmout = 10000; + + for (;;) { + status = vuart_serial_in(port, VUART_FSR); + if (FIELD_GET(VUART_FSR_TXFIFOE, status)) + break; + if (--tmout == 0) { + pr_err("%s:timeout(10ms), TX is not empty.\n", + __func__); + break; + } + udelay(1); + touch_nmi_watchdog(); + } +} + +static unsigned int vuart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int lsr; + + spin_lock_irqsave(&port->lock, flags); + lsr = vuart_serial_in(port, VUART_FSR); + spin_unlock_irqrestore(&port->lock, flags); + + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; +} + +static void vuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int vuart_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void vuart_stop_tx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~VUART_IER_HETXEI; + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + int count; + + if (port->x_char) { + pr_err("x_char %d\n", port->x_char); + return; + } + + if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { + vuart_stop_tx(port); + return; + } + + count = vuart->tx_loadsz; + do { + vuart_serial_out(port, VUART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void vuart_start_tx(struct uart_port *port) +{ + unsigned int ier, fsr; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + if (uart_tx_stopped(port)) { + vuart_stop_tx(port); + return; + } + + fsr = vuart_serial_in(port, VUART_FSR); + VUART_PRINT("==>Existing Data number in TX FIFO %ld\n", + FIELD_GET(VUART_FSR_TFIFODN, fsr)); + VUART_PRINT("==>Existing Data number in RX FIFO %ld\n", + FIELD_GET(VUART_FSR_RFIFODN, fsr)); + if (fsr & VUART_FSR_TXFIFOE) + vuart_tx_chars(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_HETXEI | VUART_IER_HETXUI; + 
vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_throttle(struct uart_port *port) +{ +} + +static void vuart_unthrottle(struct uart_port *port) +{ +} + +static void vuart_stop_rx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~(VUART_IER_HERXTOI | VUART_IER_HETXDRI | VUART_IER_HERXOI); + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_enable_ms(struct uart_port *port) +{ +} + +static void vuart_break_ctl(struct uart_port *port, int ctl) +{ +} + +static irqreturn_t vuart_interrupt(int irq, void *port) +{ + int handled = 0; + struct uart_port *p = (struct uart_port *)port; + + if (p->handle_irq(p)) + handled = 1; + + return IRQ_RETVAL(handled); +} + +static void vuart_check_config_done(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (vuart_serial_in(port, VUART_CFG) == 1) + vuart->confige_done = 1; +} + +static int vuart_startup(struct uart_port *port) +{ + unsigned int ret, hcr, ier, fcr = 0; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (port->flags & UPF_SHARE_IRQ) + port->irqflags |= IRQF_SHARED; + ret = request_irq(port->irq, vuart_interrupt, port->irqflags, + "tsse_uart", port); + if (ret) + return ret; + + hcr = vuart_serial_in(port, VUART_HCR); + vuart->rx_threshold = FIELD_GET(VUART_HCR_RFIFOT, hcr); + vuart->tx_threshold = FIELD_GET(VUART_HCR_TFIFOT, hcr); + fcr |= FIELD_PREP(VUART_FCR_RFIFOT, vuart->rx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFOT, vuart->tx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFORST, 1); + fcr |= FIELD_PREP(VUART_FCR_RFIFORST, 1); + vuart_serial_out(port, VUART_FCR, fcr); + + vuart->rx_threshold = g_trigger_level[vuart->rx_threshold]; + vuart->tx_threshold = g_trigger_level[vuart->tx_threshold]; + + vuart_check_config_done(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_CCFGDI | VUART_IER_HETXDRI | VUART_IER_HERXTOI; + vuart_serial_out(port, VUART_IER, ier); + + vuart_serial_out(port, VUART_SCR, FIELD_PREP(VUART_SCR_SCR, 1)); + + vuart->shutdown = 0; + + return 0; +} + +static void vuart_shutdown(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + vuart->shutdown = 1; + vuart_stop_rx(port); + vuart_stop_tx(port); + free_irq(port->irq, port); + vuart_serial_out(port, VUART_SCR, 0); +} + +static void vuart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud; + unsigned long flags; + + if ((termios->c_cflag & CSIZE) != CS8) + pr_err("Warning:termios is not CS8.\n"); + + baud = uart_get_baud_rate(port, termios, old, 0, TSSE_VUART_BAUD); + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = + VUART_FSR_TXFIFOE | VUART_FSR_TXOE | VUART_FSR_RXDR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= VUART_FSR_RXUE; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= VUART_FSR_RXUE; + if (termios->c_iflag & (IGNBRK | IGNPAR)) + port->ignore_status_mask |= VUART_FSR_TXFIFOE; + + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask |= VUART_FSR_RXDR; + pr_err("Warning:termios is not set CREAD.\n"); + } + + spin_unlock_irqrestore(&port->lock, flags); + + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static void 
vuart_set_ldisc(struct uart_port *port, struct ktermios *ktermios)
+{
+}
+
+static void vuart_pm(struct uart_port *port, unsigned int state,
+		     unsigned int oldstate)
+{
+}
+
+static void vuart_release_port(struct uart_port *port)
+{
+}
+
+static int vuart_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+static void vuart_config_port(struct uart_port *port, int flags)
+{
+	if (flags & UART_CONFIG_TYPE)
+		port->type = PORT_16550A;
+}
+
+static int vuart_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	if (port->type != PORT_16550A)
+		return -EINVAL;
+	return 0;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static void vuart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+	unsigned int ier_save;
+
+	ier_save = vuart_serial_in(port, VUART_IER);
+	vuart_wait_for_xmitr(port);
+	vuart_serial_out(port, VUART_TX, c);
+
+	vuart_wait_for_xmitr(port);
+	vuart_serial_out(port, VUART_IER, ier_save);
+}
+
+static int vuart_poll_get_char(struct uart_port *port)
+{
+	int status;
+
+	status = vuart_serial_in(port, VUART_FSR);
+	if (!FIELD_GET(VUART_FSR_RXDR, status))
+		return NO_POLL_CHAR;
+
+	return vuart_serial_in(port, VUART_RX);
+}
+
+#endif
+
+static const char *vuart_type(struct uart_port *port)
+{
+	return "tsse_vuart";
+}
+
+static const struct uart_ops vuart_ops = {
+	.tx_empty = vuart_tx_empty,
+	.set_mctrl = vuart_set_mctrl,
+	.get_mctrl = vuart_get_mctrl,
+	.stop_tx = vuart_stop_tx,
+	.start_tx = vuart_start_tx,
+	.throttle = vuart_throttle,
+	.unthrottle = vuart_unthrottle,
+	.stop_rx = vuart_stop_rx,
+	.enable_ms = vuart_enable_ms,
+	.break_ctl = vuart_break_ctl,
+	.startup = vuart_startup,
+	.shutdown = vuart_shutdown,
+	.set_termios = vuart_set_termios,
+	.set_ldisc = vuart_set_ldisc,
+	.pm = vuart_pm,
+	.type = vuart_type,
+	.release_port = vuart_release_port,
+	.request_port = vuart_request_port,
+	.config_port = vuart_config_port,
+	.verify_port = vuart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = vuart_poll_get_char,
+	.poll_put_char = vuart_poll_put_char,
+#endif
+};
+
+static unsigned int vuart_rx_chars(struct uart_port *port, unsigned int lsr)
+{
+	int max_count = TSSE_VUART_MAX_RX_COUNT;
+	unsigned char ch;
+	struct tty_port *tport = &port->state->port;
+
+	do {
+		if (lsr & VUART_FSR_RXDR)
+			ch = vuart_serial_in(port, VUART_RX);
+		else
+			ch = 0;
+		port->icount.rx++;
+		if (lsr & VUART_FSR_RXUE) {
+			port->icount.overrun++;
+			pr_err("incoming byte underflow, record and clear int.\n");
+			vuart_serial_out(port, VUART_IIR, VUART_IIR_RXUE);
+		}
+
+		if (!uart_prepare_sysrq_char(port, ch)) {
+			if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0)
+				++port->icount.buf_overrun;
+		}
+
+		if (--max_count == 0)
+			break;
+		lsr = vuart_serial_in(port, VUART_FSR);
+	} while (lsr & VUART_FSR_RXDR);
+
+	tty_flip_buffer_push(&port->state->port);
+	return lsr;
+}
+
+static int vuart_deal_irq(struct uart_port *port, unsigned int iir)
+{
+	unsigned int status;
+	unsigned int ier;
+	struct tsse_vuart *vuart = (struct tsse_vuart *)port;
+
+	if (iir & VUART_IIR_CPUCD)
+		vuart->confige_done = 1;
+
+	status = vuart_serial_in(port, VUART_FSR);
+	if (port->read_status_mask & VUART_FSR_RXDR)
+		vuart_rx_chars(port, status);
+	else
+		pr_err("read_status_mask does not set VUART_FSR_RXDR, ignore rx.\n");
+
+	ier = vuart_serial_in(port, VUART_IER);
+	if (!(status & VUART_FSR_TXOE) && (status & VUART_FSR_TXFIFOE) &&
+	    (ier & VUART_IER_HETXEI))
+		vuart_tx_chars(port);
+
+	return 1;
+}
+
+#ifdef DEBUG
+static void vuart_debug_iir(unsigned int iir)
+{
+	VUART_PRINT("%s called iir %u.\n",
+		    __func__, iir);
+	if (iir & VUART_IIR_TXEI)
+		pr_err("TX FIFO empty interrupt.\n");
+
+	if (iir & VUART_IIR_RXTOI)
+		pr_err("Host RX FIFO character timeout interrupt.\n");
+
+	if (iir & VUART_IIR_RXDAI)
+		pr_err("Host RX FIFO data available interrupt.\n");
+
+	if (iir & VUART_IIR_RXUE)
+		pr_err("HOST RX FIFO Underflow error.\n");
+
+	if (iir & VUART_IIR_TXOE)
+		pr_err("HOST TX FIFO Overrun error.\n");
+
+	if (iir & VUART_IIR_CPUCD)
+		pr_err("CPU has finished configuration for virtual UART.\n");
+
+	if (iir & VUART_IIR_TXFI)
+		pr_err("Host TX FIFO full interrupt.\n");
+}
+#endif
+
+static int vuart_handle_irq(struct uart_port *port)
+{
+	unsigned int iir;
+	unsigned long flags;
+	int ret;
+
+	iir = vuart_serial_in(port, VUART_IIR);
+	vuart_serial_out(port, VUART_IIR, iir);
+#ifdef DEBUG
+	vuart_debug_iir(iir);
+#endif
+	spin_lock_irqsave(&port->lock, flags);
+	ret = vuart_deal_irq(port, iir);
+
+	uart_unlock_and_check_sysrq_irqrestore(port, flags);
+
+	return ret;
+}
+
+static int vuart_get_line(void)
+{
+	int bit = 0;
+
+	bit = find_first_zero_bit(&g_line[0], TSSE_VUART_MAX_DEV);
+	if (bit >= TSSE_VUART_MAX_DEV)
+		return -ENOSPC;
+	set_bit(bit, &g_line[0]);
+	return bit;
+}
+
+static void vuart_free_line(int line)
+{
+	clear_bit(line, &g_line[0]);
+}
+
+int vuart_init_port(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_vuart *vuart = NULL;
+	struct uart_port *p = NULL;
+	int ret = 0;
+	int line = vuart_get_line();
+
+	if (line == -ENOSPC) {
+		dev_err(&pdev->dev, "too many vuart devices, max is 64.\n");
+		return -ENOSPC;
+	}
+
+	vuart = kzalloc_node(sizeof(struct tsse_vuart), GFP_KERNEL,
+			     dev_to_node(&pdev->dev));
+	if (!vuart) {
+		ret = -ENOMEM;
+		goto zalloc_fail;
+	}
+	vuart->shutdown = 1;
+	p = &(vuart->port);
+	p->mapbase = 0;
+	p->mapsize = 0;
+	p->membase = TSSE_DEV_BARS(tdev)[2].virt_addr + RLS_VUART_OFFSET;
+	p->irq = pci_irq_vector(pdev, RLS_VUART_IRQ_NUM);
+	p->handle_irq = vuart_handle_irq;
+	spin_lock_init(&p->lock);
+	p->line = line;
+	p->type = PORT_16550A;
+	p->uartclk = TSSE_VUART_BAUD * 16;
+	p->iotype = UPIO_MEM;
+	p->ops = &vuart_ops;
+	p->fifosize = 128;
+	vuart->tx_loadsz = 128;
+	p->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_FIXED_PORT |
+		   UPF_SHARE_IRQ;
+	p->dev = &pdev->dev;
+	p->private_data = tdev;
+
+	tdev->port = (struct uart_port *)vuart;
+	ret = uart_add_one_port(&g_vuart_reg, p);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "add port failed [%d]\n", ret);
+		goto add_port_fail;
+	}
+	return 0;
+add_port_fail:
+	kfree(vuart);
+zalloc_fail:
+	vuart_free_line(line);
+
+	return ret;
+}
+
+void vuart_uninit_port(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_vuart *vuart = (struct tsse_vuart *)(tdev->port);
+
+	if (tdev->port) {
+		if (!vuart->shutdown)
+			free_irq(tdev->port->irq, tdev->port);
+		vuart_free_line(tdev->port->line);
+		uart_remove_one_port(&g_vuart_reg, tdev->port);
+		kfree(vuart);
+	}
+}
+
+int vuart_register(void)
+{
+	return uart_register_driver(&g_vuart_reg);
+}
+
+void vuart_unregister(void)
+{
+	uart_unregister_driver(&g_vuart_reg);
+}
diff --git a/drivers/crypto/montage/tsse/tsse_vuart.h b/drivers/crypto/montage/tsse/tsse_vuart.h
new file mode 100644
index 000000000000..1ed43368751a
--- /dev/null
+++ b/drivers/crypto/montage/tsse/tsse_vuart.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file is part of tsse driver for Linux
+ *
+ * Copyright © 2023 Montage Technology. All rights reserved.
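+ *
+ * Virtual UART: exposes a ttyTSSE serial port backed by the vuart FIFO
+ * registers in BAR2 (register layout in tsse_vuart_regs.h).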
+ */ + +#ifndef __TSSE_VUART_H__ +#define __TSSE_VUART_H__ + +#include + +#define RLS_VUART_OFFSET (0x680000) +#define RLS_VUART_IRQ_NUM (10) +#define TSSE_VUART_MAX_DEV (64) +#define TSSE_VUART_BITMAP_SIZE (ALIGN(TSSE_VUART_MAX_DEV, 64) / 64) + +int vuart_register(void); +void vuart_unregister(void); +int vuart_init_port(struct pci_dev *pdev); +void vuart_uninit_port(struct pci_dev *pdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart_regs.h b/drivers/crypto/montage/tsse/tsse_vuart_regs.h new file mode 100644 index 000000000000..26fa62f5014a --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart_regs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_VUART_REGS_H__ +#define __TSSE_VUART_REGS_H__ + +#include +#include + +#define VUART_ID 0x0 +#define VUART_ID_MASK GENMASK(31, 0) + +#define VUART_HCR 0x10 +#define VUART_HCR_RFIFOT GENMASK(3, 2) +#define VUART_HCR_TFIFOT GENMASK(5, 4) + +#define INTRID_NONE BIT(0) +#define INTRID_CPU_LSR (BIT(2) | BIT(1)) +#define INTRID_TRIGGER_LEVEL BIT(2) +#define INTRID_RX_TIMEOUT (BIT(2) | BIT(3)) +#define INTRID_TX_EMPTY BIT(1) + +#define VUART_IIR 0x28 +#define VUART_IIR_TXEI GENMASK(0, 0) +#define VUART_IIR_RXTOI GENMASK(1, 1) +#define VUART_IIR_RXDAI GENMASK(2, 2) +#define VUART_IIR_CPUCD GENMASK(3, 3) +#define VUART_IIR_TXFI GENMASK(4, 4) +#define VUART_IIR_RXUE GENMASK(5, 5) +#define VUART_IIR_TXOE GENMASK(6, 6) + +#define VUART_FCR 0x30 +#define VUART_FCR_TFIFORST GENMASK(0, 0) +#define VUART_FCR_RFIFORST GENMASK(1, 1) +#define VUART_FCR_RFIFOT GENMASK(3, 2) +#define VUART_FCR_TFIFOT GENMASK(5, 4) + +#define VUART_FSR 0x34 +#define VUART_FSR_TXDR GENMASK(0, 0) +#define VUART_FSR_RXDR GENMASK(1, 1) +#define VUART_FSR_RXFIFO GENMASK(2, 2) +#define VUART_FSR_TXFIFOE GENMASK(3, 3) +#define VUART_FSR_RXFIFOF GENMASK(4, 4) +#define VUART_FSR_TXFIFOF GENMASK(5, 5) +#define VUART_FSR_TFIFODN GENMASK(13, 6) +#define VUART_FSR_RFIFODN GENMASK(21, 14) +#define VUART_FSR_TXOE GENMASK(23, 23) +#define VUART_FSR_RXUE GENMASK(24, 24) + +#define VUART_SCR 0x3c +#define VUART_SCR_SCR GENMASK(7, 0) + +#define VUART_TX 0x40 +#define VUART_RX 0x40 + +#define VUART_IER 0x48 +#define VUART_IER_HETXEI GENMASK(0, 0) +#define VUART_IER_HERXTOI GENMASK(1, 1) +#define VUART_IER_HETXDRI GENMASK(2, 2) +#define VUART_IER_CCFGDI GENMASK(3, 3) +#define VUART_IER_HETXFI GENMASK(4, 4) +#define VUART_IER_HETXUI GENMASK(5, 5) +#define VUART_IER_HERXOI GENMASK(6, 6) + +#define VUART_CFG 0x4c +#define VUART_CFG_CCFGD GENMASK(0, 0) + +#endif -- Gitee From 3bee1799b344480d9970e5457bfd3a8fb5bf0b6c Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 28 Dec 2023 15:14:10 +0800 Subject: [PATCH 127/953] anolis: x86/mce: Add Centaur MCA support ANBZ: #7809 Add MCA support for some Zhaoxin CPUs which use X86_VENDOR_CENTAUR as vendor ID. 
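The change is mechanical: each vendor check that currently accepts Intel
and Zhaoxin now also accepts Centaur, and Centaur CPUs reuse
mce_zhaoxin_feature_init() in place of the removed
mce_centaur_feature_init(). As a rough sketch of the resulting predicate
(illustrative helper only, not code from this patch):

	static bool mce_vendor_is_intel_like(struct cpuinfo_x86 *c)
	{
		return c->x86_vendor == X86_VENDOR_INTEL ||
		       c->x86_vendor == X86_VENDOR_CENTAUR ||
		       c->x86_vendor == X86_VENDOR_ZHAOXIN;
	}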
Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2676 --- arch/x86/kernel/cpu/mce/core.c | 39 ++++++++++++--------------------- arch/x86/kernel/cpu/mce/intel.c | 3 ++- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 20ab11aec60b..211f83fbcb1b 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -482,7 +482,8 @@ int mce_usable_address(struct mce *m) /* Checks after this one are Intel/Zhaoxin-specific: */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 1; if (!(m->status & MCI_STATUS_MISCV)) @@ -506,6 +507,7 @@ bool mce_is_memory_error(struct mce *m) return amd_mce_is_memory_error(m); case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -1231,7 +1233,8 @@ static noinstr bool mce_check_crashing_cpu(void) mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { if (mcgstatus & MCG_STATUS_LMCES) return false; } @@ -1505,7 +1508,8 @@ noinstr void do_machine_check(struct pt_regs *regs) * on Intel, Zhaoxin only. */ if (m.cpuvendor == X86_VENDOR_INTEL || - m.cpuvendor == X86_VENDOR_ZHAOXIN) + m.cpuvendor == X86_VENDOR_CENTAUR || + m.cpuvendor == X86_VENDOR_ZHAOXIN) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* @@ -1932,7 +1936,8 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) mce_flags.skx_repmov_quirk = 1; } - if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { + if (c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN) { /* * All newer Zhaoxin CPUs support MCE broadcasting. Enable * synchronization with a one second timeout. @@ -1985,21 +1990,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) } } -static void mce_centaur_feature_init(struct cpuinfo_x86 *c) -{ - struct mca_config *cfg = &mca_cfg; - - /* - * All newer Centaur CPUs support MCE broadcasting. Enable - * synchronization with a one second timeout. - */ - if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || - c->x86 > 6) { - if (cfg->monarch_timeout < 0) - cfg->monarch_timeout = USEC_PER_SEC; - } -} - static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); @@ -2047,9 +2037,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) break; case X86_VENDOR_CENTAUR: - mce_centaur_feature_init(c); - break; - case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_init(c); break; @@ -2066,6 +2053,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) mce_intel_feature_clear(c); break; + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_clear(c); break; @@ -2349,9 +2337,10 @@ static void vendor_disable_error_reporting(void) * controller (iMC), etc. 
*/ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || - boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || + boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) return; mce_disable_error_reporting(); diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index f5323551c1a9..e013dd5162fc 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -93,7 +93,8 @@ static int cmci_supported(int *banks) * makes sure none of the backdoors are entered otherwise. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 0; if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) -- Gitee From eef2c2c46c16dc6cf3f470361cec87563bbb56f3 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 2 Jan 2024 19:30:57 +0800 Subject: [PATCH 128/953] anolis: x86/cpufeatures: Add Zhaoxin feature bits ANBZ: #7809 Add Zhaoxin feature bits on Zhaoxin CPUs. Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2675 --- arch/x86/include/asm/cpufeatures.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index bd33f6366c80..c8e03c79769c 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -156,6 +156,23 @@ #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_ZX_FMA (5*32+15) /* FMA supported */ +#define X86_FEATURE_PARALLAX (5*32+16) /* Adaptive P-state control present */ +#define X86_FEATURE_PARALLAX_EN (5*32+17) /* Adaptive P-state control enabled */ +#define X86_FEATURE_OVERSTRESS (5*32+18) /* Overstress Feature for auto overclock present */ +#define X86_FEATURE_OVERSTRESS_EN (5*32+19) /* Overstress Feature for auto overclock enabled */ +#define X86_FEATURE_TM3 (5*32+20) /* Thermal Monitor 3 present */ +#define X86_FEATURE_TM3_EN (5*32+21) /* Thermal Monitor 3 enabled */ +#define X86_FEATURE_RNG2 (5*32+22) /* 2nd generation of RNG present */ +#define X86_FEATURE_RNG2_EN (5*32+23) /* 2nd generation of RNG enabled */ +#define X86_FEATURE_SEM (5*32+24) /* SME feature present */ +#define X86_FEATURE_PHE2 (5*32+25) /* SHA384 and SHA 512 present */ +#define X86_FEATURE_PHE2_EN (5*32+26) /* SHA384 and SHA 512 enabled */ +#define X86_FEATURE_XMODX (5*32+27) /* "rsa" XMODEXP and MONTMUL2 are present */ +#define X86_FEATURE_XMODX_EN (5*32+28) /* "rsa_en" XMODEXP and MONTMUL2 are enabled */ +#define X86_FEATURE_VEX (5*32+29) /* VEX instructions are present */ +#define X86_FEATURE_VEX_EN (5*32+30) /* VEX instructions are enabled */ +#define X86_FEATURE_STK (5*32+31) /* STK are present */ /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ -- Gitee From 246ba8806350a917c7be76598b38a4aecd656f02 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 28 Dec 2023 15:11:35 +0800 Subject: [PATCH 129/953] anolis: x86/cpu: Add detect extended topology for Zhaoxin CPUs ANBZ: #7809 Detect the extended 
topology information of Zhaoxin CPUs if available. Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2675 --- arch/x86/kernel/cpu/centaur.c | 8 +++++++- arch/x86/kernel/cpu/zhaoxin.c | 7 ++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 345f7d905db6..a5c01c8f8824 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -109,6 +109,9 @@ static void early_init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } + + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void init_centaur(struct cpuinfo_x86 *c) @@ -127,11 +130,14 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, 0*32+31); #endif early_init_centaur(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 05fa4ef63490..2126b10de796 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -79,16 +79,21 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); } + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void init_zhaoxin(struct cpuinfo_x86 *c) { early_init_zhaoxin(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); -- Gitee From 108d2220425904f4a4ddc90a147d4910c1418da1 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 14 Dec 2023 18:56:22 +0200 Subject: [PATCH 130/953] intel_idle: add Sierra Forest SoC support ANBZ: #8417 commit 92813fd5b1562e547120c8489137b040892fe1bc upstream. Add Sierra Forest SoC C-states, which are C1, C1E, C6S, and C6SP. Sierra Forest SoC is built with modules, each module includes 4 cores (Crestmont microarchitecture). There is one L2 cache per module, shared between the 4 cores. There is no core C6 state, but there is C6S state, which has module scope: when all 4 cores request C6S, the entire module (4 cores + L2 cache) enters the low power state. C6SP state has package scope - when all modules in the package enter C6S, the package enters the power state mode. Intel-SIG: commit 92813fd5b156 intel_idle: add Sierra Forest SoC support. Backport Intel_idle Sierra Forest SoC support. Signed-off-by: Artem Bityutskiy Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Xuchun Shang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2808 --- drivers/idle/intel_idle.c | 44 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 45500d2d5b4b..670a041eb910 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1237,6 +1237,43 @@ static struct cpuidle_state snr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state srf_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 270, + .target_residency = 700, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6SP", + .desc = "MWAIT 0x23", + .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 310, + .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1382,6 +1419,12 @@ static const struct idle_cpu idle_cpu_snr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_srf __initconst = { + .state_table = srf_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1427,6 +1470,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf), {} }; -- Gitee From fdeff0ab2abb4bdd8b71c2b28d59226f56ad23d6 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 16:04:55 +0800 Subject: [PATCH 131/953] anolis: ext4: remove projid limit when create hard link ANBZ: #8366 This is a temporary workaround to avoid the limitation when creating hard link cross two projids. Signed-off-by: zhangliguang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- Documentation/admin-guide/sysctl/fs.rst | 9 +++++++++ fs/ext4/namei.c | 5 ++++- fs/namei.c | 13 +++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst index a321b84eccaa..11b2dd4ef5ae 100644 --- a/Documentation/admin-guide/sysctl/fs.rst +++ b/Documentation/admin-guide/sysctl/fs.rst @@ -205,6 +205,15 @@ already own the source file, or do not have read/write access to it. This protection is based on the restrictions in Openwall and grsecurity. 
+hardlink_cross_projid +--------------------- + +This is a temporary workaround plan to avoid the limitation when creating +hard link cross two projids. When set to "0", hardlink creation cross +two projids is restricted. When set to "1" hardlinks can be created +cross two projids. + + protected_regular ----------------- diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index bbda587f76b8..f444fddd892e 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -50,6 +50,8 @@ #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) +extern int sysctl_hardlink_cross_projid __read_mostly; + static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block) @@ -3507,7 +3509,8 @@ static int ext4_link(struct dentry *old_dentry, if (err) return err; - if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && + if (!sysctl_hardlink_cross_projid && + (ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) return -EXDEV; diff --git a/fs/namei.c b/fs/namei.c index e728ba085ebe..a0c4455f8857 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1033,6 +1033,9 @@ static int sysctl_protected_hardlinks __read_mostly; static int sysctl_protected_fifos __read_mostly; static int sysctl_protected_regular __read_mostly; +int sysctl_hardlink_cross_projid __read_mostly; +EXPORT_SYMBOL_GPL(sysctl_hardlink_cross_projid); + #ifdef CONFIG_SYSCTL static struct ctl_table namei_sysctls[] = { { @@ -1071,6 +1074,16 @@ static struct ctl_table namei_sysctls[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, + { + .procname = "hardlink_cross_projid", + .data = &sysctl_hardlink_cross_projid, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + + }, { } }; -- Gitee From fc8cbbe1d7af8b9e5cc8f8bbc7afab57a6f18ee9 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Wed, 7 Mar 2018 17:12:11 +0800 Subject: [PATCH 132/953] anolis: jbd2: create jbd2-ckpt thread for journal checkpoint ANBZ: #8366 This is trying to do jbd2 checkpoint in a specific kernel thread, then checkpoint won't be under io throttle control. 
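The hand-off between the journal and the new jbd2-ckpt thread is a pair
of wait queues: __jbd2_log_wait_for_space() kicks j_wait_checkpoint and
sleeps on j_wait_done_checkpoint, and the thread does the converse. A
simplified sketch of the thread loop (illustrative only; the real code
below also serializes on j_checkpoint_mutex):

	static int ckpt_thread(void *arg)
	{
		journal_t *journal = arg;
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&journal->j_wait_checkpoint, &wait,
					TASK_INTERRUPTIBLE);
			wake_up_all(&journal->j_wait_done_checkpoint); /* report idle */
			schedule();		/* sleep until a waiter kicks us */
			finish_wait(&journal->j_wait_checkpoint, &wait);
			if (journal->j_flags & JBD2_UNMOUNT)
				return 0;
			jbd2_log_do_checkpoint(journal); /* I/O issued from this task */
		}
	}

Because the checkpoint I/O is now issued from the dedicated kernel
thread (whose ioprio follows the journal's, as the super.c hunk shows),
it is no longer charged to whichever throttled task ran out of journal
space.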
Signed-off-by: Joseph Qi Signed-off-by: Jiufei Xue Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- fs/ext4/super.c | 2 ++ fs/jbd2/checkpoint.c | 11 ++++++++- fs/jbd2/journal.c | 57 +++++++++++++++++++++++++++++++++++++++++++- include/linux/jbd2.h | 17 +++++++++++++ 4 files changed, 85 insertions(+), 2 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 3ea4d8f11e7b..02e0bf074e86 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4968,6 +4968,7 @@ static int ext4_load_and_init_journal(struct super_block *sb, } set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); + set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio); sbi->s_journal->j_submit_inode_data_buffers = ext4_journal_submit_inode_data_buffers; @@ -6541,6 +6542,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb) if (sbi->s_journal) { ext4_init_journal_params(sb, sbi->s_journal); set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); + set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio); } /* Flush outstanding errors before changing fs state */ diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 118699fff2f9..4f00fe701e8f 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -85,7 +85,16 @@ __releases(&journal->j_state_lock) spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); if (chkpt) { - jbd2_log_do_checkpoint(journal); + DEFINE_WAIT(wait); + + prepare_to_wait(&journal->j_wait_done_checkpoint, &wait, + TASK_UNINTERRUPTIBLE); + mutex_unlock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + finish_wait(&journal->j_wait_done_checkpoint, &wait); + jbd2_debug(1, "wake up checkpoint thread.\n"); } else if (jbd2_cleanup_journal_tail(journal) == 0) { /* We were able to recover space; yay! 
*/ ; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 19c69229ac6e..b42730168871 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -191,6 +191,9 @@ static int kjournald2(void *arg) if (journal->j_flags & JBD2_UNMOUNT) goto end_loop; + if (kthread_should_stop()) + goto end_loop; + jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n", journal->j_commit_sequence, journal->j_commit_request); @@ -261,9 +264,40 @@ static int kjournald2(void *arg) return 0; } +static int jbd2_checkpoint_thread(void *arg) +{ + journal_t *journal = arg; + DEFINE_WAIT(wait); + + jbd2_debug(1, "%s\n", __func__); + journal->j_checkpoint_task = current; + +loop: + prepare_to_wait(&journal->j_wait_checkpoint, &wait, + TASK_INTERRUPTIBLE); + wake_up_all(&journal->j_wait_done_checkpoint); + schedule(); + finish_wait(&journal->j_wait_checkpoint, &wait); + + if (journal->j_flags & JBD2_UNMOUNT) + goto end_loop; + + mutex_lock(&journal->j_checkpoint_mutex); + jbd2_log_do_checkpoint(journal); + mutex_unlock(&journal->j_checkpoint_mutex); + + goto loop; + +end_loop: + journal->j_checkpoint_task = NULL; + wake_up_all(&journal->j_wait_done_checkpoint); + jbd2_debug(1, "%s exiting.\n", __func__); + return 0; +} + static int jbd2_journal_start_thread(journal_t *journal) { - struct task_struct *t; + struct task_struct *t, *t_ckpt; t = kthread_run(kjournald2, journal, "jbd2/%s", journal->j_devname); @@ -271,6 +305,17 @@ static int jbd2_journal_start_thread(journal_t *journal) return PTR_ERR(t); wait_event(journal->j_wait_done_commit, journal->j_task != NULL); + + t_ckpt = kthread_run(jbd2_checkpoint_thread, journal, "jbd2-ckpt/%s", + journal->j_devname); + if (IS_ERR(t_ckpt)) { + kthread_stop(t); + return PTR_ERR(t_ckpt); + } + + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task != NULL); + return 0; } @@ -286,6 +331,14 @@ static void journal_kill_thread(journal_t *journal) write_lock(&journal->j_state_lock); } write_unlock(&journal->j_state_lock); + + while (journal->j_checkpoint_task) { + mutex_lock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task == NULL); + mutex_unlock(&journal->j_checkpoint_mutex); + } } /* @@ -1541,6 +1594,8 @@ static journal_t *journal_init_common(struct block_device *bdev, init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_done_commit); + init_waitqueue_head(&journal->j_wait_checkpoint); + init_waitqueue_head(&journal->j_wait_done_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); init_waitqueue_head(&journal->j_wait_reserved); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0fc6c1f51262..4b52ee8ae3a1 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -844,6 +844,16 @@ struct journal_s */ wait_queue_head_t j_wait_commit; + /** + * @j_wait_done_checkpoint: Wait queue for waiting for checkpoint to complete. + */ + wait_queue_head_t j_wait_done_checkpoint; + + /** + * @j_wait_checkpoint: Wait queue to trigger checkpointing. + */ + wait_queue_head_t j_wait_checkpoint; + /** * @j_wait_updates: Wait queue to wait for updates to complete. */ @@ -1200,6 +1210,13 @@ struct journal_s int (*j_finish_inode_data_buffers) (struct jbd2_inode *); + /** + * @j_checkpoint_task: + * + * Pointer to the current checkpoint thread for this journal. 
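+	 * Set by jbd2_checkpoint_thread() when the thread starts and cleared
+	 * again when it exits; journal_kill_thread() waits for it to become
+	 * NULL.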
+	 */
+	struct task_struct *j_checkpoint_task;
+
 	/*
 	 * Journal statistics
 	 */
-- 
Gitee

From c50b7fc98ce8ab22db0129f1370e14393305fa90 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Mon, 26 Feb 2024 14:53:30 +0800
Subject: [PATCH 133/953] anolis: jbd2: add new "stats" proc file

ANBZ: #8366

/proc/fs/jbd2/${device}/info only shows average statistics over jbd2's
whole life cycle; it cannot show jbd2 info for a specified time
interval, and that capability is often very useful for troubleshooting.
For example, we cannot see how rs_locked and rs_flushing grow within a
specified time interval, yet these two indexes can explain some of an
application's behaviour.

Here we add a new "stats" proc file similar to /proc/diskstats; on top
of it we can implement a simple tool, jbd2_stat, which displays detailed
jbd2 info for a specified time interval. Like below (time interval 5s):

[lege@localhost ~]$ cat /proc/fs/jbd2/vdb1-8/stats
51 30 8192 0 1 241616 0 0 22 0 47158 891 942 1000 1000

[lege@localhost ~]$ gcc -o jbd2_stat jbd2_stat.c ; ./jbd2_stat
Device tid trans handles locked flushing logging
vdb1-8 1861 158 359 13.00 0.00 2.00

Device tid trans handles locked flushing logging
vdb1-8 1974 113 389 26.00 0.00 5.00

Device tid trans handles locked flushing logging
vdb1-8 2188 214 308 10.00 0.00 7.00

Device tid trans handles locked flushing logging
vdb1-8 2344 156 332 19.00 0.00 4.00

Signed-off-by: Xiaoguang Wang
Signed-off-by: Joseph Qi
Reviewed-by: Gao Xiang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2795
---
 fs/jbd2/journal.c | 103 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 98 insertions(+), 5 deletions(-)

diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b42730168871..a7bb5ae2c4d9 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1256,25 +1256,78 @@ static const struct seq_operations jbd2_seq_info_ops = {
 	.show = jbd2_seq_info_show,
 };
 
-static int jbd2_seq_info_open(struct inode *inode, struct file *file)
+static void *jbd2_seq_stats_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos ?
NULL : SEQ_START_TOKEN; +} + +static void *jbd2_seq_stats_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static int jbd2_seq_stats_show(struct seq_file *seq, void *v) +{ + struct jbd2_stats_proc_session *s = seq->private; + + if (v != SEQ_START_TOKEN) + return 0; + + seq_printf(seq, "%lu %lu %d %lu %lu %lu %lu %lu %lu %llu %u %u %u %d %d\n", + s->stats->ts_tid, s->stats->ts_requested, + s->journal->j_max_transaction_buffers, s->stats->run.rs_wait, + s->stats->run.rs_request_delay, s->stats->run.rs_running, + s->stats->run.rs_locked, s->stats->run.rs_flushing, + s->stats->run.rs_logging, + div_u64(s->journal->j_average_commit_time, NSEC_PER_MSEC), + s->stats->run.rs_handle_count, s->stats->run.rs_blocks, + s->stats->run.rs_blocks_logged, HZ, jiffies_to_msecs(HZ)); + return 0; +} + +static void jbd2_seq_stats_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations jbd2_seq_stats_ops = { + .start = jbd2_seq_stats_start, + .next = jbd2_seq_stats_next, + .stop = jbd2_seq_stats_stop, + .show = jbd2_seq_stats_show, +}; + +static struct jbd2_stats_proc_session *__jbd2_seq_open(struct inode *inode, + struct file *file) { journal_t *journal = pde_data(inode); struct jbd2_stats_proc_session *s; - int rc, size; + int size; s = kmalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); size = sizeof(struct transaction_stats_s); s->stats = kmalloc(size, GFP_KERNEL); if (s->stats == NULL) { kfree(s); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } spin_lock(&journal->j_history_lock); memcpy(s->stats, &journal->j_stats, size); s->journal = journal; spin_unlock(&journal->j_history_lock); + return s; +} + +static int jbd2_seq_info_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); rc = seq_open(file, &jbd2_seq_info_ops); if (rc == 0) { @@ -1285,7 +1338,6 @@ static int jbd2_seq_info_open(struct inode *inode, struct file *file) kfree(s); } return rc; - } static int jbd2_seq_info_release(struct inode *inode, struct file *file) @@ -1304,6 +1356,44 @@ static const struct proc_ops jbd2_info_proc_ops = { .proc_release = jbd2_seq_info_release, }; +static int jbd2_seq_stats_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); + + rc = seq_open(file, &jbd2_seq_stats_ops); + if (rc == 0) { + struct seq_file *m = file->private_data; + + m->private = s; + } else { + kfree(s->stats); + kfree(s); + } + return rc; +} + +static int jbd2_seq_stats_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct jbd2_stats_proc_session *s = seq->private; + + kfree(s->stats); + kfree(s); + return seq_release(inode, file); +} + +static const struct proc_ops jbd2_stats_proc_ops = { + .proc_open = jbd2_seq_stats_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = jbd2_seq_stats_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1312,12 +1402,15 @@ static void jbd2_stats_proc_init(journal_t *journal) if (journal->j_proc_entry) { proc_create_data("info", S_IRUGO, journal->j_proc_entry, &jbd2_info_proc_ops, journal); + proc_create_data("stats", 0444, journal->j_proc_entry, + &jbd2_stats_proc_ops, journal); } } static void jbd2_stats_proc_exit(journal_t *journal) { 
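+	/* Keep these removals in sync with the entries created in
+	 * jbd2_stats_proc_init(). */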
 	remove_proc_entry("info", journal->j_proc_entry);
+	remove_proc_entry("stats", journal->j_proc_entry);
 	remove_proc_entry(journal->j_devname, proc_jbd2_stats);
 }
-- 
Gitee

From 664e1c332a43a273f624004f7776f86c6c51b9c1 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Mon, 26 Feb 2024 15:01:48 +0800
Subject: [PATCH 134/953] anolis: jbd2: add proc entry to control whether to do buffer copy-out

ANBZ: #8366

When jbd2 tries to get write access to a buffer that is under writeback
with the BH_Shadow flag set, it will wait until the buffer has been
written to disk, and sometimes that wait can be quite long, especially
when disk capacity is almost full.

Here add a proc entry "force_copy": if its value is not zero, jbd2 will
always do the meta buffer copy-out, which eliminates the unnecessary
waiting and reduces long tail latency for buffered writes.

I constructed the test case below:

$cat offline.fio
; fio-rand-RW.job for fiotest
[global]
name=fio-rand-RW
filename=fio-rand-RW
rw=randrw
rwmixread=60
rwmixwrite=40
bs=4K
direct=0
numjobs=4
time_based=1
runtime=900

[file1]
size=60G
ioengine=sync
iodepth=16

$cat online.fio
; fio-seq-write.job for fiotest
[global]
name=fio-seq-write
filename=fio-seq-write
rw=write
bs=256K
direct=0
numjobs=1
time_based=1
runtime=60

[file1]
rate=50m
size=10G
ioengine=sync
iodepth=16

With this patch:

$cat /proc/fs/jbd2/sda5-8/force_copy
0

online fio almost always gets such long tail latency:

Jobs: 1 (f=1), 0B/s-0B/s: [W(1)][100.0%][w=50.0MiB/s][w=200 IOPS][eta 00m:00s]
file1: (groupid=0, jobs=1): err= 0: pid=17855: Thu Nov 15 09:45:57 2018
  write: IOPS=200, BW=50.0MiB/s (52.4MB/s)(3000MiB/60001msec)
    clat (usec): min=135, max=4086.6k, avg=867.21, stdev=50338.22
     lat (usec): min=139, max=4086.6k, avg=871.16, stdev=50338.22
    clat percentiles (usec):
     | 1.00th=[ 141], 5.00th=[ 143], 10.00th=[ 145],
     | 20.00th=[ 147], 30.00th=[ 147], 40.00th=[ 149],
     | 50.00th=[ 149], 60.00th=[ 151], 70.00th=[ 153],
     | 80.00th=[ 155], 90.00th=[ 159], 95.00th=[ 163],
     | 99.00th=[ 255], 99.50th=[ 273], 99.90th=[ 429],
     | 99.95th=[ 441], 99.99th=[3640656]

$cat /proc/fs/jbd2/sda5-8/force_copy
1

online fio latency is much better.

Jobs: 1 (f=1), 0B/s-0B/s: [W(1)][100.0%][w=50.0MiB/s][w=200 IOPS][eta 00m:00s]
file1: (groupid=0, jobs=1): err= 0: pid=8084: Thu Nov 15 09:31:15 2018
  write: IOPS=200, BW=50.0MiB/s (52.4MB/s)(3000MiB/60001msec)
    clat (usec): min=137, max=545, avg=151.35, stdev=16.22
     lat (usec): min=140, max=548, avg=155.31, stdev=16.65
    clat percentiles (usec):
     | 1.00th=[ 143], 5.00th=[ 145], 10.00th=[ 145], 20.00th=[ 147],
     | 30.00th=[ 147], 40.00th=[ 147], 50.00th=[ 149], 60.00th=[ 149],
     | 70.00th=[ 151], 80.00th=[ 155], 90.00th=[ 157], 95.00th=[ 161],
     | 99.00th=[ 239], 99.50th=[ 269], 99.90th=[ 420], 99.95th=[ 429],
     | 99.99th=[ 537]

As to the cost: because we will always need to copy the meta buffer, it
consumes minor cpu time and some memory (at most 32MB for a 128MB
journal size).
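For reference, the knob can be flipped at runtime (device name sda5-8 as
in the test above):

$echo 1 > /proc/fs/jbd2/sda5-8/force_copy
$cat /proc/fs/jbd2/sda5-8/force_copy
1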
Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- fs/jbd2/journal.c | 57 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/jbd2.h | 5 ++++ 2 files changed, 62 insertions(+) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a7bb5ae2c4d9..4806e5f4982b 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -442,6 +442,9 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, } kunmap_local(mapped_data); + /* force copy-out */ + if (need_copy_out == 0 && journal->j_force_copy) + need_copy_out = 1; /* * Do we need to do a data copy? */ @@ -1394,6 +1397,57 @@ static const struct proc_ops jbd2_stats_proc_ops = { .proc_release = jbd2_seq_stats_release, }; +static int jbd2_seq_force_copy_show(struct seq_file *m, void *v) +{ + journal_t *journal = m->private; + + seq_printf(m, "%u\n", journal->j_force_copy); + return 0; +} + +static int jbd2_seq_force_copy_open(struct inode *inode, struct file *filp) +{ + journal_t *journal = pde_data(inode); + + return single_open(filp, jbd2_seq_force_copy_show, journal); +} + +/* Worst case buffer size needed for holding an integer. */ +#define PROC_NUMBUF 13 + +static ssize_t jbd2_seq_force_copy_write(struct file *file, + const char __user *buf, size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + journal_t *journal = pde_data(inode); + char buffer[PROC_NUMBUF]; + unsigned int force_copy; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtouint(strstrip(buffer), 0, &force_copy); + if (err) + goto out; + journal->j_force_copy = force_copy; +out: + return err < 0 ? err : count; +} + +static const struct proc_ops jbd2_force_copy_proc_ops = { + .proc_open = jbd2_seq_force_copy_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_force_copy_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1402,6 +1456,8 @@ static void jbd2_stats_proc_init(journal_t *journal) if (journal->j_proc_entry) { proc_create_data("info", S_IRUGO, journal->j_proc_entry, &jbd2_info_proc_ops, journal); + proc_create_data("force_copy", 0644, journal->j_proc_entry, + &jbd2_force_copy_proc_ops, journal); proc_create_data("stats", 0444, journal->j_proc_entry, &jbd2_stats_proc_ops, journal); } @@ -1410,6 +1466,7 @@ static void jbd2_stats_proc_init(journal_t *journal) static void jbd2_stats_proc_exit(journal_t *journal) { remove_proc_entry("info", journal->j_proc_entry); + remove_proc_entry("force_copy", journal->j_proc_entry); remove_proc_entry("stats", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 4b52ee8ae3a1..5e269f91dd64 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1236,6 +1236,11 @@ struct journal_s */ struct transaction_stats_s j_stats; + /** + * @j_force_copy: if not zero, force to do buffer copy-out. + */ + unsigned int j_force_copy; + /** * @j_failed_commit: Failed journal commit ID. 
 */
-- 
Gitee

From 8bcb8b8958f430ac8d59e30e63fbbe422224f29a Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Mon, 26 Feb 2024 15:51:33 +0800
Subject: [PATCH 135/953] anolis: jbd2: track slow handles that prevent transaction committing

ANBZ: #8366

When a transaction is going to commit, it first sets its state to
T_LOCKED and waits for all outstanding handles to complete. The
committing transaction stays in the locked state as long as it has
outstanding handles; meanwhile the whole fs is locked and all later fs
modification operations get stuck in wait_transaction_locked().

It is hard to tell why handles are that slow, so here we add a new
static tracepoint to track such slow handles and show their io wait
time and sched wait time. The output looks like below:

fsstress-20347 [024] .... 1570.305454: jbd2_slow_handle_stats: dev 254,17 tid 15853 type 4 line_no 3101 interval 126 sync 0 requested_blocks 24 dirtied_blocks 0 trans_wait 122 space_wait 0 sched_wait 0 io_wait 126

"trans_wait 122" means that the current committing transaction has been
locked for 122ms because this handle did not complete quickly. From
"io_wait 126", we can see that io is the major reason.

In this patch, we also add a per-fs control file used to determine when
a handle is considered slow:

/proc/fs/jbd2/vdb1-8/stall_thresh

The default value is 100ms; users can set a new threshold by echoing a
value to this file.

Later I also plan to add a per-fs proc file to record this info.

Signed-off-by: Xiaoguang Wang
Signed-off-by: Joseph Qi
Reviewed-by: Gao Xiang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2795
---
 fs/jbd2/commit.c            |  1 +
 fs/jbd2/journal.c           | 52 +++++++++++++++++++++++++++++++++++++
 fs/jbd2/transaction.c       | 39 ++++++++++++++++++++++++++++
 include/linux/jbd2.h        | 15 +++++++++++
 include/trace/events/jbd2.h | 50 +++++++++++++++++++++++++++++
 5 files changed, 157 insertions(+)

diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 5e122586e06e..3386716daf2c 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -438,6 +438,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		journal->j_fc_off = 0;
 	J_ASSERT(commit_transaction->t_state == T_RUNNING);
 	commit_transaction->t_state = T_LOCKED;
+	WRITE_ONCE(commit_transaction->t_locked_time, jiffies);
 
 	trace_jbd2_commit_locking(journal, commit_transaction);
 	stats.run.rs_wait = commit_transaction->t_max_wait;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 4806e5f4982b..1da6b2416db0 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1448,6 +1448,54 @@ static const struct proc_ops jbd2_force_copy_proc_ops = {
 	.proc_release = single_release,
 };
 
+static int jbd2_seq_stall_thresh_show(struct seq_file *m, void *v)
+{
+	journal_t *journal = m->private;
+
+	seq_printf(m, "%lu\n", journal->j_stall_thresh);
+	return 0;
+}
+
+static int jbd2_seq_stall_thresh_open(struct inode *inode, struct file *filp)
+{
+	journal_t *journal = pde_data(inode);
+
+	return single_open(filp, jbd2_seq_stall_thresh_show, journal);
+}
+
+static ssize_t jbd2_seq_stall_thresh_write(struct file *file,
+		const char __user *buf, size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	journal_t *journal = pde_data(inode);
+	char buffer[PROC_NUMBUF];
+	unsigned long long stall_thresh;
+	int err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoull(strstrip(buffer), 0, &stall_thresh);
+	if (err)
+		goto
out; + WRITE_ONCE(journal->j_stall_thresh, stall_thresh); +out: + return err < 0 ? err : count; +} + +static const struct proc_ops jbd2_stall_thresh_proc_ops = { + .proc_open = jbd2_seq_stall_thresh_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_stall_thresh_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1460,6 +1508,8 @@ static void jbd2_stats_proc_init(journal_t *journal) &jbd2_force_copy_proc_ops, journal); proc_create_data("stats", 0444, journal->j_proc_entry, &jbd2_stats_proc_ops, journal); + proc_create_data("stall_thresh", 0644, journal->j_proc_entry, + &jbd2_stall_thresh_proc_ops, journal); } } @@ -1468,6 +1518,7 @@ static void jbd2_stats_proc_exit(journal_t *journal) remove_proc_entry("info", journal->j_proc_entry); remove_proc_entry("force_copy", journal->j_proc_entry); remove_proc_entry("stats", journal->j_proc_entry); + remove_proc_entry("stall_thresh", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } @@ -1761,6 +1812,7 @@ static journal_t *journal_init_common(struct block_device *bdev, journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); journal->j_min_batch_time = 0; journal->j_max_batch_time = 15000; /* 15ms */ + journal->j_stall_thresh = JBD2_DEFAULT_TRANS_STALL_THRESH; atomic_set(&journal->j_reserved_credits, 0); lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", &jbd2_trans_commit_key, 0); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 5f08b5fd105a..773ff69e9bd3 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -478,6 +478,11 @@ static handle_t *new_handle(int nblocks) return NULL; handle->h_total_credits = nblocks; handle->h_ref = 1; + handle->h_pre_start_jiffies = jiffies; +#ifdef CONFIG_SCHEDSTATS + handle->h_sched_wait_sum = current->stats.wait_sum; + handle->h_io_wait_sum = current->stats.iowait_sum; +#endif return handle; } @@ -1942,6 +1947,40 @@ int jbd2_journal_stop(handle_t *handle) wait_for_commit = 1; } + do { + unsigned long transaction_locked_time, delta; + unsigned long journal_space_wait; + u64 sched_wait, io_wait; + + transaction_locked_time = READ_ONCE(transaction->t_locked_time); + if (!transaction_locked_time) + break; + + delta = jiffies_to_msecs(jiffies - transaction_locked_time); + if (delta < READ_ONCE(journal->j_stall_thresh)) + break; + + journal_space_wait = handle->h_start_jiffies - + handle->h_pre_start_jiffies; +#ifdef CONFIG_SCHEDSTATS + sched_wait = current->stats.wait_sum - + handle->h_sched_wait_sum; + io_wait = current->stats.iowait_sum - + handle->h_io_wait_sum; +#else + sched_wait = 0; + io_wait = 0; +#endif + trace_jbd2_slow_handle_stats(journal->j_fs_dev->bd_dev, + transaction->t_tid, handle->h_type, handle->h_line_no, + jiffies - handle->h_start_jiffies, handle->h_sync, + handle->h_requested_credits, + handle->h_requested_credits - handle->h_total_credits, + delta, jiffies_to_msecs(journal_space_wait), + div_u64(sched_wait, NSEC_PER_MSEC), + div_u64(io_wait, NSEC_PER_MSEC)); + } while (0); + /* * Once stop_this_handle() drops t_updates, the transaction could start * committing on us and eventually disappear. 
So we must not diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 5e269f91dd64..fd24871f9889 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -497,7 +497,10 @@ struct jbd2_journal_handle unsigned int h_type: 8; unsigned int h_line_no: 16; + unsigned long h_pre_start_jiffies; unsigned long h_start_jiffies; + u64 h_sched_wait_sum; + u64 h_io_wait_sum; unsigned int h_requested_credits; unsigned int saved_alloc_context; @@ -706,6 +709,9 @@ struct transaction_s * structures associated with the transaction */ struct list_head t_private_list; + + /* When this transaction is locked */ + unsigned long t_locked_time; }; struct transaction_run_stats_s { @@ -1241,6 +1247,15 @@ struct journal_s */ unsigned int j_force_copy; + /** + * @j_stall_thresh: when transaction is locked and there are still + * outstanding handles, such handles will prevent transaction + * committing, trace these handles if they have stalled the transaction + * for @j_stall_thresh time, unit is millisecond, default 100ms. + */ +#define JBD2_DEFAULT_TRANS_STALL_THRESH 100 + unsigned long j_stall_thresh; + /** * @j_failed_commit: Failed journal commit ID. */ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 5646ae15a957..5779ac0df039 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -245,6 +245,56 @@ TRACE_EVENT(jbd2_handle_stats, __entry->dirtied_blocks) ); +TRACE_EVENT(jbd2_slow_handle_stats, + TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + unsigned int line_no, int interval, int sync, + int requested_blocks, int dirtied_blocks, + unsigned long trans_wait, unsigned long space_wait, + u64 sched_wait, u64 io_wait), + + TP_ARGS(dev, tid, type, line_no, interval, sync, + requested_blocks, dirtied_blocks, trans_wait, space_wait, + sched_wait, io_wait), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, tid) + __field(unsigned int, type) + __field(unsigned int, line_no) + __field(int, interval) + __field(int, sync) + __field(int, requested_blocks) + __field(int, dirtied_blocks) + __field(unsigned long, trans_wait) + __field(unsigned long, space_wait) + __field(u64, sched_wait) + __field(u64, io_wait) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->type = type; + __entry->line_no = line_no; + __entry->interval = interval; + __entry->sync = sync; + __entry->requested_blocks = requested_blocks; + __entry->dirtied_blocks = dirtied_blocks; + __entry->trans_wait = trans_wait; + __entry->space_wait = space_wait; + __entry->sched_wait = sched_wait; + __entry->io_wait = io_wait; + ), + + TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " + "sync %d requested_blocks %d dirtied_blocks %d " + "trans_wait %lu space_wait %lu sched_wait %llu io_wait %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->interval, + __entry->sync, __entry->requested_blocks, + __entry->dirtied_blocks, __entry->trans_wait, + __entry->space_wait, __entry->sched_wait, __entry->io_wait) +); TRACE_EVENT(jbd2_run_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), -- Gitee From b8786d675a7d9fe55bd7cd7c7dfab0fb8b58c426 Mon Sep 17 00:00:00 2001 From: liuyun Date: Sat, 2 Dec 2023 10:08:30 +0800 Subject: [PATCH 136/953] anolis: LoongArch: add kernel setvirtmap for runtime ANBZ: #8435 Signed-off-by: liuyun Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/kernel/efi.c | 163 
+++++++++++++++++++++++++++++++++++- 1 file changed, 162 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 9fc10cea21e1..6f1e461fc5ac 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -23,13 +23,16 @@ #include #include +#include #include +#include static unsigned long efi_nr_tables; static unsigned long efi_config_table; static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR; +static __initdata pgd_t *pgd_efi; static efi_system_table_t *efi_systab; static efi_config_table_type_t arch_tables[] __initdata = { @@ -49,8 +52,162 @@ void __init *efi_fdt_pointer(void) return early_memremap_ro(fdt_pointer, SZ_64K); } +static int __init efimap_populate_hugepages( + unsigned long start, unsigned long end, + pgprot_t prot) +{ + unsigned long addr; + unsigned long next; + pmd_t entry; + pud_t *pud; + pmd_t *pmd; + + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + pud = pud_offset((p4d_t *)pgd_efi + pgd_index(addr), addr); + if (pud_none(*pud)) { + void *p = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + + if (!p) + return -1; + pmd_init(p); + pud_populate(&init_mm, pud, p); + } + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + entry = pfn_pmd((addr >> PAGE_SHIFT), prot); + entry = pmd_mkhuge(entry); + set_pmd_at(&init_mm, addr, pmd, entry); + } + } + return 0; +} + +static void __init efi_map_pgt(void) +{ + unsigned long node; + unsigned long start, end; + unsigned long start_pfn, end_pfn; + + pgd_efi = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pgd_efi) { + pr_err("alloc efi pgd failed!\n"); + return; + } + pgd_init(pgd_efi); + csr_write64((long)pgd_efi, LOONGARCH_CSR_PGDL); + + /* Low Memory, Cached */ + efimap_populate_hugepages(0, SZ_256M, PAGE_KERNEL); + + for_each_node_mask(node, node_possible_map) { + /* MMIO Registers, Uncached */ + efimap_populate_hugepages(SZ_256M | (node << 44), + SZ_512M | (node << 44), PAGE_KERNEL_SUC); + + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE); + end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE); + + /* System memory, Cached */ + efimap_populate_hugepages(node ? 
start : SZ_512M, end, PAGE_KERNEL); + } +} + +static int __init efimap_free_pgt(unsigned long start, unsigned long end) +{ + unsigned long addr; + unsigned long next; + pud_t *pud; + pmd_t *pmd; + + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + + pud = pud_offset((p4d_t *)pgd_efi + pgd_index(addr), addr); + if (!pud_present(*pud)) + continue; + pmd = pmd_offset(pud, addr); + memblock_free(pmd, PAGE_SIZE); + pud_clear(pud); + } + return 0; +} + +static void __init efi_unmap_pgt(void) +{ + unsigned long node; + unsigned long start, end; + unsigned long start_pfn, end_pfn; + + for_each_node_mask(node, node_possible_map) { + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE); + end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE); + + /* Free pagetable memory */ + efimap_free_pgt(start, end); + } + + memblock_free(pgd_efi, PAGE_SIZE); + csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + local_flush_tlb_all(); +} + +/* + * set_virtual_map() - create a virtual mapping for the EFI memory map and call + * efi_set_virtual_address_map enter virtual for runtime service + * + * This function populates the virt_addr fields of all memory region descriptors + * in @memory_map whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors + * are also copied to @runtime_map, and their total count is returned in @count. + */ +static int __init set_virtual_map(void) +{ + efi_status_t status; + int count = 0; + unsigned int size; + unsigned long attr; + efi_runtime_services_t *rt; + efi_set_virtual_address_map_t *svam; + efi_memory_desc_t *in, runtime_map[32]; + + size = sizeof(efi_memory_desc_t); + + for_each_efi_memory_desc(in) { + attr = in->attribute; + if (!(attr & EFI_MEMORY_RUNTIME)) + continue; + + if (attr & (EFI_MEMORY_WB | EFI_MEMORY_WT)) + in->virt_addr = TO_CACHE(in->phys_addr); + else + in->virt_addr = TO_UNCACHE(in->phys_addr); + + memcpy(&runtime_map[count++], in, size); + } + + rt = early_memremap_ro((unsigned long)efi_systab->runtime, sizeof(*rt)); + + /* Install the new virtual address map */ + svam = rt->set_virtual_address_map; + + efi_map_pgt(); + + status = svam(size * count, size, efi.memmap.desc_version, + (efi_memory_desc_t *)TO_PHYS((unsigned long)runtime_map)); + + efi_unmap_pgt(); + if (status != EFI_SUCCESS) + return -1; + + return 0; +} + void __init efi_runtime_init(void) { + efi_status_t status; + if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime) return; @@ -59,7 +216,11 @@ void __init efi_runtime_init(void) return; } - efi.runtime = (efi_runtime_services_t *)efi_systab->runtime; + status = set_virtual_map(); + if (status < 0) + return; + + efi.runtime = READ_ONCE(efi_systab->runtime); efi.runtime_version = (unsigned int)efi.runtime->hdr.revision; efi_native_runtime_setup(); -- Gitee From 509af1bc95696295ce6d49f82df1b785bb4f6f72 Mon Sep 17 00:00:00 2001 From: liuyun Date: Sat, 2 Dec 2023 10:08:31 +0800 Subject: [PATCH 137/953] anolis: LoongArch: Old BPI compatibility ANBZ: #8435 Signed-off-by: liuyun Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/include/asm/addrspace.h | 1 + arch/loongarch/include/asm/efi.h | 1 + arch/loongarch/kernel/Makefile | 1 + arch/loongarch/kernel/acpi.c | 7 +- arch/loongarch/kernel/efi.c | 12 +- arch/loongarch/kernel/env.c | 6 + arch/loongarch/kernel/irq.c | 25 +- arch/loongarch/kernel/legacy_boot.c | 468 +++++++++++++++++++++++++ arch/loongarch/kernel/legacy_boot.h | 90 +++++ arch/loongarch/kernel/mem.c | 26 +- 
arch/loongarch/kernel/numa.c | 39 ++- arch/loongarch/kernel/reset.c | 3 +- arch/loongarch/kernel/setup.c | 18 +- arch/loongarch/kernel/smp.c | 6 +- arch/loongarch/pci/acpi.c | 148 +++++++- drivers/firmware/efi/Makefile | 1 + drivers/irqchip/irq-loongarch-cpu.c | 7 +- drivers/irqchip/irq-loongson-eiointc.c | 4 +- drivers/irqchip/irq-loongson-pch-pic.c | 5 + 19 files changed, 842 insertions(+), 26 deletions(-) create mode 100644 arch/loongarch/kernel/legacy_boot.c create mode 100644 arch/loongarch/kernel/legacy_boot.h diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index b24437e28c6e..60a2ce1a6531 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -124,6 +124,7 @@ extern unsigned long vm_map_base; #define PCI_IOSIZE SZ_32M #define ISA_IOSIZE SZ_16K #define IO_SPACE_LIMIT (PCI_IOSIZE - 1) +#define ISA_PHY_IOBASE LOONGSON_LIO_BASE #define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS) diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h index 91d81f9730ab..2a811f19e398 100644 --- a/arch/loongarch/include/asm/efi.h +++ b/arch/loongarch/include/asm/efi.h @@ -6,6 +6,7 @@ #define _ASM_LOONGARCH_EFI_H #include +#include void __init efi_init(void); void __init efi_runtime_init(void); diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 4fcc168f0732..10ee5fc7ac3e 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -9,6 +9,7 @@ obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \ alternative.o unwind.o +obj-y += legacy_boot.o obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_EFI) += efi.o diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 55d6a48c76a8..58819b017ba8 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -17,6 +17,7 @@ #include #include #include +#include "legacy_boot.h" int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); @@ -58,7 +59,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) } #ifdef CONFIG_SMP -static int set_processor_mask(u32 id, u32 flags) +int set_processor_mask(u32 id, u32 flags) { int cpu, cpuid = id; @@ -132,6 +133,10 @@ static void __init acpi_process_madt(void) __cpu_logical_map[i] = -1; } #endif + + if (efi_bp && bpi_version <= BPI_VERSION_V1) + legacy_madt_table_init(); + acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, acpi_parse_processor, MAX_CORE_PIC); diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 6f1e461fc5ac..b132af112664 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -26,6 +26,7 @@ #include #include #include +#include "legacy_boot.h" static unsigned long efi_nr_tables; static unsigned long efi_config_table; @@ -172,6 +173,9 @@ static int __init set_virtual_map(void) efi_set_virtual_address_map_t *svam; efi_memory_desc_t *in, runtime_map[32]; + if (efi_bp) + return EFI_SUCCESS; + size = sizeof(efi_memory_desc_t); for_each_efi_memory_desc(in) { @@ -254,10 +258,12 @@ void __init efi_init(void) void *config_tables; struct efi_boot_memmap *tbl; - if (!efi_system_table) - return; + if (efi_system_table) + efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, + sizeof(*efi_systab)); + else + efi_systab = (efi_system_table_t *)efi_bp->systemtable; - efi_systab = (efi_system_table_t 
*)early_memremap_ro(efi_system_table, sizeof(*efi_systab)); if (!efi_systab) { pr_err("Can't find EFI system table.\n"); return; diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 6b3bfb0092e6..85dbfb1256eb 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -12,6 +12,7 @@ #include #include #include +#include "legacy_boot.h" u64 efi_system_table; struct loongson_system_configuration loongson_sysconf; @@ -22,6 +23,11 @@ void __init init_environ(void) int efi_boot = fw_arg0; char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); + legacy_boot_init(fw_arg0, fw_arg1, fw_arg2); + + if (efi_bp) + return; + if (efi_boot) set_bit(EFI_BOOT, &efi.flags); else diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 883e5066ae44..22562bb6fdc4 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -20,6 +20,7 @@ #include #include #include +#include "legacy_boot.h" DEFINE_PER_CPU(unsigned long, irq_stack); DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); @@ -61,6 +62,12 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) if (header->length < sizeof(struct acpi_table_mcfg)) return -EINVAL; + for (i = 0; i < MAX_IO_PICS; i++) { + msi_group[i].pci_segment = -1; + msi_group[i].node = -1; + pch_group[i].node = -1; + } + n = (header->length - sizeof(struct acpi_table_mcfg)) / sizeof(struct acpi_mcfg_allocation); mcfg = (struct acpi_table_mcfg *)header; @@ -76,14 +83,6 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) static void __init init_vec_parent_group(void) { - int i; - - for (i = 0; i < MAX_IO_PICS; i++) { - msi_group[i].pci_segment = -1; - msi_group[i].node = -1; - pch_group[i].node = -1; - } - acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); } @@ -99,7 +98,7 @@ static int __init get_ipi_irq(void) void __init init_IRQ(void) { - int i; + int i, ret; #ifdef CONFIG_SMP int r, ipi_irq; static int ipi_dummy_dev; @@ -111,7 +110,13 @@ void __init init_IRQ(void) clear_csr_estat(ESTATF_IP); init_vec_parent_group(); - irqchip_init(); + if (efi_bp && bpi_version <= BPI_VERSION_V1) { + ret = setup_legacy_IRQ(); + if (ret) + panic("IRQ domain init error!\n"); + } else { + irqchip_init(); + } #ifdef CONFIG_SMP ipi_irq = get_ipi_irq(); if (ipi_irq < 0) diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c new file mode 100644 index 000000000000..4b9ee3320897 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Yun Liu, liuyun@loongson.cn + * Copyright (C) 2020 Loongson Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "legacy_boot.h" + +#define MAX_CORE_PIC 256 +#define PREFIX "ACPI: " + +#define MSI_MSG_ADDRESS 0x2FF00000 +#define MSI_MSG_DEFAULT_COUNT 0xC0 + +struct boot_params *efi_bp; +struct loongsonlist_mem_map *g_mmap; +struct acpi_madt_lio_pic *acpi_liointc; +struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS]; + +struct acpi_madt_ht_pic *acpi_htintc; +struct acpi_madt_lpc_pic *acpi_pchlpc; +struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS]; +struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS]; + +struct irq_domain *cpu_domain; +struct irq_domain *liointc_domain; +struct irq_domain *pch_lpc_domain; +struct irq_domain *pch_msi_domain[MAX_IO_PICS]; +struct irq_domain *pch_pic_domain[MAX_IO_PICS]; + +char arcs_cmdline[COMMAND_LINE_SIZE]; +int nr_io_pics; +int bpi_version; + +struct acpi_madt_lio_pic liointc_default = { + .address = LOONGSON_REG_BASE + 0x1400, + .size = 256, + .cascade = {2, 3}, + .cascade_map = {0x00FFFFFF, 0xff000000}, +}; + +struct acpi_madt_lpc_pic pchlpc_default = { + .address = LS7A_LPC_REG_BASE, + .size = SZ_4K, + .cascade = 19, +}; + +struct acpi_madt_eio_pic eiointc_default[MAX_IO_PICS]; +struct acpi_madt_msi_pic pchmsi_default[MAX_IO_PICS]; +struct acpi_madt_bio_pic pchpic_default[MAX_IO_PICS]; + +static int +acpi_parse_lapic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_local_apic *processor = NULL; + + processor = (struct acpi_madt_local_apic *)header; + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + set_processor_mask(processor->id, processor->lapic_flags); + + return 0; +} + +static int bad_pch_pic(unsigned long address) +{ + if (nr_io_pics >= MAX_IO_PICS) { + pr_warn("WARNING: Max # of I/O PCH_PICs (%d) exceeded (found %d), skipping\n", + MAX_IO_PICS, nr_io_pics); + return 1; + } + if (!address) { + pr_warn("WARNING: Bogus (zero) I/O PCH_PIC address found in table, skipping!\n"); + return 1; + } + return 0; +} + +void register_default_pic(int id, u32 address, u32 irq_base) +{ + int idx, entries; + unsigned long addr; + + if (bad_pch_pic(address)) + return; + + idx = nr_io_pics; + + pchpic_default[idx].address = address; + if (idx) + pchpic_default[idx].address |= nid_to_addrbase(id) | HT1LO_OFFSET; + pchpic_default[idx].id = id; + pchpic_default[idx].version = 0; + pchpic_default[idx].size = 0x1000; + pchpic_default[idx].gsi_base = irq_base; + + msi_group[nr_io_pics].pci_segment = nr_io_pics; + pch_group[nr_io_pics].node = msi_group[nr_io_pics].node = id; + + addr = pchpic_default[idx].address; + /* Read INT_ID.int_num */ + entries = (((unsigned long)ls7a_readq(addr) >> 48) & 0xff) + 1; + pchmsi_default[idx].msg_address = MSI_MSG_ADDRESS; + pchmsi_default[idx].start = entries; + pchmsi_default[idx].count = MSI_MSG_DEFAULT_COUNT; + + eiointc_default[idx].cascade = 3; + eiointc_default[idx].node = id; + eiointc_default[idx].node_map = 1; + + if (idx) { + eiointc_default[idx].cascade = 0x4; + eiointc_default[0].node_map = 0x1DF; + eiointc_default[idx].node_map = 0xFE20; + } + + acpi_pchpic[idx] = &pchpic_default[idx]; + acpi_pchmsi[idx] = &pchmsi_default[idx]; + acpi_eiointc[idx] = &eiointc_default[idx]; + + nr_io_pics++; +} + +static int +acpi_parse_legacy_pch_pic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_io_apic *pch_pic = NULL; + + pch_pic = (struct acpi_madt_io_apic *)header; + + if (BAD_MADT_ENTRY(pch_pic, end)) + return 
-EINVAL; + + acpi_table_print_madt_entry(&header->common); + + register_default_pic(pch_pic->id, pch_pic->address, + pch_pic->global_irq_base); + + return 0; +} + +__init int legacy_madt_table_init(void) +{ + /* Parse MADT LAPIC entries */ + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, MAX_CORE_PIC); + acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_legacy_pch_pic, MAX_IO_PICS); + + acpi_liointc = &liointc_default; + acpi_pchlpc = &pchlpc_default; + + return 0; +} + +int setup_legacy_IRQ(void) +{ + int i, ret; + struct irq_domain *pic_domain; + + if (!acpi_eiointc[0]) + cpu_data[0].options &= ~LOONGARCH_CPU_EXTIOI; + + ret = cpuintc_acpi_init(NULL, 0); + if (ret) { + pr_err("CPU domain init error!\n"); + return -1; + } + cpu_domain = get_cpudomain(); + ret = liointc_acpi_init(cpu_domain, acpi_liointc); + if (ret) { + pr_err("Liointc domain init error!\n"); + return -1; + } + liointc_domain = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY); + if (cpu_has_extioi) { + pr_info("Using EIOINTC interrupt mode\n"); + for (i = 0; i < nr_io_pics; i++) { + ret = eiointc_acpi_init(cpu_domain, acpi_eiointc[i]); + if (ret) { + pr_err("Eiointc domain init error!\n"); + return -1; + } + + pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[i], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[i], 0); + } + /* HTVECINTC maybe not use */ + } else { + pr_info("Using HTVECINTC interrupt mode\n"); + ret = htvec_acpi_init(liointc_domain, acpi_htintc); + if (ret) { + pr_err("HTVECintc domain init error!\n"); + return -1; + } + pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[0], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[0], 0); + } + + pic_domain = get_pchpic_irq_domain(); + if (pic_domain) + pch_lpc_acpi_init(pic_domain, acpi_pchlpc); + + return 0; +} + +/* + * Manage initrd + */ +#ifdef CONFIG_BLK_DEV_INITRD +static __init int rd_start_early(char *p) +{ + phys_initrd_start = __pa(memparse(p, NULL)); + + return 0; +} +early_param("rd_start", rd_start_early); + +static __init int rd_size_early(char *p) +{ + phys_initrd_size = memparse(p, NULL); + + return 0; +} +early_param("rd_size", rd_size_early); + +#endif + +__init void fw_init_cmdline(unsigned long argc, unsigned long cmdp) +{ + int i; + char **_fw_argv; + + _fw_argv = (char **)cmdp; + + arcs_cmdline[0] = '\0'; + for (i = 1; i < argc; i++) { + strlcat(arcs_cmdline, _fw_argv[i], COMMAND_LINE_SIZE); + if (i < (argc - 1)) + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); + } + strscpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); +} + +static u8 ext_listhdr_checksum(u8 *buffer, u32 length) +{ + u8 sum = 0; + u8 *end = buffer + length; + + while (buffer < end) + sum = (u8)(sum + *(buffer++)); + + return sum; +} + +static int parse_mem(struct _extention_list_hdr *head) +{ + g_mmap = (struct loongsonlist_mem_map *)head; + if (ext_listhdr_checksum((u8 *)g_mmap, head->length)) { + pr_err("mem checksum error\n"); + return -EPERM; + } + return 0; +} + +/* legacy firmware passed, add use this info if need vbios */ +static int parse_vbios(struct _extention_list_hdr *head) +{ + struct loongsonlist_vbios *pvbios; + + pvbios = (struct loongsonlist_vbios *)head; + + if (ext_listhdr_checksum((u8 *)pvbios, head->length)) { + pr_err("vbios_addr checksum error\n"); + return -EPERM; + } + return 0; +} + +/* legacy firmware passed, add use this info if need screeninfo KVM? 
*/ +static int parse_screeninfo(struct _extention_list_hdr *head) +{ + struct loongsonlist_screeninfo *pscreeninfo; + + pscreeninfo = (struct loongsonlist_screeninfo *)head; + if (ext_listhdr_checksum((u8 *)pscreeninfo, head->length)) { + pr_err("screeninfo_addr checksum error\n"); + return -EPERM; + } + + memcpy(&screen_info, &pscreeninfo->si, sizeof(screen_info)); + return 0; +} + +static int list_find(struct boot_params *bp) +{ + struct _extention_list_hdr *fhead = NULL; + unsigned long index; + + fhead = bp->extlist; + if (!fhead) { + pr_err("the bp ext struct empty!\n"); + return -1; + } + do { + if (memcmp(&(fhead->signature), LOONGSON_MEM_SIGNATURE, 3) == 0) { + if (parse_mem(fhead) != 0) { + pr_err("parse mem failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_VBIOS_SIGNATURE, 5) == 0) { + if (parse_vbios(fhead) != 0) { + pr_err("parse vbios failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_SCREENINFO_SIGNATURE, 5) == 0) { + if (parse_screeninfo(fhead) != 0) { + pr_err("parse screeninfo failed\n"); + return -EPERM; + } + } + fhead = (struct _extention_list_hdr *)fhead->next; + index = (unsigned long)fhead; + } while (index); + return 0; +} + +unsigned int bpi_init(void) +{ + return list_find(efi_bp); +} + +static int get_bpi_version(u64 *signature) +{ + u8 data[9]; + int version = BPI_VERSION_NONE; + + data[8] = 0; + + memcpy(data, signature, sizeof(*signature)); + if (kstrtoint(&data[3], 10, &version)) + return BPI_VERSION_NONE; + return version; +} + +static void __init parse_bpi_flags(void) +{ + if (efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); +} + +__init unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigned long bpi) +{ + int ret; + + if (!bpi || argc < 2) + return -1; + efi_bp = (struct boot_params *)bpi; + bpi_version = get_bpi_version(&efi_bp->signature); + pr_info("BPI%d with boot flags %llx.\n", bpi_version, efi_bp->flags); + if (bpi_version == BPI_VERSION_NONE) + panic("Fatal error, bpi ver BONE!\n"); + else if (bpi_version == BPI_VERSION_V2) + parse_bpi_flags(); + + fw_init_cmdline(argc, cmdptr); + ret = bpi_init(); + if (ret) { + pr_err("init legacy firmware error!\n"); + return -1; + } + + return 0; +} + +static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, unsigned long isa_base) +{ + int ret = 0; + unsigned long vaddr; + struct logic_pio_hwaddr *range; + + range = kzalloc(sizeof(*range), GFP_ATOMIC); + if (!range) + return -ENOMEM; + + range->fwnode = fwnode; + range->size = ISA_IOSIZE; + range->hw_start = isa_base; + range->flags = LOGIC_PIO_CPU_MMIO; + + ret = logic_pio_register_range(range); + if (ret) { + kfree(range); + return ret; + } + + if (range->io_start != 0) { + logic_pio_unregister_range(range); + kfree(range); + return -EINVAL; + } + + vaddr = (unsigned long)(PCI_IOBASE + range->io_start); + ret = ioremap_page_range(vaddr, vaddr + range->size, range->hw_start, + pgprot_device(PAGE_KERNEL)); + return ret; +} + +static struct fwnode_handle * __init parse_isa_base(u64 *cpu_addr) +{ + struct device_node *np; + const __be32 *ranges = NULL; + int len; + struct device_node *node; + + for_each_node_by_name(np, "isa") { + node = of_node_get(np); + + if (!node) + break; + + ranges = of_get_property(node, "ranges", &len); + + if (!ranges || (ranges && len > 0)) + break; + } + if (ranges) { + ranges += 2; + *cpu_addr = of_translate_address(np, ranges); + return &np->fwnode; + } + + return 
NULL; +} + +static int __init register_legacy_isa_io(void) +{ + struct fwnode_handle *fwnode; + u64 cpu_addr; + + if (!acpi_disabled) { + cpu_addr = ISA_PHY_IOBASE; + fwnode = kzalloc(sizeof(*fwnode), GFP_ATOMIC); + } else { + fwnode = parse_isa_base(&cpu_addr); + } + + if (fwnode) + add_legacy_isa_io(fwnode, cpu_addr); + + return 0; +} +arch_initcall(register_legacy_isa_io); diff --git a/arch/loongarch/kernel/legacy_boot.h b/arch/loongarch/kernel/legacy_boot.h new file mode 100644 index 000000000000..982bf9b1de72 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LEGACY_BOOT_H_ +#define __LEGACY_BOOT_H_ +#include +#include +#define ADDRESS_TYPE_SYSRAM 1 +#define ADDRESS_TYPE_RESERVED 2 +#define ADDRESS_TYPE_ACPI 3 +#define ADDRESS_TYPE_NVS 4 +#define ADDRESS_TYPE_PMEM 5 + +#define LOONGSON3_BOOT_MEM_MAP_MAX 128 +#define RT_MAP_START 100 +#define FIX_MAP_ENTRY 32 + +/* mask of the flags in bootparamsinterface */ +#define BPI_FLAGS_UEFI_SUPPORTED BIT(0) +#define BPI_FLAGS_SOC_CPU BIT(1) + +#define LOONGSON_DMA_MASK_BIT 64 +#define LOONGSON_MEM_SIGNATURE "MEM" +#define LOONGSON_VBIOS_SIGNATURE "VBIOS" +#define LOONGSON_EFIBOOT_SIGNATURE "BPI" +#define LOONGSON_SCREENINFO_SIGNATURE "SINFO" +#define LOONGSON_EFIBOOT_VERSION 1000 + +/* Values for Version firmware */ + +enum bpi_vers { + BPI_VERSION_NONE = 0, + BPI_VERSION_V1 = 1000, + BPI_VERSION_V2 = 1001, +}; + +struct boot_params { + u64 signature; /* {"BPIXXXXX"} */ + void *systemtable; + struct _extention_list_hdr *extlist; + u64 flags; +} __packed; + +struct _extention_list_hdr { + u64 signature; + u32 length; + u8 revision; + u8 checksum; + struct _extention_list_hdr *next; +} __packed; + +struct loongsonlist_mem_map { + struct _extention_list_hdr header; /*{"M", "E", "M"}*/ + u8 map_count; + struct _loongson_mem_map { + u32 mem_type; + u64 mem_start; + u64 mem_size; + } __packed map[LOONGSON3_BOOT_MEM_MAP_MAX]; +} __packed; + +struct loongsonlist_vbios { + struct _extention_list_hdr header; /* {VBIOS} */ + u64 vbios_addr; +} __packed; + +struct loongsonlist_screeninfo { + struct _extention_list_hdr header; + struct screen_info si; +}; +unsigned long legacy_boot_init(unsigned long argc, + unsigned long cmdptr, unsigned long bpi); +extern int bpi_version; +extern struct boot_params *efi_bp; +extern struct loongsonlist_mem_map *g_mmap; +extern int set_processor_mask(u32 id, u32 flags); +extern int __init setup_legacy_IRQ(void); +extern struct loongson_system_configuration loongson_sysconf; +extern unsigned long long smp_group[MAX_PACKAGES]; +extern int legacy_madt_table_init(void); +extern struct pch_pic *pch_pic_priv[MAX_IO_PICS]; +extern struct irq_domain *get_cpudomain(void); +extern int __init cpuintc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern struct irq_domain *get_pchpic_irq_domain(void); +#endif diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c index aed901c57fb4..5fd1bc3333bc 100644 --- a/arch/loongarch/kernel/mem.c +++ b/arch/loongarch/kernel/mem.c @@ -9,13 +9,35 @@ #include #include #include - +#include "legacy_boot.h" void __init memblock_init(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_start, mem_end, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + /* parse memory information */ + 
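+		/*
+		 * Legacy BPI path: walk the map handed over by the old
+		 * bootloader; only ADDRESS_TYPE_SYSRAM ranges are added
+		 * as usable RAM here.
+		 */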
for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + pr_info("add memory region memblock - base: 0x%llx size: 0x%llx\n", mem_start, mem_size); + memblock_add(mem_start, mem_size); + if (max_low_pfn < (mem_end >> PAGE_SHIFT)) + max_low_pfn = mem_end >> PAGE_SHIFT; + break; + } + } + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + memblock_reserve(__pa_symbol(&_text), + __pa_symbol(&_end) - __pa_symbol(&_text)); + return; + } /* Parse memory information */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 6e65ff12d5c7..1c3ede74ea4d 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -25,6 +25,7 @@ #include #include #include +#include "legacy_boot.h" int numa_off; struct pglist_data *node_data[MAX_NUMNODES]; @@ -37,7 +38,6 @@ static struct numa_meminfo numa_meminfo; cpumask_t cpus_on_node[MAX_NUMNODES]; cpumask_t phys_cpus_on_node[MAX_NUMNODES]; EXPORT_SYMBOL(cpus_on_node); - /* * apicid, cpu, node mappings */ @@ -301,10 +301,45 @@ static void __init add_numamem_region(u64 start, u64 end, u32 type) static void __init init_node_memblock(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_end, mem_start, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = g_mmap->map[i].mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + mem_start = PFN_ALIGN(mem_start); + mem_end = PFN_ALIGN(mem_end - PAGE_SIZE + 1); + if (mem_start >= mem_end) + break; + add_numamem_region(mem_start, mem_end, EFI_PERSISTENT_MEMORY); + break; + + case ADDRESS_TYPE_ACPI: + mem_start = PFN_ALIGN(mem_start - PAGE_SIZE + 1); + mem_end = PFN_ALIGN(mem_end); + mem_size = mem_end - mem_start; + memblock_add(mem_start, mem_size); + memblock_mark_nomap(mem_start, mem_size); + memblock_set_node(mem_start, mem_size, + &memblock.memory, 0); + memblock_reserve(mem_start, mem_size); + break; + + case ADDRESS_TYPE_RESERVED: + memblock_reserve(mem_start, mem_size); + break; + } + } + return; + } + /* Parse memory information and activate */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c index 1ef8c6383535..e7282e8de1cd 100644 --- a/arch/loongarch/kernel/reset.c +++ b/arch/loongarch/kernel/reset.c @@ -49,7 +49,8 @@ void machine_power_off(void) #endif do_kernel_power_off(); #ifdef CONFIG_EFI - efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); + if (efi.reset_system) + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); #endif while (true) { diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index d7409a3e67a5..8af027e89ad5 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -48,6 +48,7 @@ #include #include #include +#include "legacy_boot.h" #define SMBIOS_BIOSSIZE_OFFSET 0x09 #define SMBIOS_BIOSEXTERN_OFFSET 0x13 @@ -134,9 +135,22 @@ static void __init parse_cpu_table(const struct dmi_header *dm) static void __init parse_bios_table(const struct dmi_header *dm) { + int bios_extern; char *dmi_data = (char *)dm; + bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET); b_info.bios_size = (*(dmi_data + 
SMBIOS_BIOSSIZE_OFFSET) + 1) << 6; + + if (bpi_version == BPI_VERSION_V2) { + if ((!!(efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED)) != (!!(bios_extern & LOONGSON_EFI_ENABLE))) + pr_err("There is a conflict of definitions between efi_bp->flags and smbios\n"); + return; + } + + if (bios_extern & LOONGSON_EFI_ENABLE) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); } static void __init find_tokens(const struct dmi_header *dm, void *dummy) @@ -614,7 +628,9 @@ void __init setup_arch(char **cmdline_p) pagetable_init(); bootcmdline_init(cmdline_p); parse_early_param(); - reserve_initrd_mem(); + /* The small fdt method should be skipped directly to avoid two reserved operations. */ + if (fw_arg2) + reserve_initrd_mem(); platform_init(); arch_mem_init(cmdline_p); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 378ffa78ffeb..a6f3403d20b5 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -32,6 +32,7 @@ #include #include #include +#include "legacy_boot.h" int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__cpu_number_map); @@ -310,11 +311,12 @@ void __init loongson_prepare_cpus(unsigned int max_cpus) */ void loongson_boot_secondary(int cpu, struct task_struct *idle) { - unsigned long entry; + unsigned long entry = (unsigned long)&smpboot_entry; pr_info("Booting CPU#%d...\n", cpu); - entry = __pa_symbol((unsigned long)&smpboot_entry); + if (!efi_bp) + entry = __pa_symbol((unsigned long)&smpboot_entry); cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle); cpuboot_data.thread_info = (unsigned long)task_thread_info(idle); diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 365f7de771cb..813b668450c1 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -58,13 +58,159 @@ static void acpi_release_root_info(struct acpi_pci_root_info *ci) kfree(info); } +static void arch_pci_root_validate_resources(struct device *dev, + struct list_head *resources, + unsigned long type) +{ + LIST_HEAD(list); + struct resource *res1, *res2, *root = NULL; + struct resource_entry *tmp, *entry, *entry2; + + WARN_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0); + root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource; + + list_splice_init(resources, &list); + resource_list_for_each_entry_safe(entry, tmp, &list) { + bool free = false; + resource_size_t end; + + res1 = entry->res; + if (!(res1->flags & type)) + goto next; + + /* Exclude non-addressable range or non-addressable portion */ + end = min(res1->end, root->end); + if (end <= res1->start) { + dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n", + res1); + free = true; + goto next; + } else if (res1->end != end) { + dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n", + res1, (unsigned long long)end + 1, + (unsigned long long)res1->end); + res1->end = end; + } + + resource_list_for_each_entry(entry2, resources) { + res2 = entry2->res; + if (!(res2->flags & type)) + continue; + + /* + * I don't like throwing away windows because then + * our resources no longer match the ACPI _CRS, but + * the kernel resource tree doesn't allow overlaps. 
+ */ + if (resource_overlaps(res1, res2)) { + res2->start = min(res1->start, res2->start); + res2->end = max(res1->end, res2->end); + dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n", + res2, res1); + free = true; + goto next; + } + } + +next: + resource_list_del(entry); + if (free) + resource_list_free_entry(entry); + else + resource_list_add_tail(entry, resources); + } +} +static void arch_pci_root_remap_iospace(struct fwnode_handle *fwnode, + struct resource_entry *entry) +{ + struct resource *res = entry->res; + resource_size_t cpu_addr = res->start; + resource_size_t pci_addr = cpu_addr - entry->offset; + resource_size_t length = resource_size(res); + unsigned long port; + + if (pci_register_io_range(fwnode, cpu_addr, length)) { + res->start += ISA_IOSIZE; + cpu_addr = res->start; + pci_addr = cpu_addr - entry->offset; + length = resource_size(res); + if (pci_register_io_range(fwnode, cpu_addr, length)) + goto err; + } + + port = pci_address_to_pio(cpu_addr); + if (port == (unsigned long)-1) + goto err; + + res->start = port; + res->end = port + length - 1; + entry->offset = port - pci_addr; + + if (pci_remap_iospace(res, cpu_addr) < 0) + goto err; + + pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res); + return; +err: + res->flags |= IORESOURCE_DISABLED; +} + +static int arch_pci_probe_root_resources(struct acpi_pci_root_info *info) +{ + int ret; + struct list_head *list = &info->resources; + struct acpi_device *device = info->bridge; + struct resource_entry *entry, *tmp; + unsigned long flags; + struct resource *res; + + flags = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT; + ret = acpi_dev_get_resources(device, list, + acpi_dev_filter_resource_type_cb, + (void *)flags); + if (ret < 0) + dev_warn(&device->dev, + "failed to parse _CRS method, error code %d\n", ret); + else if (ret == 0) + dev_dbg(&device->dev, + "no IO and memory resources present in _CRS\n"); + else { + resource_list_for_each_entry_safe(entry, tmp, list) { + if (entry->res->flags & IORESOURCE_IO) { + res = entry->res; + res->start = PFN_ALIGN(res->start); + res->end += 1; + res->end = PFN_ALIGN(res->end); + res->end -= 1; + if (!entry->offset) { + entry->offset = LOONGSON_LIO_BASE; + res->start |= LOONGSON_LIO_BASE; + res->end |= LOONGSON_LIO_BASE; + } + arch_pci_root_remap_iospace(&device->fwnode, + entry); + } + if (entry->res->flags & IORESOURCE_DISABLED) + resource_list_destroy_entry(entry); + else + entry->res->name = info->name; + } + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_MEM); + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_IO); + } + + return ret; +} + static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) { int status; struct resource_entry *entry, *tmp; struct acpi_device *device = ci->bridge; - status = acpi_pci_probe_root_resources(ci); + status = arch_pci_probe_root_resources(ci); if (status > 0) { resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { if (entry->res->flags & IORESOURCE_MEM) { diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index e489fefd23da..b4528af86517 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o obj-$(CONFIG_RISCV) += $(riscv-obj-y) +#obj-$(CONFIG_LOONGARCH) += efi-init.o obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o obj-$(CONFIG_EFI_EARLYCON) += earlycon.o 
obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index 9d8f2c406043..e174963dc7b9 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -141,7 +141,12 @@ static int __init acpi_cascade_irqdomain_init(void) return 0; } -static int __init cpuintc_acpi_init(union acpi_subtable_headers *header, +struct irq_domain *get_cpudomain(void) +{ + return irq_domain; +} + +int __init cpuintc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { int ret; diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index b3736bdd4b9f..34542ec7b589 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -328,7 +328,7 @@ static struct syscore_ops eiointc_syscore_ops = { .resume = eiointc_resume, }; -static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, +int __init pch_pic_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; @@ -341,7 +341,7 @@ static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, return 0; } -static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, +int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct irq_domain *parent; diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 63db8e2172e0..372215f2b9ed 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -52,6 +52,11 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; +struct irq_domain *get_pchpic_irq_domain(void) +{ + return pch_pic_priv[0]->pic_domain; +} + static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) { u32 reg; -- Gitee From f40e6bed0b6268305af8d0c577e98b363c36975a Mon Sep 17 00:00:00 2001 From: liuyun Date: Sat, 2 Dec 2023 10:08:32 +0800 Subject: [PATCH 138/953] anolis: LoongArch: Fix virtual machine startup error ANBZ: #8435 Signed-off-by: liuyun Signed-off-by: maobibo Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/include/asm/irq.h | 1 + arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/kernel/legacy_boot.c | 11 ++++--- drivers/irqchip/irq-loongson-eiointc.c | 40 +++++++++++++++++--------- 4 files changed, 36 insertions(+), 17 deletions(-) diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 218b4da0ea90..722eb1aa726f 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,6 +53,7 @@ struct acpi_vector_group { extern struct acpi_vector_group pch_group[MAX_IO_PICS]; extern struct acpi_vector_group msi_group[MAX_IO_PICS]; +#define MAX_CORES_PER_EIO_NODE 256 #define CORES_PER_EIO_NODE 4 #define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 33531d432b49..ffa9b1c517ac 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -171,6 +171,7 @@ /* IOCSR */ #define iocsr_read32(reg) __iocsrrd_w(reg) #define iocsr_read64(reg) __iocsrrd_d(reg) +#define iocsr_write8(val, reg) __iocsrwr_b(val, reg) #define iocsr_write32(val, reg) __iocsrwr_w(val, reg) #define iocsr_write64(val, reg) 
__iocsrwr_d(val, reg) diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c index 4b9ee3320897..214e7e0b04af 100644 --- a/arch/loongarch/kernel/legacy_boot.c +++ b/arch/loongarch/kernel/legacy_boot.c @@ -211,7 +211,7 @@ int setup_legacy_IRQ(void) } pic_domain = get_pchpic_irq_domain(); - if (pic_domain) + if (pic_domain && !cpu_has_hypervisor) pch_lpc_acpi_init(pic_domain, acpi_pchlpc); return 0; @@ -373,9 +373,12 @@ __init unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, efi_bp = (struct boot_params *)bpi; bpi_version = get_bpi_version(&efi_bp->signature); pr_info("BPI%d with boot flags %llx.\n", bpi_version, efi_bp->flags); - if (bpi_version == BPI_VERSION_NONE) - panic("Fatal error, bpi ver BONE!\n"); - else if (bpi_version == BPI_VERSION_V2) + if (bpi_version == BPI_VERSION_NONE) { + if (cpu_has_hypervisor) + pr_err(FW_BUG "Fatal error, bpi ver NONE!\n"); + else + panic(FW_BUG "Fatal error, bpi ver NONE!\n"); + } else if (bpi_version == BPI_VERSION_V2) parse_bpi_flags(); fw_init_cmdline(argc, cmdptr); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 34542ec7b589..79715b9f80d6 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -56,7 +56,9 @@ static void eiointc_enable(void) static int cpu_to_eio_node(int cpu) { - return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + int cores = (cpu_has_hypervisor ? MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); + + return cpu_logical_map(cpu) / cores; } static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) @@ -87,6 +89,11 @@ static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, static DEFINE_RAW_SPINLOCK(affinity_lock); +static void virt_extioi_set_irq_route(int irq, unsigned int cpu) +{ + iocsr_write8(cpu_logical_map(cpu), EIOINTC_REG_ROUTE + irq); +} + static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { unsigned int cpu; @@ -109,16 +116,22 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - /* Mask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), - 0x0, priv->node * CORES_PER_EIO_NODE); - - /* Set route for target vector */ - eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); - - /* Unmask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE, - 0x0, priv->node * CORES_PER_EIO_NODE); + if (cpu_has_hypervisor) { + iocsr_write32(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F), regaddr); + virt_extioi_set_irq_route(vector, cpu); + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); + } else { + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), + 0x0, priv->node * CORES_PER_EIO_NODE); + + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, + 0x0, priv->node * CORES_PER_EIO_NODE); + } irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -145,13 +158,14 @@ static int eiointc_router_init(unsigned int cpu) uint32_t data; uint32_t node = cpu_to_eio_node(cpu); int index = eiointc_index(node); + int cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE);
 
 	if (index < 0) {
 		pr_err("Error: invalid nodemap!\n");
 		return -1;
 	}
 
-	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
+	if ((cpu_logical_map(cpu) % cores) == 0) {
 		eiointc_enable();
 
 		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
@@ -168,7 +182,7 @@
 		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
 			/* Route to Node-0 Core-0 */
 			if (index == 0)
-				bit = BIT(cpu_logical_map(0));
+				bit = (cpu_has_hypervisor ? cpu_logical_map(0) : BIT(cpu_logical_map(0)));
 			else
 				bit = (eiointc_priv[index]->node << 4) | 1;
 
-- 
Gitee


From 0da648d2531803e91902cde57dc9104ac7fa3b12 Mon Sep 17 00:00:00 2001
From: yangqiming
Date: Sat, 2 Dec 2023 10:08:33 +0800
Subject: [PATCH 139/953] anolis: LoongArch: Fix EIOINTC structure members

ANBZ: #8435

Resolve a problem where CPUs on multi-node systems fail to boot.

Signed-off-by: yangqiming
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 arch/loongarch/kernel/legacy_boot.c | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c
index 214e7e0b04af..35a0a118486f 100644
--- a/arch/loongarch/kernel/legacy_boot.c
+++ b/arch/loongarch/kernel/legacy_boot.c
@@ -93,13 +93,15 @@ static int bad_pch_pic(unsigned long address)
 
 void register_default_pic(int id, u32 address, u32 irq_base)
 {
-	int idx, entries;
+	int j, idx, entries, cores;
 	unsigned long addr;
+	u64 node_map = 0;
 
 	if (bad_pch_pic(address))
 		return;
 
 	idx = nr_io_pics;
+	cores = (cpu_has_hypervisor ? MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE);
 
 	pchpic_default[idx].address = address;
 	if (idx)
@@ -119,14 +121,29 @@ void register_default_pic(int id, u32 address, u32 irq_base)
 	pchmsi_default[idx].start = entries;
 	pchmsi_default[idx].count = MSI_MSG_DEFAULT_COUNT;
 
-	eiointc_default[idx].cascade = 3;
+	for_each_possible_cpu(j) {
+		int node = cpu_logical_map(j) / cores;
+
+		node_map |= (1 << node);
+	}
+	eiointc_default[idx].cascade = 3 + idx;
 	eiointc_default[idx].node = id;
-	eiointc_default[idx].node_map = 1;
+	eiointc_default[idx].node_map = node_map;
 
 	if (idx) {
-		eiointc_default[idx].cascade = 0x4;
-		eiointc_default[0].node_map = 0x1DF;
-		eiointc_default[idx].node_map = 0xFE20;
+		int i;
+
+		for (i = 0; i < idx + 1; i++) {
+			node_map = 0;
+
+			for_each_possible_cpu(j) {
+				int node = cpu_logical_map(j) / cores;
+
+				if (((node & 7) < 4) ? !i : i)
+					node_map |= (1 << node);
+			}
+			eiointc_default[i].node_map = node_map;
+		}
 	}
 
 	acpi_pchpic[idx] = &pchpic_default[idx];
-- 
Gitee


From 5b28fe028bd2c3c93c5e08f2a989e9493304c7f7 Mon Sep 17 00:00:00 2001
From: Hongchen Zhang
Date: Sat, 2 Dec 2023 10:08:34 +0800
Subject: [PATCH 140/953] anolis: LoongArch: use arch specific phys_to_dma

ANBZ: #8435

To be compatible with old firmware that has no _DMA method, we should
use an arch-specific phys_to_dma().
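
As an illustration (not part of the original changelog), assuming a
node_id_offset of 36, a physical address on node 1 would translate as:

  paddr = 0x100020000000   (node id, bits 44~47, is 1)
  nid   = (paddr >> 44) & 0xf = 1
  daddr = ((nid << 44) ^ paddr) | (nid << 36) = 0x1020000000

dma_to_phys() applies the inverse mapping and recovers the original
48-bit physical address.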
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 arch/loongarch/Kconfig      |  1 +
 arch/loongarch/kernel/dma.c | 27 +++++++++++++++++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 9fd8644a9a4c..edbe1915e122 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -144,6 +144,7 @@ config LOONGARCH
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
 	select HAVE_SETUP_PER_CPU_AREA if NUMA
 	select HAVE_STACKPROTECTOR
+	select ARCH_HAS_PHYS_TO_DMA
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_TIF_NOHZ
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c
index 7a9c6a9dd2d0..cc0ccde58db8 100644
--- a/arch/loongarch/kernel/dma.c
+++ b/arch/loongarch/kernel/dma.c
@@ -4,6 +4,28 @@
  */
 #include
 #include
+#include
+
+/*
+ * We extract the 4-bit node id (bits 44~47) from Loongson-3's
+ * 48-bit physical address space and embed it into the 40-bit
+ * DMA address space.
+ */
+
+static int node_id_offset;
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	long nid = (paddr >> 44) & 0xf;
+
+	return ((nid << 44) ^ paddr) | (nid << node_id_offset);
+}
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	long nid = (daddr >> node_id_offset) & 0xf;
+
+	return ((nid << node_id_offset) ^ daddr) | (nid << 44);
+}
 
 void acpi_arch_dma_setup(struct device *dev)
 {
@@ -11,6 +33,11 @@ void acpi_arch_dma_setup(struct device *dev)
 	u64 mask, end = 0;
 	const struct bus_dma_region *map = NULL;
 
+	if (node_id_offset == 0) {
+		node_id_offset = ((readl(LS7A_DMA_CFG) & LS7A_DMA_NODE_MASK) >> LS7A_DMA_NODE_SHF);
+		node_id_offset += 36;
+	}
+
 	ret = acpi_dma_get_range(dev, &map);
 	if (!ret && map) {
 		const struct bus_dma_region *r = map;
-- 
Gitee


From a895566070f8d980785908b3e2905e8a8be34748 Mon Sep 17 00:00:00 2001
From: liuyun
Date: Mon, 11 Dec 2023 10:03:09 +0800
Subject: [PATCH 141/953] anolis: cpufreq:loongarch: Add cpufreq driver for LoongArch

ANBZ: #8435

This patch adds cpufreq driver support for LoongArch.
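
As a usage sketch (illustrative, not from the original changelog), the
driver hooks into the standard cpufreq sysfs interface once registered:

  # cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_driver
  acpi-cpufreq
  # echo powersave > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor

The driver registers under the name "acpi-cpufreq" and, being a
setpolicy-style driver, accepts only the "performance" and "powersave"
policies.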
Signed-off-by: zhangtianyang Signed-off-by: liuyun Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/Kconfig | 1 + arch/loongarch/configs/loongson3_defconfig | 4 + arch/loongarch/include/asm/fpu.h | 13 +- drivers/cpufreq/Kconfig | 11 + drivers/cpufreq/Makefile | 1 + drivers/cpufreq/loongson3-acpi-cpufreq.c | 1527 ++++++++++++++++++++ 6 files changed, 1556 insertions(+), 1 deletion(-) create mode 100644 drivers/cpufreq/loongson3-acpi-cpufreq.c diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index edbe1915e122..1463213f3315 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -654,6 +654,7 @@ config ARCH_SUSPEND_POSSIBLE config ARCH_HIBERNATION_POSSIBLE def_bool y +source "drivers/cpufreq/Kconfig" source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index a3b52aaa83b3..5bc8803dee3e 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -61,6 +61,10 @@ CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PCI_SLOT=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_ACPI_HOTPLUG_MEMORY=y CONFIG_EFI_ZBOOT=y CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h index c2d8962fda00..4d635b8e3245 100644 --- a/arch/loongarch/include/asm/fpu.h +++ b/arch/loongarch/include/asm/fpu.h @@ -48,6 +48,10 @@ static inline void disable_lasx(void); static inline void save_lasx(struct task_struct *t); static inline void restore_lasx(struct task_struct *t); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ +DECLARE_PER_CPU(unsigned long, msa_count); +DECLARE_PER_CPU(unsigned long, lasx_count); +#endif /* * Mask the FCSR Cause bits according to the Enable bits, observing * that Unimplemented is always enabled. @@ -210,6 +214,9 @@ static inline void enable_lsx(void) { if (cpu_has_lsx) csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(msa_count, raw_smp_processor_id())++; +#endif } static inline void disable_lsx(void) @@ -256,8 +263,12 @@ static inline void restore_lsx_upper(struct task_struct *t) {} static inline void enable_lasx(void) { - if (cpu_has_lasx) + if (cpu_has_lasx) { csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(lasx_count, raw_smp_processor_id())++; +#endif + } } static inline void disable_lasx(void) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index f429b9b37b76..b14584bfdf3f 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -273,6 +273,17 @@ config LOONGSON2_CPUFREQ If in doubt, say N. endif +if LOONGARCH +config LOONGSON3_ACPI_CPUFREQ + bool "Loongson3 ACPI cpufreq driver" + depends on ACPI_PROCESSOR + help + This driver adds a CPUFreq driver which utilizes the ACPI + Processor Performance States. + This driver supports Loongson 3A5000 compatible CPUs. + If in doubt, say N. 
+endif + if SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ef8510774913..076ea3ac1b56 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -104,6 +104,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o +obj-$(CONFIG_LOONGSON3_ACPI_CPUFREQ) += loongson3-acpi-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o diff --git a/drivers/cpufreq/loongson3-acpi-cpufreq.c b/drivers/cpufreq/loongson3-acpi-cpufreq.c new file mode 100644 index 000000000000..018b529a0cf9 --- /dev/null +++ b/drivers/cpufreq/loongson3-acpi-cpufreq.c @@ -0,0 +1,1527 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * loongson3-acpi-cpufreq.c - Loongson ACPI Processor P-States Driver + * + * Copyright (C) 2020 lvjianmin + * Yijun + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "cpufreq_governor.h" + +#include +#define CPU_ID_FIELD 0xf + +#define COMPLETE_STATUS 0x80000000 +#define VOLTAGE_COMMAND 0x21 + +#define DVFS_INFO 0x22 +#define DVFS_INFO_BOOST_LEVEL 0x23 +#define DVFS_INFO_MIN_FREQ 0xf +#define DVFS_INFO_MAX_FREQ 0xf0 +#define DVFS_INFO_BOOST_CORE_FREQ 0xff00 +#define DVFS_INFO_NORMAL_CORE_UPPER_LIMIT 0xf0000 +#define DVFS_INFO_BOOST_CORES 0xf00000 + +#define BOOST_MODE 0x80000 +#define NORMAL_MODE 0x40000 + +MODULE_DESCRIPTION("Loongson 3A5000 ACPI Processor P-States Driver"); + +MODULE_LICENSE("GPL"); + +#define CPUFREQ_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC) +#define LOONGSON_CONTROL_MASK (0xFF) +#define FACTOR (0xeac0c6e8) +#define BOOST_THRESHOLD (900) +#define MAX_CORES_PER_PACKAGE 64 +#define CPU_ID_FIELD 0xf +#define VOLTAGE_COMMAND 0x21 +#define MAX_READY_TIMEOUT 300000000 +#define RESERVED_FREQ 3 + +#define LOONGSON_BOOST_FREQ_MASK (0x7 << 8) +#define FREQ_STEP (25) + +static struct mutex boost_mutex[MAX_PACKAGES]; +static bool cpufreq_has_boost_freq; +static int max_boost_cores; +static int boost_gears; +static int boost_freqs[NR_CPUS + 1]; +struct package_data; +struct core_data; +static struct acpi_processor_performance __percpu *acpi_perf_data; +static struct cpufreq_driver loongson3_cpufreq_driver; +static struct freq_attr *loongson3_cpufreq_attr[]; +DECLARE_PER_CPU(struct clock_event_device, stable_clockevent_device); +static inline struct core_data *get_core_data(int cpu); + +static int min_freq_level; +static int max_freq_level; +static int max_upper_index; +static int max_boost_freq; + +/* threshold of core's get into msa */ +static int msa_count_threshold = 200; +/* threshold of core's get into lasx */ +static int lasx_count_threshold = 200; +/* other cores' upper load threshold when 1 core get into boost mode and enable msa/lasx */ +static int load_threshold = 60; + +DEFINE_PER_CPU(unsigned long, msa_count); +EXPORT_PER_CPU_SYMBOL(msa_count); + +#if defined(CONFIG_CPU_HAS_LASX) +DEFINE_PER_CPU(unsigned long, lasx_count); +EXPORT_PER_CPU_SYMBOL(lasx_count); +#endif + +struct ce_update_data { + struct clock_event_device *cd; + unsigned int new_freq; +}; + +static struct 
kthread_worker cpufreq_worker;
+static struct task_struct *cpufreq_thread;
+/**
+ * struct core_data - Store core related information
+ * @in_boost: the core is boosting to boost_freq
+ * @cpu: logical cpu of the core
+ * @update_util: the update_util_data of @cpu, passed to the callback
+ *		function that cpufreq_update_util() invokes
+ * @package: the package_data structure the core belongs to
+ * @work_in_progress: @work is busy
+ * @irq_work: to enqueue callback handling on irq workqueue
+ * @work: to enqueue work from irq workqueue on system workqueue
+ * @perf: stores frequency table related information from the ACPI table
+ * @normal_max_freq: max normal freq of the cpu
+ * @boost_freq: max boost freq of each boost gear
+ * @clock_scale: clock scale to calculate cpu_data[cpu].udelay_val in boost mode
+ * @package_id: package id of the core
+ * @shift: clock shift to calculate cpu_data[cpu].udelay_val in boost mode
+ * @update_util_set: whether the callback has been set for cpufreq_update_util()
+ * @load: current load of the core
+ * @last_freq_update_time: time of the last freq update
+ * @freq_update_delay_ns: min interval between freq updates, which is the
+ *		transition_latency configured in the ACPI table
+ *
+ * The following members are used to calculate the load of the core:
+ * @prev_update_time
+ * @prev_cpu_idle
+ * @prev_load
+ * @sampling_rate
+ */
+struct core_data {
+	bool in_boost;
+	int cpu;
+	struct update_util_data update_util;
+	struct package_data *package;
+	bool work_in_progress;
+	struct irq_work irq_work;
+	struct kthread_work work;
+	struct acpi_processor_performance *perf;
+	unsigned int normal_max_freq;
+	unsigned int *boost_freq;
+	unsigned int *clock_scale;
+	unsigned int package_id;
+	unsigned int *shift;
+	bool update_util_set;
+	unsigned long long load;
+
+	u64 last_freq_update_time;
+	s64 freq_update_delay_ns;
+	u64 prev_update_time;
+	u64 prev_cpu_idle;
+	u32 prev_load;
+	u32 sampling_rate;
+};
+
+struct package_data {
+	int boost_cores;
+	int max_boost_cores;
+	int nr_cores;
+	char in_boost;
+	int nr_full_load_cores;
+	struct core_data core[MAX_CORES_PER_PACKAGE];
+} all_package_data[MAX_PACKAGES];
+
+static bool boost_supported(void)
+{
+	return loongson3_cpufreq_driver.set_boost;
+}
+
+/*
+ * Check if target_freq is a boost freq.
+ *
+ * target_freq must be a freq from the freq table when
+ * calling the function.
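+ *
+ * Returns the boost gear encoded in bits 8-10 of the matching state's
+ * control field (LOONGSON_BOOST_FREQ_MASK), or 0 for a normal freq.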
+ */ +static int boost_level(struct acpi_processor_performance *perf, unsigned int target_freq) +{ + int i; + + for (i = 0; i < perf->state_count; i++) { + if (target_freq == (perf->states[i].core_frequency * 1000)) + return (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8; + } + return 0; +} + +#ifdef CONFIG_SMP +static int loongson3_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs; + struct clock_event_device __maybe_unused *cd; + struct core_data *core; + unsigned int __maybe_unused new_freq; + unsigned long cpu; + struct ce_update_data __maybe_unused ce_data; + int cur_boost_level; + + if (val == CPUFREQ_POSTCHANGE) { + freqs = (struct cpufreq_freqs *)data; + cpu = freqs->policy->cpu; + core = get_core_data(cpu); + cur_boost_level = boost_level(core->perf, freqs->new); + if (cur_boost_level != 0) { + lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] * + cpufreq_scale(loops_per_jiffy, boost_freqs[cur_boost_level] * 1000, + freqs->new)) / core->shift[cur_boost_level]); + } else { + lpj_fine = + cpufreq_scale(loops_per_jiffy, core->normal_max_freq * 1000, freqs->new); + } + } + + return 0; +} +#else +static int loongson3_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs; + struct clock_event_device __maybe_unused *cd; + struct core_data *core; + unsigned int __maybe_unused new_freq; + unsigned long cpu; + int cur_boost_level; + + if (val == CPUFREQ_POSTCHANGE) { + + freqs = (struct cpufreq_freqs *)data; + cpu = freqs->cpu; + core = get_core_data(cpu); + cur_boost_level = boost_level(core->perf, target_freq); + + if (cur_boost_level != 0) { + lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] * + loops_per_jiffy) / core->shift[cur_boost_level]); + } else { + lpj_fine = loops_per_jiffy; + } + } + + return 0; +} +#endif +static struct notifier_block loongson3_cpufreq_notifier_block = { + .notifier_call = loongson3_cpu_freq_notifier +}; + +static int cpufreq_perf_find_level(struct acpi_processor_performance *perf, + unsigned int target_freq, + unsigned int boost_level) +{ + int i; + + for (i = 0; i < perf->state_count; i++) { + if (boost_level) { + if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) { + if (target_freq == (perf->states[i].core_frequency * 1000)) + return perf->states[i].control & LOONGSON_CONTROL_MASK; + } + } else { + if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK)) + if (target_freq == (perf->states[i].core_frequency * 1000)) + return perf->states[i].control; + } + } + return 0; +} + +static int cpufreq_perf_find_freq(struct acpi_processor_performance *perf, + unsigned int target_index, + unsigned int boost_level) +{ + int i; + + for (i = 0; i < perf->state_count; i++) { + if (boost_level) { + if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) + if (target_index == (perf->states[i].control & LOONGSON_CONTROL_MASK)) + return perf->states[i].core_frequency; + } else { + if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK)) + if (target_index == perf->states[i].control) + return perf->states[i].core_frequency; + } + } + + return 0; +} + + +static inline struct core_data *get_core_data(int cpu) +{ + int package_id = cpu_data[cpu].package; + struct package_data *package = &all_package_data[package_id]; + int core_id = cpu_logical_map(cpu) % package->nr_cores; + + return &package->core[core_id]; +} + +static bool package_boost(struct package_data *package) +{ + int i; + int 
cur_full_load = 0; + +#if defined(CONFIG_CPU_HAS_LASX) + int lasx_enable_count = 0; + unsigned long lasx_num; + bool clear_lasx = false; +#endif + + int msa_enable_count = 0; + unsigned long msa_num; + bool clear_msa = false; + + for (i = 0; i < package->nr_cores; i++) { + +#if defined(CONFIG_CPU_HAS_LASX) + lasx_num = per_cpu(lasx_count, package->core[i].cpu); + + if (lasx_num) + lasx_enable_count++; + + if (lasx_num >= lasx_count_threshold) + clear_lasx = true; + + pr_debug("%s: lasx enabled, i %d, cpu %d, lasx_num %lu\n", + __func__, i, package->core[i].cpu, lasx_num); +#endif + msa_num = per_cpu(msa_count, package->core[i].cpu); + + if (msa_num) + msa_enable_count++; + + if (msa_num >= msa_count_threshold) + clear_msa = true; + + pr_debug("%s: msa enabled, i %d, cpu %d, msa_num %lu\n", + __func__, i, package->core[i].cpu, msa_num); + + if (package->core[i].prev_load >= load_threshold) + cur_full_load++; + } + +#if defined(CONFIG_CPU_HAS_LASX) + if (clear_lasx) { + for (i = 0; i < package->nr_cores; i++) + per_cpu(lasx_count, package->core[i].cpu) = 0; + } +#endif + + if (clear_msa) { + for (i = 0; i < package->nr_cores; i++) + per_cpu(msa_count, package->core[i].cpu) = 0; + } + +#if defined(CONFIG_CPU_HAS_LASX) + if (lasx_enable_count > 1 + || (lasx_enable_count && package->nr_full_load_cores > 1) + || (lasx_enable_count && cur_full_load > 1)) { + return false; + } +#endif + + if (msa_enable_count > 1 + || (msa_enable_count && package->nr_full_load_cores > 1) + || (msa_enable_count && cur_full_load > 1)) { + return false; + } + + if (package->nr_full_load_cores && + package->nr_full_load_cores <= package->max_boost_cores) + return true; + + return false; +} + +/* + * check if the cpu can be boosted. + * + * call the function after load of cpu updated. 
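+ * (In this driver it runs in the cpufreq_update() path, with the
+ * package's boost_mutex held.)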
+ */ +static bool cpu_can_boost(int cpu) +{ + struct core_data *core = get_core_data(cpu); + struct package_data *package = core->package; + + if (package->boost_cores >= package->max_boost_cores) + return false; + if (core->load > BOOST_THRESHOLD) + return true; + + return false; +} + +static void do_set_freq_level(int cpu, int freq_level) +{ + uint32_t message; + uint32_t val; + + message = (0 << 31) | (VOLTAGE_COMMAND << 24) + | ((uint32_t)freq_level << 4) + | (cpu & CPU_ID_FIELD); + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + + val |= 1 << 10; + iocsr_write32(val, 0x420); +} + +static int wait_for_ready_timeout(int64_t timeout) +{ + int ret; + struct timespec64 prev_ts; + struct timespec64 curr_ts; + ktime_t delay = ktime_set(0, 100); + + ktime_get_ts64(&prev_ts); + ktime_get_ts64(&curr_ts); + + ret = -EPERM; + while (((curr_ts.tv_sec - prev_ts.tv_sec) * 1000000000 + (curr_ts.tv_nsec - prev_ts.tv_nsec)) < timeout) { + ktime_get_ts64(&curr_ts); + + if (iocsr_read32(0x51c) & COMPLETE_STATUS) { + ret = 0; + break; + } + + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&delay, HRTIMER_MODE_REL); + } + return ret; +} + +/* Find closest freq to target in a table in ascending order */ +static int cpufreq_table_find_freq_ac(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + unsigned int best_freq = 0; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (pos->driver_data != boost_level) + continue; + if (freq > policy->max || freq < policy->min) + continue; + if (freq == target_freq) + return freq; + + if (freq < target_freq) { + best = idx; + best_freq = freq; + continue; + } + + /* No freq found below target_freq, return freq above target_freq */ + if (best == -1) + return freq; + + /* Choose the closest freq */ + if (target_freq - table[best].frequency > freq - target_freq) + return freq; + + return best_freq; + } + + return best_freq; +} + +/* Find closest freq to target in a table in descending order */ +static int cpufreq_table_find_freq_dc(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + unsigned int best_freq = 0; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (pos->driver_data != boost_level) + continue; + if (freq > policy->max || freq < policy->min) + continue; + + if (freq == target_freq) + return freq; + + if (freq > target_freq) { + best = idx; + best_freq = freq; + continue; + } + + /* No freq found above target_freq, return freq below target_freq */ + if (best == -1) + return freq; + + /* Choose the closest freq */ + if (table[best].frequency - target_freq > target_freq - freq) + return freq; + return best_freq; + } + + return best_freq; +} + +/* Works only on sorted freq-tables */ +static int cpufreq_table_find_freq(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_freq_ac(policy, target_freq, boost_level); + else + return cpufreq_table_find_freq_dc(policy, target_freq, boost_level); +} + +static void transition_end(struct 
cpufreq_policy *policy, + struct cpufreq_freqs *freqs, bool failed) +{ + if (unlikely(!policy->transition_ongoing)) + return; + cpufreq_freq_transition_end(policy, freqs, failed); +} +static void transition_begin(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs) +{ + if (unlikely(policy->transition_ongoing)) + cpufreq_freq_transition_end(policy, freqs, true); + + cpufreq_freq_transition_begin(policy, freqs); +} + +static void update_core_boost_info(struct core_data *core, bool boost_set) +{ + core->in_boost = boost_set; + if (boost_set) + core->package->boost_cores++; + else + core->package->boost_cores--; +} + +static unsigned int cores_freq_trans_notify(struct package_data *package, + bool before_trans, + bool trans_failed, + int find_level, + int find_freq, + unsigned int skip_cpumask) +{ + int i; + struct cpufreq_policy *policy; + struct cpufreq_freqs freqs; + unsigned int cores_level = 0; + unsigned int core_level; + + for (i = 0; i < package->nr_cores; i++) { + struct core_data *core = &package->core[i]; + + policy = cpufreq_cpu_get_raw(core->cpu); + if (((1 << i) & skip_cpumask) || !policy) + continue; + freqs.old = policy->cur; + freqs.flags = 0; + + /* find level from normal levels */ + core_level = cpufreq_perf_find_level(core->perf, policy->cur, find_level); + if (!core_level) { + pr_debug("cpu%d policy->cur=%d find_level=%d freq=%d skip_cpumask=%x \n", + policy->cpu, policy->cur, + find_level, find_freq, skip_cpumask); + } + freqs.new = cpufreq_perf_find_freq(core->perf, core_level, find_freq) * 1000; + if (!freqs.new) + pr_debug("%s: find freq error\n", __func__); + + pr_debug("%s: cpu %d, old freq %d, new freq %d, find_level %d, find_freq %d\n", + __func__, policy->cpu, freqs.old, freqs.new, find_level, find_freq); + cores_level |= (core_level << (i << 2)); + + if (before_trans) + transition_begin(policy, &freqs); + else + transition_end(policy, &freqs, trans_failed); + } + return cores_level; +} +static int loongson3_set_freq(struct core_data *core, unsigned long freq, int boost_level) +{ + int ret = 0; + int freq_level; + int phy_cpu; + int target_freq; + struct cpufreq_freqs freqs; + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(core->cpu); + + if (!policy) + return -EINVAL; + + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + if (ret) + return ret; + + phy_cpu = cpu_logical_map(core->cpu); + target_freq = cpufreq_table_find_freq(policy, freq, boost_level); + if (!target_freq) + return -1; + if (target_freq == policy->cur) + return -1; + + freqs.flags = 0; + freqs.old = policy->cur; + freqs.new = target_freq; + freq_level = cpufreq_perf_find_level(core->perf, target_freq, boost_level); + if (!freq_level) { + pr_debug("%s: cpu%d freq=%lu targetfreq=%d boost_level=%d find level error\n", + __func__, core->cpu, freq, target_freq, boost_level); + } + + transition_begin(policy, &freqs); + do_set_freq_level(phy_cpu, freq_level); + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + transition_end(policy, &freqs, !!ret); + + return ret; +} + +int loongson3_set_mode(int mode, int freq_level) +{ + uint32_t val; + int ret = 0; + uint32_t message; + + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + if (ret) + return ret; + + message = mode | (VOLTAGE_COMMAND << 24) | freq_level; + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + val |= 1 << 10; + iocsr_write32(val, 0x420); + return wait_for_ready_timeout(MAX_READY_TIMEOUT); +} + +enum freq_adjust_action { + FAA_NORMAL, + FAA_N2B, + FAA_B2N, + FAA_BOOST, +}; + +static int faa_normal(struct 
cpufreq_policy *policy, int load)
+{
+	int ret;
+	unsigned int freq_next, min_f, max_f;
+	struct core_data *core = get_core_data(policy->cpu);
+
+	if (!core)
+		return -1;
+
+	min_f = policy->min;
+	max_f = policy->max;
+	freq_next = min_f + load * (max_f - min_f) / 100;
+	ret = loongson3_set_freq(core, freq_next, 0);
+	return ret;
+}
+
+static void handle_boost_cores(struct core_data *core, struct package_data *package,
+		unsigned long target_freq, bool skip_update_and_notify, bool update_core, bool inc_boost)
+{
+	int boost_level;
+	int find_level;
+	int find_freq;
+	int ret;
+	int inc_core = inc_boost ? 1 : -1;
+
+	if (boost_gears == 1) {
+		find_level = 0;
+		boost_level = boost_gears;
+	} else {
+		find_level = package->boost_cores;
+		if (update_core)
+			boost_level = package->boost_cores + inc_core;
+		else
+			boost_level = package->boost_cores;
+	}
+	find_freq = boost_level;
+	ret = loongson3_set_freq(core, target_freq, boost_level);
+	if (ret)
+		return;
+
+	if (skip_update_and_notify) {
+		if (update_core)
+			update_core_boost_info(core, inc_boost);
+		return;
+	}
+
+	if (boost_gears != 1) {
+		cores_freq_trans_notify(package, true, false,
+				find_level, find_freq, 1 << core->cpu);
+		cores_freq_trans_notify(package, false, false,
+				find_level, find_freq, 1 << core->cpu);
+	}
+	if (update_core)
+		update_core_boost_info(core, inc_boost);
+}
+
+static void faa_boost(struct cpufreq_policy *policy, int load)
+{
+	unsigned int min_f, max_f;
+	struct core_data *core = get_core_data(policy->cpu);
+	struct package_data *package = core->package;
+	unsigned long target_freq;
+
+	/* boost cores from n to n + 1 */
+	if (core->load > BOOST_THRESHOLD) {
+		if (package->boost_cores < package->max_boost_cores
+				&& !core->in_boost) {
+			if (boost_gears == 1) {
+				target_freq = policy->max;
+			} else {
+				target_freq = cpufreq_table_find_freq(policy, policy->max,
+						package->boost_cores + 1);
+				if (!target_freq) {
+					pr_debug("%s: find freq error, boost_level %d, cur freq %d\n",
+							__func__, package->boost_cores, policy->max);
+				}
+			}
+			handle_boost_cores(core, package, target_freq, false, true, true);
+		}
+	} else {
+		/* 1. core not in boost, level up but not change pll
+		 * 2.
core in boost, boost cores from n to n - 1 + */ + min_f = policy->min; + max_f = policy->max; + target_freq = min_f + load * (max_f - min_f) / 100; + handle_boost_cores(core, package, target_freq, !core->in_boost, core->in_boost, false); + } + + +} + +static void get_boost_cores(struct package_data *package, int *boost_cores, int *boost_count) +{ + struct core_data *core; + struct cpufreq_policy *policy; + int i; + + /* count boost cores */ + for (i = 0; i < package->nr_cores; i++) { + core = &package->core[i]; + policy = cpufreq_cpu_get_raw(core->cpu); + if (!policy) + continue; + + if (cpu_can_boost(core->cpu)) { + if (boost_cores) + *boost_cores |= (1 << i); + + (*boost_count)++; + } + } +} + +static void faa_n2b(struct package_data *package, struct core_data *core) +{ + int boost_cores = 0; + int boost_count = 0; + int freq_level; + int i; + + get_boost_cores(package, &boost_cores, &boost_count); + + if (boost_gears == 1) + boost_count = 1; + + freq_level = cores_freq_trans_notify(package, true, false, + 0, boost_count, 0); + if (!loongson3_set_mode(BOOST_MODE, freq_level)) { + cores_freq_trans_notify(package, false, false, + 0, boost_count, 0); + package->in_boost = true; + for (i = 0; i < package->nr_cores; i++) { + if (boost_cores & (1 << i)) + update_core_boost_info(&package->core[i], true); + } + } else + cores_freq_trans_notify(package, false, true, + 0, boost_count, 0); +} + +static void faa_b2n(struct package_data *package) +{ + int i; + int boost_count = package->boost_cores; + + if (boost_gears == 1) + boost_count = 1; + + cores_freq_trans_notify(package, true, false, + boost_count, 0, 0); + if (!loongson3_set_mode(NORMAL_MODE, 0)) { + cores_freq_trans_notify(package, false, false, + boost_count, 0, 0); + for (i = 0; i < package->nr_cores; i++) { + if (package->core[i].in_boost) + update_core_boost_info(&package->core[i], false); + } + package->in_boost = false; + } else + cores_freq_trans_notify(package, false, true, + boost_count, 0, 0); +} + + +unsigned int load_update(struct core_data *core) +{ + int i; + u64 update_time, cur_idle_time; + unsigned int idle_time, time_elapsed; + unsigned int load = 0; + struct package_data *package = core->package; + + cur_idle_time = get_cpu_idle_time(core->cpu, &update_time, true); + + time_elapsed = update_time - core->prev_update_time; + core->prev_update_time = update_time; + + idle_time = cur_idle_time - core->prev_cpu_idle; + core->prev_cpu_idle = cur_idle_time; + + if (unlikely(!time_elapsed)) { + /* + * That can only happen when this function is called + * twice in a row with a very short interval between the + * calls, so the previous load value can be used then. + */ + load = core->prev_load; + } else if (unlikely((int)idle_time > 2 * core->sampling_rate && + core->prev_load)) { + + load = core->prev_load; + core->prev_load = 0; + } else { + if (time_elapsed >= idle_time) + load = 100 * (time_elapsed - idle_time) / time_elapsed; + else + load = (int)idle_time < 0 ? 
100 : 0;
+		core->prev_load = load;
+	}
+
+	package->nr_full_load_cores = 0;
+	for (i = 0; i < package->nr_cores; i++) {
+		if (package->core[i].load > BOOST_THRESHOLD)
+			package->nr_full_load_cores++;
+	}
+
+	return load;
+}
+
+static bool cpufreq_should_update_freq(struct core_data *core, u64 time)
+{
+	s64 delta_ns;
+
+	delta_ns = time - core->last_freq_update_time;
+	return delta_ns >= core->freq_update_delay_ns;
+}
+
+static void cpufreq_update(struct cpufreq_policy *policy)
+{
+	int action;
+	struct core_data *core;
+	struct package_data *package;
+	unsigned long load;
+	bool should_be_boost = false;
+
+	core = get_core_data(policy->cpu);
+	package = core->package;
+
+	mutex_lock(&boost_mutex[core->package_id]);
+
+	if (!core->update_util_set) {
+		mutex_unlock(&boost_mutex[core->package_id]);
+		return;
+	}
+
+	load = load_update(core);
+	core->load = (u64)load + ((core->load * FACTOR) >> 32);
+
+	if (cpufreq_boost_enabled()) {
+		should_be_boost = package_boost(package);
+	} else {
+		if (package->in_boost)
+			should_be_boost = false;
+	}
+
+	action = (package->in_boost << 1) | should_be_boost;
+	switch (action) {
+	case FAA_NORMAL:
+		faa_normal(policy, load);
+		break;
+	case FAA_B2N:
+		faa_b2n(package);
+		break;
+	case FAA_N2B:
+		faa_n2b(package, core);
+		break;
+	case FAA_BOOST:
+		faa_boost(policy, load);
+		break;
+	}
+	mutex_unlock(&boost_mutex[core->package_id]);
+}
+
+static void set_max_within_limits(struct cpufreq_policy *policy)
+{
+	struct core_data *core = get_core_data(policy->cpu);
+
+	/*
+	 * policy->max <= core->normal_max_freq * 1000 indicates that
+	 * boost is disabled, so the max freq is within the normal range.
+	 *
+	 * Skip the performance policy when boost is enabled.
+	 */
+	if (policy->max <= (core->normal_max_freq * 1000)) {
+		mutex_lock(&boost_mutex[core->package_id]);
+		if (!loongson3_set_freq(core, policy->max, 0))
+			pr_debug("Set cpu %d to performance mode under normal range.\n",
+					policy->cpu);
+		mutex_unlock(&boost_mutex[core->package_id]);
+	}
+}
+
+static void clear_update_util_hook(unsigned int cpu)
+{
+	struct core_data *core = get_core_data(cpu);
+
+	if (!core->update_util_set)
+		return;
+
+	cpufreq_remove_update_util_hook(cpu);
+	core->update_util_set = false;
+	synchronize_rcu();
+}
+
+static void update_util_handler(struct update_util_data *data, u64 time,
+		unsigned int flags)
+{
+	struct core_data *core = container_of(data, struct core_data, update_util);
+
+	if (!cpufreq_should_update_freq(core, time))
+		return;
+	if (!core->work_in_progress) {
+		core->last_freq_update_time = time;
+		core->work_in_progress = true;
+		irq_work_queue(&core->irq_work);
+	}
+}
+
+static void set_update_util_hook(unsigned int cpu)
+{
+	struct core_data *core = get_core_data(cpu);
+
+	if (core->update_util_set)
+		return;
+
+	cpufreq_add_update_util_hook(cpu, &core->update_util,
+			update_util_handler);
+	core->update_util_set = true;
+}
+
+static int loongson3_cpufreq_set_policy(struct cpufreq_policy *policy)
+{
+	if (!policy->cpuinfo.max_freq)
+		return -ENODEV;
+
+	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		clear_update_util_hook(policy->cpu);
+		set_max_within_limits(policy);
+	} else {
+		set_update_util_hook(policy->cpu);
+	}
+
+	return 0;
+}
+
+static int loongson3_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+
+	return 0;
+}
+
+static void set_boost_freq(bool has)
+{
+	cpufreq_has_boost_freq = has;
+}
+
+static bool has_boost_freq(void)
+{
+	return cpufreq_has_boost_freq;
+}
+
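+/*
+ * Compute dividor/dividee as a fixed-point ratio with 8 decimal digits
+ * of precision: the caller initializes *shift to 1 and it is scaled up
+ * to 10^8 here, so the ratio is (return value) / *shift. The result is
+ * used to rescale loops-per-jiffy between a boost frequency and the
+ * normal maximum frequency.
+ */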
+static int compute_scale(int *shift, int dividor, int dividee) +{ + int i; + int result = 0; + int remainder = 0; + int scale_resolution = 8; + + result = dividor / dividee; + remainder = (dividor % dividee) * 10; + + for (i = 0; i < scale_resolution; i++) { + result = result * 10 + remainder / dividee; + remainder = (remainder % dividee) * 10; + *shift *= 10; + } + + return result; +} + +static void cpufreq_work_handler(struct kthread_work *work) +{ + struct core_data *core; + struct cpufreq_policy *policy; + + core = container_of(work, struct core_data, work); + policy = cpufreq_cpu_get_raw(core->cpu); + + if (policy) { + cpufreq_update(policy); + core->work_in_progress = false; + } +} + +static void cpufreq_irq_work(struct irq_work *irq_work) +{ + struct core_data *core = container_of(irq_work, struct core_data, irq_work); + + kthread_queue_work(&cpufreq_worker, &core->work); +} + +static void cpufreq_kthread_stop(void) +{ + kthread_flush_worker(&cpufreq_worker); + kthread_stop(cpufreq_thread); +} +static int cpufreq_kthread_create(void) +{ + struct sched_attr attr = { + .size = sizeof(struct sched_attr), + .sched_policy = SCHED_DEADLINE, + .sched_flags = 0x10000000, + .sched_nice = 0, + .sched_priority = 0, + .sched_runtime = 1000000, + .sched_deadline = 10000000, + .sched_period = 10000000, + }; + int ret; + + kthread_init_worker(&cpufreq_worker); + cpufreq_thread = kthread_create(kthread_worker_fn, &cpufreq_worker, "lsfrq:%d", 0); + if (IS_ERR(cpufreq_thread)) + return PTR_ERR(cpufreq_thread); + + ret = sched_setattr_nocheck(cpufreq_thread, &attr); + if (ret) { + kthread_stop(cpufreq_thread); + pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); + return ret; + } + + wake_up_process(cpufreq_thread); + + return 0; +} + +static int init_acpi(struct acpi_processor_performance *perf) +{ + int result = 0; + int i; + + perf->shared_type = 0; + perf->state_count = (max_freq_level - min_freq_level + 1) * (boost_gears + 1); + + perf->states = + kmalloc_array(perf->state_count, + sizeof(struct acpi_processor_px), + GFP_KERNEL); + + if (!perf->states) { + result = -ENOMEM; + return result; + } + + for (i = 0; i < perf->state_count; i++) { + perf->states[i].power = 0x3A98; + perf->states[i].transition_latency = 10000; + perf->states[i].bus_master_latency = 10000; + perf->states[i].status = (RESERVED_FREQ + i / (boost_gears + 1)); + perf->states[i].control = (RESERVED_FREQ + i / (boost_gears + 1)); + + switch (i % (boost_gears + 1)) { + case 0: + perf->states[i].core_frequency = (cpu_clock_freq / 1000000) * (8 - i / (boost_gears + 1)) / 8; + break; + case 1: + case 2: + case 3: + case 4: + perf->states[i].core_frequency = + boost_freqs[i % (boost_gears + 1)] * (8 - i / (boost_gears + 1)) / 8; + perf->states[i].control |= ((i % (boost_gears + 1)) << 8); + break; + default: + pr_info("%s: i %d freq table error\n", __func__, i); + } + } + + return result; +} + +static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int i; + struct acpi_processor_performance *perf; + struct cpufreq_frequency_table *freq_table; + struct core_data *core; + int package_id; + unsigned int cpu = policy->cpu; + unsigned int result = 0; + + perf = per_cpu_ptr(acpi_perf_data, cpu); + package_id = cpu_data[cpu].package; + core = get_core_data(cpu); + all_package_data[package_id].nr_cores = loongson_sysconf.cores_per_package; + all_package_data[package_id].max_boost_cores = max_boost_cores; + core->normal_max_freq = 0; + all_package_data[package_id].nr_full_load_cores = 0; + core->cpu = cpu; + 
core->work_in_progress = false; + core->last_freq_update_time = 0; + core->perf = perf; + core->package_id = package_id; + core->package = &all_package_data[package_id]; + + core->boost_freq = kmalloc_array(boost_gears + 1, sizeof(typeof(core->boost_freq)), GFP_KERNEL); + core->clock_scale = kmalloc_array(boost_gears + 1, sizeof(typeof(core->clock_scale)), GFP_KERNEL); + core->shift = kmalloc_array(boost_gears + 1, sizeof(typeof(core->shift)), GFP_KERNEL); + + for (i = 0; i < boost_gears + 1; i++) { + core->boost_freq[i] = boost_freqs[i]; + core->shift[i] = 1; + } + + if (!acpi_disabled) + result = acpi_processor_register_performance(perf, cpu); + else { + result = init_acpi(perf); + policy->shared_type = perf->shared_type; + } + + if (result) { + pr_info("CPU%d acpi_processor_register_performance failed.\n", cpu); + return result; + } + + for (i = 0; i < MAX_PACKAGES; i++) + mutex_init(&boost_mutex[i]); + + /* capability check */ + if (perf->state_count <= 1) { + pr_debug("No P-States\n"); + result = -ENODEV; + goto err_unreg; + } + + freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table), + GFP_KERNEL); + if (!freq_table) { + result = -ENOMEM; + goto err_unreg; + } + + /* detect transition latency */ + policy->cpuinfo.transition_latency = 0; + for (i = 0; i < perf->state_count; i++) { + if ((perf->states[i].transition_latency * 1000) > + policy->cpuinfo.transition_latency) + policy->cpuinfo.transition_latency = + perf->states[i].transition_latency * 1000; + if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) { + set_boost_freq(true); + } else { + if (perf->states[i].core_frequency > core->normal_max_freq) + core->normal_max_freq = perf->states[i].core_frequency; + } + } + + core->freq_update_delay_ns = policy->cpuinfo.transition_latency; + + for (i = 0; i < boost_gears + 1; i++) { + core->clock_scale[i] = compute_scale(&core->shift[i], boost_freqs[i], core->normal_max_freq); + pr_debug("%s: boost_freqs[%d] %d, normal_max_freq %d, scale %d, shift %d\n", + __func__, i, boost_freqs[i], core->normal_max_freq, + core->clock_scale[i], core->shift[i]); + } + + /* table init */ + for (i = 0; i < perf->state_count; i++) { + freq_table[i].driver_data = (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8; + if (freq_table[i].driver_data) + freq_table[i].flags |= CPUFREQ_BOOST_FREQ; + freq_table[i].frequency = + perf->states[i].core_frequency * 1000; + } + freq_table[i].frequency = CPUFREQ_TABLE_END; + policy->freq_table = freq_table; + perf->state = 0; + + /* add boost-attr if supported. */ + if (has_boost_freq() && boost_supported()) + loongson3_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; + + pr_info("CPU%u - ACPI performance management activated.\n", cpu); + for (i = 0; i < perf->state_count; i++) + pr_debug(" %cP%d: %d MHz, %d mW, %d uS %d level\n", + (i == perf->state ? '*' : ' '), i, + (u32) perf->states[i].core_frequency, + (u32) perf->states[i].power, + (u32) perf->states[i].transition_latency, + (u32) perf->states[i].control); + + /* + * the first call to ->target() should result in us actually + * writing something to the appropriate registers. 
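+ *
+ * (fast_switch_possible stays false below; frequency updates are
+ * funneled through the irq_work/kthread worker instead of the
+ * scheduler's fast-switch path.)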
+ */ + policy->fast_switch_possible = false; + + init_irq_work(&core->irq_work, cpufreq_irq_work); + kthread_init_work(&core->work, cpufreq_work_handler); + core->sampling_rate = max_t(unsigned int, + CPUFREQ_SAMPLING_INTERVAL, + cpufreq_policy_transition_delay_us(policy)); + return result; + +err_unreg: + if (!acpi_disabled) + acpi_processor_unregister_performance(cpu); + + return result; +} + +static int loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + struct core_data *core = get_core_data(policy->cpu); + + clear_update_util_hook(policy->cpu); + irq_work_sync(&core->irq_work); + kthread_cancel_work_sync(&core->work); + core->work_in_progress = false; + policy->fast_switch_possible = false; + if (!acpi_disabled) + acpi_processor_unregister_performance(policy->cpu); + kfree(policy->freq_table); + kfree(core->boost_freq); + kfree(core->clock_scale); + kfree(core->shift); + return 0; +} + +static struct freq_attr *loongson3_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, /* Extra space for boost-attr if supported */ + NULL, +}; + +static struct cpufreq_driver loongson3_cpufreq_driver = { + .verify = loongson3_cpufreq_verify_policy, + .setpolicy = loongson3_cpufreq_set_policy, + .init = loongson3_cpufreq_cpu_init, + .exit = loongson3_cpufreq_cpu_exit, + .name = "acpi-cpufreq", + .attr = loongson3_cpufreq_attr, +}; + +static void free_acpi_perf_data(void) +{ + unsigned int i; + + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ + for_each_possible_cpu(i) + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) + ->shared_cpu_map); + free_percpu(acpi_perf_data); +} + +static int __init loongson3_cpufreq_early_init(void) +{ + unsigned int i; + + acpi_perf_data = alloc_percpu(struct acpi_processor_performance); + if (!acpi_perf_data) + return -ENOMEM; + for_each_possible_cpu(i) { + if (!zalloc_cpumask_var_node( + &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, + GFP_KERNEL, cpu_to_node(i))) { + free_acpi_perf_data(); + return -ENOMEM; + } + } + return 0; +} + +static bool support_boost(void) +{ + int message; + int val; + int i; + + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) + return false; + message = DVFS_INFO << 24; + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + + val |= 1 << 10; + iocsr_write32(val, 0x420); + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) { + pr_info("%s: not support boost\n", __func__); + return false; + } + + val = iocsr_read32(0x51c); + + min_freq_level = val & DVFS_INFO_MIN_FREQ; + max_freq_level = (val & DVFS_INFO_MAX_FREQ) >> 4; + + if ((val & DVFS_INFO_BOOST_CORE_FREQ) && ((val & DVFS_INFO_BOOST_CORES) >> 20)) { + max_boost_cores = (val & DVFS_INFO_BOOST_CORES) >> 20; + max_boost_freq = ((val & DVFS_INFO_BOOST_CORE_FREQ) >> 8) * 25; + max_upper_index = (val & DVFS_INFO_NORMAL_CORE_UPPER_LIMIT) >> 16; + } else { + boost_gears = 0; + return false; + } + + /* Read boost levels */ + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) + return false; + + /* for version 1, single boost freq boost */ + message = DVFS_INFO_BOOST_LEVEL << 24; + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + + val |= 1 << 10; + iocsr_write32(val, 0x420); + + if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) { + pr_info("%s: single boost mode\n", __func__); + boost_gears = 1; + boost_freqs[0] = calc_const_freq() / 1000000; + for (i = 1; i < boost_gears + 1; i++) + boost_freqs[i] = max_boost_freq; + + /* set 0x51c complete */ + iocsr_write32(COMPLETE_STATUS, 0x51c); + } else { + pr_info("%s: multi boost mode\n", __func__); + 
boost_gears = max_boost_cores; + val = iocsr_read32(0x51c); + + boost_freqs[0] = calc_const_freq() / 1000000; + boost_freqs[1] = max_boost_freq; + + if (boost_gears > 1) { + for (i = 2; i < boost_gears + 1; i++) + boost_freqs[i] = max_boost_freq - (((val >> ((i-2) * 4)) & 0xf) * FREQ_STEP); + } + } + + pr_info("%s: min_freq_level %d, max_freq_level %d, max_boost_cores %d, boost_gears %d\n", + __func__, min_freq_level, max_freq_level, max_boost_cores, boost_gears); + + return true; +} + +static int cpufreq_table_cpuinfo(struct cpufreq_policy *policy, + struct cpufreq_frequency_table *table, + bool boost) +{ + struct cpufreq_frequency_table *pos; + unsigned int min_freq = ~0; + unsigned int max_freq = 0; + unsigned int freq; + + cpufreq_for_each_valid_entry(pos, table) { + freq = pos->frequency; + + if (!boost) { + if (pos->driver_data) + continue; + } + if (freq < min_freq) + min_freq = freq; + if (freq > max_freq) + max_freq = freq; + } + + policy->min = policy->cpuinfo.min_freq = min_freq; + policy->max = policy->cpuinfo.max_freq = max_freq; + if (policy->min == ~0) + return -EINVAL; + else + return 0; +} + +static int set_boost(struct cpufreq_policy *policy, int state) +{ + if (!has_boost_freq()) + return -EINVAL; + + if (!policy) + return -EINVAL; + + if (!state) { + if (policy->policy == CPUFREQ_POLICY_POWERSAVE) + cpufreq_update(policy); + } + if (!policy->freq_table) + return -EINVAL; + + cpufreq_table_cpuinfo(policy, policy->freq_table, state); + down_write(&policy->rwsem); + up_write(&policy->rwsem); + + if (!state) + set_max_within_limits(policy); + + return 0; +} + +static void __init loongson3_cpufreq_boost_init(void) +{ + if (!support_boost()) { + pr_info("Boost capabilities not present in the processor\n"); + return; + } + + loongson3_cpufreq_driver.set_boost = set_boost; +} + +static int cpufreq_supported_detect(void) +{ + return wait_for_ready_timeout(MAX_READY_TIMEOUT); +} + +static int __init loongson3_cpufreq_init(void) +{ + int ret; + + if (!cpu_has_csr || !cpu_has_scalefreq) + return -ENODEV; + + /* don't keep reloading if cpufreq_driver exists */ + if (cpufreq_get_current_driver()) + return -EEXIST; + + if (cpufreq_supported_detect()) { + pr_info("%s failed!\n", __func__); + return -ENODEV; + } + + ret = loongson3_cpufreq_early_init(); + if (ret) + return ret; + loongson3_cpufreq_boost_init(); + + cpufreq_register_notifier(&loongson3_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + ret = cpufreq_register_driver(&loongson3_cpufreq_driver); + cpufreq_kthread_create(); + if (ret) + free_acpi_perf_data(); + + return ret; +} + +static void __exit loongson3_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&loongson3_cpufreq_driver); + free_acpi_perf_data(); + cpufreq_kthread_stop(); +} + +late_initcall(loongson3_cpufreq_init); +module_exit(loongson3_cpufreq_exit); + +static const struct acpi_device_id processor_device_ids[] = { + {ACPI_PROCESSOR_OBJECT_HID, }, + {ACPI_PROCESSOR_DEVICE_HID, }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, processor_device_ids); + +MODULE_ALIAS("acpi"); -- Gitee From 9866d0816856ecead449bcb77d589caa61ecdf4b Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Mon, 11 Dec 2023 10:03:10 +0800 Subject: [PATCH 142/953] anolis: fbdev: add ls2k500sfb driver for ls2k500 bmc. 
ANBZ: #8435

Signed-off-by: Chong Qiao
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/video/fbdev/Kconfig      |  13 +
 drivers/video/fbdev/Makefile     |   1 +
 drivers/video/fbdev/ls2k500sfb.c | 788 +++++++++++++++++++++++++++++++
 3 files changed, 802 insertions(+)
 create mode 100644 drivers/video/fbdev/ls2k500sfb.c

diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index c29754b65c0e..35b3ca2fb50a 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1950,6 +1950,19 @@ config FB_SM712
 	  called sm712fb. If you want to compile it as a module, say M here
 	  and read .

+config FB_LS2K500
+	tristate "Loongson LS2K500 frame buffer support"
+	depends on FB && PCI
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  Frame buffer driver for the Loongson LS2K500 BMC.
+
+	  This driver is also available as a module.
+	  If you want to compile it as a module, say M here and read
+	  .
+
 source "drivers/video/fbdev/omap/Kconfig"
 source "drivers/video/fbdev/omap2/Kconfig"
 source "drivers/video/fbdev/mmp/Kconfig"
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 70569f7027ed..d3fbb185daa3 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -128,3 +128,4 @@ obj-$(CONFIG_FB_SIMPLE) += simplefb.o

 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL) += vfb.o
+obj-$(CONFIG_FB_LS2K500) += ls2k500sfb.o
diff --git a/drivers/video/fbdev/ls2k500sfb.c b/drivers/video/fbdev/ls2k500sfb.c
new file mode 100644
index 000000000000..a3722dcaada0
--- /dev/null
+++ b/drivers/video/fbdev/ls2k500sfb.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *
+ * linux/drivers/video/ls2k500sfb.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char mode_option[32] = "1280x1024-32@2M"; +module_param_string(mode, mode_option, sizeof(mode_option), 0444); +static int useshell; +module_param(useshell, int, 0664); +static int totty = 18; +module_param(totty, int, 0664); +static int resetdelay = 60; +module_param(resetdelay, int, 0664); +static int resetbootwait = 10; +module_param(resetbootwait, int, 0664); +static int GPIO = 14; +module_param(GPIO, int, 0664); +struct ls2k500sfb_struct { + struct pci_dev *dev; + struct platform_device *pd; + struct workqueue_struct *wq; + struct work_struct work; + struct delayed_work redraw_work; + int running; + unsigned long reset_time; + char *penv; + char saved_env[16]; +}; + +static int saved_console; +static unsigned long mscycles; +static atomic_t waiting_for_pciebreak_ipi; + +static int switch_console(int console) +{ + struct file *filp; + + filp = filp_open("/dev/tty1", O_RDWR, 0); + if (IS_ERR(filp)) + return -ENODEV; + + vfs_ioctl(filp, VT_ACTIVATE, console + 1); + filp_close(filp, NULL); + return 0; +} +static void ls2k500sfb_pciebreak_func(void *unused) +{ + atomic_dec(&waiting_for_pciebreak_ipi); + + while (atomic_read(&waiting_for_pciebreak_ipi)) + cpu_relax(); +} + +static void pciebreak_smp_send_stop(int ms) +{ + /* Wait at most 100 msecond for the other cpus to stop */ + unsigned long max_cycles = mscycles * ms; + unsigned long start_time = get_cycles(); + + atomic_set(&waiting_for_pciebreak_ipi, num_online_cpus()); + smp_call_function(ls2k500sfb_pciebreak_func, NULL, false); + while ((atomic_read(&waiting_for_pciebreak_ipi) > 1) + && get_cycles() - start_time < max_cycles) { + cpu_relax(); + } + if (atomic_read(&waiting_for_pciebreak_ipi) > 1) + pr_emerg("Non-pciebreaking CPUs did not react to IPI\n"); +} +static void ls2k500sfb_redraw_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = + container_of(work, struct ls2k500sfb_struct, redraw_work.work); + /*restore resolution info */ + if (memcmp(priv->penv, priv->saved_env, sizeof(priv->saved_env))) + memcpy(priv->penv, priv->saved_env, sizeof(priv->saved_env)); + switch_console(saved_console); +} + +static void ls2k500sfb_events_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = container_of(work, struct ls2k500sfb_struct, work); + struct pci_dev *pdev = priv->dev; + struct pci_dev *ppdev = pdev->bus->self; + uint32_t i, d, timeout, retry = 0; + static const uint32_t index[] = { + 0x10, 0x14, 0x18, 0x1c, 0x20, 0x24, 0x30, 0x3c, 0x54, 0x58, 0x78, 0x7c, 0x80, 4 + }; + + static uint32_t data[sizeof(index) / 4]; + static const uint32_t cindex[] = { 0x10, 0x3c, 4 }; + + static uint32_t cdata[sizeof(cindex) / 4]; + static uint32_t d80c, d71c, ctrl; + static void *p; + + if (!priv->running) { + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_read_config_dword(ppdev, index[i], &data[i]); + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_read_config_dword(pdev, cindex[i], &cdata[i]); + if (ppdev->vendor == 0x14) { + pci_read_config_dword(ppdev, 0x80c, &d80c); + d80c = (d80c & ~(3 << 17)) | (1 << 17); + + pci_read_config_dword(ppdev, 0x71c, &d71c); + d71c |= 1 << 26; + + p = pci_iomap(ppdev, 0, 0x100); + } + ctrl = readl(p); + return; + } + local_bh_disable(); + 
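+	/*
+	 * Quiesce all other CPUs in the pciebreak IPI spin loop before the
+	 * upstream bridge windows (0x18/0x1c/0x20 below) are torn down; a
+	 * stray MMIO access behind this port while the BMC resets could
+	 * hang the machine.
+	 */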
pciebreak_smp_send_stop(100); + wmb(); /* flush all write before we disable pcie window */ + pci_write_config_dword(ppdev, 0x18, 0); + pci_write_config_dword(ppdev, 0x1c, 0); + pci_write_config_dword(ppdev, 0x20, 0); + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after change pcie window */ + local_bh_enable(); + if (ppdev->vendor == 0x14) { + timeout = 10000; + while (timeout) { + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) + break; + mdelay(1); + timeout--; + }; + if (!timeout) + pr_info("bar not clear 0\n"); + + pci_read_config_dword(ppdev, 0x0, &d); + pr_info("pcie port deviceid=0x%x recover begin\n", d); +retrain: + while (1) { + pci_write_config_dword(ppdev, index[0], data[0]); + pci_read_config_dword(ppdev, index[0], &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + while (1) { + for (i = 0; i < ARRAY_SIZE(index); i++) { + if (index[i] != 0x18 && index[i] != 0x1c && index[i] != 0x20) + pci_write_config_dword(ppdev, index[i], data[i]); + } + pci_write_config_dword(ppdev, 0x80c, d80c); + pci_write_config_dword(ppdev, 0x71c, d71c); + + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + timeout = 10000; + + writel(ctrl | 0x8, p); + while (1) { + d = readl(p + 0xc); + if ((d & 0x11) == 0x11) { + break; + } else if (!timeout) { + pr_info("pcie train failed status=0x%x\n", d); + goto out; + } + mdelay(1); + timeout--; + } + + + pr_info("pcie recovered done\n"); + + if (!retry) { + /*wait u-boot ddr config */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) { + retry = 1; + goto retrain; + } + } + } else { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + } + local_bh_disable(); + pciebreak_smp_send_stop(10000); + wmb(); /* flush all write before we update pcie window */ + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_write_config_dword(ppdev, index[i], data[i]); + + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_write_config_dword(pdev, cindex[i], cdata[i]); + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after we update pcie window */ + local_bh_enable(); + + + pr_info("redraw console\n"); + + saved_console = fg_console; + switch_console(fg_console > 0?fg_console - 1 : fg_console + 1); + queue_delayed_work(priv->wq, &priv->redraw_work, HZ); +out: + priv->running = 0; +} + +irqreturn_t ls2k500sfb_interrupt(int irq, void *arg) +{ + struct ls2k500sfb_struct *priv = arg; + struct pci_dev *pdev = priv->dev; + + if (irq == pdev->irq) + pr_info("ls2k500sfb pcie interrupt\n"); + else + pr_info("ls2k500sfb gpio interrupt\n"); + if (system_state != SYSTEM_RUNNING) + return IRQ_HANDLED; + + if (!priv->running) { + if (!resetdelay || time_after(jiffies, priv->reset_time + resetdelay * HZ)) { + priv->running = 1; + queue_work(priv->wq, &priv->work); + } + priv->reset_time = jiffies; + } + return IRQ_HANDLED; +} + +#ifdef CONFIG_LOONGARCH +#define GPIO_OEN ((void *)IO_BASE+0x1fe00000+0x500) +#define GPIO_FUNCEN ((void *)IO_BASE+0x1fe00000+0x504) +#define GPIO_OUT ((void *)IO_BASE+0x1fe00000+0x508) +#define GPIO_IN ((void *)IO_BASE+0x1fe00000+0x50c) +#define GPIO_INTPOL ((void *)IO_BASE+0x1fe00000+0x510) +#define GPIO_INTEN ((void *)IO_BASE+0x1fe00000+0x514) + +static int gpiochip_match_name(struct gpio_chip *chip, void *data) +{ + const char *name = data; + + return !strcmp(chip->label, 
name); +} +static int get_gpio_irq_from_acpi_table(int gpio) +{ + struct gpio_chip *chip; + struct gpio_desc *desc; + + chip = gpiochip_find("LOON0007:00", gpiochip_match_name); + if (!chip) + return -ENOENT; + desc = gpiochip_request_own_desc(chip, gpio, "reboot", GPIO_LOOKUP_FLAGS_DEFAULT, GPIOD_IN); + if (!desc) + return -ENOENT; + return gpiod_to_irq(desc); +} + +static int get_gpio_irq_from_acpi_gsi(int gpio) +{ + int gsi = 16 + (gpio & 7); + + return acpi_register_gsi(NULL, gsi, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW); +} + +static int register_gpio_reboot_handler(struct ls2k500sfb_struct *priv) +{ + int irq = get_gpio_irq_from_acpi_table(GPIO); + + if (irq < 0) { + irq = get_gpio_irq_from_acpi_gsi(GPIO); + pr_notice("gsi gpio irq %d\n", irq); + } else + pr_notice("acpi gpio irq %d\n", irq); + writel(readl(GPIO_OEN) | (0x1 << GPIO), GPIO_OEN); + writel(readl(GPIO_FUNCEN) & ~(0x1 << GPIO), GPIO_FUNCEN); + writel(readl(GPIO_INTPOL) & ~(0x1 << GPIO), GPIO_INTPOL); + writel(readl(GPIO_INTEN) | (0x1 << GPIO), GPIO_INTEN); + if (request_irq(irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_FALLING, + "ls2k500sfb", priv)) + pr_err("request_irq(%d) failed\n", irq); + return 0; +} +#endif + +static const struct fb_fix_screeninfo simplefb_fix = { + .id = "simple", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, +}; + +static const struct fb_var_screeninfo simplefb_var = { + .height = -1, + .width = -1, + .activate = FB_ACTIVATE_NOW, + .vmode = FB_VMODE_NONINTERLACED, +}; + +#define PSEUDO_PALETTE_SIZE 16 +struct simplefb_par { + char *penv; + char *preg; + u32 palette[PSEUDO_PALETTE_SIZE]; +}; + +static u_long get_line_length(int xres_virtual, int bpp) +{ + u_long length; + + length = xres_virtual * bpp; + length = (length + 31) & ~31; + length >>= 3; + return length; +} + +static int simplefb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + u_long line_length; + + /* + * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal! + * as FB_VMODE_SMOOTH_XPAN is only used internally + */ + + if (var->vmode & FB_VMODE_CONUPDATE) { + var->vmode |= FB_VMODE_YWRAP; + var->xoffset = info->var.xoffset; + var->yoffset = info->var.yoffset; + } + + /* + * Some very basic checks + */ + if (!var->xres) + var->xres = 1; + if (!var->yres) + var->yres = 1; + if (var->xres > var->xres_virtual) + var->xres_virtual = var->xres; + if (var->yres > var->yres_virtual) + var->yres_virtual = var->yres; + if (var->bits_per_pixel <= 16) + var->bits_per_pixel = 16; + else if (var->bits_per_pixel <= 32) + var->bits_per_pixel = 32; + else + return -EINVAL; + + if (var->xres_virtual < var->xoffset + var->xres) + var->xres_virtual = var->xoffset + var->xres; + if (var->yres_virtual < var->yoffset + var->yres) + var->yres_virtual = var->yoffset + var->yres; + + /* + * Memory limit + */ + line_length = + get_line_length(var->xres_virtual, var->bits_per_pixel); + if (line_length * var->yres_virtual > info->fix.smem_len) + return -ENOMEM; + + /* + * Now that we checked it we alter var. The reason being is that the video + * mode passed in might not work but slight changes to it might make it + * work. This way we let the user know what is acceptable. 
+ */ + switch (var->bits_per_pixel) { + case 16: /* BGR 565 */ + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 32: /* BGRA 8888 */ + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + } + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + + return 0; +} + +static int simplefb_set_par(struct fb_info *info) +{ + struct simplefb_par *par = info->par; + int reg_val; + + info->fix.line_length = get_line_length(info->var.xres_virtual, + info->var.bits_per_pixel); + sprintf(par->penv, "video=%dx%d-%d@2M", + info->var.xres_virtual, + info->var.yres_virtual, + info->var.bits_per_pixel); + + reg_val = readl(par->preg); + writel(reg_val + 1, par->preg); + + return 0; +} + +static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info) +{ + u32 *pal = info->pseudo_palette; + u32 cr = red >> (16 - info->var.red.length); + u32 cg = green >> (16 - info->var.green.length); + u32 cb = blue >> (16 - info->var.blue.length); + u32 value; + + if (regno >= PSEUDO_PALETTE_SIZE) + return -EINVAL; + + value = (cr << info->var.red.offset) | + (cg << info->var.green.offset) | + (cb << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + + mask <<= info->var.transp.offset; + value |= mask; + } + pal[regno] = value; + + return 0; +} + + +static void simplefb_destroy(struct fb_info *info) +{ + if (info->screen_base) + iounmap(info->screen_base); +} + +static const struct fb_ops simplefb_ops = { + .owner = THIS_MODULE, + .fb_destroy = simplefb_destroy, + .fb_setcolreg = simplefb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_check_var = simplefb_check_var, + .fb_set_par = simplefb_set_par, +}; + +static struct simplefb_format simplefb_formats[] = SIMPLEFB_FORMATS; + +struct simplefb_params { + u32 width; + u32 height; + u32 stride; + struct simplefb_format *format; +}; + +static int simplefb_parse_pd(struct platform_device *pdev, + struct simplefb_params *params) +{ + struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev); + int i; + + params->width = pd->width; + params->height = pd->height; + params->stride = pd->stride; + + params->format = NULL; + for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) { + if (strcmp(pd->format, simplefb_formats[i].name)) + continue; + + params->format = &simplefb_formats[i]; + break; + } + + if (!params->format) { + dev_err(&pdev->dev, "Invalid format value\n"); + return -EINVAL; + } + + return 0; +} + +static int simplefb_probe(struct platform_device *pdev) +{ + int ret; + struct simplefb_params params; + struct fb_info *info; + struct simplefb_par *par; + struct resource *mem, *envmem, *regmem; + + ret = simplefb_parse_pd(pdev, ¶ms); + + if (ret) + return ret; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + envmem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + regmem = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!mem || !envmem || !regmem) { + dev_err(&pdev->dev, "No memory resource\n"); + return -EINVAL; + } + + info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev); + if (!info) + return 
-ENOMEM; + platform_set_drvdata(pdev, info); + + par = info->par; + par->penv = ioremap(envmem->start, resource_size(envmem)); + par->preg = ioremap(regmem->start, resource_size(regmem)); + + info->fix = simplefb_fix; + info->fix.smem_start = mem->start; + info->fix.smem_len = resource_size(mem); + info->fix.line_length = params.stride; + + info->var = simplefb_var; + info->var.xres = params.width; + info->var.yres = params.height; + info->var.xres_virtual = params.width; + info->var.yres_virtual = params.height; + info->var.bits_per_pixel = params.format->bits_per_pixel; + info->var.red = params.format->red; + info->var.green = params.format->green; + info->var.blue = params.format->blue; + info->var.transp = params.format->transp; + + ret = devm_aperture_acquire_for_platform_device(pdev, + info->fix.smem_start, + info->fix.smem_len); + if (ret) { + dev_info(&pdev->dev, "cannot acquire aperture\n"); + goto error_fb_release; + } + + info->fbops = &simplefb_ops; + info->flags = 0; + info->screen_base = ioremap_wc(info->fix.smem_start, + info->fix.smem_len); + if (!info->screen_base) { + ret = -ENOMEM; + goto error_fb_release; + } + info->pseudo_palette = par->palette; + + dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n", + info->fix.smem_start, info->fix.smem_len, + info->screen_base); + dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n", + params.format->name, + info->var.xres, info->var.yres, + info->var.bits_per_pixel, info->fix.line_length); + + ret = register_framebuffer(info); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); + goto error_fb_release; + } else + dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); + + local_irq_disable(); + mscycles = get_cycles(); + mdelay(1); + mscycles = get_cycles() - mscycles; + local_irq_enable(); + + return ret; +error_fb_release: + framebuffer_release(info); + return ret; +} + +static int simplefb_remove(struct platform_device *pdev) +{ + struct fb_info *info = platform_get_drvdata(pdev); + + unregister_framebuffer(info); + framebuffer_release(info); + + return 0; +} + +static struct platform_driver simplefb_driver = { + .driver = { + .name = "virt-framebuffer", + }, + .probe = simplefb_probe, + .remove = simplefb_remove, +}; + +static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct simplefb_platform_data mode; + struct resource res[3]; + struct platform_device *pd; + struct ls2k500sfb_struct *priv; + long phybase, videooffset, videomemorysize; + char *pmode = mode_option; + int depth; + char *penv; + int ret, i; + + if (!dev->bus->number || pci_enable_device(dev)) + return -ENODEV; + priv = kzalloc(sizeof(struct ls2k500sfb_struct), GFP_KERNEL); + priv->dev = dev; + + /* pcimem bar last 16M free, 2MB offset from free for framebuffer */ + phybase = pci_resource_start(dev, 0); + phybase += pci_resource_len(dev, 0) - 0x1000000; + penv = ioremap(phybase, 0x100000); + /*env at last 16M's beginning, first env is video */ + if (!strncmp(penv, "video=", 6)) + pmode = penv + 6; + + priv->penv = penv + 6; + memcpy(priv->saved_env, priv->penv, sizeof(priv->saved_env)); + + mode.width = simple_strtoul(pmode, &pmode, 0); + pmode++; + mode.height = simple_strtoul(pmode, &pmode, 0); + pmode++; + depth = simple_strtoul(pmode, &pmode, 0); + if (pmode && pmode[0]) { + pmode++; + videooffset = simple_strtoul(pmode, &pmode, 0); + if (pmode && pmode[0]) { + switch (pmode[0]) { + case 'M': + case 'm': + videooffset *= 0x100000; + break; + case 
'K': + case 'k': + videooffset *= 1024; + break; + } + } + } else + videooffset = 0x200000; + mode.stride = mode.width * depth / 8; + mode.format = depth == 32 ? "a8r8g8b8" : "r5g6b5"; + + videomemorysize = 0x400000; + + memset(res, 0, sizeof(res)); + res[0].start = phybase + videooffset; + res[0].end = phybase + videooffset + videomemorysize - 1; + res[0].flags = IORESOURCE_MEM; + res[0].parent = &dev->resource[0]; + + res[1].start = phybase; + res[1].end = phybase + 64 - 1; + res[1].flags = IORESOURCE_MEM; + res[1].parent = &dev->resource[0]; + + res[2].start = phybase + 0x00f00014; + res[2].end = phybase + 0x00f0001c - 1; + res[2].flags = IORESOURCE_MEM; + res[2].parent = &dev->resource[0]; + + priv->pd = pd = platform_device_register_resndata(NULL, "virt-framebuffer", 0, + res, 3, &mode, sizeof(mode)); + + ret = platform_driver_register(&simplefb_driver); + if (ret) + return ret; + priv->wq = create_singlethread_workqueue("ls2k500sfb wq"); + INIT_WORK(&priv->work, ls2k500sfb_events_fn); + INIT_DELAYED_WORK(&priv->redraw_work, ls2k500sfb_redraw_fn); + + ls2k500sfb_events_fn(&priv->work); + if (request_irq(dev->irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_RISING, + "ls2k500sfb", priv)) + pr_err("request_irq(%d) failed\n", dev->irq); + #ifdef CONFIG_LOONGARCH + register_gpio_reboot_handler(priv); + #endif + pci_set_drvdata(dev, priv); + for (i = 0; i < 5; i++) { + res[0].start = phybase + 0x00f00000 + 0x1c*i; + res[0].end = phybase + 0x00f00000 + 0x1c*(i+1) - 1; + platform_device_register_simple("ipmi_ls2k500_si", i, res, 1); + } + + return PTR_ERR_OR_ZERO(pd); +} + +static void ls2k500sfb_remove(struct pci_dev *dev) +{ + struct ls2k500sfb_struct *priv = pci_get_drvdata(dev); + + platform_device_del(priv->pd); +} + +static struct pci_device_id ls2k500sfb_devices[] = { + {PCI_DEVICE(0x14, 0x1a05)}, + {0, 0, 0, 0, 0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, ls2k500sfb_devices); + +static struct pci_driver ls2k500sfb_driver = { + .name = "ls2k500sfb", + .id_table = ls2k500sfb_devices, + .probe = ls2k500sfb_probe, + .remove = ls2k500sfb_remove, + .driver = { + .name = "ls2k500sfb", + }, +}; + +static int __init ls2k500sfb_init(void) +{ + return pci_register_driver(&ls2k500sfb_driver); +} + +module_init(ls2k500sfb_init); + +#ifdef MODULE +static void __exit ls2k500sfb_exit(void) +{ + pci_unregister_driver(&ls2k500sfb_driver); +} + +module_exit(ls2k500sfb_exit); +#endif + +MODULE_LICENSE("GPL"); -- Gitee From 9c2a046bad19eb84b6c82e20f4c6b97804d4aad8 Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Mon, 11 Dec 2023 10:03:11 +0800 Subject: [PATCH 143/953] anolis: ipmi: add ls2k500 bmc ipmi support. 
ANBZ: #8435

Signed-off-by: Chong Qiao
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/char/ipmi/Makefile          |   4 +
 drivers/char/ipmi/btlock.h          |  92 +++++++++++++++
 drivers/char/ipmi/ipmi_si.h         |  11 ++
 drivers/char/ipmi/ipmi_si_intf.c    |   4 +
 drivers/char/ipmi/ipmi_si_ls2k500.c | 173 ++++++++++++++++++++++++++++
 drivers/char/ipmi/kcs_bmc_ls2k500.h |  67 +++++++++++
 drivers/video/fbdev/ls2k500sfb.c    |   6 +-
 7 files changed, 356 insertions(+), 1 deletion(-)
 create mode 100644 drivers/char/ipmi/btlock.h
 create mode 100644 drivers/char/ipmi/ipmi_si_ls2k500.c
 create mode 100644 drivers/char/ipmi/kcs_bmc_ls2k500.h

diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index cb6138b8ded9..bc9c6506fd59 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -13,6 +13,10 @@ ifdef CONFIG_PARISC
 ipmi_si-y += ipmi_si_parisc.o
 endif

+ifdef CONFIG_LOONGARCH
+ipmi_si-y += ipmi_si_ls2k500.o
+endif
+
 obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
 obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
 obj-$(CONFIG_IPMI_SI) += ipmi_si.o
diff --git a/drivers/char/ipmi/btlock.h b/drivers/char/ipmi/btlock.h
new file mode 100644
index 000000000000..cf585e42d42d
--- /dev/null
+++ b/drivers/char/ipmi/btlock.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BTLOCK_H__
+#define __BTLOCK_H__
+
+#include
+#include
+
+union btlock {
+	char b[2];
+	unsigned int u;
+};
+
+/*
+ * Wait 'delay' microseconds and retry if taking the lock fails.
+ * Locking fails when the other side already holds the lock, or when
+ * both sides try to take it at the same time.
+ * The compiler must access the b[] fields with byte loads and stores.
+ */
+static inline int btlock_lock(volatile union btlock *p, int n, unsigned char delay)
+{
+	union btlock t, t1;
+	unsigned long flags;
+	unsigned long c0 = get_cycles(), c1;
+
+	if (n > 1)
+		return -1;
+	delay |= 0x80;
+	t1.u = 0;
+	t1.b[n] = delay;
+
+	while (1) {
+		local_irq_save(flags);
+		p->b[n] = delay;
+		t.u = p->u;
+		if (t.u == t1.u) {
+			wmb(); /* flush write out immediately */
+			local_irq_restore(flags);
+			return 0;
+		}
+		p->b[n] = 0;
+		t.u = p->u;
+		wmb(); /* flush write out immediately */
+		local_irq_restore(flags);
+		c1 = get_cycles();
+		if (c1 - c0 > *mscycles * 1000)
+			return -1;
+		ndelay(((t.b[1 - n] & 0x7f) + (c1 & 1)) * 100);
+	}
+	return 0;
+}
+
+static inline int btlock_trylock(volatile union btlock *p, int n, unsigned char delay)
+{
+	union btlock t, t1;
+	unsigned long flags;
+
+	if (n > 1)
+		return -1;
+	delay |= 0x80;
+	t1.u = 0;
+	t1.b[n] = delay;
+
+	local_irq_save(flags);
+	p->b[n] = delay;
+	t.u = p->u;
+	if (t.u == t1.u) {
+		wmb(); /* flush write out immediately */
+		local_irq_restore(flags);
+		return 0;
+	}
+	p->b[n] = 0;
+	t.u = p->u;
+	wmb(); /* flush write out immediately */
+	local_irq_restore(flags);
+	ndelay(((t.b[1 - n] & 0x7f) + (get_cycles() & 1)) * 100);
+	return -1;
+}
+
+static inline int btlock_unlock(volatile union btlock *p, int n)
+{
+	p->b[n] = 0;
+	wmb(); /* flush write out immediately */
+	return p->u;
+}
+
+static inline int btlock_islocked(volatile union btlock *p, int n)
+{
+	union btlock t;
+
+	t.u = p->u;
+	return t.b[n] && !t.b[1 - n];
+}
+#endif
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753..aa2f81472ce5 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -51,6 +51,9 @@ struct si_sm_io {
 	unsigned int regshift;
 	enum ipmi_addr_space addr_space;
 	unsigned long addr_data;
+#ifdef CONFIG_LOONGARCH
+	void *addr_source_data;
+#endif
 	enum ipmi_addr_src addr_source; /* 
ACPI, PCI, SMBIOS, hardcode, etc. */ union ipmi_smi_info_union addr_info; @@ -101,6 +104,14 @@ static inline void ipmi_si_parisc_init(void) { } static inline void ipmi_si_parisc_shutdown(void) { } #endif +#ifdef CONFIG_LOONGARCH +int ipmi_si_ls2k500_init(void); +void ipmi_si_ls2k500_shutdown(void); +#else +static inline void ipmi_si_ls2k500_init(void) { } +static inline void ipmi_si_ls2k500_shutdown(void) { } +#endif + int ipmi_si_port_setup(struct si_sm_io *io); int ipmi_si_mem_setup(struct si_sm_io *io); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5cd031f3fc97..373ee71811e3 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2104,6 +2104,8 @@ static int __init init_ipmi_si(void) ipmi_si_platform_init(); + ipmi_si_ls2k500_init(); + ipmi_si_pci_init(); ipmi_si_parisc_init(); @@ -2289,6 +2291,8 @@ static void cleanup_ipmi_si(void) ipmi_si_parisc_shutdown(); + ipmi_si_ls2k500_shutdown(); + ipmi_si_platform_shutdown(); mutex_lock(&smi_infos_lock); diff --git a/drivers/char/ipmi/ipmi_si_ls2k500.c b/drivers/char/ipmi/ipmi_si_ls2k500.c new file mode 100644 index 000000000000..7e259d85729f --- /dev/null +++ b/drivers/char/ipmi/ipmi_si_ls2k500.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ipmi_si_pci.c + * + * Handling for IPMI devices on the PCI bus. + */ + +#define pr_fmt(fmt) "ipmi_pci: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipmi_si.h" +static unsigned long *mscycles; +static unsigned long *event_jiffies; +#include "kcs_bmc_ls2k500.h" +static int resetbootwait = 60; +module_param(resetbootwait, int, 0664); + +#define KCS_STATUS_CMD_DAT BIT(3) + +static int pcie_busy(void) +{ + if (time_before(jiffies, *event_jiffies + resetbootwait*HZ)) + return -1; + return 0; +} + +static unsigned char intf_sim_inb(const struct si_sm_io *io, + unsigned int offset) +{ + IPMIKCS *ik = io->addr_source_data; + uint32_t ret; + + if (pcie_busy()) + return 0; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return 0; + switch (offset & 1) { + case 0: + ret = ik->data_out_reg; + IPMI_KCS_SET_OBF(ik->status_reg, 0); + break; + case 1: + ret = ik->status_reg; + break; + } + btlock_unlock(&ik->lock, 0); + return ret; +} + +static void intf_sim_outb(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + IPMIKCS *ik = io->addr_source_data; + + if (pcie_busy()) + return; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return; + if (IPMI_KCS_GET_IBF(ik->status_reg)) + goto out; + + switch (offset & 1) { + case 0: + ik->data_in_reg = val; + ik->status_reg &= ~KCS_STATUS_CMD_DAT; + break; + + case 1: + ik->cmd_reg = val; + ik->status_reg |= KCS_STATUS_CMD_DAT; + break; + } + IPMI_KCS_SET_IBF(ik->status_reg, 1); + ik->write_req++; +out: + btlock_unlock(&ik->lock, 0); +} + +static void ipmi_ls2k500_cleanup(struct si_sm_io *io) +{ +} + +int ipmi_si_sim_setup(struct si_sm_io *io) +{ + io->inputb = intf_sim_inb; + io->outputb = intf_sim_outb; + io->io_cleanup = ipmi_ls2k500_cleanup; + return 0; +} + +#define platform_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#define platform_resource_end(dev, bar) ((dev)->resource[(bar)].end) +static int of_ipmi_ls2k500_probe(struct platform_device *pdev) +{ + int rv; + struct si_sm_io io; + void **kcs_data; + + memset(&io, 0, sizeof(io)); + io.addr_source = SI_PLATFORM; + dev_info(&pdev->dev, "probing via ls2k500 platform"); + io.si_type = SI_KCS; + + io.addr_space = IPMI_MEM_ADDR_SPACE; + 
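+	/*
+	 * There are no real KCS I/O ports behind this interface: the BMC
+	 * exports an IPMIKCS register block in PCIe memory, and
+	 * ipmi_si_sim_setup() wires inputb/outputb to emulate KCS register
+	 * accesses against that shared block under the btlock.
+	 */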
io.io_setup = ipmi_si_sim_setup; + io.addr_data = pdev->resource[0].start; + io.addr_source_data = ioremap(pdev->resource[0].start, + pdev->resource[0].end - + pdev->resource[0].start + 1); + kcs_data = dev_get_platdata(&pdev->dev); + event_jiffies = kcs_data[0]; + mscycles = kcs_data[1]; + io.dev = &pdev->dev; + io.regspacing = 4; + io.regsize = DEFAULT_REGSIZE; + io.regshift = 0; + io.irq = 0; + if (io.irq) + io.irq_setup = ipmi_std_irq_setup; + + dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + &pdev->resource[0], io.regsize, io.regspacing, io.irq); + + rv = ipmi_si_add_smi(&io); + if (rv) + ipmi_si_remove_by_dev(&pdev->dev); + + return rv; +} + +static int ipmi_ls2k500_remove(struct platform_device *pdev) +{ + ipmi_si_remove_by_dev(&pdev->dev); + + return 0; +} + +#define LS2K500_SI_DEVICE_NAME "ipmi_ls2k500_si" +struct platform_driver ipmi_ls2k500_platform_driver = { + .driver = { + .name = LS2K500_SI_DEVICE_NAME, + }, + .probe = of_ipmi_ls2k500_probe, + .remove = ipmi_ls2k500_remove, +}; + +static bool platform_registered; +int ipmi_si_ls2k500_init(void) +{ + int rv; + + rv = platform_driver_register(&ipmi_ls2k500_platform_driver); + if (rv) + pr_err("Unable to register driver: %d\n", rv); + else + platform_registered = true; + return rv; +} + +void ipmi_si_ls2k500_shutdown(void) +{ + if (platform_registered) + platform_driver_unregister(&ipmi_ls2k500_platform_driver); +} diff --git a/drivers/char/ipmi/kcs_bmc_ls2k500.h b/drivers/char/ipmi/kcs_bmc_ls2k500.h new file mode 100644 index 000000000000..86e08a08d41a --- /dev/null +++ b/drivers/char/ipmi/kcs_bmc_ls2k500.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KCS_BMC_LS2K500__ +#define __KCS_BMC_LS2K500__ 1 +#include +#include "btlock.h" +#define IPMI_KCS_OBF_BIT 0 +#define IPMI_KCS_IBF_BIT 1 +#define IPMI_KCS_SMS_ATN_BIT 2 +#define IPMI_KCS_CD_BIT 3 + +#define IPMI_KCS_OBF_MASK (1 << IPMI_KCS_OBF_BIT) +#define IPMI_KCS_GET_OBF(d) (((d) >> IPMI_KCS_OBF_BIT) & 0x1) +#define IPMI_KCS_SET_OBF(d, v) ((d) = (((d) & ~IPMI_KCS_OBF_MASK) | \ + (((v) & 1) << IPMI_KCS_OBF_BIT))) +#define IPMI_KCS_IBF_MASK (1 << IPMI_KCS_IBF_BIT) +#define IPMI_KCS_GET_IBF(d) (((d) >> IPMI_KCS_IBF_BIT) & 0x1) +#define IPMI_KCS_SET_IBF(d, v) ((d) = (((d) & ~IPMI_KCS_IBF_MASK) | \ + (((v) & 1) << IPMI_KCS_IBF_BIT))) +#define IPMI_KCS_SMS_ATN_MASK (1 << IPMI_KCS_SMS_ATN_BIT) +#define IPMI_KCS_GET_SMS_ATN(d) (((d) >> IPMI_KCS_SMS_ATN_BIT) & 0x1) +#define IPMI_KCS_SET_SMS_ATN(d, v) ((d) = (((d) & ~IPMI_KCS_SMS_ATN_MASK) | \ + ((v) & 1) << IPMI_KCS_SMS_ATN_BIT)) +#define IPMI_KCS_CD_MASK (1 << IPMI_KCS_CD_BIT) +#define IPMI_KCS_GET_CD(d) (((d) >> IPMI_KCS_CD_BIT) & 0x1) +#define IPMI_KCS_SET_CD(d, v) ((d) = (((d) & ~IPMI_KCS_CD_MASK) | \ + (((v) & 1) << IPMI_KCS_CD_BIT))) + +#define IPMI_KCS_IDLE_STATE 0 +#define IPMI_KCS_READ_STATE 1 +#define IPMI_KCS_WRITE_STATE 2 +#define IPMI_KCS_ERROR_STATE 3 + +#define IPMI_KCS_GET_STATE(d) (((d) >> 6) & 0x3) +#define IPMI_KCS_SET_STATE(d, v) ((d) = ((d) & ~0xc0) | (((v) & 0x3) << 6)) + +#define IPMI_KCS_ABORT_STATUS_CMD 0x60 +#define IPMI_KCS_WRITE_START_CMD 0x61 +#define IPMI_KCS_WRITE_END_CMD 0x62 +#define IPMI_KCS_READ_CMD 0x68 +#define IPMI_KCS_STATUS_NO_ERR 0x00 +#define IPMI_KCS_STATUS_ABORTED_ERR 0x01 +#define IPMI_KCS_STATUS_BAD_CC_ERR 0x02 +#define IPMI_KCS_STATUS_LENGTH_ERR 0x06 +#define KCS_STATUS_CMD_DAT BIT(3) + +typedef struct IPMIKCS { + union btlock lock; + uint8_t status_reg; + uint8_t data_out_reg; + + int16_t data_in_reg; + int16_t cmd_reg; + int16_t reserved2; + + uint32_t 
write_req; + uint32_t write_ack; + + uint32_t reserved3; + uint32_t reserved4; +} IPMIKCS; + +struct loongson_kcs_bmc { + struct list_head next; + IPMIKCS *kcs; + struct kcs_bmc *bmc; +}; +#endif diff --git a/drivers/video/fbdev/ls2k500sfb.c b/drivers/video/fbdev/ls2k500sfb.c index a3722dcaada0..00a83ea7c1e3 100644 --- a/drivers/video/fbdev/ls2k500sfb.c +++ b/drivers/video/fbdev/ls2k500sfb.c @@ -111,6 +111,7 @@ static void ls2k500sfb_redraw_fn(struct work_struct *work) switch_console(saved_console); } +static unsigned long event_jiffies; static void ls2k500sfb_events_fn(struct work_struct *work) { struct ls2k500sfb_struct *priv = container_of(work, struct ls2k500sfb_struct, work); @@ -151,6 +152,7 @@ static void ls2k500sfb_events_fn(struct work_struct *work) pci_write_config_dword(ppdev, 0x18, 0); pci_write_config_dword(ppdev, 0x1c, 0); pci_write_config_dword(ppdev, 0x20, 0); + event_jiffies = jiffies; atomic_set(&waiting_for_pciebreak_ipi, 0); wmb(); /* flush all write after change pcie window */ local_bh_enable(); @@ -648,6 +650,7 @@ static struct platform_driver simplefb_driver = { .remove = simplefb_remove, }; +static void *kcs_data[2] = {&event_jiffies, &mscycles}; static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct simplefb_platform_data mode; @@ -740,7 +743,8 @@ static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id) for (i = 0; i < 5; i++) { res[0].start = phybase + 0x00f00000 + 0x1c*i; res[0].end = phybase + 0x00f00000 + 0x1c*(i+1) - 1; - platform_device_register_simple("ipmi_ls2k500_si", i, res, 1); + platform_device_register_resndata(NULL, "ipmi_ls2k500_si", i, res, 1, + kcs_data, sizeof(kcs_data)); } return PTR_ERR_OR_ZERO(pd); -- Gitee From deaa046e2664b5f08511b6ec31387a521ed1c539 Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Mon, 11 Dec 2023 10:03:12 +0800 Subject: [PATCH 144/953] anolis: LoongArch: defconfig: enable CONFIG_FB_LS2K500=m. 
ANBZ: #8435

Signed-off-by: Chong Qiao
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 arch/loongarch/configs/loongson3_defconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 5bc8803dee3e..569484674f7e 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -674,6 +674,7 @@ CONFIG_DRM_LOONGSON=y
 CONFIG_FB=y
 CONFIG_FB_EFI=y
 CONFIG_FB_RADEON=y
+CONFIG_FB_LS2K500=m
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=m
 # CONFIG_VGA_CONSOLE is not set
-- 
Gitee

From e560ebcdb9eeeac506548bacb8290d05bea48bc6 Mon Sep 17 00:00:00 2001
From: Chong Qiao
Date: Mon, 11 Dec 2023 10:03:13 +0800
Subject: [PATCH 145/953] anolis: LoongArch: fix ls2k500 bmc not working when installing an ISO

ANBZ: #8435

Signed-off-by: Chong Qiao
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/gpu/drm/loongson/loongson_module.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/loongson/loongson_module.c b/drivers/gpu/drm/loongson/loongson_module.c
index d2a51bd395f6..37b7d97c4e70 100644
--- a/drivers/gpu/drm/loongson/loongson_module.c
+++ b/drivers/gpu/drm/loongson/loongson_module.c
@@ -19,6 +19,21 @@ module_param_named(vblank, loongson_vblank, int, 0400);

 static int __init loongson_module_init(void)
 {
+	struct pci_dev *pdev = NULL;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
+		/*
+		 * Multiple video card workaround
+		 *
+		 * This integrated video card will always be selected as
+		 * default boot device by vgaarb subsystem.
+ */ + if (pdev->vendor != PCI_VENDOR_ID_LOONGSON || pdev->device == 0x1a05) { + pr_info("Discrete graphic card detected, abort\n"); + return 0; + } + } + if (!loongson_modeset || video_firmware_drivers_only()) return -ENODEV; -- Gitee From e6cae20e1a36f496ec65a161b46824ae62595f04 Mon Sep 17 00:00:00 2001 From: Baoqi Zhang Date: Mon, 18 Dec 2023 10:20:11 +0800 Subject: [PATCH 146/953] anolis: LS7A2000: Add quirk for OHCI device rev 0x02 ANBZ: #8435 Signed-off-by: Baoqi Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 8b34ccff073a..0b5c55330585 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -32,6 +32,7 @@ #define DEV_LS7A_CONF 0x7a10 #define DEV_LS7A_GNET 0x7a13 #define DEV_LS7A_EHCI 0x7a14 +#define DEV_LS7A_OHCI 0x7a24 #define DEV_LS7A_DC2 0x7a36 #define DEV_LS7A_HDMI 0x7a37 @@ -163,6 +164,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_HDMI, loongson_pci_pin_quirk); +static void loongson_ohci_quirk(struct pci_dev *dev) +{ + if (dev->revision == 0x2) + dev->resource[0].start += 0x1000; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_OHCI, loongson_ohci_quirk); + static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; -- Gitee From fa84b475194a47c6a36555397a08666157c5ef3f Mon Sep 17 00:00:00 2001 From: Tianli Xiong Date: Mon, 18 Dec 2023 10:20:12 +0800 Subject: [PATCH 147/953] anolis: PCI: Check if entry->offset already exist for mem resource ANBZ: #8435 Fix patch "LoongArch: Add PCI controller support" Signed-off-by: Tianli Xiong Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/pci/acpi.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 813b668450c1..c2219ac1d7e8 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -214,9 +214,11 @@ static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) if (status > 0) { resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { if (entry->res->flags & IORESOURCE_MEM) { - entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); - entry->res->start |= entry->offset; - entry->res->end |= entry->offset; + if (!entry->offset) { + entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); + entry->res->start |= entry->offset; + entry->res->end |= entry->offset; + } } } return status; -- Gitee From 8cd4b70d83699eaf73b7fe96a0e6706a020b6f09 Mon Sep 17 00:00:00 2001 From: Tianli Xiong Date: Mon, 18 Dec 2023 10:20:13 +0800 Subject: [PATCH 148/953] anolis: PCI: Check if the pci controller can use both CFG0 and CFG1 mode to access configuration space ANBZ: #8435 Fix patch "PCI: loongson: Use generic 8/16/32-bit config ops on LS2K/LS7A" Signed-off-by: Tianli Xiong Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 0b5c55330585..701c6f935ee1 100644 --- a/drivers/pci/controller/pci-loongson.c +++ 
b/drivers/pci/controller/pci-loongson.c
@@ -316,6 +316,7 @@ static int loongson_pci_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct pci_host_bridge *bridge;
 	struct resource *regs;
+	unsigned int num = 0;

 	if (!node)
 		return -ENODEV;
@@ -340,7 +341,9 @@ static int loongson_pci_probe(struct platform_device *pdev)
 	}

 	if (priv->data->flags & FLAG_CFG1) {
-		regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (priv->cfg0_base)
+			num = 1;
+		regs = platform_get_resource(pdev, IORESOURCE_MEM, num);
 		if (!regs)
 			dev_info(dev, "missing mem resource for cfg1\n");
 		else {
-- 
Gitee

From 8c3d434e44fe6055303538bbc7a430480b3bbe22 Mon Sep 17 00:00:00 2001
From: Jianmin Lv
Date: Mon, 18 Dec 2023 10:20:14 +0800
Subject: [PATCH 149/953] anolis: PCI: PM: fix pcie mrrs restoring

ANBZ: #8435

Don't limit MRRS during resume, so that the saved value can be restored.

Fix patch "PCI: loongson: Improve the MRRS quirk for LS7A"

Signed-off-by: Jianmin Lv
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/pci/pci.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 06fc6f532d6c..d3f9688491b4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 #include "pci.h"

 DEFINE_MUTEX(pci_slot_mutex);
@@ -172,6 +173,11 @@ static bool pci_bridge_d3_disable;
 /* Force bridge_d3 for all PCIe ports */
 static bool pci_bridge_d3_force;

+#ifndef CONFIG_PM_SLEEP
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state	(PM_SUSPEND_ON)
+#endif
+
 static int __init pcie_port_pm_setup(char *str)
 {
 	if (!strcmp(str, "off"))
@@ -6179,7 +6185,8 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)

 	v = (ffs(rq) - 8) << 12;

-	if (bridge->no_inc_mrrs) {
+	if (pm_suspend_target_state == PM_SUSPEND_ON &&
+	    bridge->no_inc_mrrs) {
 		int max_mrrs = pcie_get_readrq(dev);

 		if (rq > max_mrrs) {
-- 
Gitee

From bba36281a16279175b08f6bc3e6f6c5e9b19b9bb Mon Sep 17 00:00:00 2001
From: Hongchen Zhang
Date: Mon, 18 Dec 2023 10:20:15 +0800
Subject: [PATCH 150/953] anolis: PCI: fix kabi error caused by pm_suspend_target_state

ANBZ: #8435

Fix the kabi error caused by pm_suspend_target_state, which is used only
by Loongson devices.
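In effect, on kernels built without PM sleep support the symbol
degenerates to a compile-time constant. A minimal sketch of the net
result of this patch on top of the previous one (same names as in the
hunks):

    #ifdef CONFIG_MACH_LOONGSON64
    #ifndef CONFIG_PM_SLEEP
    suspend_state_t pm_suspend_target_state;	/* kabi placeholder */
    #define pm_suspend_target_state (PM_SUSPEND_ON)
    #endif
    #endif

So the MRRS clamp in pcie_set_readrq() always applies on such builds,
and is bypassed only while a suspend/resume transition is in flight on
PM-enabled kernels.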
Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/pci.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d3f9688491b4..70e61f719040 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -32,7 +32,9 @@ #include #include #include +#ifdef CONFIG_MACH_LOONGSON64 #include +#endif #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); @@ -173,11 +175,15 @@ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; +#ifdef CONFIG_MACH_LOONGSON64 + #ifndef CONFIG_PM_SLEEP suspend_state_t pm_suspend_target_state; #define pm_suspend_target_state (PM_SUSPEND_ON) #endif +#endif + static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) @@ -6166,8 +6172,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; +#ifdef CONFIG_MACH_LOONGSON64 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); - +#endif if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -6185,6 +6192,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; +#ifdef CONFIG_MACH_LOONGSON64 if (pm_suspend_target_state == PM_SUSPEND_ON && bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); @@ -6194,6 +6202,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) return -EINVAL; } } +#endif ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); -- Gitee From 98c1c063236814ec64928634224c7c2cf30a5be2 Mon Sep 17 00:00:00 2001 From: Tianli Xiong Date: Mon, 18 Dec 2023 10:20:16 +0800 Subject: [PATCH 151/953] anolis: LoongArch: fix some PCIE card not scanning properly ANBZ: #8435 Fix some pcie card not scanning properly when bus number is inconsistent during firmware and kernel scan phases. Signed-off-by: liuyun Signed-off-by: Tianli Xiong Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 34 +++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 701c6f935ee1..a0050da6d313 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -250,6 +250,36 @@ static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, return NULL; } +static int pci_loongson_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + void __iomem *addr; + + addr = bus->ops->map_bus(bus, devfn, where); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (size == 1) + *val = readb(addr); + else if (size == 2) + *val = readw(addr); + else + *val = readl(addr); + /* + * fix some pcie card not scanning properly when bus number is + * inconsistent during firmware and kernel scan phases. 
+ */ + if (*val == 0x0 && where == PCI_VENDOR_ID) { + writel(*val, addr); + *val = readl(addr); + } + + + return PCIBIOS_SUCCESSFUL; +} + #ifdef CONFIG_OF static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) @@ -273,7 +303,7 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) /* LS2K/LS7A accept 8/16/32-bit PCI config operations */ static struct pci_ops loongson_pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, }; @@ -400,7 +430,7 @@ const struct pci_ecam_ops loongson_pci_ecam_ops = { .init = loongson_pci_ecam_init, .pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, } }; -- Gitee From 764d04be5bde2ab13cb6b159ca03779d8264f35d Mon Sep 17 00:00:00 2001 From: Jianmin Lv Date: Mon, 18 Dec 2023 10:20:17 +0800 Subject: [PATCH 152/953] anolis: PCI: LS7A2000: fix pm transition of devices under pcie port ANBZ: #8435 Signed-off-by: Jianmin Lv Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index a0050da6d313..7ef3c18a9ccf 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -81,6 +81,20 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); +static void loongson_d3_quirk(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + pdev->no_d1d2 = 1; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT3, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT4, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT5, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT6, loongson_d3_quirk); + /* * Some Loongson PCIe ports have hardware limitations on their Maximum Read * Request Size. They can't handle anything larger than this. 
Sane
-- 
Gitee

From 306f185794a5d1fcb20dd9e6fae49404ce7447c2 Mon Sep 17 00:00:00 2001
From: Baoqi Zhang
Date: Mon, 18 Dec 2023 10:20:18 +0800
Subject: [PATCH 153/953] anolis: PCI: LS7A2000: fix GPU card error

ANBZ: #8435

Add an address window to fix GPU access errors.

Signed-off-by: Baoqi Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/pci/controller/pci-loongson.c | 38 +++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 7ef3c18a9ccf..de920bb90d25 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -185,6 +185,44 @@ static void loongson_ohci_quirk(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_OHCI, loongson_ohci_quirk);

+static void loongson_display_quirk(struct pci_dev *dev)
+{
+	u32 val;
+	u64 mask, size;
+	u64 max_size = 0;
+	int i, num;
+	struct pci_bus *bus = dev->bus;
+
+	if (!dev->bus->number) {
+		if (!(dev->vendor == PCI_VENDOR_ID_LOONGSON && dev->device == 0x7a25))
+			return;
+	} else {
+		while (!pci_is_root_bus(bus->parent))
+			bus = bus->parent;
+
+		/* ensure slot is 7a2000 */
+		if (bus->self->vendor != PCI_VENDOR_ID_LOONGSON || bus->self->device < 0x7a39)
+			return;
+	}
+	max_size = 0;
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if (dev->resource[i].flags & IORESOURCE_MEM) {
+			size = dev->resource[i].end - dev->resource[i].start;
+			if (size > max_size) {
+				max_size = size;
+				num = i;
+			}
+		}
+	}
+	mask = ~(dev->resource[num].end - dev->resource[num].start);
+	val = (dev->resource[num].start >> (24 - 16)) | ((mask >> 24) & 0xffff);
+	writel(val, (volatile void *)0x80000efdfb000174UL);
+	writel(0x80000000, (volatile void *)0x80000efdfb000170UL);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+			PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk);
+
 static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
 {
 	struct pci_config_window *cfg;
-- 
Gitee

From 9f1e4a6cff873ac45b73731bbec7b46e67b46dce Mon Sep 17 00:00:00 2001
From: suijingfeng
Date: Mon, 18 Dec 2023 10:20:19 +0800
Subject: [PATCH 154/953] anolis: PCI: fix X server auto-probe failure when both ast and etnaviv drm are present

ANBZ: #8435

According to the PCI-to-PCI bridge spec, bit 3 of the Bridge Control
register is the VGA Enable bit, which modifies the bridge's response to
VGA compatible addresses. The Bridge Control register provides
extensions to the Command register that are specific to a bridge,
including many of the same controls for the secondary interface that
the Command register provides for the primary interface; some bits
affect the operation of both interfaces of the bridge.

If the VGA Enable bit is set, the bridge positively decodes and
forwards the accesses listed below from the primary interface to the
secondary interface (and, conversely, blocks their forwarding from the
secondary to the primary interface). Forwarding of these accesses is
qualified by the I/O Enable and Memory Enable bits in the Command
register. The default state of this bit after reset must be 0.

In detail, the VGA Enable bit behaves as follows:
when 0: do not forward VGA compatible memory and I/O addresses from the
primary to the secondary interface (addresses defined below) unless
they are enabled for forwarding by the defined I/O and memory address
ranges

when 1: forward VGA compatible memory and I/O addresses (addresses
defined below) from the primary interface to the secondary interface
(if the I/O Enable and Memory Enable bits are set) independent of the
I/O and memory address ranges and independent of the ISA Enable bit

  * memory accesses in the range 000A 0000h to 000B FFFFh
  * I/O addresses in the first 64 KB of the I/O address space
    (AD[31:16] are 0000h) where AD[9:0] are in the ranges 3B0h to 3BBh
    and 3C0h to 3DFh (inclusive of ISA address aliases - AD[15:10] are
    not decoded)

If the VGA Enable bit is set, forwarding of these accesses is
independent of the I/O address range and memory address ranges defined
by the I/O Base and Limit registers, the Memory Base and Limit
registers, and the Prefetchable Memory Base and Limit registers of the
bridge. Forwarding of these accesses is also independent of the
settings of the ISA Enable bit (in the Bridge Control register) or the
VGA Palette Snoop bit (in the Command register).

The AST2500 hardware we are using does not set the VGA Enable bit in
its Bridge Control register, so the vgaarb subsystem does not treat the
VGA card behind this bridge as a valid boot VGA device, which makes the
X server pick the wrong video card when multiple video cards are
present in the system. This seems to be more vgaarb's fault than the
AST2500 BMC's: even though bit 3 of the Bridge Control register is 0,
the bridge should still forward the accesses when the addresses fall
within the ranges of the I/O and memory Base and Limit registers.
Nevertheless, in order to support the Loongson CPU product line, we
provide a workaround for this bug on the Sugon L620-G30 and Sugon
L820-G30 servers.

See a similar bug:
https://patchwork.kernel.org/project/linux-pci/patch/20170619023528.11532-1-dja@axtens.net/

Signed-off-by: suijingfeng
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/pci/controller/pci-loongson.c | 48 +++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index de920bb90d25..df08dbf60c5c 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include

 #include "../pci.h"

@@ -223,6 +224,53 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk);
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
 			PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk);

+static void pci_fixup_aspeed(struct pci_dev *pdev)
+{
+	struct pci_dev *bridge;
+	struct pci_bus *bus;
+	struct pci_dev *vdevp = NULL;
+	u16 config;
+
+	bus = pdev->bus;
+	bridge = bus->self;
+
+	/* Is VGA routed to us? */
+	if (bridge && (pci_is_bridge(bridge))) {
+		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config);
+
+		/* Yes, this bridge is PCI bridge-to-bridge spec compliant,
+		 * just return!
Signed-off-by: suijingfeng
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/pci/controller/pci-loongson.c | 48 +++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index de920bb90d25..df08dbf60c5c 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <linux/vgaarb.h>
 
 #include "../pci.h"
 
@@ -223,6 +224,53 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk);
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
 				PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk);
 
+static void pci_fixup_aspeed(struct pci_dev *pdev)
+{
+	struct pci_dev *bridge;
+	struct pci_bus *bus;
+	struct pci_dev *vdevp = NULL;
+	u16 config;
+
+	bus = pdev->bus;
+	bridge = bus->self;
+
+	/* Is VGA routed to us? */
+	if (bridge && (pci_is_bridge(bridge))) {
+		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config);
+
+		/* Yes, this bridge is PCI bridge-to-bridge spec compliant,
+		 * just return!
+		 */
+		if (config & PCI_BRIDGE_CTL_VGA)
+			return;
+
+		dev_warn(&pdev->dev, "VGA bridge control is not enabled\n");
+	}
+
+	/* Just return if the system already has a default device */
+	if (vga_default_device())
+		return;
+
+	/* No default vga device */
+	while ((vdevp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vdevp))) {
+		if (vdevp->vendor != 0x1a03) {
+			/* Have other vga device in the system, do nothing */
+			dev_info(&pdev->dev,
+				"Another boot vga device: 0x%x:0x%x\n",
+				vdevp->vendor, vdevp->device);
+			return;
+		}
+	}
+
+	vga_set_default_device(pdev);
+
+	dev_info(&pdev->dev,
+		"Boot vga device set as 0x%x:0x%x\n",
+		pdev->vendor, pdev->device);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(0x1a03, 0x2000,
+				PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_aspeed);
+
 static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
 {
 	struct pci_config_window *cfg;
--
Gitee

From fbdeceba3c173a42b20484f6091c8554b81e761a Mon Sep 17 00:00:00 2001
From: Juxin Gao
Date: Mon, 18 Dec 2023 10:20:20 +0800
Subject: [PATCH 155/953] anolis: PCI: irq: Add early_param pci_irq_limit to
 limit pci irq numbers

ANBZ: #8435

Signed-off-by: Juxin Gao
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 drivers/pci/msi/msi.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index ef1d8857a51b..8298d02a667b 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -402,12 +402,32 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
 	return ret;
 }
 
+#ifdef CONFIG_LOONGARCH
+static unsigned int pci_irq_numbers = 32;
+
+static int __init pci_irq_limit(char *str)
+{
+	get_option(&str, &pci_irq_numbers);
+
+	if (pci_irq_numbers == 0)
+		pci_irq_numbers = 32;
+	return 0;
+}
+
+early_param("pci_irq_limit", pci_irq_limit);
+#endif
+
 int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 			   struct irq_affinity *affd)
 {
 	int nvec;
 	int rc;
 
+#ifdef CONFIG_LOONGARCH
+	if (maxvec > 32)
+		maxvec = pci_irq_numbers;
+#endif
+
 	if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
 		return -EINVAL;
 
@@ -778,6 +798,11 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int
 {
 	int hwsize, rc, nvec = maxvec;
 
+#ifdef CONFIG_LOONGARCH
+	if (maxvec > 32)
+		nvec = pci_irq_numbers;
+#endif
+
 	if (maxvec < minvec)
 		return -ERANGE;
--
Gitee
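Usage note: the clamp only takes effect when a driver requests more
than 32 MSI/MSI-X vectors; such requests are reduced to the configured
value. For example, booting a LoongArch kernel with

	pci_irq_limit=16

caps those allocations at 16 vectors per device, while pci_irq_limit=0
(or omitting the parameter) keeps the default cap of 32.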
From 4dfd1a9c2807c992d5190ae61ac87596e2b5d04c Mon Sep 17 00:00:00 2001
From: Tianli Xiong
Date: Mon, 18 Dec 2023 10:20:21 +0800
Subject: [PATCH 156/953] anolis: LoongArch: pci root bridge: set acpi
 companion only when not acpi_disabled

ANBZ: #8435

Fix the earlier patch "LoongArch: Add PCI controller support".

Signed-off-by: Tianli Xiong
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2812
---
 arch/loongarch/pci/acpi.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c
index c2219ac1d7e8..7dabf8d304eb 100644
--- a/arch/loongarch/pci/acpi.c
+++ b/arch/loongarch/pci/acpi.c
@@ -26,16 +26,17 @@ void pcibios_add_bus(struct pci_bus *bus)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-	struct acpi_device *adev = NULL;
-	struct device *bus_dev = &bridge->bus->dev;
-	struct pci_config_window *cfg = bridge->bus->sysdata;
 
-	if (!acpi_disabled)
-		adev = to_acpi_device(cfg->parent);
+	if (!acpi_disabled) {
+		struct acpi_device *adev = NULL;
+		struct device *bus_dev = &bridge->bus->dev;
+		struct pci_config_window *cfg = bridge->bus->sysdata;
 
-	ACPI_COMPANION_SET(&bridge->dev, adev);
-	set_dev_node(bus_dev, pa_to_nid(cfg->res.start));
+		adev = to_acpi_device(cfg->parent);
+		ACPI_COMPANION_SET(&bridge->dev, adev);
+		set_dev_node(bus_dev, pa_to_nid(cfg->res.start));
+	}
 	return 0;
 }
--
Gitee

From d7045c6e0388b19cd2231c19d8e2a95680eed312 Mon Sep 17 00:00:00 2001
From: Tianrui Zhao
Date: Mon, 2 Oct 2023 10:01:20 +0800
Subject: [PATCH 157/953] LoongArch: KVM: Add kvm related header files

ANBZ: #8436

commit b37e6b680e3a4fad40d8c7b92cfe9b2806c6248e upstream.

Add LoongArch KVM related header files, including kvm.h, kvm_host.h
and kvm_types.h. All of those are about LoongArch virtualization
features and kvm interfaces.

Reviewed-by: Bibo Mao
Tested-by: Huacai Chen
Signed-off-by: Tianrui Zhao
Signed-off-by: Huacai Chen
Signed-off-by: Xianglai Li
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2814
---
 arch/loongarch/include/asm/kvm_host.h  | 237 +++++++++++++++++++++++++
 arch/loongarch/include/asm/kvm_types.h |  11 ++
 arch/loongarch/include/uapi/asm/kvm.h  | 108 +++++++++++
 include/uapi/linux/kvm.h               |   9 +
 4 files changed, 365 insertions(+)
 create mode 100644 arch/loongarch/include/asm/kvm_host.h
 create mode 100644 arch/loongarch/include/asm/kvm_types.h
 create mode 100644 arch/loongarch/include/uapi/asm/kvm.h

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
new file mode 100644
index 000000000000..11328700d4fa
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_HOST_H__
+#define __ASM_LOONGARCH_KVM_HOST_H__
+
+#include <linux/cpumask.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/inst.h>
+#include <asm/kvm_mmu.h>
+#include <asm/loongarch.h>
+
+/* Loongarch KVM register ids */
+#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+
+#define KVM_MAX_VCPUS			256
+#define KVM_MAX_CPUCFG_REGS		21
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS		0
+
+#define KVM_HALT_POLL_NS_DEFAULT	500000
+
+struct kvm_vm_stat {
+	struct kvm_vm_stat_generic generic;
+	u64 pages;
+	u64 hugepages;
+};
+
+struct kvm_vcpu_stat {
+	struct kvm_vcpu_stat_generic generic;
+	u64 int_exits;
+	u64 idle_exits;
+	u64 cpucfg_exits;
+	u64 signal_exits;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_context {
+	unsigned long vpid_cache;
+	struct kvm_vcpu *last_vcpu;
+};
+
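+/*
+ * Entry points of the copied world-switch code. The module init code
+ * copies the real world-switch text into a DMW-covered page, sets these
+ * pointers, and records the allocation order for freeing on exit.
+ */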
+struct kvm_world_switch { + int (*exc_entry)(void); + int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu); + unsigned long page_order; +}; + +#define MAX_PGTABLE_LEVELS 4 + +struct kvm_arch { + /* Guest physical mm */ + kvm_pte_t *pgd; + unsigned long gpa_size; + unsigned long invalid_ptes[MAX_PGTABLE_LEVELS]; + unsigned int pte_shifts[MAX_PGTABLE_LEVELS]; + unsigned int root_level; + + s64 time_offset; + struct kvm_context __percpu *vmcs; +}; + +#define CSR_MAX_NUMS 0x800 + +struct loongarch_csrs { + unsigned long csrs[CSR_MAX_NUMS]; +}; + +/* Resume Flags */ +#define RESUME_HOST 0 +#define RESUME_GUEST 1 + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ + EMULATE_DO_IOCSR, /* handle IOCSR request */ + EMULATE_FAIL, /* can't emulate this instruction */ + EMULATE_EXCEPT, /* A guest exception has been generated */ +}; + +#define KVM_LARCH_FPU (0x1 << 0) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 1) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2) + +struct kvm_vcpu_arch { + /* + * Switch pointer-to-function type to unsigned long + * for loading the value into register directly. + */ + unsigned long host_eentry; + unsigned long guest_eentry; + + /* Pointers stored here for easy accessing from assembly code */ + int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); + + /* Host registers preserved across guest mode execution */ + unsigned long host_sp; + unsigned long host_tp; + unsigned long host_pgd; + + /* Host CSRs are used when handling exits from guest */ + unsigned long badi; + unsigned long badv; + unsigned long host_ecfg; + unsigned long host_estat; + unsigned long host_percpu; + + /* GPRs */ + unsigned long gprs[32]; + unsigned long pc; + + /* Which auxiliary state is loaded (KVM_LARCH_*) */ + unsigned int aux_inuse; + + /* FPU state */ + struct loongarch_fpu fpu FPU_ALIGN; + + /* CSR state */ + struct loongarch_csrs *csr; + + /* GPR used as IO source/target */ + u32 io_gpr; + + /* KVM register to control count timer */ + u32 count_ctl; + struct hrtimer swtimer; + + /* Bitmask of intr that are pending */ + unsigned long irq_pending; + /* Bitmask of pending intr to be cleared */ + unsigned long irq_clear; + + /* Bitmask of exceptions that are pending */ + unsigned long exception_pending; + unsigned int esubcode; + + /* Cache for pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* vcpu's vpid */ + u64 vpid; + + /* Frequency of stable timer in Hz */ + u64 timer_mhz; + ktime_t expire; + + /* Last CPU the vCPU state was loaded on */ + int last_sched_cpu; + /* mp state */ + struct kvm_mp_state mp_state; + /* cpucfg */ + u32 cpucfg[KVM_MAX_CPUCFG_REGS]; +}; + +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) +{ + return csr->csrs[reg]; +} + +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val) +{ + csr->csrs[reg] = val; +} + +/* Debug: dump vcpu state */ +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); + +/* MMU handling */ +void kvm_flush_tlb_all(void); +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); + +#define KVM_ARCH_WANT_MMU_NOTIFIER +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int 
kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +static inline void update_pc(struct kvm_vcpu_arch *arch) +{ + arch->pc += 4; +} + +/* + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault. + * @vcpu: Virtual CPU. + * + * Returns: Whether the TLBL exception was likely due to an instruction + * fetch fault rather than a data load fault. + */ +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) +{ + return arch->pc == arch->badv; +} + +/* Misc */ +static inline void kvm_arch_hardware_unsetup(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} +void kvm_check_vpid(struct kvm_vcpu *vcpu); +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer); +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot); +void kvm_init_vmcs(struct kvm *kvm); +void kvm_exc_entry(void); +int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu); + +extern unsigned long vpid_mask; +extern const unsigned long kvm_exception_size; +extern const unsigned long kvm_enter_guest_size; +extern struct kvm_world_switch *kvm_loongarch_ops; + +#define SW_GCSR (1 << 0) +#define HW_GCSR (1 << 1) +#define INVALID_GCSR (1 << 2) + +int get_gcsr_flag(int csr); +void set_hw_gcsr(int csr_id, unsigned long val); + +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */ diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h new file mode 100644 index 000000000000..2fe1d4bdff66 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_KVM_TYPES_H +#define _ASM_LOONGARCH_KVM_TYPES_H + +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40 + +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..c6ad2ee6106c --- /dev/null +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_KVM_H +#define __UAPI_ASM_LOONGARCH_KVM_H + +#include + +/* + * KVM LoongArch specific structures and definitions. + * + * Some parts derived from the x86 version of this file. + */ + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 + +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 pc; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { + __u32 fcsr; + __u64 fcc; /* 8x8 */ + struct kvm_fpureg { + __u64 val64[4]; + } fpr[32]; +}; + +/* + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various + * registers. The id field is broken down as follows: + * + * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. 
+ * bits[31..16] - Register set. + * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CSR registers. + * + * Register set = 2: KVM specific registers (see definitions below). + * + * Register set = 3: FPU / SIMD registers (see definitions below). + * + * Other sets registers may be added in the future. Each set would + * have its own identifier in bits[31..16]. + */ + +#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL) +#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL) +#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) +#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) +#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) +#define KVM_CSR_IDX_MASK 0x7fff +#define KVM_CPUCFG_IDX_MASK 0x7fff + +/* + * KVM_REG_LOONGARCH_KVM - KVM specific control registers. + */ + +#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) + +#define LOONGARCH_REG_SHIFT 3 +#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) +#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) +#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 +#define KVM_MAX_CORES 256 + +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 13065dd96132..863f84619a15 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -264,6 +264,7 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -336,6 +337,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL -- Gitee From c06ee8c0ea679bf50fb4ec0183d993446dd53a6a Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:20 +0800 Subject: [PATCH 158/953] LoongArch: KVM: Implement kvm module related interface ANBZ: #8436 commit 2bd6ac68726131da32ace9717aa63ff68cf6605c upstream. Implement LoongArch kvm module init, module exit interface, using kvm context to save the vpid info and vcpu world switch interface pointer. 
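The VPID allocator keeps a per-CPU counter whose low bits are the VPID
and whose high bits act as a generation number; when the low bits wrap,
a new generation starts and the TLB is flushed. A user-space toy model
of the scheme (illustration only, assuming an 8-bit VPID field; the
real width is read from CSR.GSTAT):

	#include <stdio.h>

	#define VPID_MASK 0xffUL	/* assumed 8-bit VPID field */

	static unsigned long vpid_cache = VPID_MASK + 1;	/* generation 1 */

	static unsigned long next_vpid(void)
	{
		unsigned long vpid = vpid_cache + 1;

		if (!(vpid & VPID_MASK)) {	/* low bits wrapped */
			if (!vpid)		/* counter itself wrapped */
				vpid = VPID_MASK + 1;
			++vpid;			/* vpid 0 stays reserved */
			/* the kernel flushes the guest TLB here */
		}
		return vpid_cache = vpid;
	}

	int main(void)
	{
		vpid_cache |= VPID_MASK - 1;	/* force a wrap soon */
		for (int i = 0; i < 4; i++)
			printf("%#lx\n", next_vpid());
		return 0;
	}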
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/main.c | 358 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 358 insertions(+) create mode 100644 arch/loongarch/kvm/main.c diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c new file mode 100644 index 000000000000..267c0505ea89 --- /dev/null +++ b/arch/loongarch/kvm/main.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include "trace.h" + +unsigned long vpid_mask; +struct kvm_world_switch *kvm_loongarch_ops; +static int gcsr_flag[CSR_MAX_NUMS]; +static struct kvm_context __percpu *vmcs; + +int get_gcsr_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + return gcsr_flag[csr]; + + return INVALID_GCSR; +} + +static inline void set_gcsr_sw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= SW_GCSR; +} + +static inline void set_gcsr_hw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= HW_GCSR; +} + +/* + * The default value of gcsr_flag[CSR] is 0, and we use this + * function to set the flag to 1 (SW_GCSR) or 2 (HW_GCSR) if the + * gcsr is software or hardware. It will be used by get/set_gcsr, + * if gcsr_flag is HW we should use gcsrrd/gcsrwr to access it, + * else use software csr to emulate it. + */ +static void kvm_init_gcsr_flag(void) +{ + set_gcsr_hw_flag(LOONGARCH_CSR_CRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_PRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_EUEN); + set_gcsr_hw_flag(LOONGARCH_CSR_MISC); + set_gcsr_hw_flag(LOONGARCH_CSR_ECFG); + set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT); + set_gcsr_hw_flag(LOONGARCH_CSR_ERA); + set_gcsr_hw_flag(LOONGARCH_CSR_BADV); + set_gcsr_hw_flag(LOONGARCH_CSR_BADI); + set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_ASID); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDL); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDH); + set_gcsr_hw_flag(LOONGARCH_CSR_PGD); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1); + set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE); + set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG); + set_gcsr_hw_flag(LOONGARCH_CSR_CPUID); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS0); + set_gcsr_hw_flag(LOONGARCH_CSR_KS1); + set_gcsr_hw_flag(LOONGARCH_CSR_KS2); + set_gcsr_hw_flag(LOONGARCH_CSR_KS3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS4); + set_gcsr_hw_flag(LOONGARCH_CSR_KS5); + set_gcsr_hw_flag(LOONGARCH_CSR_KS6); + set_gcsr_hw_flag(LOONGARCH_CSR_KS7); + set_gcsr_hw_flag(LOONGARCH_CSR_TMID); + set_gcsr_hw_flag(LOONGARCH_CSR_TCFG); + set_gcsr_hw_flag(LOONGARCH_CSR_TVAL); + set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR); + set_gcsr_hw_flag(LOONGARCH_CSR_CNTC); + set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD); 
+ set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3); + + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1); + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE); + set_gcsr_sw_flag(LOONGARCH_CSR_CTAG); + set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG); + set_gcsr_sw_flag(LOONGARCH_CSR_DERA); + set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE); + + set_gcsr_sw_flag(LOONGARCH_CSR_FWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_FWPS); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPS); + + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK); + 
set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3); +} + +static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long vpid; + struct kvm_context *context; + + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + vpid = context->vpid_cache + 1; + if (!(vpid & vpid_mask)) { + /* finish round of vpid loop */ + if (unlikely(!vpid)) + vpid = vpid_mask + 1; + + ++vpid; /* vpid 0 reserved for root */ + + /* start new vpid cycle */ + kvm_flush_tlb_all(); + } + + context->vpid_cache = vpid; + vcpu->arch.vpid = vpid; +} + +void kvm_check_vpid(struct kvm_vcpu *vcpu) +{ + int cpu; + bool migrated; + unsigned long ver, old, vpid; + struct kvm_context *context; + + cpu = smp_processor_id(); + /* + * Are we entering guest context on a different CPU to last time? + * If so, the vCPU's guest TLB state on this CPU may be stale. + */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + migrated = (vcpu->cpu != cpu); + + /* + * Check if our vpid is of an older version + * + * We also discard the stored vpid if we've executed on + * another CPU, as the guest mappings may have changed without + * hypervisor knowledge. + */ + ver = vcpu->arch.vpid & ~vpid_mask; + old = context->vpid_cache & ~vpid_mask; + if (migrated || (ver != old)) { + kvm_update_vpid(vcpu, cpu); + trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); + vcpu->cpu = cpu; + } + + /* Restore GSTAT(0x50).vpid */ + vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT; + change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid); +} + +static int kvm_loongarch_env_init(void) +{ + int cpu, order; + void *addr; + struct kvm_context *context; + + vmcs = alloc_percpu(struct kvm_context); + if (!vmcs) { + pr_err("kvm: failed to allocate percpu kvm_context\n"); + return -ENOMEM; + } + + kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL); + if (!kvm_loongarch_ops) { + free_percpu(vmcs); + vmcs = NULL; + return -ENOMEM; + } + + /* + * PGD register is shared between root kernel and kvm hypervisor. + * So world switch entry should be in DMW area rather than TLB area + * to avoid page fault reenter. + * + * In future if hardware pagetable walking is supported, we won't + * need to copy world switch code to DMW area. 
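+	 * (DMW here refers to the LoongArch direct-mapping windows set up
+	 * through the CSR.DMWn registers, which translate addresses without
+	 * consulting the TLB.)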
+	 */
+	order = get_order(kvm_exception_size + kvm_enter_guest_size);
+	addr = (void *)__get_free_pages(GFP_KERNEL, order);
+	if (!addr) {
+		free_percpu(vmcs);
+		vmcs = NULL;
+		kfree(kvm_loongarch_ops);
+		kvm_loongarch_ops = NULL;
+		return -ENOMEM;
+	}
+
+	memcpy(addr, kvm_exc_entry, kvm_exception_size);
+	memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
+	flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
+	kvm_loongarch_ops->exc_entry = addr;
+	kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
+	kvm_loongarch_ops->page_order = order;
+
+	vpid_mask = read_csr_gstat();
+	vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
+	if (vpid_mask)
+		vpid_mask = GENMASK(vpid_mask - 1, 0);
+
+	for_each_possible_cpu(cpu) {
+		context = per_cpu_ptr(vmcs, cpu);
+		context->vpid_cache = vpid_mask + 1;
+		context->last_vcpu = NULL;
+	}
+
+	kvm_init_gcsr_flag();
+
+	return 0;
+}
+
+static void kvm_loongarch_env_exit(void)
+{
+	unsigned long addr;
+
+	if (vmcs)
+		free_percpu(vmcs);
+
+	if (kvm_loongarch_ops) {
+		if (kvm_loongarch_ops->exc_entry) {
+			addr = (unsigned long)kvm_loongarch_ops->exc_entry;
+			free_pages(addr, kvm_loongarch_ops->page_order);
+		}
+		kfree(kvm_loongarch_ops);
+	}
+}
+
+static int kvm_loongarch_init(void)
+{
+	int r;
+
+	if (!cpu_has_lvz) {
+		kvm_info("Hardware virtualization not available\n");
+		return -ENODEV;
+	}
+	r = kvm_loongarch_env_init();
+	if (r)
+		return r;
+
+	return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+}
+
+static void kvm_loongarch_exit(void)
+{
+	kvm_exit();
+	kvm_loongarch_env_exit();
+}
+
+module_init(kvm_loongarch_init);
+module_exit(kvm_loongarch_exit);
+
+#ifdef MODULE
+static const struct cpu_feature kvm_feature[] = {
+	{ .feature = cpu_feature(LOONGARCH_LVZ) },
+	{},
+};
+MODULE_DEVICE_TABLE(cpu, kvm_feature);
+#endif
--
Gitee

From 29099a559f63a962d9f6477c99e2242e9e31179f Mon Sep 17 00:00:00 2001
From: Tianrui Zhao
Date: Mon, 2 Oct 2023 10:01:20 +0800
Subject: [PATCH 159/953] LoongArch: KVM: Implement kvm hardware enable,
 disable interface

ANBZ: #8436

commit 0d0df3c99d4fbc6561b0addb094e52f19f3c7baa upstream.

Implement the kvm hardware enable, disable interface, setting the guest
config register to enable virtualization features when the interface is
called.

Reviewed-by: Bibo Mao
Tested-by: Huacai Chen
Signed-off-by: Tianrui Zhao
Signed-off-by: Huacai Chen
Signed-off-by: Xianglai Li
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2814
---
 arch/loongarch/kvm/main.c | 62 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index 267c0505ea89..1c1d5199500e 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -249,6 +249,68 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
 	change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
 }
 
+void kvm_init_vmcs(struct kvm *kvm)
+{
+	kvm->arch.vmcs = vmcs;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+			unsigned int ioctl, unsigned long arg)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_hardware_enable(void)
+{
+	unsigned long env, gcfg = 0;
+
+	env = read_csr_gcfg();
+
+	/* First init gcfg, gstat, gintc, gtlbc.
All guest use the same config */ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* + * Enable virtualization features granting guest direct control of + * certain features: + * GCI=2: Trap on init or unimplement cache instruction. + * TORU=0: Trap on Root Unimplement. + * CACTRL=1: Root control cache. + * TOP=0: Trap on Previlege. + * TOE=0: Trap on Exception. + * TIT=0: Trap on Timer. + */ + if (env & CSR_GCFG_GCIP_ALL) + gcfg |= CSR_GCFG_GCI_SECURE; + if (env & CSR_GCFG_MATC_ROOT) + gcfg |= CSR_GCFG_MATC_ROOT; + + gcfg |= CSR_GCFG_TIT; + write_csr_gcfg(gcfg); + + kvm_flush_tlb_all(); + + /* Enable using TGID */ + set_csr_gtlbc(CSR_GTLBC_USETGID); + kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", + read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + + return 0; +} + +void kvm_arch_hardware_disable(void) +{ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* Flush any remaining guest TLB entries */ + kvm_flush_tlb_all(); +} + static int kvm_loongarch_env_init(void) { int cpu, order; -- Gitee From 015c814a0c4ebaf1e8893d8d12343b84a0ab2039 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 160/953] LoongArch: KVM: Implement VM related functions ANBZ: #8436 commit 482795cb62aa63bbba3a1265fa5b5601be9d13df upstream. Implement LoongArch VM operations: Init and destroy vm interface, allocating memory page to save the vm pgd when init vm. Implement vm check extension, such as getting vcpu number info, memory slots info, and fpu info. And implement vm status description. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vm.c | 94 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 arch/loongarch/kvm/vm.c diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c new file mode 100644 index 000000000000..0a37f6fa8f2d --- /dev/null +++ b/arch/loongarch/kvm/vm.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include + +const struct _kvm_stats_desc kvm_vm_stats_desc[] = { + KVM_GENERIC_VM_STATS(), + STATS_DESC_ICOUNTER(VM, pages), + STATS_DESC_ICOUNTER(VM, hugepages), +}; + +const struct kvm_stats_header kvm_vm_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vm_stats_desc), +}; + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + int i; + + /* Allocate page table to map GPA -> RPA */ + kvm->arch.pgd = kvm_pgd_alloc(); + if (!kvm->arch.pgd) + return -ENOMEM; + + kvm_init_vmcs(kvm); + kvm->arch.gpa_size = BIT(cpu_vabits - 1); + kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; + kvm->arch.invalid_ptes[0] = 0; + kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table; +#if CONFIG_PGTABLE_LEVELS > 2 + kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table; +#endif +#if CONFIG_PGTABLE_LEVELS > 3 + kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table; +#endif + for (i = 0; i <= 
kvm->arch.root_level; i++) + kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3); + + return 0; +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + kvm_destroy_vcpus(kvm); + free_page((unsigned long)kvm->arch.pgd); + kvm->arch.pgd = NULL; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: + case KVM_CAP_READONLY_MEM: + case KVM_CAP_SYNC_MMU: + case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_MP_STATE: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + r = num_online_cpus(); + break; + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_IDS; + break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; + default: + r = 0; + break; + } + + return r; +} + +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -ENOIOCTLCMD; +} -- Gitee From e2a582169c6229fcb40f0ee204621edb5b32ef17 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 161/953] LoongArch: KVM: Add vcpu related header files ANBZ: #8436 commit dfe3dc07fa68f2be1bf8af98656e674e9636d965 upstream. Add LoongArch vcpu related header files, including vcpu csr information, irq number definitions, and some vcpu interfaces. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/include/asm/kvm_csr.h | 211 +++++++++++++++++++++++++ arch/loongarch/include/asm/kvm_vcpu.h | 93 +++++++++++ arch/loongarch/include/asm/loongarch.h | 19 ++- arch/loongarch/kvm/trace.h | 162 +++++++++++++++++++ 4 files changed, 480 insertions(+), 5 deletions(-) create mode 100644 arch/loongarch/include/asm/kvm_csr.h create mode 100644 arch/loongarch/include/asm/kvm_vcpu.h create mode 100644 arch/loongarch/kvm/trace.h diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h new file mode 100644 index 000000000000..724ca8b7b401 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_CSR_H__ +#define __ASM_LOONGARCH_KVM_CSR_H__ + +#include +#include +#include +#include + +#define gcsr_read(csr) \ +({ \ + register unsigned long __v; \ + __asm__ __volatile__( \ + " gcsrrd %[val], %[reg]\n\t" \ + : [val] "=r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +#define gcsr_write(v, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__ ( \ + " gcsrwr %[val], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ +}) + +#define gcsr_xchg(v, m, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__( \ + " gcsrxchg %[val], %[mask], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [mask] "r" (m), [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +/* Guest CSRS read and write */ +#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD) +#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD) +#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD) +#define write_gcsr_prmd(val) gcsr_write(val, LOONGARCH_CSR_PRMD) +#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN) +#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN) +#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC) +#define 
write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC) +#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG) +#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG) +#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT) +#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT) +#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA) +#define write_gcsr_era(val) gcsr_write(val, LOONGARCH_CSR_ERA) +#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV) +#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV) +#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI) +#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI) +#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY) +#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY) + +#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID) +#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID) +#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL) +#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL) +#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH) +#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH) +#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD) +#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD) +#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0) +#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0) +#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1) +#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1) +#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE) +#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE) +#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG) +#define write_gcsr_rvacfg(val) gcsr_write(val, LOONGARCH_CSR_RVACFG) + +#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID) +#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID) +#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1) +#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1) +#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2) +#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2) +#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3) +#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3) + +#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0) +#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0) +#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1) +#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1) +#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2) +#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2) +#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3) +#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3) +#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4) +#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4) +#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5) +#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5) +#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6) +#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6) +#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7) +#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7) + +#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID) +#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID) +#define read_gcsr_timercfg() 
gcsr_read(LOONGARCH_CSR_TCFG) +#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG) +#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL) +#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL) +#define read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC) +#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC) + +#define read_gcsr_llbctl() gcsr_read(LOONGARCH_CSR_LLBCTL) +#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL) + +#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX) +#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX) +#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY) +#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY) +#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV) +#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV) +#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA) +#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA) +#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE) +#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE) +#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0) +#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0) +#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1) +#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1) +#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI) +#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI) +#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD) +#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD) + +#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0) +#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0) +#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1) +#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1) +#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2) +#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2) +#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3) +#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3) + +/* Guest related CSRs */ +#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC) +#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC) +#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP) +#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG) +#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG) +#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT) +#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT) +#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC) +#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC) +#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC) +#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC) + +#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name) + +__BUILD_CSR_OP(gcfg) +__BUILD_CSR_OP(gstat) +__BUILD_CSR_OP(gtlbc) +__BUILD_CSR_OP(gintc) +__BUILD_GCSR_OP(llbctl) +__BUILD_GCSR_OP(tlbidx) + +#define set_gcsr_estat(val) \ + gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT) +#define clear_gcsr_estat(val) \ + gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT) + +#define kvm_read_hw_gcsr(id) gcsr_read(id) +#define kvm_write_hw_gcsr(id, val) gcsr_write(val, id) + +#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid)) +#define 
kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid)) + +int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); + +static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid) +{ + return csr->csrs[gid]; +} + +static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val) +{ + csr->csrs[gid] = val; +} + +static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr, + int gid, unsigned long val) +{ + csr->csrs[gid] |= val; +} + +static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, + int gid, unsigned long mask, unsigned long val) +{ + unsigned long _mask = mask; + + csr->csrs[gid] &= ~_mask; + csr->csrs[gid] |= val & _mask; +} + +#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h new file mode 100644 index 000000000000..553cfa2b2b1c --- /dev/null +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_VCPU_H__ +#define __ASM_LOONGARCH_KVM_VCPU_H__ + +#include +#include + +/* Controlled by 0x5 guest estat */ +#define CPU_SIP0 (_ULCAST_(1)) +#define CPU_SIP1 (_ULCAST_(1) << 1) +#define CPU_PMU (_ULCAST_(1) << 10) +#define CPU_TIMER (_ULCAST_(1) << 11) +#define CPU_IPI (_ULCAST_(1) << 12) + +/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */ +#define CPU_IP0 (_ULCAST_(1)) +#define CPU_IP1 (_ULCAST_(1) << 1) +#define CPU_IP2 (_ULCAST_(1) << 2) +#define CPU_IP3 (_ULCAST_(1) << 3) +#define CPU_IP4 (_ULCAST_(1) << 4) +#define CPU_IP5 (_ULCAST_(1) << 5) +#define CPU_IP6 (_ULCAST_(1) << 6) +#define CPU_IP7 (_ULCAST_(1) << 7) + +#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20) + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24 +#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff +#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16 +#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff +#define KVM_LOONGSON_IRQ_NUM_SHIFT 0 +#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff + +typedef union loongarch_instruction larch_inst; +typedef int (*exit_handle_fn)(struct kvm_vcpu *); + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_emu_idle(struct kvm_vcpu *vcpu); +int kvm_pending_timer(struct kvm_vcpu *vcpu); +int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault); +void kvm_deliver_intr(struct kvm_vcpu *vcpu); +void kvm_deliver_exception(struct kvm_vcpu *vcpu); + +void kvm_own_fpu(struct kvm_vcpu *vcpu); +void kvm_lose_fpu(struct kvm_vcpu *vcpu); +void kvm_save_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fcsr(struct loongarch_fpu *fpu); + +void kvm_acquire_timer(struct kvm_vcpu *vcpu); +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); +void kvm_reset_timer(struct kvm_vcpu *vcpu); +void kvm_save_timer(struct kvm_vcpu *vcpu); +void kvm_restore_timer(struct kvm_vcpu *vcpu); + +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); + +/* + * Loongarch KVM guest interrupt handling + */ +static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + set_bit(irq, &vcpu->arch.irq_pending); + clear_bit(irq, 
&vcpu->arch.irq_clear); +} + +static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + clear_bit(irq, &vcpu->arch.irq_pending); + set_bit(irq, &vcpu->arch.irq_clear); +} + +static inline int kvm_queue_exception(struct kvm_vcpu *vcpu, + unsigned int code, unsigned int subcode) +{ + /* only one exception can be injected */ + if (!vcpu->arch.exception_pending) { + set_bit(code, &vcpu->arch.exception_pending); + vcpu->arch.esubcode = subcode; + return 0; + } else + return -1; +} + +#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index ffa9b1c517ac..67be98049038 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -227,6 +227,7 @@ #define LOONGARCH_CSR_ECFG 0x4 /* Exception config */ #define CSR_ECFG_VS_SHIFT 16 #define CSR_ECFG_VS_WIDTH 3 +#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1) #define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT) #define CSR_ECFG_IM_SHIFT 0 #define CSR_ECFG_IM_WIDTH 14 @@ -315,13 +316,14 @@ #define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT) #define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */ -#define CSR_GTLBC_RID_SHIFT 16 -#define CSR_GTLBC_RID_WIDTH 8 -#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT) +#define CSR_GTLBC_TGID_SHIFT 16 +#define CSR_GTLBC_TGID_WIDTH 8 +#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1) +#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT) #define CSR_GTLBC_TOTI_SHIFT 13 #define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT) -#define CSR_GTLBC_USERID_SHIFT 12 -#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT) +#define CSR_GTLBC_USETGID_SHIFT 12 +#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT) #define CSR_GTLBC_GMTLBSZ_SHIFT 0 #define CSR_GTLBC_GMTLBSZ_WIDTH 6 #define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT) @@ -476,6 +478,7 @@ #define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */ #define CSR_GSTAT_GID_SHIFT 16 #define CSR_GSTAT_GID_WIDTH 8 +#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1) #define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT) #define CSR_GSTAT_GIDBIT_SHIFT 4 #define CSR_GSTAT_GIDBIT_WIDTH 6 @@ -526,6 +529,12 @@ #define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATP_NEST_SHIFT 2 +#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT) +#define CSR_GCFG_MATP_ROOT_SHIFT 1 +#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT) +#define CSR_GCFG_MATP_GUEST_SHIFT 0 +#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT) #define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */ #define CSR_GINTC_HC_SHIFT 16 diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h new file mode 100644 index 000000000000..a1e35d655418 --- /dev/null +++ b/arch/loongarch/kvm/trace.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoints for VM enters + */ +DECLARE_EVENT_CLASS(kvm_transition, + TP_PROTO(struct 
kvm_vcpu *vcpu), + TP_ARGS(vcpu), + TP_STRUCT__entry( + __field(unsigned long, pc) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + ), + + TP_printk("PC: 0x%08lx", __entry->pc) +); + +DEFINE_EVENT(kvm_transition, kvm_enter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_reenter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_out, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +/* Further exit reasons */ +#define KVM_TRACE_EXIT_IDLE 64 +#define KVM_TRACE_EXIT_CACHE 65 + +/* Tracepoints for VM exits */ +#define kvm_trace_symbol_exit_types \ + { KVM_TRACE_EXIT_IDLE, "IDLE" }, \ + { KVM_TRACE_EXIT_CACHE, "CACHE" } + +DECLARE_EVENT_CLASS(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( + __field(unsigned long, pc) + __field(unsigned int, reason) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->reason = reason; + ), + + TP_printk("[%s]PC: 0x%08lx", + __print_symbolic(__entry->reason, + kvm_trace_symbol_exit_types), + __entry->pc) +); + +DEFINE_EVENT(kvm_exit, kvm_exit_idle, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit_cache, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +TRACE_EVENT(kvm_exit_gspr, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word), + TP_ARGS(vcpu, inst_word), + TP_STRUCT__entry( + __field(unsigned int, inst_word) + ), + + TP_fast_assign( + __entry->inst_word = inst_word; + ), + + TP_printk("Inst word: 0x%08x", __entry->inst_word) +); + +#define KVM_TRACE_AUX_SAVE 0 +#define KVM_TRACE_AUX_RESTORE 1 +#define KVM_TRACE_AUX_ENABLE 2 +#define KVM_TRACE_AUX_DISABLE 3 +#define KVM_TRACE_AUX_DISCARD 4 + +#define KVM_TRACE_AUX_FPU 1 + +#define kvm_trace_symbol_aux_op \ + { KVM_TRACE_AUX_SAVE, "save" }, \ + { KVM_TRACE_AUX_RESTORE, "restore" }, \ + { KVM_TRACE_AUX_ENABLE, "enable" }, \ + { KVM_TRACE_AUX_DISABLE, "disable" }, \ + { KVM_TRACE_AUX_DISCARD, "discard" } + +#define kvm_trace_symbol_aux_state \ + { KVM_TRACE_AUX_FPU, "FPU" } + +TRACE_EVENT(kvm_aux, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, + unsigned int state), + TP_ARGS(vcpu, op, state), + TP_STRUCT__entry( + __field(unsigned long, pc) + __field(u8, op) + __field(u8, state) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->op = op; + __entry->state = state; + ), + + TP_printk("%s %s PC: 0x%08lx", + __print_symbolic(__entry->op, + kvm_trace_symbol_aux_op), + __print_symbolic(__entry->state, + kvm_trace_symbol_aux_state), + __entry->pc) +); + +TRACE_EVENT(kvm_vpid_change, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid), + TP_ARGS(vcpu, vpid), + TP_STRUCT__entry( + __field(unsigned long, vpid) + ), + + TP_fast_assign( + __entry->vpid = vpid; + ), + + TP_printk("VPID: 0x%08lx", __entry->vpid) +); + +#endif /* _TRACE_KVM_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include -- Gitee From 3b99d82e4a3775970a84903baf8f27e1f10e9c8b Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 162/953] LoongArch: KVM: Implement basic vcpu interfaces ANBZ: #8436 commit 2fc3bd86db4b6f6992d4b459879a17b2ae6b2b3d 
upstream. Implement basic vcpu interfaces, including: 1, vcpu create and destroy interface, saving info into vcpu arch structure such as vcpu exception entrance, vcpu enter guest pointer, etc. Init vcpu timer and set address translation mode when vcpu create. 2, vcpu run interface, handling mmio, iocsr reading fault and deliver interrupt, lose fpu before vcpu enter guest. 3, vcpu handle exit interface, getting the exit code by ESTAT register and using kvm exception vector to handle it. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 261 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) create mode 100644 arch/loongarch/kvm/vcpu.c diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c new file mode 100644 index 000000000000..349cecca1e62 --- /dev/null +++ b/arch/loongarch/kvm/vcpu.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" + +/* + * kvm_check_requests - check and handle pending vCPU requests + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + */ +static int kvm_check_requests(struct kvm_vcpu *vcpu) +{ + if (!kvm_request_pending(vcpu)) + return RESUME_GUEST; + + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) + vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ + + if (kvm_dirty_ring_check_request(vcpu)) + return RESUME_HOST; + + return RESUME_GUEST; +} + +/* + * Check and handle pending signal and vCPU requests etc + * Run with irq enabled and preempt enabled + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + * < 0 if we should exit to userspace, where the return value + * indicates an error + */ +static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) +{ + int ret; + + /* + * Check conditions before entering the guest + */ + ret = xfer_to_guest_mode_handle_work(vcpu); + if (ret < 0) + return ret; + + ret = kvm_check_requests(vcpu); + + return ret; +} + +/* + * Called with irq enabled + * + * Return: RESUME_GUEST if we should enter the guest, and irq disabled + * Others if we should exit to userspace + */ +static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) +{ + int ret; + + do { + ret = kvm_enter_guest_check(vcpu); + if (ret != RESUME_GUEST) + break; + + /* + * Handle vcpu timer, interrupts, check requests and + * check vmid before vcpu enter guest + */ + local_irq_disable(); + kvm_acquire_timer(vcpu); + kvm_deliver_intr(vcpu); + kvm_deliver_exception(vcpu); + /* Make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, IN_GUEST_MODE); + kvm_check_vpid(vcpu); + vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); + /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */ + vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; + + if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { + /* make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); + local_irq_enable(); + ret = -EAGAIN; + } + } while (ret != RESUME_GUEST); + + return ret; +} + +/* + * Return 1 for resume guest and "<= 0" for resume host. 
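+ * (RESUME_GUEST makes the caller re-enter the guest directly; any other
+ * value propagates back to kvm_arch_vcpu_ioctl_run() and exits to
+ * userspace.)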
+ */ +static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + unsigned long estat = vcpu->arch.host_estat; + u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + + vcpu->mode = OUTSIDE_GUEST_MODE; + + /* Set a default exit reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + + guest_timing_exit_irqoff(); + guest_state_exit_irqoff(); + local_irq_enable(); + + trace_kvm_exit(vcpu, ecode); + if (ecode) { + ret = kvm_handle_fault(vcpu, ecode); + } else { + WARN(!intr, "vm exiting with suspicious irq\n"); + ++vcpu->stat.int_exits; + } + + if (ret == RESUME_GUEST) + ret = kvm_pre_enter_guest(vcpu); + + if (ret != RESUME_GUEST) { + local_irq_disable(); + return ret; + } + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_reenter(vcpu); + + return RESUME_GUEST; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + unsigned long timer_hz; + struct loongarch_csrs *csr; + + vcpu->arch.vpid = 0; + + hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + vcpu->arch.swtimer.function = kvm_swtimer_wakeup; + + vcpu->arch.handle_exit = kvm_handle_exit; + vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; + vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); + if (!vcpu->arch.csr) + return -ENOMEM; + + /* + * All kvm exceptions share one exception entry, and host <-> guest + * switch also switch ECFG.VS field, keep host ECFG.VS info here. + */ + vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); + + /* Init */ + vcpu->arch.last_sched_cpu = -1; + + /* + * Initialize guest register state to valid architectural reset state. + */ + timer_hz = calc_const_freq(); + kvm_init_timer(vcpu, timer_hz); + + /* Set Initialize mode for guest */ + csr = vcpu->arch.csr; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA); + + /* Set cpuid */ + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); + + /* Start with no pending virtual guest interrupts */ + csr->csrs[LOONGARCH_CSR_GINTC] = 0; + + return 0; +} + +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + int cpu; + struct kvm_context *context; + + hrtimer_cancel(&vcpu->arch.swtimer); + kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); + kfree(vcpu->arch.csr); + + /* + * If the vCPU is freed and reused as another vCPU, we don't want the + * matching pointer wrongly hanging around in last_vcpu. 
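 + * Walk every possible CPU below and clear any stale reference before + * the vCPU memory is freed.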
+ */ + for_each_possible_cpu(cpu) { + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (context->last_vcpu == vcpu) + context->last_vcpu = NULL; + } +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + int r = -EINTR; + struct kvm_run *run = vcpu->run; + + if (vcpu->mmio_needed) { + if (!vcpu->mmio_is_write) + kvm_complete_mmio_read(vcpu, run); + vcpu->mmio_needed = 0; + } + + if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) { + if (!run->iocsr_io.is_write) + kvm_complete_iocsr_read(vcpu, run); + } + + if (run->immediate_exit) + return r; + + /* Clear exit_reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + lose_fpu(1); + vcpu_load(vcpu); + kvm_sigset_activate(vcpu); + r = kvm_pre_enter_guest(vcpu); + if (r != RESUME_GUEST) + goto out; + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_enter(vcpu); + r = kvm_loongarch_ops->enter_guest(run, vcpu); + + trace_kvm_out(vcpu); + /* + * Guest exit is already recorded at kvm_handle_exit() + * return value must not be RESUME_GUEST + */ + local_irq_enable(); +out: + kvm_sigset_deactivate(vcpu); + vcpu_put(vcpu); + + return r; +} -- Gitee From dd2e258bb26d1fb34912d91e846574973081479b Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 163/953] LoongArch: KVM: Implement basic vcpu ioctl interfaces ANBZ: #8436 commit f6deff355b5c7072a05232f0861cfdfe372c6bfd upstream. Implement basic vcpu ioctl interfaces, including: 1, vcpu KVM_ENABLE_CAP ioctl interface. 2, vcpu get registers and set registers operations, it is called when user space use the ioctl interface to get or set regs. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 261 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 349cecca1e62..487065565909 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -141,6 +141,267 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) +{ + unsigned long gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_ESTAT) { + /* ESTAT IP0~IP7 get from GINTC */ + gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; + *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); + return 0; + } + + /* + * Get software CSR state since software state is consistent + * with hardware for synchronous ioctl + */ + *val = kvm_read_sw_gcsr(csr, id); + + return 0; +} + +static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) +{ + int ret = 0, gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_ESTAT) { + /* ESTAT IP0~IP7 inject through GINTC */ + gintc = (val >> 2) & 0xff; + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc); + + gintc = val & ~(0xffUL << 2); + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); + + return ret; + } + + kvm_write_sw_gcsr(csr, id, val); + + return ret; +} + +static int kvm_get_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, u64 *v) +{ + int id, ret = 0; + u64 type = reg->id & KVM_REG_LOONGARCH_MASK; + + switch (type) { + case 
KVM_REG_LOONGARCH_CSR: + id = KVM_GET_IOC_CSR_IDX(reg->id); + ret = _kvm_getcsr(vcpu, id, v); + break; + case KVM_REG_LOONGARCH_CPUCFG: + id = KVM_GET_IOC_CPUCFG_IDX(reg->id); + if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) + *v = vcpu->arch.cpucfg[id]; + else + ret = -EINVAL; + break; + case KVM_REG_LOONGARCH_KVM: + switch (reg->id) { + case KVM_REG_LOONGARCH_COUNTER: + *v = drdtime() + vcpu->kvm->arch.time_offset; + break; + default: + ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret = 0; + u64 v, size = reg->id & KVM_REG_SIZE_MASK; + + switch (size) { + case KVM_REG_SIZE_U64: + ret = kvm_get_one_reg(vcpu, reg, &v); + if (ret) + return ret; + ret = put_user(v, (u64 __user *)(long)reg->addr); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_set_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, u64 v) +{ + int id, ret = 0; + u64 type = reg->id & KVM_REG_LOONGARCH_MASK; + + switch (type) { + case KVM_REG_LOONGARCH_CSR: + id = KVM_GET_IOC_CSR_IDX(reg->id); + ret = _kvm_setcsr(vcpu, id, v); + break; + case KVM_REG_LOONGARCH_CPUCFG: + id = KVM_GET_IOC_CPUCFG_IDX(reg->id); + if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) + vcpu->arch.cpucfg[id] = (u32)v; + else + ret = -EINVAL; + break; + case KVM_REG_LOONGARCH_KVM: + switch (reg->id) { + case KVM_REG_LOONGARCH_COUNTER: + /* + * The counter offset is board-relative, not per-vCPU; + * it is only set once, by vCPU 0, on an SMP system + */ + if (vcpu->vcpu_id == 0) + vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); + break; + case KVM_REG_LOONGARCH_VCPU_RESET: + kvm_reset_timer(vcpu); + memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); + memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); + break; + default: + ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret = 0; + u64 v, size = reg->id & KVM_REG_SIZE_MASK; + + switch (size) { + case KVM_REG_SIZE_U64: + ret = get_user(v, (u64 __user *)(long)reg->addr); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + + return kvm_set_one_reg(vcpu, reg, v); +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + regs->gpr[i] = vcpu->arch.gprs[i]; + + regs->pc = vcpu->arch.pc; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + vcpu->arch.gprs[i] = regs->gpr[i]; + + vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ + vcpu->arch.pc = regs->pc; + + return 0; +} + +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + /* FPU is enabled by default, will support LSX/LASX later.
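 + * No capability can be toggled from user space yet, so every request + * is rejected with -EINVAL.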
*/ + return -EINVAL; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + long r; + void __user *argp = (void __user *)arg; + struct kvm_vcpu *vcpu = filp->private_data; + + /* + * Only software CSR should be modified + * + * If any hardware CSR register is modified, vcpu_load/vcpu_put pair + * should be used. Since CSR registers are owned by this vcpu, when + * switching to another vcpu, that vcpu needs to reload the CSR registers. + * + * If software CSR is modified, bit KVM_LARCH_HWCSR_USABLE should + * be cleared in vcpu->arch.aux_inuse, and vcpu_load will check the + * aux_inuse flag and reload CSR registers from software. + */ + + switch (ioctl) { + case KVM_SET_ONE_REG: + case KVM_GET_ONE_REG: { + struct kvm_one_reg reg; + + r = -EFAULT; + if (copy_from_user(&reg, argp, sizeof(reg))) + break; + if (ioctl == KVM_SET_ONE_REG) { + r = kvm_set_reg(vcpu, &reg); + vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; + } else + r = kvm_get_reg(vcpu, &reg); + break; + } + case KVM_ENABLE_CAP: { + struct kvm_enable_cap cap; + + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + break; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } + default: + r = -ENOIOCTLCMD; + break; + } + + return r; +} + int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; -- Gitee From b37282db1a8a5775d9627a37b7af8f990f119fcc Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 164/953] LoongArch: KVM: Implement fpu operations for vcpu ANBZ: #8436 commit 84be4212dcda361b52fc5a071044e5fa237a58d7 upstream. Implement LoongArch fpu-related interfaces for vcpu, such as get fpu, set fpu, own fpu and lose fpu, etc. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 56 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 487065565909..0f19c8b0c028 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -402,6 +402,62 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return r; } +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + int i = 0; + + fpu->fcc = vcpu->arch.fpu.fcc; + fpu->fcsr = vcpu->arch.fpu.fcsr; + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + int i = 0; + + vcpu->arch.fpu.fcc = fpu->fcc; + vcpu->arch.fpu.fcsr = fpu->fcsr; + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); + + return 0; +} + +/* Enable FPU and restore context */ +void kvm_own_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + + /* Enable FPU */ + set_csr_euen(CSR_EUEN_FPEN); + + kvm_restore_fpu(&vcpu->arch.fpu); + vcpu->arch.aux_inuse |= KVM_LARCH_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); + + preempt_enable(); +} + +/* Save context and disable FPU */ +void kvm_lose_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + kvm_save_fpu(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); + + /* Disable FPU */ + clear_csr_euen(CSR_EUEN_FPEN); + } + + preempt_enable(); +} + int
kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; -- Gitee From 5b86223e78cdf7e93a285f0505b5d3a1046a078d Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 165/953] LoongArch: KVM: Implement vcpu interrupt operations ANBZ: #8436 commit f45ad5b8aa9335bc6b30331b739e778f2f730b35 upstream. Implement vcpu interrupt operations such as vcpu set irq and vcpu clear irq, using set_gcsr_estat() to set irq which is parsed by the irq bitmap. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/interrupt.c | 183 +++++++++++++++++++++++++++++++++ arch/loongarch/kvm/vcpu.c | 38 +++++++ 2 files changed, 221 insertions(+) create mode 100644 arch/loongarch/kvm/interrupt.c diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c new file mode 100644 index 000000000000..4c3f22de4b40 --- /dev/null +++ b/arch/loongarch/kvm/interrupt.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +static unsigned int priority_to_irq[EXCCODE_INT_NUM] = { + [INT_TI] = CPU_TIMER, + [INT_IPI] = CPU_IPI, + [INT_SWI0] = CPU_SIP0, + [INT_SWI1] = CPU_SIP1, + [INT_HWI0] = CPU_IP0, + [INT_HWI1] = CPU_IP1, + [INT_HWI2] = CPU_IP2, + [INT_HWI3] = CPU_IP3, + [INT_HWI4] = CPU_IP4, + [INT_HWI5] = CPU_IP5, + [INT_HWI6] = CPU_IP6, + [INT_HWI7] = CPU_IP7, +}; + +static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_pending); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + set_gcsr_estat(irq); + break; + + case INT_HWI0 ... INT_HWI7: + set_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_clear); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + clear_gcsr_estat(irq); + break; + + case INT_HWI0 ... 
INT_HWI7: + clear_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +void kvm_deliver_intr(struct kvm_vcpu *vcpu) +{ + unsigned int priority; + unsigned long *pending = &vcpu->arch.irq_pending; + unsigned long *pending_clr = &vcpu->arch.irq_clear; + + if (!(*pending) && !(*pending_clr)) + return; + + if (*pending_clr) { + priority = __ffs(*pending_clr); + while (priority <= INT_IPI) { + kvm_irq_clear(vcpu, priority); + priority = find_next_bit(pending_clr, + BITS_PER_BYTE * sizeof(*pending_clr), + priority + 1); + } + } + + if (*pending) { + priority = __ffs(*pending); + while (priority <= INT_IPI) { + kvm_irq_deliver(vcpu, priority); + priority = find_next_bit(pending, + BITS_PER_BYTE * sizeof(*pending), + priority + 1); + } + } +} + +int kvm_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(INT_TI, &vcpu->arch.irq_pending); +} + +/* + * Only support illegal instruction or illegal Address Error exception, + * other exceptions are injected by hardware in kvm mode + */ +static void _kvm_deliver_exception(struct kvm_vcpu *vcpu, + unsigned int code, unsigned int subcode) +{ + unsigned long val, vec_size; + + /* + * BADV is added for EXCCODE_ADE exception + * Use PC register (GVA address) if it is an instruction exception + * Else use BADV from host side (GPA address) for a data exception + */ + if (code == EXCCODE_ADE) { + if (subcode == EXSUBCODE_ADEF) + val = vcpu->arch.pc; + else + val = vcpu->arch.badv; + kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val); + } + + /* Set exception instruction */ + kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi); + + /* + * Save CRMD in PRMD + * Set IRQ disabled and PLV0 with CRMD + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD); + kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val); + val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE); + kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val); + + /* Set exception PC address */ + kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc); + + /* + * Set exception code + * Exception and interrupt can be injected at the same time + * Hardware will handle the exception first and then the external interrupt + * Exception code is Ecode in ESTAT[16:21] + * Interrupt code in ESTAT[0:12] + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT); + val = (val & ~CSR_ESTAT_EXC) | code; + kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val); + + /* Calculate exception entry address */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG); + vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT; + if (vec_size) + vec_size = (1 << vec_size) * 4; + val = kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY); + vcpu->arch.pc = val + code * vec_size; +} + +void kvm_deliver_exception(struct kvm_vcpu *vcpu) +{ + unsigned int code; + unsigned long *pending = &vcpu->arch.exception_pending; + + if (*pending) { + code = __ffs(*pending); + _kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode); + *pending = 0; + vcpu->arch.esubcode = 0; + } +} diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 0f19c8b0c028..7576f5a735ea 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -458,6 +458,44 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) preempt_enable(); } +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) +{ + int intr = (int)irq->irq; + + if (intr > 0) + kvm_queue_irq(vcpu, intr); + else if (intr < 0) + kvm_dequeue_irq(vcpu, -intr); + else { + kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq); + return -EINVAL; + } + + kvm_vcpu_kick(vcpu); + + return 0; +} + +long kvm_arch_vcpu_async_ioctl(struct file *filp, +
unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct kvm_vcpu *vcpu = filp->private_data; + + if (ioctl == KVM_INTERRUPT) { + struct kvm_interrupt irq; + + if (copy_from_user(&irq, argp, sizeof(irq))) + return -EFAULT; + + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); + + return kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + + return -ENOIOCTLCMD; +} + int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; -- Gitee From 77cd86cca174ade895540b4c152205f0953c7cba Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 166/953] LoongArch: KVM: Implement vcpu load and vcpu put operations ANBZ: #8436 commit 1f4c39b9892e12385e075efa9cb3f014b700204d upstream. Implement LoongArch vcpu load and vcpu put operations, including load csr value into hardware and save csr value into vcpu structure. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 203 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 203 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 7576f5a735ea..b16fe2913e11 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -569,6 +569,209 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) } } +static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + bool migrated; + struct kvm_context *context; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * Have we migrated to a different CPU? + * If so, any old guest TLB state may be stale. + */ + migrated = (vcpu->arch.last_sched_cpu != cpu); + + /* + * Was this the last vCPU to run on this CPU? + * If not, any old guest state from this vCPU will have been clobbered. 
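 + * In either case KVM_LARCH_HWCSR_USABLE is cleared below, forcing the + * hardware CSRs to be reloaded from the software copy.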
+ */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (migrated || (context->last_vcpu != vcpu)) + vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; + context->last_vcpu = vcpu; + + /* Restore timer state regardless */ + kvm_restore_timer(vcpu); + + /* Control guest page CCA attribute */ + change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + + /* Don't bother restoring registers multiple times unless necessary */ + if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) + return 0; + + write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); + + /* Restore guest CSR registers */ + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + + /* Restore Root.GINTC from unused Guest.GINTC register */ + write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); + + /* + * We should clear linked load bit to break interrupted atomics. This + * prevents a SC on the next vCPU from succeeding by matching a LL on + * the previous vCPU. 
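 + * A VM with a single vCPU can skip this, since no other vCPU can hold + * an open LL/SC sequence.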
+ */ + if (vcpu->kvm->created_vcpus > 1) + set_gcsr_llbctl(CSR_LLBCTL_WCLLB); + + vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; + + return 0; +} + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + if (vcpu->arch.last_sched_cpu != cpu) { + kvm_debug("[%d->%d]KVM vCPU[%d] switch\n", + vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); + /* + * Migrate the timer interrupt to the current CPU so that it + * always interrupts the guest and synchronously triggers a + * guest timer interrupt. + */ + kvm_migrate_count(vcpu); + } + + /* Restore guest state to registers */ + _kvm_vcpu_load(vcpu, cpu); + local_irq_restore(flags); +} + +static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_lose_fpu(vcpu); + + /* + * Update CSR state from hardware if software CSR state is stale, + * most CSR registers are kept unchanged during process context + * switch except CSR registers like remaining timer tick value and + * injected interrupt state. + */ + if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) + goto out; + + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + + vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; + +out: + kvm_save_timer(vcpu); + /* Save Root.GINTC into unused Guest.GINTC register */ + csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); + + return 0; +} + +void kvm_arch_vcpu_put(struct 
kvm_vcpu *vcpu) +{ + int cpu; + unsigned long flags; + + local_irq_save(flags); + cpu = smp_processor_id(); + vcpu->arch.last_sched_cpu = cpu; + + /* Save guest state in registers */ + _kvm_vcpu_put(vcpu, cpu); + local_irq_restore(flags); +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { int r = -EINTR; -- Gitee From 52739e930a27869f296e2bd822bfe5df91988cba Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 167/953] LoongArch: KVM: Implement misc vcpu related interfaces ANBZ: #8436 commit 93a9a197b680dd6b98afb629d2bfb3bd51a83d84 upstream. 1, Implement LoongArch vcpu status descriptions such as idle exits counter, signal exits counter, cpucfg exits counter, etc. 2, Implement some misc vcpu related interfaces, such as vcpu runnable, vcpu should kick, vcpu dump regs, etc. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 120 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 120 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index b16fe2913e11..73d0c2b9c1a5 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -13,6 +13,23 @@ #define CREATE_TRACE_POINTS #include "trace.h" +const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { + KVM_GENERIC_VCPU_STATS(), + STATS_DESC_COUNTER(VCPU, int_exits), + STATS_DESC_COUNTER(VCPU, idle_exits), + STATS_DESC_COUNTER(VCPU, cpucfg_exits), + STATS_DESC_COUNTER(VCPU, signal_exits), +}; + +const struct kvm_stats_header kvm_vcpu_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vcpu_stats_desc), +}; + /* * kvm_check_requests - check and handle pending vCPU requests * @@ -141,6 +158,109 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) return RESUME_GUEST; } +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return !!(vcpu->arch.irq_pending) && + vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return false; +} + +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return kvm_pending_timer(vcpu) || + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); +} + +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) +{ + int i; + + kvm_debug("vCPU Register Dump:\n"); + kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); + kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); + + for (i = 0; i < 32; i += 4) { + kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i, + vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], + vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); + } + + kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n", + kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD), + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT)); + + kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA)); + + return 0; +} + 
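+/* + * Illustrative sketch (not part of this patch): user space reaches the + * mp_state accessors below through the generic KVM vcpu ioctls, e.g.: + * + * struct kvm_mp_state state; + * + * ioctl(vcpu_fd, KVM_GET_MP_STATE, &state); + * state.mp_state = KVM_MP_STATE_RUNNABLE; + * ioctl(vcpu_fd, KVM_SET_MP_STATE, &state); + * + * where vcpu_fd is an assumed vCPU file descriptor; any state other than + * KVM_MP_STATE_RUNNABLE is rejected by the setter below. + */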
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + *mp_state = vcpu->arch.mp_state; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + int ret = 0; + + switch (mp_state->mp_state) { + case KVM_MP_STATE_RUNNABLE: + vcpu->arch.mp_state = *mp_state; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return -EINVAL; +} + +/** + * kvm_migrate_count() - Migrate timer. + * @vcpu: Virtual CPU. + * + * Migrate hrtimer to the current CPU by cancelling and restarting it + * if the hrtimer is active. + * + * Must be called when the vCPU is migrated to a different CPU, so that + * the timer can interrupt the guest at the new CPU, and the timer irq can + * be delivered to the vCPU. + */ +static void kvm_migrate_count(struct kvm_vcpu *vcpu) +{ + if (hrtimer_cancel(&vcpu->arch.swtimer)) + hrtimer_restart(&vcpu->arch.swtimer); +} + static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) { unsigned long gintc; -- Gitee From 0325a05f68c8a8ed4bc2e37ab3f0472f951ac5fe Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 168/953] LoongArch: KVM: Implement vcpu timer operations ANBZ: #8436 commit a5857b9ff6e06cac4adc8d671a74b7739a88623e upstream. Implement LoongArch vcpu timer operations such as init kvm timer, acquire kvm timer, save kvm timer and restore kvm timer. When the vcpu exits, we use a kvm soft timer to emulate the hardware timer. If a timeout happens, the vcpu timer interrupt will be set and handled at the vcpu's next entrance. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/timer.c | 197 +++++++++++++++++++++++++++++++++++++ 1 file changed, 197 insertions(+) create mode 100644 arch/loongarch/kvm/timer.c diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c new file mode 100644 index 000000000000..284bf553fefe --- /dev/null +++ b/arch/loongarch/kvm/timer.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +/* + * ktime_to_tick() - Scale ktime_t to timer tick value. + */ +static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now) +{ + u64 delta; + + delta = ktime_to_ns(now); + return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC); +} + +static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick) +{ + return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz); +} + +/* + * Push timer forward on timeout. + * Handle an hrtimer event by pushing the hrtimer forward a period.
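 + * The period is taken from the guest TCFG register when periodic mode + * is enabled; otherwise the hrtimer is not restarted.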
+ */ +static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu) +{ + unsigned long cfg, period; + + /* Add periodic tick to current expire time */ + cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG); + if (cfg & CSR_TCFG_PERIOD) { + period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL); + hrtimer_add_expires_ns(&vcpu->arch.swtimer, period); + return HRTIMER_RESTART; + } else + return HRTIMER_NORESTART; +} + +/* Low level hrtimer wake routine */ +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu; + + vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer); + kvm_queue_irq(vcpu, INT_TI); + rcuwait_wake_up(&vcpu->wait); + + return kvm_count_timeout(vcpu); +} + +/* + * Initialise the timer to the specified frequency, zero it + */ +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) +{ + vcpu->arch.timer_mhz = timer_hz >> 20; + + /* Starting at 0 */ + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0); +} + +/* + * Restore hard timer state and enable guest to access timer registers + * without trap, should be called with irq disabled + */ +void kvm_acquire_timer(struct kvm_vcpu *vcpu) +{ + unsigned long cfg; + + cfg = read_csr_gcfg(); + if (!(cfg & CSR_GCFG_TIT)) + return; + + /* Enable guest access to hard timer */ + write_csr_gcfg(cfg & ~CSR_GCFG_TIT); + + /* + * Freeze the soft-timer and sync the guest stable timer with it. We do + * this with interrupts disabled to avoid latency. + */ + hrtimer_cancel(&vcpu->arch.swtimer); +} + +/* + * Restore soft timer state from saved context. + */ +void kvm_restore_timer(struct kvm_vcpu *vcpu) +{ + unsigned long cfg, delta, period; + ktime_t expire, now; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * Set guest stable timer cfg csr + */ + cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + if (!(cfg & CSR_TCFG_EN)) { + /* Guest timer is disabled, just restore timer registers */ + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + return; + } + + /* + * Set remainder tick value if not expired + */ + now = ktime_get(); + expire = vcpu->arch.expire; + if (ktime_before(now, expire)) + delta = ktime_to_tick(vcpu, ktime_sub(expire, now)); + else { + if (cfg & CSR_TCFG_PERIOD) { + period = cfg & CSR_TCFG_VAL; + delta = ktime_to_tick(vcpu, ktime_sub(now, expire)); + delta = period - (delta % period); + } else + delta = 0; + /* + * Inject timer here though sw timer should inject timer + * interrupt async already, since sw timer may be cancelled + * during injecting intr async in function kvm_acquire_timer + */ + kvm_queue_irq(vcpu, INT_TI); + } + + write_gcsr_timertick(delta); +} + +/* + * Save guest timer state and switch to software emulation of guest + * timer. The hard timer must already be in use, so preemption should be + * disabled. 
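 + * The computed expiry is kept in vcpu->arch.expire so that + * kvm_restore_timer() can later derive the remaining ticks.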
+ */ +static void _kvm_save_timer(struct kvm_vcpu *vcpu) +{ + unsigned long ticks, delta; + ktime_t expire; + struct loongarch_csrs *csr = vcpu->arch.csr; + + ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); + delta = tick_to_ns(vcpu, ticks); + expire = ktime_add_ns(ktime_get(), delta); + vcpu->arch.expire = expire; + if (ticks) { + /* + * Update hrtimer to use new timeout + * HRTIMER_MODE_PINNED is suggested since the vcpu may run on + * the same physical cpu next time + */ + hrtimer_cancel(&vcpu->arch.swtimer); + hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); + } else + /* + * Inject timer interrupt so that halt polling can detect and exit + */ + kvm_queue_irq(vcpu, INT_TI); +} + +/* + * Save guest timer state and switch to soft guest timer if hard timer was in + * use. + */ +void kvm_save_timer(struct kvm_vcpu *vcpu) +{ + unsigned long cfg; + struct loongarch_csrs *csr = vcpu->arch.csr; + + preempt_disable(); + cfg = read_csr_gcfg(); + if (!(cfg & CSR_GCFG_TIT)) { + /* Disable guest use of hard timer */ + write_csr_gcfg(cfg | CSR_GCFG_TIT); + + /* Save hard timer state */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) + _kvm_save_timer(vcpu); + } + + /* Save timer-related state to vCPU context */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); + preempt_enable(); +} + +void kvm_reset_timer(struct kvm_vcpu *vcpu) +{ + write_gcsr_timercfg(0); + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0); + hrtimer_cancel(&vcpu->arch.swtimer); +} -- Gitee From 1e96f56b07d72698e201c5267b6112db7f19707b Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 169/953] LoongArch: KVM: Implement virtual machine tlb operations ANBZ: #8436 commit d7f4ed4b22908077bd219dd172b27b51927aff6d upstream. Implement LoongArch virtual machine tlb operations such as flush tlb by specific gpa parameter and flush all of the virtual machine's tlbs. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/tlb.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 arch/loongarch/kvm/tlb.c diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c new file mode 100644 index 000000000000..02535df6b51f --- /dev/null +++ b/arch/loongarch/kvm/tlb.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +/* + * kvm_flush_tlb_all() - Flush all root TLB entries for guests. + * + * Invalidate all entries including GVA-->GPA and GPA-->HPA mappings. + */ +void kvm_flush_tlb_all(void) +{ + unsigned long flags; + + local_irq_save(flags); + invtlb_all(INVTLB_ALLGID, 0, 0); + local_irq_restore(flags); +} + +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) +{ + unsigned long flags; + + local_irq_save(flags); + gpa &= (PAGE_MASK << 1); + invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa); + local_irq_restore(flags); +} -- Gitee From 60b999ff659f6a7001d0ca5aabb18300006e8da9 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 170/953] LoongArch: KVM: Implement kvm mmu operations ANBZ: #8436 commit 752e2cd7b4fb412f3e008493e0195e357bab9773 upstream. 
Implement the LoongArch kvm mmu, which is used to translate gpa to hpa when a guest exits because of an address translation exception. This patch implements: allocating the gpa page table, searching gpa mappings in it, and flushing guest gpa mappings in the table. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/include/asm/kvm_mmu.h | 139 ++++ arch/loongarch/kvm/mmu.c | 914 +++++++++++++++++++++++++++ 2 files changed, 1053 insertions(+) create mode 100644 arch/loongarch/include/asm/kvm_mmu.h create mode 100644 arch/loongarch/kvm/mmu.c diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..099bafc6f797 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_mmu.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_MMU_H__ +#define __ASM_LOONGARCH_KVM_MMU_H__ + +#include +#include +#include + +/* + * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels + * for which pages need to be cached. + */ +#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1) + +#define _KVM_FLUSH_PGTABLE 0x1 +#define _KVM_HAS_PGMASK 0x2 +#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) +#define kvm_pte_pfn(x) ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT)) + +typedef unsigned long kvm_pte_t; +typedef struct kvm_ptw_ctx kvm_ptw_ctx; +typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx); + +struct kvm_ptw_ctx { + kvm_pte_ops ops; + unsigned long flag; + + /* for kvm_arch_mmu_enable_log_dirty_pt_masked use */ + unsigned long mask; + unsigned long gfn; + + /* page walk mmu info */ + unsigned int level; + unsigned long pgtable_shift; + unsigned long invalid_entry; + unsigned long *invalid_ptes; + unsigned int *pte_shifts; + void *opaque; + + /* free pte table page list */ + struct list_head list; +}; + +kvm_pte_t *kvm_pgd_alloc(void); + +static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val) +{ + WRITE_ONCE(*ptep, val); +} + +static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; } +static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; } +static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; } +static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; } + +static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte) +{ + return pte | _PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte) +{ + return pte & ~_PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte) +{ + return pte | _PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte) +{ + return pte & ~_PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte) +{ + return pte | _PAGE_HUGE; +} + +static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte) +{ + return pte & ~_PAGE_HUGE; +} + +static inline int kvm_need_flush(kvm_ptw_ctx *ctx) +{ + return ctx->flag & _KVM_FLUSH_PGTABLE; +} + +static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table, + phys_addr_t addr) +{ + + return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1)); +} + +static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t boundary, size; + + size = 0x1UL << 
ctx->pgtable_shift; + boundary = (addr + size) & ~(size - 1); + return (boundary - 1 < end - 1) ? boundary : end; +} + +static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + if (!ctx || ctx->level == 0) + return !!(*entry & _PAGE_PRESENT); + + return *entry != ctx->invalid_entry; +} + +static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + return *entry == ctx->invalid_entry; +} + +static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx) +{ + ctx->level--; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx) +{ + ctx->level++; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */ diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c new file mode 100644 index 000000000000..80480df5f550 --- /dev/null +++ b/arch/loongarch/kvm/mmu.c @@ -0,0 +1,914 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx) +{ + ctx->level = kvm->arch.root_level; + /* pte table */ + ctx->invalid_ptes = kvm->arch.invalid_ptes; + ctx->pte_shifts = kvm->arch.pte_shifts; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; + ctx->opaque = kvm; +} + +/* + * Mark a range of guest physical address space old (all accesses fault) in the + * VM's GPA page table to allow detection of commonly used pages. + */ +static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + if (kvm_pte_young(*pte)) { + *pte = kvm_pte_mkold(*pte); + return 1; + } + + return 0; +} + +/* + * Mark a range of guest physical address space clean (writes fault) in the VM's + * GPA page table to allow dirty page tracking. + */ +static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + gfn_t offset; + kvm_pte_t val; + + val = *pte; + /* + * For kvm_arch_mmu_enable_log_dirty_pt_masked with mask, start and end + * may cross hugepage, for first huge page parameter addr is equal to + * start, however for the second huge page addr is base address of + * this huge page, rather than start or end address + */ + if ((ctx->flag & _KVM_HAS_PGMASK) && !kvm_pte_huge(val)) { + offset = (addr >> PAGE_SHIFT) - ctx->gfn; + if (!(BIT(offset) & ctx->mask)) + return 0; + } + + /* + * Need not split the huge page now, just set the write-protect pte bit; + * the huge page will be split at the next write fault + */ + if (kvm_pte_dirty(val)) { + *pte = kvm_pte_mkclean(val); + return 1; + } + + return 0; +} + +/* + * Clear pte entry + */ +static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + struct kvm *kvm; + + kvm = ctx->opaque; + if (ctx->level) + kvm->stat.hugepages--; + else + kvm->stat.pages--; + + *pte = ctx->invalid_entry; + + return 1; +} + +/* + * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory. + * + * Allocate a blank KVM GPA page directory (PGD) for representing guest physical + * to host physical page mappings. + * + * Returns: Pointer to new KVM GPA page directory. + * NULL on allocation failure.
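 + * The directory is a single page and its entries are initialized via + * pgd_init() before it is returned.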
+ */ +kvm_pte_t *kvm_pgd_alloc(void) +{ + kvm_pte_t *pgd; + + pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0); + if (pgd) + pgd_init((void *)pgd); + + return pgd; +} + +static void _kvm_pte_init(void *addr, unsigned long val) +{ + unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PTE; + do { + p[0] = val; + p[1] = val; + p[2] = val; + p[3] = val; + p[4] = val; + p += 8; + p[-3] = val; + p[-2] = val; + p[-1] = val; + } while (p != end); +} + +/* + * Caller must hold kvm->mm_lock + * + * Walk the page tables of kvm to find the PTE corresponding to the + * address @addr. If page tables don't exist for @addr, they will be created + * from the MMU cache if @cache is not NULL. + */ +static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm, + struct kvm_mmu_memory_cache *cache, + unsigned long addr, int level) +{ + kvm_ptw_ctx ctx; + kvm_pte_t *entry, *child; + + kvm_ptw_prepare(kvm, &ctx); + child = kvm->arch.pgd; + while (ctx.level > level) { + entry = kvm_pgtable_offset(&ctx, child, addr); + if (kvm_pte_none(&ctx, entry)) { + if (!cache) + return NULL; + + child = kvm_mmu_memory_cache_alloc(cache); + _kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]); + kvm_set_pte(entry, __pa(child)); + } else if (kvm_pte_huge(*entry)) { + return entry; + } else + child = (kvm_pte_t *)__va(PHYSADDR(*entry)); + kvm_ptw_enter(&ctx); + } + + entry = kvm_pgtable_offset(&ctx, child, addr); + + return entry; +} + +/* + * Page walker for VM shadow mmu at last level + * The last level is small pte page or huge pmd page + */ +static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = addr + (0x1UL << ctx->pgtable_shift); + if (!kvm_pte_present(ctx, entry)) + continue; + + ret |= ctx->ops(entry, addr, ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page table dir level + */ +static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + if (kvm_pte_huge(*entry)) { + ret |= ctx->ops(entry, addr, ctx); + continue; + } + + kvm_ptw_enter(ctx); + if (ctx->level == 0) + ret |= kvm_ptw_leaf(entry, addr, next, ctx); + else + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page root table + */ +static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next; + 
kvm_pte_t *entry; + + ret = 0; + entry = kvm_pgtable_offset(ctx, dir, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + kvm_ptw_enter(ctx); + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + return ret; +} + +/* + * kvm_flush_range() - Flush a range of guest physical addresses. + * @kvm: KVM pointer. + * @start_gfn: Guest frame number of first page in GPA range to flush. + * @end_gfn: Guest frame number of last page in GPA range to flush. + * @lock: Whether to hold mmu_lock or not + * + * Flushes a range of GPA mappings from the GPA page tables. + */ +static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock) +{ + int ret; + kvm_ptw_ctx ctx; + struct list_head *pos, *temp; + + ctx.ops = kvm_flush_pte; + ctx.flag = _KVM_FLUSH_PGTABLE; + kvm_ptw_prepare(kvm, &ctx); + INIT_LIST_HEAD(&ctx.list); + + if (lock) { + spin_lock(&kvm->mmu_lock); + ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, + end_gfn << PAGE_SHIFT, &ctx); + spin_unlock(&kvm->mmu_lock); + } else + ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, + end_gfn << PAGE_SHIFT, &ctx); + + /* Flush vpid for each vCPU individually */ + if (ret) + kvm_flush_remote_tlbs(kvm); + + /* + * Free the pte table pages after mmu_lock is released; + * the pte table pages are linked together with ctx.list + */ + list_for_each_safe(pos, temp, &ctx.list) { + list_del(pos); + free_page((unsigned long)pos); + } +} + +/* + * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean. + * @kvm: KVM pointer. + * @start_gfn: Guest frame number of first page in GPA range to flush. + * @end_gfn: Guest frame number of last page in GPA range to flush. + * + * Make a range of GPA mappings clean so that guest writes will fault and + * trigger dirty page logging. + * + * The caller must hold the @kvm->mmu_lock spinlock. + * + * Returns: Whether any GPA mappings were modified, which would require + * derived mappings (GVA page tables & TLB entries) to be + * invalidated. + */ +static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) +{ + kvm_ptw_ctx ctx; + + ctx.ops = kvm_mkclean_pte; + ctx.flag = 0; + kvm_ptw_prepare(kvm, &ctx); + return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx); +} + +/* + * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages + * @kvm: The KVM pointer + * @slot: The memory slot associated with mask + * @gfn_offset: The gfn offset in memory slot + * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory + * slot to be write protected + * + * Walks the bits set in @mask and write protects the associated pte's. Caller must + * acquire @kvm->mmu_lock.
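 + * The walk is bounded by the lowest and highest set bits of @mask, so + * at most BITS_PER_LONG pages are touched per call.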
+ */ +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) +{ + kvm_ptw_ctx ctx; + gfn_t base_gfn = slot->base_gfn + gfn_offset; + gfn_t start = base_gfn + __ffs(mask); + gfn_t end = base_gfn + __fls(mask) + 1; + + ctx.ops = kvm_mkclean_pte; + ctx.flag = _KVM_HAS_PGMASK; + ctx.mask = mask; + ctx.gfn = base_gfn; + kvm_ptw_prepare(kvm, &ctx); + + kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx); +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + int needs_flush; + + /* + * If dirty page logging is enabled, write protect all pages in the slot + * ready for dirty logging. + * + * There is no need to do this in any of the following cases: + * CREATE: No dirty mappings will already exist. + * MOVE/DELETE: The old mappings will already have been cleaned up by + * kvm_arch_flush_shadow_memslot() + */ + if (change == KVM_MR_FLAGS_ONLY && + (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && + new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { + spin_lock(&kvm->mmu_lock); + /* Write protect GPA page table entries */ + needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, + new->base_gfn + new->npages); + spin_unlock(&kvm->mmu_lock); + if (needs_flush) + kvm_flush_remote_tlbs(kvm); + } +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0); +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + /* + * The slot has been made invalid (ready for moving or deletion), so we + * need to ensure that it can no longer be accessed by any guest vCPUs. + */ + kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1); +} + +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_flush_pte; + kvm_ptw_prepare(kvm, &ctx); + INIT_LIST_HEAD(&ctx.list); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + unsigned long prot_bits; + kvm_pte_t *ptep; + kvm_pfn_t pfn = pte_pfn(range->arg.pte); + gpa_t gpa = range->start << PAGE_SHIFT; + + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep) + return false; + + /* Replacing an absent or old page doesn't need flushes */ + if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) { + kvm_set_pte(ptep, 0); + return false; + } + + /* Fill new pte if write protected or page migrated */ + prot_bits = _PAGE_PRESENT | __READABLE; + prot_bits |= _CACHE_MASK & pte_val(range->arg.pte); + + /* + * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support + * _PAGE_WRITE for map_page_fast if next page write fault + * _PAGE_DIRTY since gpa has already recorded as dirty page + */ + prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); + kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); + + return true; +} + +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_mkold_pte; + kvm_ptw_prepare(kvm, &ctx); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + gpa_t gpa = range->start << PAGE_SHIFT; + kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + + if (ptep && 
kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep)) + return true; + + return false; +} + +/* + * kvm_map_page_fast() - Fast path GPA fault handler. + * @vcpu: vCPU pointer. + * @gpa: Guest physical address of fault. + * @write: Whether the fault was due to a write. + * + * Perform fast path GPA fault handling, doing all that can be done without + * calling into KVM. This handles marking old pages young (for idle page + * tracking), and dirtying of clean pages (for dirty page logging). + * + * Returns: 0 on success, in which case we can update derived mappings and + * resume guest execution. + * -EFAULT on failure due to absent GPA mapping or write to + * read-only page, in which case KVM must be consulted. + */ +static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + int ret = 0; + kvm_pfn_t pfn = 0; + kvm_pte_t *ptep, changed, new; + gfn_t gfn = gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *slot; + + spin_lock(&kvm->mmu_lock); + + /* Fast path - just check GPA page table for an existing entry */ + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep || !kvm_pte_present(NULL, ptep)) { + ret = -EFAULT; + goto out; + } + + /* Track access to pages marked old */ + new = *ptep; + if (!kvm_pte_young(new)) + new = kvm_pte_mkyoung(new); + /* call kvm_set_pfn_accessed() after unlock */ + + if (write && !kvm_pte_dirty(new)) { + if (!kvm_pte_write(new)) { + ret = -EFAULT; + goto out; + } + + if (kvm_pte_huge(new)) { + /* + * Do not set write permission when dirty logging is + * enabled for HugePages + */ + slot = gfn_to_memslot(kvm, gfn); + if (kvm_slot_dirty_track_enabled(slot)) { + ret = -EFAULT; + goto out; + } + } + + /* Track dirtying of writeable pages */ + new = kvm_pte_mkdirty(new); + } + + changed = new ^ (*ptep); + if (changed) { + kvm_set_pte(ptep, new); + pfn = kvm_pte_pfn(new); + } + spin_unlock(&kvm->mmu_lock); + + /* + * Fixme: pfn may be freed after mmu_lock + * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this? + */ + if (kvm_pte_young(changed)) + kvm_set_pfn_accessed(pfn); + + if (kvm_pte_dirty(changed)) { + mark_page_dirty(kvm, gfn); + kvm_set_pfn_dirty(pfn); + } + return ret; +out: + spin_unlock(&kvm->mmu_lock); + return ret; +} + +static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, unsigned long map_size, bool write) +{ + size_t size; + gpa_t gpa_start; + hva_t uaddr_start, uaddr_end; + + /* Disable dirty logging on HugePages */ + if (kvm_slot_dirty_track_enabled(memslot) && write) + return false; + + size = memslot->npages * PAGE_SIZE; + gpa_start = memslot->base_gfn << PAGE_SHIFT; + uaddr_start = memslot->userspace_addr; + uaddr_end = uaddr_start + size; + + /* + * Pages belonging to memslots that don't have the same alignment + * within a PMD for userspace and GPA cannot be mapped with stage-2 + * PMD entries, because we'll end up mapping the wrong pages. 
+ * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SIZE: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this incorrect + * mapping: + * d -> f + * e -> g + * f -> h + */ + if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + return false; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). + * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva & ~(map_size - 1)) >= uaddr_start && + (hva & ~(map_size - 1)) + map_size <= uaddr_end; +} + +/* + * Lookup the mapping level for @gfn in the current mm. + * + * WARNING! Use of host_pfn_mapping_level() requires the caller and the end + * consumer to be tied into KVM's handlers for MMU notifier events! + * + * There are several ways to safely use this helper: + * + * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before + * consuming it. In this case, mmu_lock doesn't need to be held during the + * lookup, but it does need to be held while checking the MMU notifier. + * + * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation + * event for the hva. This can be done by explicit checking the MMU notifier + * or by ensuring that KVM already has a valid mapping that covers the hva. + * + * - Do not use the result to install new mappings, e.g. use the host mapping + * level only to decide whether or not to zap an entry. In this case, it's + * not required to hold mmu_lock (though it's highly likely the caller will + * want to hold mmu_lock anyways, e.g. to modify SPTEs). + * + * Note! The lookup can still race with modifications to host page tables, but + * the above "rules" ensure KVM will not _consume_ the result of the walk if a + * race with the primary MMU occurs. + */ +static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = 0; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. 
Re-reading the entry could send the
+	 * walk into the weeds, e.g. p*d_large() returns false (sees the old
+	 * value) and then p*d_offset() walks into the target huge page instead
+	 * of the old page table (sees the new value).
+	 */
+	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+	if (pgd_none(pgd))
+		goto out;
+
+	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+	if (p4d_none(p4d) || !p4d_present(p4d))
+		goto out;
+
+	pud = READ_ONCE(*pud_offset(&p4d, hva));
+	if (pud_none(pud) || !pud_present(pud))
+		goto out;
+
+	pmd = READ_ONCE(*pmd_offset(&pud, hva));
+	if (pmd_none(pmd) || !pmd_present(pmd))
+		goto out;
+
+	if (kvm_pte_huge(pmd_val(pmd)))
+		level = 1;
+
+out:
+	local_irq_restore(flags);
+	return level;
+}
+
+/*
+ * Split a huge page
+ */
+static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn)
+{
+	int i;
+	kvm_pte_t val, *child;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_mmu_memory_cache *memcache;
+
+	memcache = &vcpu->arch.mmu_page_cache;
+	child = kvm_mmu_memory_cache_alloc(memcache);
+	val = kvm_pte_mksmall(*ptep);
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		kvm_set_pte(child + i, val);
+		val += PAGE_SIZE;
+	}
+
+	/* The later kvm_flush_tlb_gpa() will flush the hugepage TLB entry */
+	kvm_set_pte(ptep, __pa(child));
+
+	kvm->stat.hugepages--;
+	kvm->stat.pages += PTRS_PER_PTE;
+
+	return child + (gfn & (PTRS_PER_PTE - 1));
+}
+
+/*
+ * kvm_map_page() - Map a guest physical page.
+ * @vcpu:	vCPU pointer.
+ * @gpa:	Guest physical address of fault.
+ * @write:	Whether the fault was due to a write.
+ *
+ * Handle GPA faults by creating a new GPA mapping (or updating an existing
+ * one).
+ *
+ * This takes care of marking pages young or dirty (idle/dirty page tracking),
+ * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
+ * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
+ * caller.
+ *
+ * Returns:	0 on success
+ *		-EFAULT if there is no memory region at @gpa or a write was
+ *		attempted to a read-only memory region. This is usually handled
+ *		as an MMIO access.
+ */
+static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
+{
+	bool writeable;
+	int srcu_idx, err, retry_no = 0, level;
+	unsigned long hva, mmu_seq, prot_bits;
+	kvm_pfn_t pfn;
+	kvm_pte_t *ptep, new_pte;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *memslot;
+	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+
+	/* Try the fast path to handle old / clean pages */
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	err = kvm_map_page_fast(vcpu, gpa, write);
+	if (!err)
+		goto out;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
+	if (kvm_is_error_hva(hva) || (write && !writeable)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	/* We need a minimum of cached pages ready for page table creation */
+	err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
+	if (err)
+		goto out;
+
+retry:
+	/*
+	 * Used to check for invalidations in progress, of the pfn that is
+	 * returned by gfn_to_pfn_prot() below.
+	 */
+	mmu_seq = kvm->mmu_invalidate_seq;
+	/*
+	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
+	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+	 * risk the page we get a reference to getting unmapped before we have a
+	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
+	 *
+	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
+	 * of the pte_unmap_unlock() after the PTE is zapped, and the
+	 * spin_lock() in kvm_mmu_notifier_invalidate_range_end() before
+	 * mmu_invalidate_seq is incremented.
+	 */
+	smp_rmb();
+
+	/* Slow path - ask KVM core whether we can access this GPA */
+	pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
+	if (is_error_noslot_pfn(pfn)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	/* Check if an invalidation has taken place since we got pfn */
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+		/*
+		 * This can happen when mappings are changed asynchronously, but
+		 * also synchronously if a COW is triggered by
+		 * gfn_to_pfn_prot().
+		 */
+		spin_unlock(&kvm->mmu_lock);
+		kvm_release_pfn_clean(pfn);
+		if (retry_no > 100) {
+			retry_no = 0;
+			schedule();
+		}
+		retry_no++;
+		goto retry;
+	}
+
+	/*
+	 * For emulated devices such as a virtio device, the actual cache
+	 * attribute is determined by the physical machine.
+	 * For a passed-through physical device, it should be uncacheable.
+	 */
+	prot_bits = _PAGE_PRESENT | __READABLE;
+	if (pfn_valid(pfn))
+		prot_bits |= _CACHE_CC;
+	else
+		prot_bits |= _CACHE_SUC;
+
+	if (writeable) {
+		prot_bits |= _PAGE_WRITE;
+		if (write)
+			prot_bits |= __WRITEABLE;
+	}
+
+	/* Disable dirty logging on HugePages */
+	level = 0;
+	if (!fault_supports_huge_mapping(memslot, hva, PMD_SIZE, write)) {
+		level = 0;
+	} else {
+		level = host_pfn_mapping_level(kvm, gfn, memslot);
+		if (level == 1) {
+			gfn = gfn & ~(PTRS_PER_PTE - 1);
+			pfn = pfn & ~(PTRS_PER_PTE - 1);
+		}
+	}
+
+	/* Ensure page tables are allocated */
+	ptep = kvm_populate_gpa(kvm, memcache, gpa, level);
+	new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits));
+	if (level == 1) {
+		new_pte = kvm_pte_mkhuge(new_pte);
+		/*
+		 * The previous PMD entry was invalid_pte_table, so there may
+		 * be stale small-page TLB entries that need to be flushed
+		 * for the current vCPU.
+		 */
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		++kvm->stat.hugepages;
+	} else if (kvm_pte_huge(*ptep) && write)
+		ptep = kvm_split_huge(vcpu, ptep, gfn);
+	else
+		++kvm->stat.pages;
+	kvm_set_pte(ptep, new_pte);
+	spin_unlock(&kvm->mmu_lock);
+
+	if (prot_bits & _PAGE_DIRTY) {
+		mark_page_dirty_in_slot(kvm, memslot, gfn);
+		kvm_set_pfn_dirty(pfn);
+	}
+
+	kvm_set_pfn_accessed(pfn);
+	kvm_release_pfn_clean(pfn);
+out:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return err;
+}
+
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
+{
+	int ret;
+
+	ret = kvm_map_page(vcpu, gpa, write);
+	if (ret)
+		return ret;
+
+	/* Invalidate this entry in the TLB */
+	kvm_flush_tlb_gpa(vcpu, gpa);
+
+	return 0;
+}
+
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new, enum kvm_mr_change change)
+{
+	return 0;
+}
+
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					const struct kvm_memory_slot *memslot)
+{
+	kvm_flush_remote_tlbs(kvm);
+}
-- 
Gitee

From f9069ad20029a8a6a9a65981d64fffa2613b66e6 Mon Sep 17 00:00:00 2001
From: Tianrui Zhao
Date: Mon, 2 Oct 2023 10:01:28 +0800
Subject: [PATCH 171/953] LoongArch: KVM: Implement handle csr exception

ANBZ: #8436

commit da50f5a693ff55ef367a4c5c9145f0bfea3e476d upstream.

Implement KVM handling of LoongArch vCPU exits caused by the guest
reading, writing and exchanging CSRs. The kvm_vcpu_arch::csr structure
is used to emulate the software registers.
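For illustration, the semantics the new handler implements can be
modeled in a few lines of C (a minimal sketch only; emu_csr_op() and its
argument names are not part of this patch):

	/*
	 * Model of the rj-based dispatch in kvm_handle_csr() below:
	 * rj == 0 is csrrd, rj == 1 is csrwr, anything else is csrxchg.
	 * 'csr' stands for one entry of the software CSR storage,
	 * rd_val for gprs[rd], and mask for gprs[rj] in the csrxchg case.
	 */
	static unsigned long emu_csr_op(unsigned long *csr, unsigned int rj,
					unsigned long rd_val, unsigned long mask)
	{
		unsigned long old = *csr;

		if (rj == 0)		/* csrrd: rd <- csr */
			return old;
		if (rj == 1) {		/* csrwr: rd <- old, csr <- rd */
			*csr = rd_val;
			return old;
		}
		/* csrxchg: replace only the bits selected by the mask */
		*csr = (old & ~mask) | (rd_val & mask);
		return old & mask;
	}

As in kvm_emu_xchg_csr() below, the csrxchg case hands back the old
value masked by the exchange mask.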
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 105 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 arch/loongarch/kvm/exit.c diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c new file mode 100644 index 000000000000..37bc8a4209bd --- /dev/null +++ b/arch/loongarch/kvm/exit.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) +{ + unsigned long val = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * From LoongArch Reference Manual Volume 1 Chapter 4.2.1 + * For undefined CSR id, return value is 0 + */ + if (get_gcsr_flag(csrid) & SW_GCSR) + val = kvm_read_sw_gcsr(csr, csrid); + else + pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return val; +} + +static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + kvm_write_sw_gcsr(csr, csrid, val); + } else + pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, + unsigned long csr_mask, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + val = (old & ~csr_mask) | (val & csr_mask); + kvm_write_sw_gcsr(csr, csrid, val); + old = old & csr_mask; + } else + pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) +{ + unsigned int rd, rj, csrid; + unsigned long csr_mask, val = 0; + + /* + * CSR value mask imm + * rj = 0 means csrrd + * rj = 1 means csrwr + * rj != 0,1 means csrxchg + */ + rd = inst.reg2csr_format.rd; + rj = inst.reg2csr_format.rj; + csrid = inst.reg2csr_format.csr; + + /* Process CSR ops */ + switch (rj) { + case 0: /* process csrrd */ + val = kvm_emu_read_csr(vcpu, csrid); + vcpu->arch.gprs[rd] = val; + break; + case 1: /* process csrwr */ + val = vcpu->arch.gprs[rd]; + val = kvm_emu_write_csr(vcpu, csrid, val); + vcpu->arch.gprs[rd] = val; + break; + default: /* process csrxchg */ + val = vcpu->arch.gprs[rd]; + csr_mask = vcpu->arch.gprs[rj]; + val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val); + vcpu->arch.gprs[rd] = val; + } + + return EMULATE_DONE; +} -- Gitee From 61ff01d740581c7b879da681e1801659615822db Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 172/953] LoongArch: KVM: Implement handle iocsr exception ANBZ: #8436 commit 81efe043a35113ec1352f6eb2b954d02aac368db upstream. Implement kvm handle vcpu iocsr exception, setting the iocsr info into vcpu_run and return to user space to handle it. 
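On the user-space side this pairs with a small amount of VMM code;
roughly the following (a sketch, assuming the iocsr_io layout this
series adds to struct kvm_run; emulate_iocsr_read()/emulate_iocsr_write()
stand in for a hypothetical device model):

	#include <stdint.h>
	#include <string.h>
	#include <linux/kvm.h>

	extern uint64_t emulate_iocsr_read(uint64_t addr, uint32_t len);
	extern void emulate_iocsr_write(uint64_t addr, uint64_t val, uint32_t len);

	/* Complete a KVM_EXIT_LOONGARCH_IOCSR exit before re-entering KVM_RUN */
	static void handle_iocsr_exit(struct kvm_run *run)
	{
		uint64_t val = 0;

		if (run->iocsr_io.is_write) {
			memcpy(&val, run->iocsr_io.data, run->iocsr_io.len);
			emulate_iocsr_write(run->iocsr_io.phys_addr, val,
					    run->iocsr_io.len);
		} else {
			val = emulate_iocsr_read(run->iocsr_io.phys_addr,
						 run->iocsr_io.len);
			memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
		}
	}

On the next KVM_RUN, kvm_complete_iocsr_read() sign-extends
run->iocsr_io.data into the guest register saved in vcpu->arch.io_gpr.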
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/include/asm/inst.h | 16 ++++++ arch/loongarch/kvm/exit.c | 91 +++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 71e1ed4165c8..008a88ead60d 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -65,6 +65,14 @@ enum reg2_op { revbd_op = 0x0f, revh2w_op = 0x10, revhd_op = 0x11, + iocsrrdb_op = 0x19200, + iocsrrdh_op = 0x19201, + iocsrrdw_op = 0x19202, + iocsrrdd_op = 0x19203, + iocsrwrb_op = 0x19204, + iocsrwrh_op = 0x19205, + iocsrwrw_op = 0x19206, + iocsrwrd_op = 0x19207, }; enum reg2i5_op { @@ -318,6 +326,13 @@ struct reg2bstrd_format { unsigned int opcode : 10; }; +struct reg2csr_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int csr : 14; + unsigned int opcode : 8; +}; + struct reg3_format { unsigned int rd : 5; unsigned int rj : 5; @@ -346,6 +361,7 @@ union loongarch_instruction { struct reg2i14_format reg2i14_format; struct reg2i16_format reg2i16_format; struct reg2bstrd_format reg2bstrd_format; + struct reg2csr_format reg2csr_format; struct reg3_format reg3_format; struct reg3sa2_format reg3sa2_format; }; diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 37bc8a4209bd..7e729dd9e915 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -103,3 +103,94 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) return EMULATE_DONE; } + +int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret; + unsigned long val; + u32 addr, rd, rj, opcode; + + /* + * Each IOCSR with different opcode + */ + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + opcode = inst.reg2_format.opcode; + addr = vcpu->arch.gprs[rj]; + ret = EMULATE_DO_IOCSR; + run->iocsr_io.phys_addr = addr; + run->iocsr_io.is_write = 0; + + /* LoongArch is Little endian */ + switch (opcode) { + case iocsrrdb_op: + run->iocsr_io.len = 1; + break; + case iocsrrdh_op: + run->iocsr_io.len = 2; + break; + case iocsrrdw_op: + run->iocsr_io.len = 4; + break; + case iocsrrdd_op: + run->iocsr_io.len = 8; + break; + case iocsrwrb_op: + run->iocsr_io.len = 1; + run->iocsr_io.is_write = 1; + break; + case iocsrwrh_op: + run->iocsr_io.len = 2; + run->iocsr_io.is_write = 1; + break; + case iocsrwrw_op: + run->iocsr_io.len = 4; + run->iocsr_io.is_write = 1; + break; + case iocsrwrd_op: + run->iocsr_io.len = 8; + run->iocsr_io.is_write = 1; + break; + default: + ret = EMULATE_FAIL; + break; + } + + if (ret == EMULATE_DO_IOCSR) { + if (run->iocsr_io.is_write) { + val = vcpu->arch.gprs[rd]; + memcpy(run->iocsr_io.data, &val, run->iocsr_io.len); + } + vcpu->arch.io_gpr = rd; + } + + return ret; +} + +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + switch (run->iocsr_io.len) { + case 1: + *gpr = *(s8 *)run->iocsr_io.data; + break; + case 2: + *gpr = *(s16 *)run->iocsr_io.data; + break; + case 4: + *gpr = *(s32 *)run->iocsr_io.data; + break; + case 8: + *gpr = *(s64 *)run->iocsr_io.data; + break; + default: + kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n", + run->iocsr_io.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + return er; +} -- 
Gitee From e3754c2909c65b6574ea600ac461b4e663b22122 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 173/953] LoongArch: KVM: Implement handle idle exception ANBZ: #8436 commit f41c8bdbbdbe73343d4842e580c6ab9db9d84171 upstream. Implement kvm handle LoongArch vcpu idle exception, using kvm_vcpu_block to emulate it. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 7e729dd9e915..d4d7e74f72af 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -194,3 +194,23 @@ int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run) return er; } + +int kvm_emu_idle(struct kvm_vcpu *vcpu) +{ + ++vcpu->stat.idle_exits; + trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE); + + if (!kvm_arch_vcpu_runnable(vcpu)) { + /* + * Switch to the software timer before halt-polling/blocking as + * the guest's timer may be a break event for the vCPU, and the + * hypervisor timer runs only when the CPU is in guest mode. + * Switch before halt-polling so that KVM recognizes an expired + * timer before blocking. + */ + kvm_save_timer(vcpu); + kvm_vcpu_block(vcpu); + } + + return EMULATE_DONE; +} -- Gitee From 99fc163a29d1bf0d56fa1bb3b554cbc4790efc36 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 174/953] LoongArch: KVM: Implement handle gspr exception ANBZ: #8436 commit 13c82f5e6e5088a2998036714239cf00e48f5c10 upstream. Implement kvm handle gspr exception interface, including emulate the reading and writing of cpucfg, csr, iocsr resource. 
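The CPUCFG leg of this handler is reached when guest code issues a
cpucfg instruction that hardware does not resolve directly; for example
(a guest-side sketch, where read_cpucfg() is illustrative and not part
of this patch):

	/* Guest code: query a CPUCFG word; traps to the GSPR handler */
	static inline unsigned int read_cpucfg(unsigned int index)
	{
		unsigned int val;

		asm volatile("cpucfg %0, %1" : "=r"(val) : "r"(index));
		return val;
	}

The handler then returns vcpu->arch.cpucfg[index] for
index < KVM_MAX_CPUCFG_REGS and 0 for any other index, matching the
architectural rule that undefined CPUCFG words read as 0.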
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 107 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index d4d7e74f72af..33d1b4190a62 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -214,3 +214,110 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) return EMULATE_DONE; } + +static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +{ + int rd, rj; + unsigned int index; + unsigned long curr_pc; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + + /* Fetch the instruction */ + inst.word = vcpu->arch.badi; + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + trace_kvm_exit_gspr(vcpu, inst.word); + er = EMULATE_FAIL; + switch (((inst.word >> 24) & 0xff)) { + case 0x0: /* CPUCFG GSPR */ + if (inst.reg2_format.opcode == 0x1B) { + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + ++vcpu->stat.cpucfg_exits; + index = vcpu->arch.gprs[rj]; + er = EMULATE_DONE; + /* + * By LoongArch Reference Manual 2.2.10.5 + * return value is 0 for undefined cpucfg index + */ + if (index < KVM_MAX_CPUCFG_REGS) + vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + else + vcpu->arch.gprs[rd] = 0; + } + break; + case 0x4: /* CSR{RD,WR,XCHG} GSPR */ + er = kvm_handle_csr(vcpu, inst); + break; + case 0x6: /* Cache, Idle and IOCSR GSPR */ + switch (((inst.word >> 22) & 0x3ff)) { + case 0x18: /* Cache GSPR */ + er = EMULATE_DONE; + trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE); + break; + case 0x19: /* Idle/IOCSR GSPR */ + switch (((inst.word >> 15) & 0x1ffff)) { + case 0xc90: /* IOCSR GSPR */ + er = kvm_emu_iocsr(inst, run, vcpu); + break; + case 0xc91: /* Idle GSPR */ + er = kvm_emu_idle(vcpu); + break; + default: + er = EMULATE_FAIL; + break; + } + break; + default: + er = EMULATE_FAIL; + break; + } + break; + default: + er = EMULATE_FAIL; + break; + } + + /* Rollback PC only if emulation was unsuccessful */ + if (er == EMULATE_FAIL) { + kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n", + curr_pc, __func__, inst.word); + + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->arch.pc = curr_pc; + } + + return er; +} + +/* + * Trigger GSPR: + * 1) Execute CPUCFG instruction; + * 2) Execute CACOP/IDLE instructions; + * 3) Access to unimplemented CSRs/IOCSRs. + */ +static int kvm_handle_gspr(struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + enum emulation_result er = EMULATE_DONE; + + er = kvm_trap_handle_gspr(vcpu); + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + vcpu->run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else if (er == EMULATE_DO_IOCSR) { + vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + ret = RESUME_GUEST; + } + + return ret; +} -- Gitee From bb46fd28303531dbb9a1dcd740467c15115b0c19 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 175/953] LoongArch: KVM: Implement handle mmio exception ANBZ: #8436 commit d5b65882d57c91e5fec8c4c0b0a0a88f343b4525 upstream. Implement handle mmio exception, setting the mmio info into vcpu_run and return to user space to handle it. 
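The user-space half follows the generic KVM MMIO contract; roughly (a
sketch, with mmio_read()/mmio_write() standing in for a hypothetical
device model):

	#include <stdint.h>
	#include <string.h>
	#include <linux/kvm.h>

	extern uint64_t mmio_read(uint64_t addr, uint32_t len);
	extern void mmio_write(uint64_t addr, uint64_t val, uint32_t len);

	/* Complete a KVM_EXIT_MMIO exit before the next KVM_RUN */
	static void handle_mmio_exit(struct kvm_run *run)
	{
		uint64_t val = 0;

		if (run->mmio.is_write) {
			memcpy(&val, run->mmio.data, run->mmio.len);
			mmio_write(run->mmio.phys_addr, val, run->mmio.len);
		} else {
			val = mmio_read(run->mmio.phys_addr, run->mmio.len);
			memcpy(run->mmio.data, &val, run->mmio.len);
		}
	}

For reads, kvm_complete_mmio_read() then loads run->mmio.data into the
guest register recorded in vcpu->arch.io_gpr, applying sign or zero
extension according to vcpu->mmio_needed (2 for signed, 1 for unsigned).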
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 310 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 310 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 33d1b4190a62..c31894b75b07 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -321,3 +321,313 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu) return ret; } + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int op8, opcode, rd; + struct kvm_run *run = vcpu->run; + + run->mmio.phys_addr = vcpu->arch.badv; + vcpu->mmio_needed = 2; /* signed */ + op8 = (inst.word >> 24) & 0xff; + ret = EMULATE_DO_MMIO; + + switch (op8) { + case 0x24 ... 0x27: /* ldptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case ldptrw_op: + run->mmio.len = 4; + break; + case ldptrd_op: + run->mmio.len = 8; + break; + default: + break; + } + break; + case 0x28 ... 0x2e: /* ld.b/h/w/d, ld.bu/hu/wu process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + + switch (opcode) { + case ldb_op: + run->mmio.len = 1; + break; + case ldbu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 1; + break; + case ldh_op: + run->mmio.len = 2; + break; + case ldhu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 2; + break; + case ldw_op: + run->mmio.len = 4; + break; + case ldwu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 4; + break; + case ldd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* ldx.b/h/w/d, ldx.bu/hu/wu process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case ldxb_op: + run->mmio.len = 1; + break; + case ldxbu_op: + run->mmio.len = 1; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxh_op: + run->mmio.len = 2; + break; + case ldxhu_op: + run->mmio.len = 2; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxw_op: + run->mmio.len = 4; + break; + case ldxwu_op: + run->mmio.len = 4; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + /* Set for kvm_complete_mmio_read() use */ + vcpu->arch.io_gpr = rd; + run->mmio.is_write = 0; + vcpu->mmio_is_write = 0; + } else { + kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->mmio_needed = 0; + } + + return ret; +} + +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + /* Update with new PC */ + update_pc(&vcpu->arch); + switch (run->mmio.len) { + case 1: + if (vcpu->mmio_needed == 2) + *gpr = *(s8 *)run->mmio.data; + else + *gpr = *(u8 *)run->mmio.data; + break; + case 2: + if (vcpu->mmio_needed == 2) + *gpr = *(s16 *)run->mmio.data; + else + *gpr = *(u16 *)run->mmio.data; + break; + case 4: + if (vcpu->mmio_needed == 2) + *gpr = *(s32 *)run->mmio.data; + else + *gpr = *(u32 *)run->mmio.data; + break; + case 8: + *gpr = *(s64 *)run->mmio.data; + break; + default: + kvm_err("Bad MMIO length: %d, addr is 
0x%lx\n", + run->mmio.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + return er; +} + +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int rd, op8, opcode; + unsigned long curr_pc, rd_val = 0; + struct kvm_run *run = vcpu->run; + void *data = run->mmio.data; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + op8 = (inst.word >> 24) & 0xff; + run->mmio.phys_addr = vcpu->arch.badv; + ret = EMULATE_DO_MMIO; + switch (op8) { + case 0x24 ... 0x27: /* stptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case stptrw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stptrd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x28 ... 0x2e: /* st.b/h/w/d process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + rd_val = vcpu->arch.gprs[rd]; + + switch (opcode) { + case stb_op: + run->mmio.len = 1; + *(unsigned char *)data = rd_val; + break; + case sth_op: + run->mmio.len = 2; + *(unsigned short *)data = rd_val; + break; + case stw_op: + run->mmio.len = 4; + *(unsigned int *)data = rd_val; + break; + case std_op: + run->mmio.len = 8; + *(unsigned long *)data = rd_val; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* stx.b/h/w/d process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case stxb_op: + run->mmio.len = 1; + *(unsigned char *)data = vcpu->arch.gprs[rd]; + break; + case stxh_op: + run->mmio.len = 2; + *(unsigned short *)data = vcpu->arch.gprs[rd]; + break; + case stxw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stxd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + } else { + vcpu->arch.pc = curr_pc; + kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + /* Rollback PC if emulation was unsuccessful */ + } + + return ret; +} + +static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) +{ + int ret; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + unsigned long badv = vcpu->arch.badv; + + ret = kvm_handle_mm_fault(vcpu, badv, write); + if (ret) { + /* Treat as MMIO */ + inst.word = vcpu->arch.badi; + if (write) { + er = kvm_emu_mmio_write(vcpu, inst); + } else { + /* A code fetch fault doesn't count as an MMIO */ + if (kvm_is_ifetch_fault(&vcpu->arch)) { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF); + return RESUME_GUEST; + } + + er = kvm_emu_mmio_read(vcpu, inst); + } + } + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); + ret = RESUME_GUEST; + } + + return ret; +} + +static int kvm_handle_read_fault(struct kvm_vcpu *vcpu) +{ + return kvm_handle_rdwr_fault(vcpu, false); +} + +static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) +{ + 
return kvm_handle_rdwr_fault(vcpu, true); +} -- Gitee From 5478183855b7fd223a0d2a619a571c9674e8ff98 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 176/953] LoongArch: KVM: Implement handle fpu exception ANBZ: #8436 commit 37cdfc6dbf04169310a24f3a79b554c363260562 upstream. Implement handle fpu exception, using kvm_own_fpu() to enable fpu for guest. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index c31894b75b07..e855ab9099b2 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -631,3 +631,30 @@ static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) { return kvm_handle_rdwr_fault(vcpu, true); } + +/** + * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use fpu which hasn't been allowed + * by the root context. + */ +static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + /* + * If guest FPU not present, the FPU operation should have been + * treated as a reserved instruction! + * If FPU already in use, we shouldn't get this at all. + */ + if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) { + kvm_err("%s internal error\n", __func__); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + return RESUME_HOST; + } + + kvm_own_fpu(vcpu); + + return RESUME_GUEST; +} -- Gitee From bde4b6a6f5032563ac4c63c73f71a05b76f7f4fb Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 177/953] LoongArch: KVM: Implement kvm exception vectors ANBZ: #8436 commit 71f4fb845874c3c54527e2e5afd687493db9d4d4 upstream. Implement kvm exception vectors, using kvm_fault_tables array to save the handle function pointers and it is used when vcpu handle guest exit. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index e855ab9099b2..ce8de3fa472c 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -658,3 +658,39 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } + +/* + * LoongArch KVM callback handling for unimplemented guest exiting + */ +static int kvm_fault_ni(struct kvm_vcpu *vcpu) +{ + unsigned int ecode, inst; + unsigned long estat, badv; + + /* Fetch the instruction */ + inst = vcpu->arch.badi; + badv = vcpu->arch.badv; + estat = vcpu->arch.host_estat; + ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n", + ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); + kvm_arch_vcpu_dump_regs(vcpu); + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { + [0 ... 
EXCCODE_INT_START - 1] = kvm_fault_ni, + [EXCCODE_TLBI] = kvm_handle_read_fault, + [EXCCODE_TLBL] = kvm_handle_read_fault, + [EXCCODE_TLBS] = kvm_handle_write_fault, + [EXCCODE_TLBM] = kvm_handle_write_fault, + [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, + [EXCCODE_GSPR] = kvm_handle_gspr, +}; + +int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) +{ + return kvm_fault_tables[fault](vcpu); +} -- Gitee From e072d4c73444d230175eb79e5622f621807e0426 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 178/953] LoongArch: KVM: Implement vcpu world switch ANBZ: #8436 commit 39fdf4be72f2b81238acbd4da48c75c135a6f1e0 upstream. Implement LoongArch vcpu world switch, including vcpu enter guest and vcpu exit from guest, both operations need to save or restore the host and guest registers. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kernel/asm-offsets.c | 32 ++++ arch/loongarch/kvm/switch.S | 250 ++++++++++++++++++++++++++++ 2 files changed, 282 insertions(+) create mode 100644 arch/loongarch/kvm/switch.S diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 8da0726777ed..173fe514fc9e 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -289,3 +290,34 @@ void output_fgraph_ret_regs_defines(void) BLANK(); } #endif + +void output_kvm_defines(void) +{ + COMMENT("KVM/LoongArch Specific offsets."); + + OFFSET(VCPU_FCC, kvm_vcpu_arch, fpu.fcc); + OFFSET(VCPU_FCSR0, kvm_vcpu_arch, fpu.fcsr); + BLANK(); + + OFFSET(KVM_VCPU_ARCH, kvm_vcpu, arch); + OFFSET(KVM_VCPU_KVM, kvm_vcpu, kvm); + OFFSET(KVM_VCPU_RUN, kvm_vcpu, run); + BLANK(); + + OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp); + OFFSET(KVM_ARCH_HTP, kvm_vcpu_arch, host_tp); + OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd); + OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit); + OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry); + OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry); + OFFSET(KVM_ARCH_GPC, kvm_vcpu_arch, pc); + OFFSET(KVM_ARCH_GGPR, kvm_vcpu_arch, gprs); + OFFSET(KVM_ARCH_HBADI, kvm_vcpu_arch, badi); + OFFSET(KVM_ARCH_HBADV, kvm_vcpu_arch, badv); + OFFSET(KVM_ARCH_HECFG, kvm_vcpu_arch, host_ecfg); + OFFSET(KVM_ARCH_HESTAT, kvm_vcpu_arch, host_estat); + OFFSET(KVM_ARCH_HPERCPU, kvm_vcpu_arch, host_percpu); + + OFFSET(KVM_GPGD, kvm, arch.pgd); + BLANK(); +} diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S new file mode 100644 index 000000000000..0ed9040307b7 --- /dev/null +++ b/arch/loongarch/kvm/switch.S @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +#define HGPR_OFFSET(x) (PT_R0 + 8*x) +#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x) + +.macro kvm_save_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +/* + * Save and restore all GPRs except base register, + * and default value of base register is a2. 
+ */ +.macro kvm_save_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +/* + * Prepare switch to guest, save host regs and restore guest regs. + * a2: kvm_vcpu_arch, don't touch it until 'ertn' + * t0, t1: temp register + */ +.macro kvm_switch_to_guest + /* Set host ECFG.VS=0, all exceptions share one exception entry */ + csrrd t0, LOONGARCH_CSR_ECFG + bstrins.w t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT + csrwr t0, LOONGARCH_CSR_ECFG + + /* Load up the new EENTRY */ + ld.d t0, a2, KVM_ARCH_GEENTRY + csrwr t0, LOONGARCH_CSR_EENTRY + + /* Set Guest ERA */ + ld.d t0, a2, KVM_ARCH_GPC + csrwr t0, LOONGARCH_CSR_ERA + + /* Save host PGDL */ + csrrd t0, LOONGARCH_CSR_PGDL + st.d t0, a2, KVM_ARCH_HPGD + + /* Switch to kvm */ + ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH + + /* Load guest PGDL */ + li.w t0, KVM_GPGD + ldx.d t0, t1, t0 + csrwr t0, LOONGARCH_CSR_PGDL + + /* Mix GID and RID */ + csrrd t1, LOONGARCH_CSR_GSTAT + bstrpick.w t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT + csrrd t0, LOONGARCH_CSR_GTLBC + bstrins.w t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT + csrwr t0, LOONGARCH_CSR_GTLBC + + /* + * Enable intr in root mode with future ertn so that host interrupt + * can be responsed during VM runs + * Guest CRMD comes from separate GCSR_CRMD register + */ + ori t0, zero, CSR_PRMD_PIE + csrxchg t0, t0, LOONGARCH_CSR_PRMD + + /* Set PVM bit to setup ertn to guest context */ + ori t0, zero, CSR_GSTAT_PVM + csrxchg t0, t0, LOONGARCH_CSR_GSTAT + + /* Load Guest GPRs */ + kvm_restore_guest_gprs a2 + /* Load KVM_ARCH register */ + ld.d a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2) + + ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */ +.endm + + /* + * Exception entry for general exception from guest mode + * - IRQ is disabled + * - kernel privilege in root mode + * - page mode keep unchanged from previous PRMD in root mode + * - Fixme: tlb exception cannot happen since registers relative with TLB + * - is still in guest mode, such as pgd table/vmid registers etc, + * - will fix with hw page walk enabled in future + * load kvm_vcpu from reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS + */ + .text + .cfi_sections .debug_frame +SYM_CODE_START(kvm_exc_entry) + csrwr a2, KVM_TEMP_KS + csrrd a2, KVM_VCPU_KS + addi.d a2, a2, KVM_VCPU_ARCH + + /* After save GPRs, free to use any GPR */ + kvm_save_guest_gprs a2 + /* Save guest A2 */ + csrrd t0, KVM_TEMP_KS + st.d t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2) + + /* A2 is kvm_vcpu_arch, A1 is free to use */ + csrrd s1, KVM_VCPU_KS + ld.d s0, s1, KVM_VCPU_RUN + + csrrd t0, LOONGARCH_CSR_ESTAT + st.d t0, a2, KVM_ARCH_HESTAT + csrrd t0, LOONGARCH_CSR_ERA + st.d t0, a2, KVM_ARCH_GPC + csrrd t0, LOONGARCH_CSR_BADV + st.d t0, a2, KVM_ARCH_HBADV + csrrd t0, LOONGARCH_CSR_BADI + st.d t0, a2, KVM_ARCH_HBADI + + /* Restore host ECFG.VS */ + csrrd t0, LOONGARCH_CSR_ECFG + ld.d t1, a2, KVM_ARCH_HECFG + or t0, t0, t1 + csrwr t0, LOONGARCH_CSR_ECFG + + /* Restore host EENTRY */ + ld.d t0, a2, KVM_ARCH_HEENTRY + csrwr t0, LOONGARCH_CSR_EENTRY + + /* Restore host pgd table */ + ld.d t0, a2, KVM_ARCH_HPGD + csrwr t0, LOONGARCH_CSR_PGDL + + /* + * Disable PGM bit to enter root mode by default with next ertn + */ + ori t0, zero, CSR_GSTAT_PVM + 
csrxchg zero, t0, LOONGARCH_CSR_GSTAT + + /* + * Clear GTLBC.TGID field + * 0: for root tlb update in future tlb instr + * others: for guest tlb update like gpa to hpa in future tlb instr + */ + csrrd t0, LOONGARCH_CSR_GTLBC + bstrins.w t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT + csrwr t0, LOONGARCH_CSR_GTLBC + ld.d tp, a2, KVM_ARCH_HTP + ld.d sp, a2, KVM_ARCH_HSP + /* restore per cpu register */ + ld.d u0, a2, KVM_ARCH_HPERCPU + addi.d sp, sp, -PT_SIZE + + /* Prepare handle exception */ + or a0, s0, zero + or a1, s1, zero + ld.d t8, a2, KVM_ARCH_HANDLE_EXIT + jirl ra, t8, 0 + + or a2, s1, zero + addi.d a2, a2, KVM_VCPU_ARCH + + /* Resume host when ret <= 0 */ + blez a0, ret_to_host + + /* + * Return to guest + * Save per cpu register again, maybe switched to another cpu + */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr s1, KVM_VCPU_KS + kvm_switch_to_guest + +ret_to_host: + ld.d a2, a2, KVM_ARCH_HSP + addi.d a2, a2, -PT_SIZE + kvm_restore_host_gpr a2 + jr ra + +SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL) +SYM_CODE_END(kvm_exc_entry) + +/* + * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu) + * + * @register_param: + * a0: kvm_run* run + * a1: kvm_vcpu* vcpu + */ +SYM_FUNC_START(kvm_enter_guest) + /* Allocate space in stack bottom */ + addi.d a2, sp, -PT_SIZE + /* Save host GPRs */ + kvm_save_host_gpr a2 + + /* Save host CRMD, PRMD to stack */ + csrrd a3, LOONGARCH_CSR_CRMD + st.d a3, a2, PT_CRMD + csrrd a3, LOONGARCH_CSR_PRMD + st.d a3, a2, PT_PRMD + + addi.d a2, a1, KVM_VCPU_ARCH + st.d sp, a2, KVM_ARCH_HSP + st.d tp, a2, KVM_ARCH_HTP + /* Save per cpu register */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr a1, KVM_VCPU_KS + kvm_switch_to_guest +SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL) +SYM_FUNC_END(kvm_enter_guest) + +SYM_FUNC_START(kvm_save_fpu) + fpu_save_csr a0 t1 + fpu_save_double a0 t1 + fpu_save_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_save_fpu) + +SYM_FUNC_START(kvm_restore_fpu) + fpu_restore_double a0 t1 + fpu_restore_csr a0 t1 t2 + fpu_restore_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_fpu) + + .section ".rodata" +SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) +SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) -- Gitee From c4ba14545754ff160daaf35b766b9c261ac04572 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 179/953] LoongArch: KVM: Enable kvm config and add the makefile ANBZ: #8436 commit c1fc48aad14dbe7654f5986afb906332b528d54b upstream. Enable LoongArch kvm config and add the makefile to support build kvm module. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/Kbuild | 2 ++ arch/loongarch/Kconfig | 6 ++++ arch/loongarch/configs/loongson3_defconfig | 2 ++ arch/loongarch/kvm/Kconfig | 40 ++++++++++++++++++++++ arch/loongarch/kvm/Makefile | 22 ++++++++++++ 5 files changed, 72 insertions(+) create mode 100644 arch/loongarch/kvm/Kconfig create mode 100644 arch/loongarch/kvm/Makefile diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild index b01f5cdb27e0..beb8499dd8ed 100644 --- a/arch/loongarch/Kbuild +++ b/arch/loongarch/Kbuild @@ -3,5 +3,7 @@ obj-y += mm/ obj-y += net/ obj-y += vdso/ +obj-$(CONFIG_KVM) += kvm/ + # for cleaning subdir- += boot diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 1463213f3315..54a169dee80f 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -131,6 +131,7 @@ config LOONGARCH select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES + select HAVE_KVM select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_PCI @@ -266,6 +267,9 @@ config AS_HAS_LASX_EXTENSION config AS_HAS_LBT_EXTENSION def_bool $(as-instr,movscr2gr \$a0$(comma)\$scr0) +config AS_HAS_LVZ_EXTENSION + def_bool $(as-instr,hvcl 0) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -659,3 +663,5 @@ source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" endmenu + +source "arch/loongarch/kvm/Kconfig" diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 569484674f7e..101c9d83cd43 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -70,6 +70,8 @@ CONFIG_EFI_ZBOOT=y CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_TEST=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig new file mode 100644 index 000000000000..fda425babfb2 --- /dev/null +++ b/arch/loongarch/kvm/Kconfig @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + depends on AS_HAS_LVZ_EXTENSION + depends on HAVE_KVM + select HAVE_KVM_DIRTY_RING_ACQ_REL + select HAVE_KVM_EVENTFD + select HAVE_KVM_VCPU_ASYNC_IOCTL + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select KVM_GENERIC_HARDWARE_ENABLING + select KVM_MMIO + select KVM_XFER_TO_GUEST_WORK + select MMU_NOTIFIER + select PREEMPT_NOTIFIERS + help + Support hosting virtualized guest machines using + hardware virtualization extensions. You will need + a processor equipped with virtualization extensions. + + If unsure, say N. 
+ +endif # VIRTUALIZATION diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile new file mode 100644 index 000000000000..244467d7792a --- /dev/null +++ b/arch/loongarch/kvm/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for LoongArch KVM support +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += exit.o +kvm-y += interrupt.o +kvm-y += main.o +kvm-y += mmu.o +kvm-y += switch.o +kvm-y += timer.o +kvm-y += tlb.o +kvm-y += vcpu.o +kvm-y += vm.o + +CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) -- Gitee From c8320653313d59935ed98449890bb3cc3fb3d366 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 180/953] LoongArch: KVM: Supplement kvm document about LoongArch-specific part ANBZ: #8436 commit 6f0257a03212d4f66954ce14402adb5c68fed075 upstream. Supplement kvm document about LoongArch-specific part, such as add api introduction for GET/SET_ONE_REG, GET/SET_FPU, GET/SET_MP_STATE, etc. Reviewed-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- Documentation/virt/kvm/api.rst | 70 +++++++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 9 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 21a7578142a1..edc682a94ca4 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -416,6 +416,13 @@ Reads the general purpose registers from the vcpu. __u64 pc; }; + /* LoongArch */ + struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + unsigned long gpr[32]; + unsigned long pc; + }; + 4.12 KVM_SET_REGS ----------------- @@ -506,7 +513,7 @@ translation mode. ------------------ :Capability: basic -:Architectures: x86, ppc, mips, riscv +:Architectures: x86, ppc, mips, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_interrupt (in) :Returns: 0 on success, negative on failure. @@ -592,6 +599,14 @@ b) KVM_INTERRUPT_UNSET This is an asynchronous vcpu ioctl and can be invoked from any thread. +LOONGARCH: +^^^^^^^^^^ + +Queues an external interrupt to be injected into the virtual CPU. A negative +interrupt number dequeues the interrupt. + +This is an asynchronous vcpu ioctl and can be invoked from any thread. + 4.17 KVM_DEBUG_GUEST -------------------- @@ -737,7 +752,7 @@ signal mask. ---------------- :Capability: basic -:Architectures: x86 +:Architectures: x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_fpu (out) :Returns: 0 on success, -1 on error @@ -746,7 +761,7 @@ Reads the floating point state from the vcpu. :: - /* for KVM_GET_FPU and KVM_SET_FPU */ + /* x86: for KVM_GET_FPU and KVM_SET_FPU */ struct kvm_fpu { __u8 fpr[8][16]; __u16 fcw; @@ -761,12 +776,21 @@ Reads the floating point state from the vcpu. __u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.23 KVM_SET_FPU ---------------- :Capability: basic -:Architectures: x86 +:Architectures: x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_fpu (in) :Returns: 0 on success, -1 on error @@ -775,7 +799,7 @@ Writes the floating point state to the vcpu. 
:: - /* for KVM_GET_FPU and KVM_SET_FPU */ + /* x86: for KVM_GET_FPU and KVM_SET_FPU */ struct kvm_fpu { __u8 fpr[8][16]; __u16 fcw; @@ -790,6 +814,15 @@ Writes the floating point state to the vcpu. __u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.24 KVM_CREATE_IRQCHIP ----------------------- @@ -1387,7 +1420,7 @@ documentation when it pops into existence). ------------------- :Capability: KVM_CAP_ENABLE_CAP -:Architectures: mips, ppc, s390, x86 +:Architectures: mips, ppc, s390, x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_enable_cap (in) :Returns: 0 on success; -1 on error @@ -1442,7 +1475,7 @@ for vm-wide capabilities. --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (out) :Returns: 0 on success; -1 on error @@ -1460,7 +1493,7 @@ Possible values are: ========================== =============================================== KVM_MP_STATE_RUNNABLE the vcpu is currently running - [x86,arm64,riscv] + [x86,arm64,riscv,loongarch] KVM_MP_STATE_UNINITIALIZED the vcpu is an application processor (AP) which has not yet received an INIT signal [x86] KVM_MP_STATE_INIT_RECEIVED the vcpu has received an INIT signal, and is @@ -1516,11 +1549,14 @@ For riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. + 4.39 KVM_SET_MP_STATE --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (in) :Returns: 0 on success; -1 on error @@ -1538,6 +1574,9 @@ For arm64/riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. + 4.40 KVM_SET_IDENTITY_MAP_ADDR ------------------------------ @@ -2841,6 +2880,19 @@ Following are the RISC-V D-extension registers: 0x8020 0000 0600 0020 fcsr Floating point control and status register ======================= ========= ============================================= +LoongArch registers are mapped using the lower 32 bits. The upper 16 bits of +that is the register group type. + +LoongArch csr registers are used to control guest cpu or get status of guest +cpu, and they have the following id bit patterns:: + + 0x9030 0000 0001 00 (64-bit) + +LoongArch KVM control registers are used to implement some new defined functions +such as set vcpu counter or reset vcpu, and they have the following id bit patterns:: + + 0x9030 0000 0002 + 4.69 KVM_GET_ONE_REG -------------------- -- Gitee From 511729190248ed3c55fb1d3d2b88893716c6d867 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 181/953] LoongArch: KVM: Add maintainers for LoongArch KVM ANBZ: #8436 commit 2c10cda4b777be4be9d9e69e4f70c818dbb15e21 upstream. Add maintainers for LoongArch KVM. 
Acked-by: Huacai Chen
Signed-off-by: Tianrui Zhao
Signed-off-by: Huacai Chen
Signed-off-by: Xianglai Li
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2814
---
 MAINTAINERS | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 40312bb550f0..3a3a96e4dd39 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11522,6 +11522,18 @@ F:	include/kvm/arm_*
 F:	tools/testing/selftests/kvm/*/aarch64/
 F:	tools/testing/selftests/kvm/aarch64/
 
+KERNEL VIRTUAL MACHINE FOR LOONGARCH (KVM/LoongArch)
+M:	Tianrui Zhao
+M:	Bibo Mao
+M:	Huacai Chen
+L:	kvm@vger.kernel.org
+L:	loongarch@lists.linux.dev
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F:	arch/loongarch/include/asm/kvm*
+F:	arch/loongarch/include/uapi/asm/kvm*
+F:	arch/loongarch/kvm/
+
 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
 M:	Huacai Chen
 L:	linux-mips@vger.kernel.org
-- 
Gitee

From d745bf6b2668e766cc6c51b0a04a338209f3357c Mon Sep 17 00:00:00 2001
From: Bibo Mao
Date: Tue, 19 Dec 2023 10:48:27 +0800
Subject: [PATCH 182/953] LoongArch: KVM: Optimization for memslot hugepage
 checking

ANBZ: #8436

commit 7ab6fb505b2a7447c4a7237a12c59e3ad0c7298c upstream.

During a shadow MMU page fault, the memslot is checked for huge page
support. Page fault handling is a hot path, and this check can instead
be done once, when the memslot is created. Two flags are therefore
added for huge page checking: KVM_MEM_HUGEPAGE_CAPABLE and
KVM_MEM_HUGEPAGE_INCAPABLE. Indeed, with an optimized QEMU, the memslot
backing DRAM is always huge-page aligned. The flags are checked first
in the hot page fault path.

For now only the huge page flags are supported; super page support on
LoongArch is still a long way off. Since the super page size is 64G for
16K page size and 1G for 4K page size, and 64G physical addresses are
rarely used, the LoongArch kernel would need super page support for 4K
page size, and the memory layout of a LoongArch QEMU VM would also have
to be 1G aligned.
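Concretely, the capability test added to kvm_arch_prepare_memory_region()
below reduces to the following predicate (an illustrative model, not the
patch code; with the default 16KB base pages, PMD_SIZE works out to 32MB):

	#include <stdbool.h>

	/*
	 * A memslot can always use PMD mappings only if its size, GPA base
	 * and HVA base are all PMD-aligned; otherwise stage-2 PMDs could
	 * map the wrong pages (see the diagram in the hunk below).
	 */
	static bool hugepage_capable(unsigned long gpa_start,
				     unsigned long hva_start,
				     unsigned long size,
				     unsigned long pmd_size)
	{
		return !(size & (pmd_size - 1)) &&
		       !(gpa_start & (pmd_size - 1)) &&
		       !(hva_start & (pmd_size - 1));
	}

A slot that fails this test but whose GPA and HVA offsets match modulo
PMD_SIZE is left with neither flag set, so huge page eligibility falls
back to the per-fault range check in fault_supports_huge_mapping().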
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_host.h | 3 + arch/loongarch/kvm/mmu.c | 124 +++++++++++++++++--------- 2 files changed, 86 insertions(+), 41 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 11328700d4fa..0e89db020481 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -45,7 +45,10 @@ struct kvm_vcpu_stat { u64 signal_exits; }; +#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0) +#define KVM_MEM_HUGEPAGE_INCAPABLE (1UL << 1) struct kvm_arch_memory_slot { + unsigned long flags; }; struct kvm_context { diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 80480df5f550..915f17527893 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -13,6 +13,16 @@ #include #include +static inline bool kvm_hugepage_capable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_CAPABLE; +} + +static inline bool kvm_hugepage_incapable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_INCAPABLE; +} + static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx) { ctx->level = kvm->arch.root_level; @@ -365,6 +375,69 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx); } +int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) +{ + gpa_t gpa_start; + hva_t hva_start; + size_t size, gpa_offset, hva_offset; + + if ((change != KVM_MR_MOVE) && (change != KVM_MR_CREATE)) + return 0; + /* + * Prevent userspace from creating a memory region outside of the + * VM GPA address space + */ + if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT)) + return -ENOMEM; + + new->arch.flags = 0; + size = new->npages * PAGE_SIZE; + gpa_start = new->base_gfn << PAGE_SHIFT; + hva_start = new->userspace_addr; + if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE) + && IS_ALIGNED(hva_start, PMD_SIZE)) + new->arch.flags |= KVM_MEM_HUGEPAGE_CAPABLE; + else { + /* + * Pages belonging to memslots that don't have the same + * alignment within a PMD for userspace and GPA cannot be + * mapped with PMD entries, because we'll end up mapping + * the wrong pages. 
+ * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SIZE: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this + * incorrect mapping: + * d -> f + * e -> g + * f -> h + */ + gpa_offset = gpa_start & (PMD_SIZE - 1); + hva_offset = hva_start & (PMD_SIZE - 1); + if (gpa_offset != hva_offset) { + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } else { + if (gpa_offset == 0) + gpa_offset = PMD_SIZE; + if ((size + gpa_offset) < (PMD_SIZE * 2)) + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } + } + + return 0; +} + void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, @@ -562,47 +635,23 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ } static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, - unsigned long hva, unsigned long map_size, bool write) + unsigned long hva, bool write) { - size_t size; - gpa_t gpa_start; - hva_t uaddr_start, uaddr_end; + hva_t start, end; /* Disable dirty logging on HugePages */ if (kvm_slot_dirty_track_enabled(memslot) && write) return false; - size = memslot->npages * PAGE_SIZE; - gpa_start = memslot->base_gfn << PAGE_SHIFT; - uaddr_start = memslot->userspace_addr; - uaddr_end = uaddr_start + size; + if (kvm_hugepage_capable(memslot)) + return true; - /* - * Pages belonging to memslots that don't have the same alignment - * within a PMD for userspace and GPA cannot be mapped with stage-2 - * PMD entries, because we'll end up mapping the wrong pages. - * - * Consider a layout like the following: - * - * memslot->userspace_addr: - * +-----+--------------------+--------------------+---+ - * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| - * +-----+--------------------+--------------------+---+ - * - * memslot->base_gfn << PAGE_SIZE: - * +---+--------------------+--------------------+-----+ - * |abc|def Stage-2 block | Stage-2 block |tvxyz| - * +---+--------------------+--------------------+-----+ - * - * If we create those stage-2 blocks, we'll end up with this incorrect - * mapping: - * d -> f - * e -> g - * f -> h - */ - if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + if (kvm_hugepage_incapable(memslot)) return false; + start = memslot->userspace_addr; + end = start + memslot->npages * PAGE_SIZE; + /* * Next, let's make sure we're not trying to map anything not covered * by the memslot. This means we have to prohibit block size mappings @@ -615,8 +664,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, * userspace_addr or the base_gfn, as both are equally aligned (per * the check above) and equally sized. 
*/ - return (hva & ~(map_size - 1)) >= uaddr_start && - (hva & ~(map_size - 1)) + map_size <= uaddr_end; + return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE)); } /* @@ -842,7 +890,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) /* Disable dirty logging on HugePages */ level = 0; - if (!fault_supports_huge_mapping(memslot, hva, PMD_SIZE, write)) { + if (!fault_supports_huge_mapping(memslot, hva, write)) { level = 0; } else { level = host_pfn_mapping_level(kvm, gfn, memslot); @@ -901,12 +949,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { } -int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, - struct kvm_memory_slot *new, enum kvm_mr_change change) -{ - return 0; -} - void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) { -- Gitee From d5dd28df26459d148b4d06907e998be0a1216e65 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:27 +0800 Subject: [PATCH 183/953] LoongArch: KVM: Remove SW timer switch when vcpu is halt polling ANBZ: #8436 commit 161267320158920a601e40d83fdac60bcaa2acb5 upstream. With halt-polling supported, pending events and interrupts are checked when the vcpu executes an idle instruction. Pending interrupts include injected SW interrupts and passthrough HW interrupts such as HW timer interrupts, since the HW timer keeps running even after the vcpu exits from VM mode. Since a pending HW timer interrupt can be raised directly via the CSR status register, and the pending HW timer interrupt check is part of the vcpu block checking function, it is not necessary to switch to the SW timer during halt-polling. This patch adds preemption disabling in kvm_cpu_has_pending_timer(), and removes the SW timer switching from the idle instruction emulation function. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/exit.c | 13 ++----------- arch/loongarch/kvm/timer.c | 12 +++++++++--- arch/loongarch/kvm/vcpu.c | 9 ++++++++- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ce8de3fa472c..e708a1786d6b 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -200,17 +200,8 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) ++vcpu->stat.idle_exits; trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE); - if (!kvm_arch_vcpu_runnable(vcpu)) { - /* - * Switch to the software timer before halt-polling/blocking as - * the guest's timer may be a break event for the vCPU, and the - * hypervisor timer runs only when the CPU is in guest mode. - * Switch before halt-polling so that KVM recognizes an expired - * timer before blocking. - */ - kvm_save_timer(vcpu); - kvm_vcpu_block(vcpu); - } + if (!kvm_arch_vcpu_runnable(vcpu)) + kvm_vcpu_halt(vcpu); return EMULATE_DONE; } diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 284bf553fefe..12d58040122d 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -155,11 +155,17 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) */ hrtimer_cancel(&vcpu->arch.swtimer); hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); - } else + } else if (vcpu->stat.generic.blocking) { /* - * Inject timer interrupt so that hall polling can dectect and exit + * Inject timer interrupt so that halt polling can detect and exit.
+ VCPU is scheduled out already and sleeps in rcuwait queue and + will not poll pending events again. kvm_queue_irq() is not enough, + hrtimer swtimer should be used here. */ - kvm_queue_irq(vcpu, INT_TI); + expire = ktime_add_ns(ktime_get(), 10); + vcpu->arch.expire = expire; + hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); + } } /* diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 73d0c2b9c1a5..54f544b30f32 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -187,8 +187,15 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { - return kvm_pending_timer(vcpu) || + int ret; + + /* Protect from TOD sync and vcpu_load/put() */ + preempt_disable(); + ret = kvm_pending_timer(vcpu) || kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); + preempt_enable(); + + return ret; } int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) -- Gitee From 0a79e0a0c3bdca46ba5636d847dfabfd6369473e Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:27 +0800 Subject: [PATCH 184/953] LoongArch: KVM: Allow to access HW timer CSR registers always ANBZ: #8436 commit 0d2abe67029644741bf7400b0d00c2faa3e1c455 upstream. Currently guest access to the HW timer CSR registers is enabled before entering the vm and disabled when switching to the SW timer in host mode, but it is not necessary to do so. The HW timer CSR registers can always be accessed; it has nothing to do with whether we are in vm mode or host mode. This patch removes the limitation. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/main.c | 1 - arch/loongarch/kvm/timer.c | 27 ++++++--------------- 2 files changed, 6 insertions(+), 22 deletions(-) diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 1c1d5199500e..86a2f2d0cb27 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -287,7 +287,6 @@ int kvm_arch_hardware_enable(void) if (env & CSR_GCFG_MATC_ROOT) gcfg |= CSR_GCFG_MATC_ROOT; - gcfg |= CSR_GCFG_TIT; write_csr_gcfg(gcfg); kvm_flush_tlb_all(); diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 12d58040122d..d6d5bcea349b 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -70,15 +70,6 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) */ void kvm_acquire_timer(struct kvm_vcpu *vcpu) { - unsigned long cfg; - - cfg = read_csr_gcfg(); - if (!(cfg & CSR_GCFG_TIT)) - return; - - /* Enable guest access to hard timer */ - write_csr_gcfg(cfg & ~CSR_GCFG_TIT); - /* * Freeze the soft-timer and sync the guest stable timer with it. We do * this with interrupts disabled to avoid latency.
@@ -174,21 +165,15 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) */ void kvm_save_timer(struct kvm_vcpu *vcpu) { - unsigned long cfg; struct loongarch_csrs *csr = vcpu->arch.csr; preempt_disable(); - cfg = read_csr_gcfg(); - if (!(cfg & CSR_GCFG_TIT)) { - /* Disable guest use of hard timer */ - write_csr_gcfg(cfg | CSR_GCFG_TIT); - - /* Save hard timer state */ - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL); - if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) - _kvm_save_timer(vcpu); - } + + /* Save hard timer state */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) + _kvm_save_timer(vcpu); /* Save timer-related state to vCPU context */ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); -- Gitee From b9aaec0d3fb9f5a2bd21e7a312d2e8c623505ac0 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 185/953] LoongArch: KVM: Remove kvm_acquire_timer() before entering guest ANBZ: #8436 commit 1ab9c6099495f79bfbcd6058d02d7556034a89b0 upstream. Timer emulation in the VM is done by switching to the SW timer, and there are two places where this emulation is needed: one is the vcpu thread context switch, the other is halt-polling with idle instruction emulation. SW timer switching was removed from the halt-polling path, so it is no longer necessary to disable the SW timer before entering the guest. This patch removes the SW timer handling before entering guest mode and puts it in the HW timer restore flow when the vcpu thread is scheduled in. With this patch, vm timer emulation is simpler: the SW/HW timer switch happens only in the vcpu thread context switch scenario. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_vcpu.h | 1 - arch/loongarch/kvm/timer.c | 22 ++++++-------------- arch/loongarch/kvm/vcpu.c | 29 --------------------------- 3 files changed, 6 insertions(+), 46 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 553cfa2b2b1c..0e87652f780a 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -55,7 +55,6 @@ void kvm_save_fpu(struct loongarch_fpu *fpu); void kvm_restore_fpu(struct loongarch_fpu *fpu); void kvm_restore_fcsr(struct loongarch_fpu *fpu); -void kvm_acquire_timer(struct kvm_vcpu *vcpu); void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index d6d5bcea349b..d362d87a54aa 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -64,19 +64,6 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0); } -/* - * Restore hard timer state and enable guest to access timer registers - * without trap, should be called with irq disabled - */ -void kvm_acquire_timer(struct kvm_vcpu *vcpu) -{ - /* - * Freeze the soft-timer and sync the guest stable timer with it. We do - * this with interrupts disabled to avoid latency. - */ - hrtimer_cancel(&vcpu->arch.swtimer); -} - /* * Restore soft timer state from saved context.
*/ @@ -98,6 +85,11 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) return; } + /* + * Freeze the soft-timer and sync the guest stable timer with it. + */ + hrtimer_cancel(&vcpu->arch.swtimer); + /* * Set remainder tick value if not expired */ @@ -115,7 +107,7 @@ /* * Inject timer here though sw timer should inject timer * interrupt async already, since sw timer may be cancelled - * during injecting intr async in function kvm_acquire_timer + * during injecting intr async */ kvm_queue_irq(vcpu, INT_TI); } @@ -140,11 +132,9 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) vcpu->arch.expire = expire; if (ticks) { /* - * Update hrtimer to use new timeout * HRTIMER_MODE_PINNED is suggested since vcpu may run in * the same physical cpu in next time */ - hrtimer_cancel(&vcpu->arch.swtimer); hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); } else if (vcpu->stat.generic.blocking) { /* diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 54f544b30f32..53fcef8b24a1 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -95,7 +95,6 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) * check vmid before vcpu enter guest */ local_irq_disable(); - kvm_acquire_timer(vcpu); kvm_deliver_intr(vcpu); kvm_deliver_exception(vcpu); /* Make sure the vcpu mode has been written */ @@ -251,23 +250,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, return -EINVAL; } -/** - * kvm_migrate_count() - Migrate timer. - * @vcpu: Virtual CPU. - * - * Migrate hrtimer to the current CPU by cancelling and restarting it - * if the hrtimer is active. - * - * Must be called when the vCPU is migrated to a different CPU, so that - * the timer can interrupt the guest at the new CPU, and the timer irq can - * be delivered to the vCPU. - */ -static void kvm_migrate_count(struct kvm_vcpu *vcpu) -{ - if (hrtimer_cancel(&vcpu->arch.swtimer)) - hrtimer_restart(&vcpu->arch.swtimer); -} - static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) { unsigned long gintc; @@ -796,17 +778,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) unsigned long flags; local_irq_save(flags); - if (vcpu->arch.last_sched_cpu != cpu) { - kvm_debug("[%d->%d]KVM vCPU[%d] switch\n", - vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); - /* - * Migrate the timer interrupt to the current CPU so that it - * always interrupts the guest and synchronously triggers a - * guest timer interrupt. - */ - kvm_migrate_count(vcpu); - } - /* Restore guest state to registers */ _kvm_vcpu_load(vcpu, cpu); local_irq_restore(flags); -- Gitee From ee881dcbd95f08b3169c86be3f905ec258651d3a Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 186/953] LoongArch: KVM: Fix timer emulation with oneshot mode ANBZ: #8436 commit 5b3d524993ff1fb36089be850ccb121ac3296bcf upstream. When the timer fires in oneshot mode, CSR TVAL will be -1 rather than 0, and this situation needs special handling. There are two scenarios when a oneshot timer fires. One scenario is that the timer fires after exiting to the host: CSR TVAL is set to 0 in order to inject the hw interrupt, and -1 will be assigned to CSR TVAL soon afterwards. The other is that the timer fires in the VM while the guest kernel is handling the timer IRQ: the IRQ is acked and the guest is about to set the next expiry value when the vm exits to the host. The timer interrupt should not be injected at this point, else there will be a spurious timer interrupt.
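A standalone sketch of the resulting decision; the bit positions and names below follow the LoongArch manual but are assumptions of this illustration, not the kernel's definitions::

    #include <stdbool.h>
    #include <stdint.h>

    #define CSR_TCFG_PERIOD (1ULL << 1)  /* assumed: periodic-mode bit in TCFG */
    #define ESTAT_TIMER_IS  (1ULL << 11) /* assumed: timer pending bit in ESTAT */

    /* Returns true when the host must (re)inject the guest timer interrupt. */
    static bool host_should_inject_timer(uint64_t tcfg, uint64_t tval, uint64_t estat)
    {
        /* Periodic timers reload from TCFG; TVAL never parks at -1. */
        if (tcfg & CSR_TCFG_PERIOD)
            return false;
        /* A fired oneshot timer leaves TVAL at -1, i.e. tval > tcfg. */
        if (tval <= tcfg)
            return false;
        /*
         * Pending bit still set: the timer fired while exiting to the
         * host, so inject. Bit clear: the guest handler already acked
         * the IRQ in the VM, so injecting again would be spurious.
         */
        return (estat & ESTAT_TIMER_IS) != 0;
    }

This mirrors the ticks > cfg test that the patch adds to kvm_restore_timer().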
Here the hw timer irq status in CSR ESTAT is used to distinguish these two scenarios. If CSR TVAL is -1, the oneshot timer has fired; if the timer hw irq is on in the CSR ESTAT register, it fired after exiting to the host; else, if the timer hw irq is off, it fired in the vm and the timer IRQ handler has already acked the IRQ. With this patch, runltp (version ltp20230516) passes in the vm. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/timer.c | 68 ++++++++++++++++++++++++++++++-------- 1 file changed, 55 insertions(+), 13 deletions(-) diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index d362d87a54aa..111328f60872 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -69,14 +69,19 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) */ void kvm_restore_timer(struct kvm_vcpu *vcpu) { - unsigned long cfg, delta, period; + unsigned long cfg, estat; + unsigned long ticks, delta, period; ktime_t expire, now; struct loongarch_csrs *csr = vcpu->arch.csr; /* * Set guest stable timer cfg csr + * Disable timer before restore estat CSR register, avoid to + * get invalid timer interrupt for old timer cfg */ cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); + + write_gcsr_timercfg(0); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG); if (!(cfg & CSR_TCFG_EN)) { @@ -90,20 +95,47 @@ */ hrtimer_cancel(&vcpu->arch.swtimer); + /* + * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 + * If oneshot timer is fired, CSR TVAL will be -1, there are two + * conditions: + * 1) timer is fired during exiting to host + * 2) timer is fired and vm is doing timer irq, and then exiting to host.
Host should not inject timer irq to avoid spurious + * timer interrupt again + */ + ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); + estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) { + /* + * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq + * and set CSR TVAL with -1 + */ + write_gcsr_timertick(0); + + /* + * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear + * timer interrupt, and CSR TVAL keeps unchanged with -1, it + * avoids spurious timer interrupt + */ + if (!(estat & CPU_TIMER)) + gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR); + return; + } + /* * Set remainder tick value if not expired */ + delta = 0; now = ktime_get(); expire = vcpu->arch.expire; if (ktime_before(now, expire)) delta = ktime_to_tick(vcpu, ktime_sub(expire, now)); - else { - if (cfg & CSR_TCFG_PERIOD) { - period = cfg & CSR_TCFG_VAL; - delta = ktime_to_tick(vcpu, ktime_sub(now, expire)); - delta = period - (delta % period); - } else - delta = 0; + else if (cfg & CSR_TCFG_PERIOD) { + period = cfg & CSR_TCFG_VAL; + delta = ktime_to_tick(vcpu, ktime_sub(now, expire)); + delta = period - (delta % period); + /* * Inject timer here though sw timer should inject timer * interrupt async already, since sw timer may be cancelled @@ -122,15 +154,25 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) */ static void _kvm_save_timer(struct kvm_vcpu *vcpu) { - unsigned long ticks, delta; + unsigned long ticks, delta, cfg; ktime_t expire; struct loongarch_csrs *csr = vcpu->arch.csr; + cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); - delta = tick_to_ns(vcpu, ticks); - expire = ktime_add_ns(ktime_get(), delta); - vcpu->arch.expire = expire; - if (ticks) { + + /* + * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 + * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG + * If oneshot timer is fired, CSR TVAL will be -1 + * Here judge one-shot timer fired by checking whether TVAL is larger + * than TCFG + */ + if (ticks < cfg) { + delta = tick_to_ns(vcpu, ticks); + expire = ktime_add_ns(ktime_get(), delta); + vcpu->arch.expire = expire; + /* * HRTIMER_MODE_PINNED is suggested since vcpu may run in * the same physical cpu in next time -- Gitee From a734565e8a260306f9311720e10b4cf23fd6791b Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 187/953] LoongArch: KVM: Add LSX (128bit SIMD) support ANBZ: #8436 commit db1ecca22edf27c5a3dd66af406c88b5b5ac7cc1 upstream. This patch adds LSX (128bit SIMD) support for LoongArch KVM. There will be LSX exception in KVM when guest use the LSX instructions. KVM will enable LSX and restore the vector registers for guest and then return to guest to continue running. 
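Userspace can discover the SIMD feature bits that such a kernel exposes through the vcpu device-attribute interface added in this patch. A minimal sketch, assuming a LoongArch host running a kernel with this series; error handling is trimmed::

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        uint64_t mask = 0;
        struct kvm_device_attr attr = {
            .group = 0, /* KVM_LOONGARCH_VCPU_CPUCFG */
            .attr = 2,  /* CPUCFG word 2 carries the FP/LSX feature bits */
            .addr = (uint64_t)(uintptr_t)&mask,
        };

        if (kvm < 0 || vm < 0 || vcpu < 0)
            return 1;
        if (ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr) == 0 &&
            ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr) == 0)
            printf("supported CPUCFG2 feature mask: %#llx\n",
                   (unsigned long long)mask);
        return 0;
    }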
Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_host.h | 15 +- arch/loongarch/include/asm/kvm_vcpu.h | 10 ++ arch/loongarch/include/uapi/asm/kvm.h | 1 + arch/loongarch/kernel/fpu.S | 1 + arch/loongarch/kvm/exit.c | 21 +++ arch/loongarch/kvm/switch.S | 16 ++ arch/loongarch/kvm/trace.h | 4 +- arch/loongarch/kvm/vcpu.c | 220 +++++++++++++++++++++++++- 8 files changed, 280 insertions(+), 8 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 0e89db020481..b0c5cdd8014c 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -95,8 +95,9 @@ enum emulation_result { }; #define KVM_LARCH_FPU (0x1 << 0) -#define KVM_LARCH_SWCSR_LATEST (0x1 << 1) -#define KVM_LARCH_HWCSR_USABLE (0x1 << 2) +#define KVM_LARCH_LSX (0x1 << 1) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 2) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 3) struct kvm_vcpu_arch { /* @@ -178,6 +179,16 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned csr->csrs[reg] = val; } +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_FP; +} + +static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LSX; +} + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 0e87652f780a..db08dd46b525 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -55,6 +55,16 @@ void kvm_save_fpu(struct loongarch_fpu *fpu); void kvm_restore_fpu(struct loongarch_fpu *fpu); void kvm_restore_fcsr(struct loongarch_fpu *fpu); +#ifdef CONFIG_CPU_HAS_LSX +int kvm_own_lsx(struct kvm_vcpu *vcpu); +void kvm_save_lsx(struct loongarch_fpu *fpu); +void kvm_restore_lsx(struct loongarch_fpu *fpu); +#else +static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { } +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } +#endif + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index c6ad2ee6106c..923d0bd38294 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -79,6 +79,7 @@ struct kvm_fpu { #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) +#define KVM_LOONGARCH_VCPU_CPUCFG 0 struct kvm_debug_exit_arch { }; diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index d53ab10f4644..a400924c0348 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -349,6 +349,7 @@ SYM_FUNC_START(_restore_lsx_upper) lsx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lsx_upper) +EXPORT_SYMBOL(_restore_lsx_upper) SYM_FUNC_START(_init_lsx_upper) lsx_init_all_upper t1 diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index e708a1786d6b..676f7a3a335c 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -634,6 
+634,11 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; + if (!kvm_guest_has_fpu(&vcpu->arch)) { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + return RESUME_GUEST; + } + /* * If guest FPU not present, the FPU operation should have been * treated as a reserved instruction! @@ -650,6 +655,21 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use LSX when it is disabled in the root + * context. + */ +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lsx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + /* * LoongArch KVM callback handling for unimplemented guest exiting */ @@ -678,6 +698,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_TLBS] = kvm_handle_write_fault, [EXCCODE_TLBM] = kvm_handle_write_fault, [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, + [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, }; diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 0ed9040307b7..00fbf772d16f 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -245,6 +245,22 @@ SYM_FUNC_START(kvm_restore_fpu) jr ra SYM_FUNC_END(kvm_restore_fpu) +#ifdef CONFIG_CPU_HAS_LSX +SYM_FUNC_START(kvm_save_lsx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lsx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lsx) + +SYM_FUNC_START(kvm_restore_lsx) + lsx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lsx) +#endif + .section ".rodata" SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index a1e35d655418..7da4e230e896 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -102,6 +102,7 @@ TRACE_EVENT(kvm_exit_gspr, #define KVM_TRACE_AUX_DISCARD 4 #define KVM_TRACE_AUX_FPU 1 +#define KVM_TRACE_AUX_LSX 2 #define kvm_trace_symbol_aux_op \ { KVM_TRACE_AUX_SAVE, "save" }, \ @@ -111,7 +112,8 @@ TRACE_EVENT(kvm_exit_gspr, { KVM_TRACE_AUX_DISCARD, "discard" } #define kvm_trace_symbol_aux_state \ - { KVM_TRACE_AUX_FPU, "FPU" } + { KVM_TRACE_AUX_FPU, "FPU" }, \ + { KVM_TRACE_AUX_LSX, "LSX" } TRACE_EVENT(kvm_aux, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 53fcef8b24a1..80487d177ca4 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -298,6 +298,69 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) return ret; } +static int _kvm_get_cpucfg(int id, u64 *v) +{ + int ret = 0; + + if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) + return -EINVAL; + + switch (id) { + case 2: + /* Return CPUCFG2 features which have been supported by KVM */ + *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | + CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | + CPUCFG2_LAM; + /* + * If LSX is supported by CPU, it is also supported by KVM, + * as we implement it. 
+ */ + if (cpu_has_lsx) + *v |= CPUCFG2_LSX; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int kvm_check_cpucfg(int id, u64 val) +{ + u64 mask; + int ret = 0; + + if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) + return -EINVAL; + + if (_kvm_get_cpucfg(id, &mask)) + return ret; + + switch (id) { + case 2: + /* CPUCFG2 features checking */ + if (val & ~mask) + /* The unsupported features should not be set */ + ret = -EINVAL; + else if (!(val & CPUCFG2_LLFTP)) + /* The LLFTP must be set, as guest must has a constant timer */ + ret = -EINVAL; + else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) + /* Single and double float point must both be set when enable FP */ + ret = -EINVAL; + else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) + /* FP should be set when enable LSX */ + ret = -EINVAL; + else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) + /* LSX, FP should be set when enable LASX, and FP has been checked before. */ + ret = -EINVAL; + break; + default: + break; + } + return ret; +} + static int kvm_get_one_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, u64 *v) { @@ -367,10 +430,10 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, break; case KVM_REG_LOONGARCH_CPUCFG: id = KVM_GET_IOC_CPUCFG_IDX(reg->id); - if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) - vcpu->arch.cpucfg[id] = (u32)v; - else - ret = -EINVAL; + ret = kvm_check_cpucfg(id, v); + if (ret) + break; + vcpu->arch.cpucfg[id] = (u32)v; break; case KVM_REG_LOONGARCH_KVM: switch (reg->id) { @@ -460,10 +523,94 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return -EINVAL; } +static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case 2: + return 0; + default: + return -ENXIO; + } + + return -ENXIO; +} + +static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + case KVM_LOONGARCH_VCPU_CPUCFG: + ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); + break; + default: + break; + } + + return ret; +} + +static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = 0; + uint64_t val; + uint64_t __user *uaddr = (uint64_t __user *)attr->addr; + + ret = _kvm_get_cpucfg(attr->attr, &val); + if (ret) + return ret; + + put_user(val, uaddr); + + return ret; +} + +static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + case KVM_LOONGARCH_VCPU_CPUCFG: + ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr); + break; + default: + break; + } + + return ret; +} + +static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + case KVM_LOONGARCH_VCPU_CPUCFG: + ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); + break; + default: + break; + } + + return ret; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r; + struct kvm_device_attr attr; void __user *argp = (void __user *)arg; struct kvm_vcpu *vcpu = filp->private_data; @@ -503,6 +650,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } + case KVM_HAS_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r 
= kvm_loongarch_vcpu_has_attr(vcpu, &attr); + break; + } + case KVM_GET_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_get_attr(vcpu, &attr); + break; + } + case KVM_SET_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_set_attr(vcpu, &attr); + break; + } default: r = -ENOIOCTLCMD; break; @@ -550,12 +718,54 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) preempt_enable(); } +#ifdef CONFIG_CPU_HAS_LSX +/* Enable LSX and restore context */ +int kvm_own_lsx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + /* Enable LSX for guest */ + set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); + switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + case KVM_LARCH_FPU: + /* + * Guest FPU state already loaded, + * only restore upper LSX state + */ + _restore_lsx_upper(&vcpu->arch.fpu); + break; + default: + /* Neither FP or LSX already active, + * restore full LSX state + */ + kvm_restore_lsx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); + vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + /* Save context and disable FPU */ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { + kvm_save_lsx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); + + /* Disable LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { kvm_save_fpu(&vcpu->arch.fpu); vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); -- Gitee From f8c5596b5d92c54ddb0c1399a317f954972c7e46 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 188/953] LoongArch: KVM: Add LASX (256bit SIMD) support ANBZ: #8436 commit 118e10cd893d57df55b3302dfd188a981b6e6d1c upstream. This patch adds LASX (256bit SIMD) support for LoongArch KVM. There will be LASX exception in KVM when guest use the LASX instructions. KVM will enable LASX and restore the vector registers for guest and then return to guest to continue running. 
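The lazy-restore nesting this enables can be sketched as follows: the FP registers alias the low 64 bits of the LSX registers, which in turn alias the low 128 bits of the LASX registers, so only state that is not already live needs restoring. The flag names here stand in for the KVM_LARCH_* bits and are illustrative only::

    #include <stdio.h>

    #define ST_FPU (1 << 0) /* stand-in for KVM_LARCH_FPU */
    #define ST_LSX (1 << 1) /* stand-in for KVM_LARCH_LSX */

    static const char *lasx_restore_path(unsigned int aux_inuse)
    {
        switch (aux_inuse & (ST_FPU | ST_LSX)) {
        case ST_LSX:
        case ST_LSX | ST_FPU:
            return "LSX live: restore only the LASX upper halves";
        case ST_FPU:
            return "FP live: restore the LSX and LASX upper halves";
        default:
            return "nothing live: restore the full 256-bit LASX state";
        }
    }

    int main(void)
    {
        printf("%s\n", lasx_restore_path(ST_FPU));
        return 0;
    }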
Reviewed-by: Bibo Mao Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_host.h | 10 ++++-- arch/loongarch/include/asm/kvm_vcpu.h | 10 ++++++ arch/loongarch/kernel/fpu.S | 1 + arch/loongarch/kvm/exit.c | 16 +++++++++ arch/loongarch/kvm/switch.S | 15 ++++++++ arch/loongarch/kvm/trace.h | 4 ++- arch/loongarch/kvm/vcpu.c | 51 ++++++++++++++++++++++++++- 7 files changed, 103 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index b0c5cdd8014c..5bdb34b2c5d6 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -96,8 +96,9 @@ enum emulation_result { #define KVM_LARCH_FPU (0x1 << 0) #define KVM_LARCH_LSX (0x1 << 1) -#define KVM_LARCH_SWCSR_LATEST (0x1 << 2) -#define KVM_LARCH_HWCSR_USABLE (0x1 << 3) +#define KVM_LARCH_LASX (0x1 << 2) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 3) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 4) struct kvm_vcpu_arch { /* @@ -189,6 +190,11 @@ static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch) return arch->cpucfg[2] & CPUCFG2_LSX; } +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LASX; +} + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index db08dd46b525..e71ceb88f29e 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -65,6 +65,16 @@ static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } #endif +#ifdef CONFIG_CPU_HAS_LASX +int kvm_own_lasx(struct kvm_vcpu *vcpu); +void kvm_save_lasx(struct loongarch_fpu *fpu); +void kvm_restore_lasx(struct loongarch_fpu *fpu); +#else +static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { } +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } +#endif + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index a400924c0348..4382e36ae3d4 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -385,6 +385,7 @@ SYM_FUNC_START(_restore_lasx_upper) lasx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lasx_upper) +EXPORT_SYMBOL(_restore_lasx_upper) SYM_FUNC_START(_init_lasx_upper) lasx_init_all_upper t1 diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 676f7a3a335c..ed1d89d53e2e 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -670,6 +670,21 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* + * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use LASX when it is disabled in the root + * context. 
+ */ +static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lasx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + /* * LoongArch KVM callback handling for unimplemented guest exiting */ @@ -699,6 +714,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_TLBM] = kvm_handle_write_fault, [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, + [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, }; diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 00fbf772d16f..ba976509bfe8 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -261,6 +261,21 @@ SYM_FUNC_START(kvm_restore_lsx) SYM_FUNC_END(kvm_restore_lsx) #endif +#ifdef CONFIG_CPU_HAS_LASX +SYM_FUNC_START(kvm_save_lasx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lasx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lasx) + +SYM_FUNC_START(kvm_restore_lasx) + lasx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lasx) +#endif .section ".rodata" SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index 7da4e230e896..c2484ad4cffa 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -103,6 +103,7 @@ TRACE_EVENT(kvm_exit_gspr, #define KVM_TRACE_AUX_FPU 1 #define KVM_TRACE_AUX_LSX 2 +#define KVM_TRACE_AUX_LASX 3 #define kvm_trace_symbol_aux_op \ { KVM_TRACE_AUX_SAVE, "save" }, \ @@ -113,7 +114,8 @@ TRACE_EVENT(kvm_exit_gspr, #define kvm_trace_symbol_aux_state \ { KVM_TRACE_AUX_FPU, "FPU" }, \ - { KVM_TRACE_AUX_LSX, "LSX" } + { KVM_TRACE_AUX_LSX, "LSX" }, \ + { KVM_TRACE_AUX_LASX, "LASX" } TRACE_EVENT(kvm_aux, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 80487d177ca4..27701991886d 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -317,6 +317,13 @@ static int _kvm_get_cpucfg(int id, u64 *v) */ if (cpu_has_lsx) *v |= CPUCFG2_LSX; + /* + * if LASX is supported by CPU, it is also supported by KVM, + * as we implement it. 
+ */ + if (cpu_has_lasx) + *v |= CPUCFG2_LASX; + break; default: ret = -EINVAL; @@ -753,12 +760,54 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) } #endif +#ifdef CONFIG_CPU_HAS_LASX +/* Enable LASX and restore context */ +int kvm_own_lasx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { + case KVM_LARCH_LSX: + case KVM_LARCH_LSX | KVM_LARCH_FPU: + /* Guest LSX state already loaded, only restore upper LASX state */ + _restore_lasx_upper(&vcpu->arch.fpu); + break; + case KVM_LARCH_FPU: + /* Guest FP state already loaded, only restore upper LSX & LASX state */ + _restore_lsx_upper(&vcpu->arch.fpu); + _restore_lasx_upper(&vcpu->arch.fpu); + break; + default: + /* Neither FP or LSX already active, restore full LASX state */ + kvm_restore_lasx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); + vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + /* Save context and disable FPU */ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { + if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { + kvm_save_lasx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); + + /* Disable LASX & LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { kvm_save_lsx(&vcpu->arch.fpu); vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); -- Gitee From 820c992e57fa63ee218b5d9b512c86387b0e17d0 Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Fri, 23 Feb 2024 14:36:31 +0800 Subject: [PATCH 189/953] LoongArch: KVM: Fix input validation of _kvm_get_cpucfg() & kvm_check_cpucfg() ANBZ: #8436 commit 179af5751af59100305358ee0ee51eec9a7f3953 upstream. The range check for the CPUCFG ID is wrong (should have been a || instead of &&) and useless in effect, so fix the obvious mistake. Furthermore, the juggling of the temp return value is unnecessary, because it is semantically equivalent and more readable to just return at every switch case's end. This is done too to avoid potential bugs in the future related to the unwanted complexity. Also, the return value of _kvm_get_cpucfg is meant to be checked, but this was not done, so bad CPUCFG IDs wrongly fall back to the default case and 0 is incorrectly returned; check the return value to fix the UAPI behavior. While at it, also remove the redundant range check in kvm_check_cpucfg, because out-of-range CPUCFG IDs are already rejected by the -EINVAL as returned by _kvm_get_cpucfg(). 
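The vacuous range check is easy to demonstrate in isolation; a standalone sketch::

    #include <assert.h>

    /* Old guard: no id is both negative and >= max, so this is always false. */
    static int bad_range_check(int id, int max)  { return id < 0 && id >= max; }
    /* Fixed guard: rejects both underflow and overflow. */
    static int good_range_check(int id, int max) { return id < 0 || id >= max; }

    int main(void)
    {
        assert(!bad_range_check(-1, 4) && !bad_range_check(99, 4)); /* bug: nothing rejected */
        assert(good_range_check(-1, 4) && good_range_check(99, 4)); /* both rejected */
        return 0;
    }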
Fixes: db1ecca22edf ("LoongArch: KVM: Add LSX (128bit SIMD) support") Fixes: 118e10cd893d ("LoongArch: KVM: Add LASX (256bit SIMD) support") Reviewed-by: Bibo Mao Signed-off-by: WANG Xuerui Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/vcpu.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 27701991886d..c8452aa5c11a 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -300,9 +300,7 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) static int _kvm_get_cpucfg(int id, u64 *v) { - int ret = 0; - - if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) + if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) return -EINVAL; switch (id) { @@ -324,32 +322,35 @@ static int _kvm_get_cpucfg(int id, u64 *v) if (cpu_has_lasx) *v |= CPUCFG2_LASX; - break; + return 0; default: - ret = -EINVAL; - break; + /* + * No restrictions on other valid CPUCFG IDs' values, but + * CPUCFG data is limited to 32 bits as the LoongArch ISA + * manual says (Volume 1, Section 2.2.10.5 "CPUCFG"). + */ + *v = U32_MAX; + return 0; } - return ret; } static int kvm_check_cpucfg(int id, u64 val) { - u64 mask; - int ret = 0; - - if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) - return -EINVAL; + int ret; + u64 mask = 0; - if (_kvm_get_cpucfg(id, &mask)) + ret = _kvm_get_cpucfg(id, &mask); + if (ret) return ret; + if (val & ~mask) + /* Unsupported features and/or the higher 32 bits should not be set */ + return -EINVAL; + switch (id) { case 2: /* CPUCFG2 features checking */ - if (val & ~mask) - /* The unsupported features should not be set */ - ret = -EINVAL; - else if (!(val & CPUCFG2_LLFTP)) + if (!(val & CPUCFG2_LLFTP)) /* The LLFTP must be set, as guest must has a constant timer */ ret = -EINVAL; else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) -- Gitee From 38bafee9b4198b471fb66cd7a5843a17f7e2b674 Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Fri, 23 Feb 2024 14:36:31 +0800 Subject: [PATCH 190/953] LoongArch: KVM: Rename _kvm_get_cpucfg() to _kvm_get_cpucfg_mask() ANBZ: #8436 commit ec83f39d2b078d6dd029bbde601835b5368fc886 upstream. The function is not actually a getter of guest CPUCFG, but rather validation of the input CPUCFG ID plus information about the supported bit flags of that CPUCFG leaf. So rename it to avoid confusion. 
Reviewed-by: Bibo Mao Signed-off-by: WANG Xuerui Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/vcpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index c8452aa5c11a..98c4290af9c4 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -298,7 +298,7 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) return ret; } -static int _kvm_get_cpucfg(int id, u64 *v) +static int _kvm_get_cpucfg_mask(int id, u64 *v) { if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) return -EINVAL; @@ -339,7 +339,7 @@ static int kvm_check_cpucfg(int id, u64 val) int ret; u64 mask = 0; - ret = _kvm_get_cpucfg(id, &mask); + ret = _kvm_get_cpucfg_mask(id, &mask); if (ret) return ret; @@ -567,7 +567,7 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, uint64_t val; uint64_t __user *uaddr = (uint64_t __user *)attr->addr; - ret = _kvm_get_cpucfg(attr->attr, &val); + ret = _kvm_get_cpucfg_mask(attr->attr, &val); if (ret) return ret; -- Gitee From 07922d0010f428fb713c90ea480659b16df4c834 Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Fri, 23 Feb 2024 14:36:31 +0800 Subject: [PATCH 191/953] LoongArch: KVM: Streamline kvm_check_cpucfg() and improve comments ANBZ: #8436 commit f0f5c4894f89bac9074b45bccc447c3659a0fa6f upstream. All the checks currently done in kvm_check_cpucfg can be realized with early returns, so just do that to avoid extra cognitive burden related to the return value handling. While at it, clean up comments of _kvm_get_cpucfg_mask() and kvm_check_cpucfg(), by removing comments that are merely restatement of the code nearby, and paraphrasing the rest so they read more natural for English speakers (that likely are not familiar with the actual Chinese- influenced grammar). No functional changes intended. Reviewed-by: Bibo Mao Signed-off-by: WANG Xuerui Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/vcpu.c | 42 +++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 98c4290af9c4..36106922b5d7 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -305,20 +305,16 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) switch (id) { case 2: - /* Return CPUCFG2 features which have been supported by KVM */ + /* CPUCFG2 features unconditionally supported by KVM */ *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | CPUCFG2_LAM; /* - * If LSX is supported by CPU, it is also supported by KVM, - * as we implement it. + * For the ISA extensions listed below, if one is supported + * by the host, then it is also supported by KVM. */ if (cpu_has_lsx) *v |= CPUCFG2_LSX; - /* - * if LASX is supported by CPU, it is also supported by KVM, - * as we implement it. 
- */ - if (cpu_has_lasx) *v |= CPUCFG2_LASX; @@ -349,24 +345,26 @@ static int kvm_check_cpucfg(int id, u64 val) switch (id) { case 2: - if (!(val & CPUCFG2_LLFTP)) - /* Guests must have a constant timer */ - return -EINVAL; - if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) - /* Single and double float point must both be set when FP is enabled */ - return -EINVAL; - if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) - /* LSX architecturally implies FP but val does not satisfy that */ - return -EINVAL; - if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) - /* LASX architecturally implies LSX and FP but val does not satisfy that */ - return -EINVAL; - return 0; + if (!(val & CPUCFG2_LLFTP)) + /* Guests must have a constant timer */ + return -EINVAL; + if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) + /* Single and double float point must both be set when FP is enabled */ + return -EINVAL; + if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) + /* LSX architecturally implies FP but val does not satisfy that */ + return -EINVAL; + if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) + /* LASX architecturally implies LSX and FP but val does not satisfy that */ + return -EINVAL; + return 0; default: - break; + /* + * Values for the other CPUCFG IDs are not being further validated + * besides the mask check above. + */ + return 0; } - } -- Gitee From db3110b3ae5839741a9ff5151544fa8b3812da84 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 26 Jan 2024 16:22:07 +0800 Subject: [PATCH 192/953] LoongArch: KVM: Add returns to SIMD stubs ANBZ: #8436 commit 48ef9e87b407f89f230f804815af7ac2031ec17a upstream. The stubs for kvm_own_lsx()/kvm_own_lasx() when CONFIG_CPU_HAS_LSX or CONFIG_CPU_HAS_LASX is not defined should have a return value since they return an int, so add "return -EINVAL;" to the stubs.
Fixes the build error: In file included from ../arch/loongarch/include/asm/kvm_csr.h:12, from ../arch/loongarch/kvm/interrupt.c:8: ../arch/loongarch/include/asm/kvm_vcpu.h: In function 'kvm_own_lasx': ../arch/loongarch/include/asm/kvm_vcpu.h:73:39: error: no return statement in function returning non-void [-Werror=return-type] 73 | static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { } Fixes: db1ecca22edf ("LoongArch: KVM: Add LSX (128bit SIMD) support") Fixes: 118e10cd893d ("LoongArch: KVM: Add LASX (256bit SIMD) support") Signed-off-by: Randy Dunlap Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_vcpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index e71ceb88f29e..0cb4fdb8a9b5 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -60,7 +60,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu); void kvm_save_lsx(struct loongarch_fpu *fpu); void kvm_restore_lsx(struct loongarch_fpu *fpu); #else -static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { } +static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; } static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } #endif @@ -70,7 +70,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu); void kvm_save_lasx(struct loongarch_fpu *fpu); void kvm_restore_lasx(struct loongarch_fpu *fpu); #else -static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { } +static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; } static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } #endif -- Gitee From e2616271321e885e9d05f66bc2e040cd91716a60 Mon Sep 17 00:00:00 2001 From: zhangtianyang Date: Sat, 2 Mar 2024 11:48:22 +0800 Subject: [PATCH 193/953] anolis: LoongArch: Adapted SECTION_SIZE_BITS with page size ANBZ: #8435 Signed-off-by: zhangtianyang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/include/asm/sparsemem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h index 8d4af6aff8a8..1f331ee584ef 100644 --- a/arch/loongarch/include/asm/sparsemem.h +++ b/arch/loongarch/include/asm/sparsemem.h @@ -8,7 +8,7 @@ * SECTION_SIZE_BITS 2^N: how big each section will be * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space */ -#define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */ +#define SECTION_SIZE_BITS 28 #define MAX_PHYSMEM_BITS 48 #ifdef CONFIG_SPARSEMEM_VMEMMAP -- Gitee From 96210845ad91685a1563a23921b2bd03e1bc37f1 Mon Sep 17 00:00:00 2001 From: Jianmin Lv Date: Sat, 2 Mar 2024 11:48:23 +0800 Subject: [PATCH 194/953] anolis: LoongArch: Remove generic irq migration ANBZ: #8435 Signed-off-by: Jianmin Lv Signed-off-by: Juxin Gao Signed-off-by: Ming Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/Kconfig | 1 - arch/loongarch/include/asm/irq.h | 1 + arch/loongarch/kernel/irq.c | 36 ++++++++++++++++++++++++++++++++ arch/loongarch/kernel/smp.c | 3 +-- kernel/irq/Kconfig | 4 ++-- 5 files changed, 40 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 54a169dee80f..bad326ae58f2 100644 
--- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -428,7 +428,6 @@ config SMP config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP - select GENERIC_IRQ_MIGRATION help Say Y here to allow turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 722eb1aa726f..ed8e72db0dba 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -119,6 +119,7 @@ extern struct fwnode_handle *pch_lpc_handle; extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); +extern void fixup_irqs(void); #include diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 22562bb6fdc4..a1c1d9576393 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -96,6 +96,42 @@ static int __init get_ipi_irq(void) return -EINVAL; } +#ifdef CONFIG_HOTPLUG_CPU +static void handle_irq_affinity(void) +{ + struct irq_desc *desc; + struct irq_chip *chip; + unsigned int irq; + unsigned long flags; + struct cpumask *affinity; + + for_each_active_irq(irq) { + desc = irq_to_desc(irq); + if (!desc) + continue; + + raw_spin_lock_irqsave(&desc->lock, flags); + + affinity = desc->irq_data.common->affinity; + if (!cpumask_intersects(affinity, cpu_online_mask)) + cpumask_copy(affinity, cpu_online_mask); + + chip = irq_data_get_irq_chip(&desc->irq_data); + if (chip && chip->irq_set_affinity) + chip->irq_set_affinity(&desc->irq_data, + desc->irq_data.common->affinity, true); + raw_spin_unlock_irqrestore(&desc->lock, flags); + } +} + +void fixup_irqs(void) +{ + handle_irq_affinity(); + irq_cpu_offline(); + clear_csr_ecfg(ECFG0_IM); +} +#endif + void __init init_IRQ(void) { int i, ret; diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index a6f3403d20b5..5fbdd172a248 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -372,8 +372,7 @@ int loongson_cpu_disable(void) clear_cpu_sibling_map(cpu); calculate_cpu_foreign_map(); local_irq_save(flags); - irq_migrate_all_off_this_cpu(); - clear_csr_ecfg(ECFG0_IM); + fixup_irqs(); local_irq_restore(flags); local_flush_tlb_all(); diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 2531f3496ab6..8a65b0c34b5a 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -148,5 +148,5 @@ config GENERIC_IRQ_MULTI_HANDLER # Do not even think of enabling this on any new platform config DEPRECATED_IRQ_CPU_ONOFFLINE bool - depends on CAVIUM_OCTEON_SOC - default CAVIUM_OCTEON_SOC + depends on CAVIUM_OCTEON_SOC || LOONGARCH + default CAVIUM_OCTEON_SOC || LOONGARCH -- Gitee From bf15f2a3f35aa47055411710fa9028ff92b54213 Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Sat, 2 Mar 2024 11:48:24 +0800 Subject: [PATCH 195/953] anolis: irqchip/loongson-pch-pic: 7a1000 int_clear reg must use 64bit write. 
ANBZ: #8435

Signed-off-by: Chong Qiao
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 drivers/irqchip/irq-loongson-pch-pic.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 372215f2b9ed..3b150b6121fc 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -33,6 +33,10 @@
 #define PIC_COUNT		(PIC_COUNT_PER_REG * PIC_REG_COUNT)
 #define PIC_REG_IDX(irq_id)	((irq_id) / PIC_COUNT_PER_REG)
 #define PIC_REG_BIT(irq_id)	((irq_id) % PIC_COUNT_PER_REG)
+#define PIC_COUNT_PER_REG64	64
+#define PIC_REG64_COUNT		1
+#define PIC_REG64_IDX(irq_id)	((irq_id) / PIC_COUNT_PER_REG64)
+#define PIC_REG64_BIT(irq_id)	((irq_id) % PIC_COUNT_PER_REG64)

 static int nr_pics;

@@ -93,8 +97,8 @@ static void pch_pic_unmask_irq(struct irq_data *d)
 {
 	struct pch_pic *priv = irq_data_get_irq_chip_data(d);

-	writel(BIT(PIC_REG_BIT(d->hwirq)),
-			priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+	writeq(BIT(PIC_REG64_BIT(d->hwirq)),
+			priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8);

 	irq_chip_unmask_parent(d);
 	pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq);
@@ -141,8 +145,8 @@ static void pch_pic_ack_irq(struct irq_data *d)

 	reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
 	if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
-		writel(BIT(PIC_REG_BIT(d->hwirq)),
-			priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
+		writeq(BIT(PIC_REG64_BIT(d->hwirq)),
+			priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8);
 	}
 	irq_chip_ack_parent(d);
 }
@@ -235,13 +239,15 @@ static void pch_pic_reset(struct pch_pic *priv)
 	for (i = 0; i < PIC_REG_COUNT; i++) {
 		/* Clear IRQ cause registers, mask all interrupts */
 		writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i);
-		writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i);
 		/* Clear auto bounce, we don't need that */
 		writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i);
 		writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i);
 		/* Enable HTMSI transformer */
 		writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i);
 	}
+
+	for (i = 0; i < PIC_REG64_COUNT; i++)
+		writeq_relaxed((u64)-1, priv->base + PCH_PIC_CLR + 8 * i);
 }

 static int pch_pic_suspend(void)
--
Gitee

From beda9aa1f7bba0f0a68d64565c1b97917da3e3d6 Mon Sep 17 00:00:00 2001
From: Ming Wang
Date: Mon, 4 Mar 2024 13:57:13 +0800
Subject: [PATCH 196/953] anolis: LoongArch: kdump: Add memory reservation for old kernel

ANBZ: #8435

After moving the old kernel memory reservation into the elfcorehdr handling, prevent the elfcorehdr region from being destroyed.

Signed-off-by: Youling Tang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 arch/loongarch/kernel/setup.c | 45 +++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 10 deletions(-)

diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 8af027e89ad5..837feea45413 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -69,6 +69,8 @@ EXPORT_SYMBOL(cpu_data);
 struct loongson_board_info b_info;
 static const char dmi_empty_string[] = " ";

+static phys_addr_t crashmem_start, crashmem_size;
+
 /*
  * Setup information
  *
@@ -206,16 +208,6 @@ static int __init early_parse_mem(char *p)
 		return -EINVAL;
 	}

-	/*
-	 * If a user specifies memory size, we
-	 * blow away any automatically generated
-	 * size.
-	 */
-	if (usermem == 0) {
-		usermem = 1;
-		memblock_remove(memblock_start_of_DRAM(),
-				memblock_end_of_DRAM() - memblock_start_of_DRAM());
-	}
 	start = 0;
 	size = memparse(p, &p);
 	if (*p == '@')
@@ -225,6 +217,23 @@
 		return -EINVAL;
 	}

+	/*
+	 * If a user specifies memory size, we
+	 * blow away any automatically generated
+	 * size.
+	 */
+	if (usermem == 0) {
+		usermem = 1;
+		if (!strstr(boot_command_line, "elfcorehdr")) {
+			memblock_remove(memblock_start_of_DRAM(),
+					memblock_end_of_DRAM() - memblock_start_of_DRAM());
+		} else {
+			crashmem_start = start;
+			crashmem_size = size;
+			return 0;
+		}
+	}
+
 	if (!IS_ENABLED(CONFIG_NUMA))
 		memblock_add(start, size);
 	else
@@ -369,10 +378,26 @@ static void __init bootcmdline_init(char **cmdline_p)
 	*cmdline_p = boot_command_line;
 }

+/*
+ * After the kdump operation is performed to enter the capture kernel, the
+ * memory area used by the previous production kernel should be reserved to
+ * avoid destroy to the captured data.
+ */
+static void reserve_oldmem_region(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (!is_kdump_kernel())
+		return;
+
+	memblock_cap_memory_range(crashmem_start, crashmem_size);
+#endif
+}
+
 void __init platform_init(void)
 {
 	arch_reserve_vmcore();
 	arch_parse_crashkernel();
+	reserve_oldmem_region();

 #ifdef CONFIG_ACPI_TABLE_UPGRADE
 	acpi_table_upgrade();
--
Gitee

From 4237fa66df9f334bcea9bee5838e44ebd481e428 Mon Sep 17 00:00:00 2001
From: Ming Wang
Date: Mon, 4 Mar 2024 13:51:10 +0800
Subject: [PATCH 197/953] anolis: LoongArch: kexec: Add compatibility with old interfaces

ANBZ: #8435

Old interface: a0 = argc,     a1 = argv,        a2 = bootparam
New interface: a0 = efi flag, a1 = cmdline,     a2 = systemtab

The following interface is not supported:
               a0 = efi flag, a1 = fdt pointer, a2 = 0

Signed-off-by: Youling Tang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 arch/loongarch/kernel/machine_kexec.c | 45 +++++++++++++++++++++++++--
 1 file changed, 43 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index 2dcb9e003657..561706cb1e6d 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -59,6 +59,9 @@ static void kexec_image_info(const struct kimage *kimage)
 	}
 }

+#define MAX_ARGS	64
+#define KEXEC_CMDLINE_SIZE	(COMMAND_LINE_SIZE * 2)
+
 int machine_kexec_prepare(struct kimage *kimage)
 {
 	int i;
@@ -70,11 +73,49 @@ int machine_kexec_prepare(struct kimage *kimage)
 	kimage->arch.efi_boot = fw_arg0;
 	kimage->arch.systable_ptr = fw_arg2;

+	if (!fw_arg2)
+		pr_err("Small fdt mode is not supported!\n");
+
 	/* Find the command line */
 	for (i = 0; i < kimage->nr_segments; i++) {
 		if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
-			if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
-				kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
+			if (fw_arg0 < 2) {
+				/* New firmware */
+				if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
+					kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
+			} else {
+				/* Old firmware */
+				int argc = 0;
+				long offt;
+				char *ptr, *str;
+				unsigned long *argv;
+
+				/*
+				 * convert command line string to array
+				 * of parameters (as bootloader does).
+ */ + argv = (unsigned long *)kmalloc(KEXEC_CMDLINE_SIZE, GFP_KERNEL); + argv[argc++] = (unsigned long)(KEXEC_CMDLINE_ADDR + KEXEC_CMDLINE_SIZE/2); + str = (char *)argv + KEXEC_CMDLINE_SIZE/2; + + if (copy_from_user(str, kimage->segment[i].buf, KEXEC_CMDLINE_SIZE/2)) + return -EINVAL; + + ptr = strchr(str, ' '); + + while (ptr && (argc < MAX_ARGS)) { + *ptr = '\0'; + if (ptr[1] != ' ') { + offt = (long)(ptr - str + 1); + argv[argc++] = (unsigned long)argv + KEXEC_CMDLINE_SIZE/2 + offt; + } + ptr = strchr(ptr + 1, ' '); + } + + kimage->arch.efi_boot = argc; + kimage->arch.cmdline_ptr = (unsigned long)argv; + break; + } break; } } -- Gitee From c9a5527a4071c59723e99fbfec02ff980352d2f5 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Mon, 4 Mar 2024 13:51:45 +0800 Subject: [PATCH 198/953] anolis: LoongArch: Fix kdump failure on v40 interface specification ANBZ: #8435 The old memory should be reserved after efi_runtime_init() to avoid destroying the EFI space and causing failure when executing svam(). Fix the following problems when executing kdump: [ 0.000000] The BIOS Version: Loongson-UDK2018-V2.0.04082-beta7 [ 0.000000] CPU 0 Unable to handle kernel paging request at virtual address 00000000fdeb0e7c, era == 00000000fdeb0e7c, ra == 90000000dae6585c [ 0.000000] Oops[#1]: [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 5.10.137+ #86 [ 0.000000] Hardware name: Loongson Loongson-3A5000-7A1000-1w-A2101/Loongson-LS3A5000-7A1000-1w-A2101, BIOS vUDK2018-LoongArch-V2.0.pre-beta8 06/15/2022 [ 0.000000] $ 0 : 0000000000000000 90000000dae6585c 90000000db200000 90000000db203840 [ 0.000000] $ 4 : 0000000000000078 0000000000000028 0000000000000001 00000000db203860 [ 0.000000] $ 8 : 0000000000000000 0000000000000040 90000000db203680 0000000000000000 [ 0.000000] $12 : 00000000fdeb0e7c ffffffffffffffc0 00000000fbffffff 0000000020000000 [ 0.000000] $16 : 000000000003e780 0000000020000000 90000000dad8c348 0000000000003fff [ 0.000000] $20 : 0000000000000018 90000000dad8bdd0 90000000db203850 0000000000000040 [ 0.000000] $24 : 000000000000000f 90000000db21a570 90000000daeb07a0 90000000db217000 [ 0.000000] $28 : 90000000db203858 0000000001ffffff 90000000db2171b0 0000000000000040 [ 0.000000] era : 00000000fdeb0e7c 0xfdeb0e7c [ 0.000000] ra : 90000000dae6585c set_virtual_map.isra.0+0x23c/0x394 [ 0.000000] CSR crmd: 90000000db21a570 [ 0.000000] CSR prmd: 00000000 [ 0.000000] CSR euen: 00000000 [ 0.000000] CSR ecfg: 90000000db203850 [ 0.000000] CSR estat: 90000000dae65800 [ 0.000000] ExcCode : 26 (SubCode 16b) [ 0.000000] PrId : 0014c012 (Loongson-64bit) [ 0.000000] Modules linked in: [ 0.000000] Process swapper (pid: 0, threadinfo=(____ptrval____), task=(____ptrval____)) [ 0.000000] Stack : 0000000000000001 00000000fdeb0e7c 0000000000036780 000000000003e780 [ 0.000000] 0000000000000006 0000000010000000 8000000010000000 0000000000010000 [ 0.000000] 8000000000000001 0000000000000005 00000000fde40000 90000000fde40000 [ 0.000000] 0000000000000100 800000000000000f 0000000000000006 00000000fdf40000 [ 0.000000] 90000000fdf40000 0000000000000300 800000000000000f 00000000000000b0 [ 0.000000] 0000000000000001 90000000da094cf0 0000000000000000 ffffffffffffffea [ 0.000000] 90000000db2039b8 ffff0a1000000609 0000000000000035 0000000000000030 [ 0.000000] 90000000dad7b258 0000000000000400 00000000000000b0 ffff0a1000000609 [ 0.000000] 90000000db2039a8 90000000db095730 000000007fffffff ffff0a1000000609 [ 0.000000] 90000000db203a90 90000000db203a30 90000000db2039d8 90000000db09570b [ 0.000000] ... 
[ 0.000000] Call Trace: [ 0.000000] [ 0.000000] Code: (Bad address in era) [ 0.000000] [ 0.000000] Signed-off-by: Youling Tang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/kernel/setup.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 837feea45413..14d885821533 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -395,10 +395,6 @@ static void reserve_oldmem_region(void) void __init platform_init(void) { - arch_reserve_vmcore(); - arch_parse_crashkernel(); - reserve_oldmem_region(); - #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); #endif @@ -436,6 +432,10 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { + arch_reserve_vmcore(); + arch_parse_crashkernel(); + reserve_oldmem_region(); + if (usermem) pr_info("User-defined physical RAM map overwrite\n"); -- Gitee From 63601a110e3c57cea9d460d5a1800593c8fdda01 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Mon, 4 Mar 2024 13:52:31 +0800 Subject: [PATCH 199/953] anolis: LoongArch: kdump: Add high memory reservation ANBZ: #8435 Reserve high memory for the capture kernel to avoid kdump operation failure on 3C5000 machines with old firmware. Signed-off-by: Youling Tang Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/kernel/setup.c | 49 ++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 14d885821533..4bd592c5019d 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -383,13 +383,49 @@ static void __init bootcmdline_init(char **cmdline_p) * memory area used by the previous production kernel should be reserved to * avoid destroy to the captured data. */ -static void reserve_oldmem_region(void) +static void reserve_oldmem_region(int node, unsigned long s0, unsigned long e0) { #ifdef CONFIG_CRASH_DUMP + unsigned long s1, e1; + if (!is_kdump_kernel()) return; - memblock_cap_memory_range(crashmem_start, crashmem_size); + if ((e0 - s0) > (SZ_1G >> PAGE_SHIFT)) + e0 = e0 - (SZ_512M >> PAGE_SHIFT); + + /* crashmem_start is crashk_res reserved by primary production kernel */ + s1 = PFN_UP(crashmem_start); + e1 = PFN_DOWN(crashmem_start + crashmem_size); + + if (s1 == 0) + return; + + if (node == 0) { + memblock_reserve(PFN_PHYS(s0), (s1 - s0) << PAGE_SHIFT); + memblock_reserve(PFN_PHYS(e1), (e0 - e1) << PAGE_SHIFT); + } else { + memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT); + } +#endif +} + +/* Traditionally, LoongArch's contiguous low memory is 256M, so crashkernel=X@Y is + * unable to be large enough in some cases. 
Thus, if the total memory of a node
+ * is more than 1GB, we reserve the top 512MB for the capture kernel
+ */
+static void reserve_crashm_region(int node, unsigned long s0, unsigned long e0)
+{
+#ifdef CONFIG_KEXEC
+	if (crashk_res.start == crashk_res.end)
+		return;
+
+	if ((e0 - s0) <= (SZ_1G >> PAGE_SHIFT))
+		return;
+
+	s0 = e0 - (SZ_512M >> PAGE_SHIFT);
+
+	memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT);
 #endif
 }

@@ -432,9 +468,16 @@ static void __init check_kernel_sections_mem(void)
  */
 static void __init arch_mem_init(char **cmdline_p)
 {
+	unsigned int node;
+	unsigned long start_pfn, end_pfn;
+
 	arch_reserve_vmcore();
 	arch_parse_crashkernel();
-	reserve_oldmem_region();
+	for_each_online_node(node) {
+		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+		reserve_crashm_region(node, start_pfn, end_pfn);
+		reserve_oldmem_region(node, start_pfn, end_pfn);
+	}

 	if (usermem)
 		pr_info("User-defined physical RAM map overwrite\n");
--
Gitee

From 48a2dc079baf9cb997fa43c942f515b826b22d34 Mon Sep 17 00:00:00 2001
From: Tianli Xiong
Date: Mon, 4 Mar 2024 13:52:57 +0800
Subject: [PATCH 200/953] anolis: irqchip/loongson-liointc: Set different isr for different core

ANBZ: #8435

Signed-off-by: Tianli Xiong
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 drivers/irqchip/irq-loongson-liointc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index e4b33aed1c97..0262cbefe9dd 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -28,7 +28,7 @@

 #define LIOINTC_INTC_CHIP_START	0x20

-#define LIOINTC_REG_INTC_STATUS	(LIOINTC_INTC_CHIP_START + 0x20)
+#define LIOINTC_REG_INTC_STATUS(cpuid)	(LIOINTC_INTC_CHIP_START + 0x20 + (cpuid) * 8)
 #define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
 #define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
 #define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
@@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
 		goto out_free_priv;

 	for (i = 0; i < LIOINTC_NUM_CORES; i++)
-		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
+		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);

 	for (i = 0; i < LIOINTC_NUM_PARENT; i++)
 		priv->handler[i].parent_int_map = parent_int_map[i];
--
Gitee

From 9bd8001239f0e9046cd0a2f67a74cab98a980855 Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Sat, 2 Mar 2024 11:48:31 +0800
Subject: [PATCH 201/953] anolis: drm/radeon: Workaround radeon driver bug for Loongson

ANBZ: #8435

The Radeon driver cannot handle interrupts that arrive faster than the DMA data, so the IRQ handler must write the old ih.rptr value back to the IH_RB_RPTR register to re-enable the interrupt when that happens.
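The same three-line change is applied in all four IRQ processing paths touched below; a minimal sketch of the pattern, using identifiers the radeon driver already has (only the CONFIG_LOONGARCH-guarded write is new):

	rdev->ih.rptr = rptr;
#ifdef CONFIG_LOONGARCH
	/* Write the consumed read pointer back to the controller so that a
	 * wptr update racing with this pass still raises a new interrupt. */
	WREG32(IH_RB_RPTR, rptr);
#endif
	atomic_set(&rdev->ih.lock, 0);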
Signed-off-by: Huacai Chen
Signed-off-by: Zhijie Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 drivers/gpu/drm/radeon/cik.c       | 3 +++
 drivers/gpu/drm/radeon/evergreen.c | 3 +++
 drivers/gpu/drm/radeon/r600.c      | 3 +++
 drivers/gpu/drm/radeon/si.c        | 3 +++
 4 files changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 10be30366c2b..dd7c2481c94f 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8093,6 +8093,9 @@ int cik_irq_process(struct radeon_device *rdev)
 	if (queue_thermal)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
+#ifdef CONFIG_LOONGARCH
+	WREG32(IH_RB_RPTR, rptr);
+#endif
 	atomic_set(&rdev->ih.lock, 0);

 	/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f0ae087be914..84ce0e5fc72a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4922,6 +4922,9 @@ int evergreen_irq_process(struct radeon_device *rdev)
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
+#ifdef CONFIG_LOONGARCH
+	WREG32(IH_RB_RPTR, rptr);
+#endif
 	atomic_set(&rdev->ih.lock, 0);

 	/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a17b95eec65f..43c1fde01708 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4328,6 +4328,9 @@ int r600_irq_process(struct radeon_device *rdev)
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
+#ifdef CONFIG_LOONGARCH
+	WREG32(IH_RB_RPTR, rptr);
+#endif
 	atomic_set(&rdev->ih.lock, 0);

 	/* make sure wptr hasn't changed while processing */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 85e9cba49cec..32c8803431a1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6442,6 +6442,9 @@ int si_irq_process(struct radeon_device *rdev)
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
+#ifdef CONFIG_LOONGARCH
+	WREG32(IH_RB_RPTR, rptr);
+#endif
 	atomic_set(&rdev->ih.lock, 0);

 	/* make sure wptr hasn't changed while processing */
--
Gitee

From ba414056ed9fc75ff3aa5fe414f90846da376c02 Mon Sep 17 00:00:00 2001
From: Hongchen Zhang
Date: Sat, 2 Mar 2024 11:48:32 +0800
Subject: [PATCH 202/953] anolis: usb: xhci: add XHCI_NO_SOFT_RETRY quirk for EJ188

ANBZ: #8435

EJ188 has similar problems to those addressed by upstream commit a4a251f8c235 ("usb: xhci: do not perform Soft Retry for some xHCI hosts"), so add the XHCI_NO_SOFT_RETRY quirk for it as well.
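For reference, the quirk takes effect in the transfer-error path of xhci-ring.c; a rough sketch of that check, based on the upstream commit cited above (not part of this patch):

	case COMP_USB_TRANSACTION_ERROR:
		/* With XHCI_NO_SOFT_RETRY set, skip Soft Retry and fall back
		 * to the regular halted-endpoint recovery. */
		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
		    (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
			break;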
Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 drivers/usb/host/xhci-pci.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d6fc08e5db8f..0a0a4702ef26 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -36,6 +36,7 @@

 #define PCI_VENDOR_ID_ETRON		0x1b6f
 #define PCI_DEVICE_ID_EJ168		0x7023
+#define PCI_DEVICE_ID_EJ188		0x7052

 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
@@ -461,6 +462,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
 	}
+
+	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+	    pdev->device == PCI_DEVICE_ID_EJ188)
+		xhci->quirks |= XHCI_NO_SOFT_RETRY;
+
 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
 	    pdev->device == 0x0014) {
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
--
Gitee

From 9e7f88b514da50647617c51c85e86c2dfb9b6fc2 Mon Sep 17 00:00:00 2001
From: Hongchen Zhang
Date: Sat, 2 Mar 2024 11:48:33 +0800
Subject: [PATCH 203/953] anolis: net: stmmac: fix potential double free of dma descriptor resources

ANBZ: #8435

Reset the DMA descriptor related resource pointers to NULL after freeing them, otherwise a potential double free may be triggered:

stmmac_open
  alloc_dma_desc_resources
  init_dma_desc_rings
  stmmac_hw_setup (Failed)
  goto init_error;
  free_dma_desc_resources(priv);
  (DMA related resource pointers not reset to NULL)
...
stmmac_open
  alloc_dma_desc_resources
    alloc_dma_tx_desc_resources (Failed)
    free_dma_tx_desc_resources
    (Double free of tx_q->tx_skbuff_dma and tx_q->tx_skbuff)

Signed-off-by: Hongchen Zhang
Signed-off-by: Ming Wang
Reviewed-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d1adb102a1d4..bb76c333e9c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1941,13 +1941,18 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
 				  sizeof(struct dma_extended_desc),
 				  rx_q->dma_erx, rx_q->dma_rx_phy);
+	rx_q->dma_rx = NULL;
+	rx_q->dma_erx = NULL;

 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

 	kfree(rx_q->buf_pool);
+	rx_q->buf_pool = NULL;
+
 	if (rx_q->page_pool)
 		page_pool_destroy(rx_q->page_pool);
+	rx_q->page_pool = NULL;
 }

 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
@@ -1993,8 +1998,14 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,

 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
+	tx_q->dma_etx = NULL;
+	tx_q->dma_entx = NULL;
+	tx_q->dma_tx = NULL;
+
 	kfree(tx_q->tx_skbuff_dma);
+	tx_q->tx_skbuff_dma = NULL;
 	kfree(tx_q->tx_skbuff);
+	tx_q->tx_skbuff = NULL;
 }

 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
--
Gitee

From 0a5ff450740bf99741799abbd029f4985af8241f Mon Sep 17 00:00:00 2001
From: Juxin Gao
Date: Tue, 5 Mar 2024 14:28:47 +0800
Subject: [PATCH 204/953] anolis: Update config for loongarch

ANBZ: #8435

Signed-off-by: Juxin Gao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2819
---
 arch/loongarch/configs/loongson3_defconfig | 1570 ++++++++++++++++++--
 1 file changed, 1419
insertions(+), 151 deletions(-) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 101c9d83cd43..3748aa27e993 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -4,8 +4,9 @@ CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_PREEMPT=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -19,6 +20,7 @@ CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y @@ -34,42 +36,25 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y -CONFIG_PERF_EVENTS=y -CONFIG_LOONGARCH=y -CONFIG_64BIT=y -CONFIG_MACH_LOONGSON64=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_HZ_250=y -CONFIG_DMI=y -CONFIG_EFI=y -CONFIG_SMP=y -CONFIG_HOTPLUG_CPU=y -CONFIG_NR_CPUS=64 +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_NR_CPUS=256 CONFIG_NUMA=y -CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y CONFIG_RANDOMIZE_BASE=y -CONFIG_SUSPEND=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_HIBERNATION=y -CONFIG_ACPI=y CONFIG_ACPI_SPCR_TABLE=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PCI_SLOT=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_EFI_ZBOOT=y -CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y -CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_JUMP_LABEL=y @@ -78,37 +63,51 @@ CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y CONFIG_PARTITION_ADVANCED=y CONFIG_BSD_DISKLABEL=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m -CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZBUD=y CONFIG_Z3FOLD=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_USERFAULTFD=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_UNIX=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX_DIAG=m CONFIG_TLS=m CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y CONFIG_XFRM_USER=y -CONFIG_NET_KEY=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y -CONFIG_INET=y +CONFIG_XDP_SOCKETS_DIAG=m CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y @@ -124,27 +123,83 @@ CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m CONFIG_INET_ESP=m -CONFIG_INET_UDP_DIAG=y +CONFIG_INET_ESP_OFFLOAD=m 
+CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y CONFIG_BRIDGE_NETFILTER=m -CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m CONFIG_NFT_CONNLIMIT=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -157,22 +212,35 @@ CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m CONFIG_NFT_OSF=m CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_SECMARK=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m @@ -188,7 +256,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m @@ -197,6 +264,7 @@ CONFIG_NETFILTER_XT_MATCH_HELPER=m CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set 
CONFIG_NETFILTER_XT_MATCH_LENGTH=m CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m @@ -206,10 +274,12 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m @@ -218,8 +288,25 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y CONFIG_IP_VS_PROTO_ESP=y @@ -227,11 +314,24 @@ CONFIG_IP_VS_PROTO_AH=y CONFIG_IP_VS_PROTO_SCTP=y CONFIG_IP_VS_RR=m CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_NFCT=y -CONFIG_NF_TABLES_IPV4=y +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -252,18 +352,21 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_IP6_NF_IPTABLES=y +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_MATCH_SRH=m -CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m @@ -273,76 +376,212 @@ CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m CONFIG_BRIDGE_EBT_ARP=m CONFIG_BRIDGE_EBT_IP=m CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y -CONFIG_IP_SCTP=m -CONFIG_RDS=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y 
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y CONFIG_L2TP=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m -CONFIG_NET_CLS_CGROUP=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y CONFIG_OPENVSWITCH=m CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m -CONFIG_NETLINK_DIAG=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_STREAM_PARSER=y 
+CONFIG_NET_PKTGEN=m +CONFIG_CAN=m CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m CONFIG_BT_HS=y CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y -CONFIG_BT_HCIBTUSB_MTK=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_BCSP=y CONFIG_BT_HCIUART_ATH3K=y -CONFIG_BT_HCIUART_INTEL=y -CONFIG_BT_HCIUART_AG6XX=y CONFIG_BT_HCIBCM203X=m CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIDTL1=m -CONFIG_BT_HCIBT3C=m -CONFIG_BT_HCIBLUECARD=m CONFIG_BT_HCIVHCI=m CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m -CONFIG_BT_VIRTIO=m CONFIG_CFG80211=m CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m @@ -350,15 +589,19 @@ CONFIG_RFKILL=m CONFIG_RFKILL_INPUT=y CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y -CONFIG_CEPH_LIB=m -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y CONFIG_PCIEAER=y -# CONFIG_PCIEASPM is not set +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m CONFIG_PCI_IOV=y -CONFIG_HOTPLUG_PCI=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_HOTPLUG_PCI_SHPC=y CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set CONFIG_YENTA=m CONFIG_RAPIDIO=y CONFIG_RAPIDIO_TSI721=y @@ -370,7 +613,12 @@ CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_ZSTD=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m CONFIG_MTD=m CONFIG_MTD_BLOCK=m CONFIG_MTD_CFI=m @@ -380,22 +628,31 @@ CONFIG_MTD_CFI_AMDSTD=m CONFIG_MTD_CFI_STAA=m CONFIG_MTD_RAM=m CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m CONFIG_MTD_UBI_BLOCK=y -CONFIG_PARPORT=y -CONFIG_PARPORT_PC=y -CONFIG_PARPORT_SERIAL=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m CONFIG_ZRAM=m CONFIG_ZRAM_DEF_COMP_ZSTD=y -CONFIG_BLK_DEV_LOOP=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_VIRTIO_BLK=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_NVME=y +CONFIG_BLK_DEV_NVME=m CONFIG_NVME_MULTIPATH=y CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m @@ -405,18 +662,40 @@ CONFIG_NVME_TARGET_PASSTHRU=y CONFIG_NVME_TARGET_LOOP=m CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m CONFIG_EEPROM_AT24=m -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_SAS_ATA=y CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m 
+CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m CONFIG_SCSI_MVSAS=y # CONFIG_SCSI_MVSAS_DEBUG is not set CONFIG_SCSI_MVSAS_TASKLET=y @@ -425,8 +704,10 @@ CONFIG_MEGARAID_NEWGEN=y CONFIG_MEGARAID_MM=y CONFIG_MEGARAID_MAILBOX=y CONFIG_MEGARAID_LEGACY=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT2SAS=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m CONFIG_FCOE=m @@ -434,35 +715,46 @@ CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_QLA_FC=m CONFIG_TCM_QLA2XXX=m CONFIG_SCSI_QLA_ISCSI=m -CONFIG_SCSI_LPFC=m CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y -CONFIG_AHCI_DWC=y +CONFIG_ATA_PIIX=m CONFIG_PATA_ATIIXP=y -CONFIG_PATA_PCMCIA=m +CONFIG_ATA_GENERIC=m CONFIG_MD=y -CONFIG_BLK_DEV_MD=m +CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m CONFIG_BCACHE=m -CONFIG_BLK_DEV_DM=y +CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m CONFIG_TARGET_CORE=m CONFIG_TCM_IBLOCK=m CONFIG_TCM_FILEIO=m @@ -470,18 +762,45 @@ CONFIG_TCM_PSCSI=m CONFIG_TCM_USER2=m CONFIG_LOOPBACK_TARGET=m CONFIG_ISCSI_TARGET=m -CONFIG_NETDEVICES=y +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m CONFIG_BONDING=m -CONFIG_DUMMY=y +CONFIG_DUMMY=m CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m -CONFIG_VXLAN=y +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NTB_NETDEV=m CONFIG_RIONET=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set @@ -493,36 +812,63 @@ CONFIG_VIRTIO_NET=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set CONFIG_BNX2=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y # CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_IPSEC_INLINE=m # CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_I825XX is not set -CONFIG_E1000=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IXGBE=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGBE=m 
+CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m # CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MELLANOX is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m # CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m # CONFIG_NET_VENDOR_QLOGIC is not set # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set CONFIG_8139CP=m CONFIG_8139TOO=m -CONFIG_R8169=y +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set @@ -531,46 +877,150 @@ CONFIG_R8169=y # CONFIG_NET_VENDOR_SIS is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set CONFIG_STMMAC_ETH=y # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_VIA is not set -CONFIG_NGBE=y -CONFIG_TXGBE=y +CONFIG_NGBE=m +CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set +CONFIG_LED_TRIGGER_PHY=y +CONFIG_SFP=y +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m CONFIG_PPPOE=m CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m # CONFIG_USB_NET_AX8817X is not set # CONFIG_USB_NET_AX88179_178A is not set CONFIG_USB_NET_CDC_EEM=m CONFIG_USB_NET_HUAWEI_CDC_NCM=m CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m # 
CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y # CONFIG_USB_BELKIN is not set # CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y # CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set CONFIG_IWLWIFI=m CONFIG_IWLDVM=m CONFIG_IWLMVM=m -CONFIG_HOSTAP=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m CONFIG_RT2X00=m +CONFIG_RT2800PCI=m CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -582,29 +1032,80 @@ CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m # CONFIG_RTLWIFI_DEBUG is not set CONFIG_RTL8XXXU=m -CONFIG_RTW88=m -CONFIG_RTW88_8822BE=m -CONFIG_RTW88_8822CE=m -CONFIG_RTW88_8723DE=m -CONFIG_RTW88_8821CE=m -CONFIG_RTW89=m -CONFIG_RTW89_8852AE=m -CONFIG_RTW89_8852CE=m +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set CONFIG_ZD1211RW=m CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_XTKBD=m CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_MOUSE_PS2_SENTELIC=y CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y CONFIG_SERIO_SERPORT=m CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m CONFIG_LEGACY_PTY_COUNT=16 CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_RUNTIME_UARTS=16 @@ -612,37 +1113,190 @@ CONFIG_SERIAL_8250_EXTENDED=y 
CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m CONFIG_PRINTER=m +CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_GPIO=y -CONFIG_I2C_LS2X=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m CONFIG_SPI=y -CONFIG_SPI_LOONGSON_PCI=m +CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_DP83640_PHY=m CONFIG_PINCTRL=y CONFIG_PINCTRL_LOONGSON2=y CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_LOONGSON=y +CONFIG_GPIO_AMDPT=m CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_RESTART=y -CONFIG_POWER_RESET_SYSCON=y -CONFIG_POWER_RESET_SYSCON_POWEROFF=y -CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m 
+CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m CONFIG_RC_CORE=m CONFIG_LIRC=y CONFIG_RC_DECODERS=y @@ -656,100 +1310,580 @@ CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m CONFIG_IR_SONY_DECODER=m CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m CONFIG_MEDIA_SUPPORT=m +CONFIG_DVB_MAX_ADAPTERS=8 CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +CONFIG_USB_S2255=m CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_AU0828=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m 
+CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_FIREDTV=m CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_LOONGSON=y +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y CONFIG_FB_LS2K500=m -CONFIG_LCD_CLASS_DEVICE=y +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=m CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_LP855X=m # CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_HRTIMER=m +# CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MPU401=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y -CONFIG_SND_HDA_INTEL=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m 
+CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y CONFIG_HIDRAW=y CONFIG_UHID=m CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y CONFIG_USB=y -CONFIG_USB_OTG=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m CONFIG_USB_MON=y CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m CONFIG_USB_EHCI_HCD=y 
CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_UHCI_HCD=m -CONFIG_USB_ACM=m +CONFIG_USB_UHCI_HCD=y CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m CONFIG_USB_STORAGE=m CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m CONFIG_USB_DWC2=y CONFIG_USB_DWC2_HOST=y CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m CONFIG_USB_GADGET=y CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m 
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m CONFIG_RTC_DRV_LOONGSON=y CONFIG_DMADEVICES=y -CONFIG_UIO=m +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y CONFIG_VFIO_PCI=m CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=m @@ -785,7 +1919,27 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -806,37 +1960,41 @@ CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m +CONFIG_QFMT_V2=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_XINO_AUTO=y CONFIG_OVERLAY_FS_METACOPY=y -CONFIG_FSCACHE=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y CONFIG_CACHEFILES=m -CONFIG_ISO9660_FS=y +CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y -CONFIG_UDF_FS=y +CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=936 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m CONFIG_NTFS3_FS=m CONFIG_NTFS3_64BIT_CLUSTER=y CONFIG_NTFS3_LZX_XPRESS=y CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_TMPFS=y 
CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y CONFIG_ORANGEFS_FS=m CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y @@ -845,7 +2003,8 @@ CONFIG_HFSPLUS_FS=m CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_CRAMFS=m -CONFIG_SQUASHFS=y +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y @@ -853,79 +2012,188 @@ CONFIG_SQUASHFS_XZ=y CONFIG_MINIX_FS=m CONFIG_ROMFS_FS=m CONFIG_PSTORE=m -CONFIG_PSTORE_LZO_COMPRESS=m -CONFIG_PSTORE_LZ4_COMPRESS=m -CONFIG_PSTORE_LZ4HC_COMPRESS=m -CONFIG_PSTORE_842_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_EROFS_FS=m CONFIG_EROFS_FS_ZIP_LZMA=y CONFIG_EROFS_FS_PCPU_KTHREAD=y CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y -CONFIG_ROOT_NFS=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set CONFIG_NFSD=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=y CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y CONFIG_KEY_DH_OPERATIONS=y CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y 
CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SM2=y CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_DMA_CMA=y CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_RUNTIME_TESTING_MENU is not set -- Gitee From 7454ca0cafb1840fcefa9817b5f790a45c7b00ee Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 25 Dec 2023 18:19:33 +0800 Subject: [PATCH 205/953] anolis: ALSA: hda: Add support of Zhaoxin SB HDAC ANBZ: #7809 Add some special initialization for Zhaoxin SB HDAC. 
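Concretely, the two hunks below extend the existing VIA-only checks so the
Zhaoxin south bridge controller inherits the VIA behaviour: the VIACOMBO
DMA-position fix in check_position_fix() and forced non-snoop mode in
azx_check_snoop_available(). The dispatch pattern, as a self-contained
sketch (illustrative names only, not driver code):

  enum drv_type { DRV_VIA, DRV_ZHAOXIN, DRV_OTHER };
  enum pos_fix  { FIX_AUTO, FIX_VIACOMBO };

  static enum pos_fix choose_position_fix(enum drv_type t)
  {
          /* the Zhaoxin SB HDAC behaves like the VIA controller here */
          return (t == DRV_VIA || t == DRV_ZHAOXIN) ? FIX_VIACOMBO : FIX_AUTO;
  }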
Signed-off-by: leoliu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2683 --- sound/pci/hda/hda_intel.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 6176fc743e50..a36900035337 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1549,7 +1549,8 @@ static int check_position_fix(struct azx *chip, int fix) } /* Check VIA/ATI HD Audio Controller exist */ - if (chip->driver_type == AZX_DRIVER_VIA) { + if (chip->driver_type == AZX_DRIVER_VIA || + chip->driver_type == AZX_DRIVER_ZHAOXIN) { dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n"); return POS_FIX_VIACOMBO; } @@ -1703,7 +1704,7 @@ static void azx_check_snoop_available(struct azx *chip) snoop = true; if (azx_get_snoop_type(chip) == AZX_SNOOP_TYPE_NONE && - chip->driver_type == AZX_DRIVER_VIA) { + (chip->driver_type == AZX_DRIVER_VIA || chip->driver_type == AZX_DRIVER_ZHAOXIN)) { /* force to non-snoop mode for a new VIA controller * when BIOS is set */ -- Gitee From bfd73f6905519fad8feef2c54cd2a71742904a1c Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 26 Jan 2024 15:38:59 +0800 Subject: [PATCH 206/953] anolis: ALSA: hda: Add support of Zhaoxin NB HDAC ANBZ: #7809 Add the new PCI ID 0x1d17 0x9141/0x9142/0x9144 Zhaoxin NB HDAC support. And add some special initialization for Zhaoxin NB HDAC. Signed-off-by: leoliu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2683 --- sound/pci/hda/hda_controller.c | 17 ++++++++++- sound/pci/hda/hda_controller.h | 3 ++ sound/pci/hda/hda_intel.c | 56 ++++++++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 406779625fb5..b69e7b94673c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1061,6 +1061,16 @@ static void stream_update(struct hdac_bus *bus, struct hdac_stream *s) } } +static void azx_rirb_zxdelay(struct azx *chip, int enable) +{ + if (chip->remap_diu_addr) { + if (!enable) + writel(0x0, (char *)chip->remap_diu_addr + 0x490a8); + else + writel(0x1000000, (char *)chip->remap_diu_addr + 0x490a8); + } +} + irqreturn_t azx_interrupt(int irq, void *dev_id) { struct azx *chip = dev_id; @@ -1103,9 +1113,14 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { - if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) + if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) || + (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)) { + azx_rirb_zxdelay(chip, 1); udelay(80); + } snd_hdac_bus_update_rirb(bus); + if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY) + azx_rirb_zxdelay(chip, 0); } } } while (active && ++repeat < 10); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 8556031bcd68..9db89f4c7b3f 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -45,6 +45,7 @@ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ +#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 31) /* Put a delay before read */ enum { AZX_SNOOP_TYPE_NONE, @@ -143,6 +144,8 @@ struct azx { unsigned int disabled:1; /* disabled by vga_switcheroo */ unsigned int pm_prepared:1; + void __iomem *remap_diu_addr; 
+ /* GTS present */ unsigned int gts_present:1; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index a36900035337..a3236ea1df94 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -237,6 +237,7 @@ enum { AZX_DRIVER_CTHDA, AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, + AZX_DRIVER_ZXHDMI, AZX_DRIVER_LOONGSON, AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, @@ -350,6 +351,7 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CTHDA] = "HDA Creative", [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", + [AZX_DRIVER_ZXHDMI] = "HDA Zhaoxin HDMI", [AZX_DRIVER_LOONGSON] = "HDA Loongson", [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", @@ -373,6 +375,31 @@ static void update_pci_byte(struct pci_dev *pci, unsigned int reg, pci_write_config_byte(pci, reg, data); } +static int azx_init_pci_zx(struct azx *chip) +{ + struct snd_card *card = chip->card; + unsigned int diu_reg; + struct pci_dev *diu_pci = NULL; + + azx_bus(chip)->polling_mode = 1; + diu_pci = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x3a03, NULL); + if (!diu_pci) { + dev_info(card->dev, "zx_hda no KX-5000 device.\n"); + return -ENXIO; + } + pci_read_config_dword(diu_pci, PCI_BASE_ADDRESS_0, &diu_reg); + chip->remap_diu_addr = ioremap(diu_reg, 0x50000); + pci_dev_put(diu_pci); + dev_info(card->dev, "zx_hda %x %p\n", diu_reg, chip->remap_diu_addr); + return 0; +} + +static void azx_free_pci_zx(struct azx *chip) +{ + if (chip->remap_diu_addr) + iounmap(chip->remap_diu_addr); +} + static void azx_init_pci(struct azx *chip) { int snoop_type = azx_get_snoop_type(chip); @@ -1362,6 +1389,9 @@ static void azx_free(struct azx *chip) hda->init_failed = 1; /* to be sure */ complete_all(&hda->probe_wait); + if (chip->driver_type == AZX_DRIVER_ZXHDMI) + azx_free_pci_zx(chip); + if (use_vga_switcheroo(hda)) { if (chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); @@ -1756,6 +1786,8 @@ static int default_bdl_pos_adj(struct azx *chip) case AZX_DRIVER_ICH: case AZX_DRIVER_PCH: return 1; + case AZX_DRIVER_ZXHDMI: + return 128; default: return 32; } @@ -1885,6 +1917,11 @@ static int azx_first_init(struct azx *chip) chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) bus->hygon_dword_access = 1; + chip->remap_diu_addr = NULL; + + if (chip->driver_type == AZX_DRIVER_ZXHDMI) + azx_init_pci_zx(chip); + err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; @@ -1986,6 +2023,7 @@ static int azx_first_init(struct azx *chip) chip->capture_streams = ATIHDMI_NUM_CAPTURE; break; case AZX_DRIVER_GFHDMI: + case AZX_DRIVER_ZXHDMI: case AZX_DRIVER_GENERIC: default: chip->playback_streams = ICH6_NUM_PLAYBACK; @@ -2700,6 +2738,15 @@ static const struct pci_device_id azx_ids[] = { { PCI_VDEVICE(VIA, 0x9170), .driver_data = AZX_DRIVER_GENERIC }, /* VIA GFX VT6122/VX11 */ { PCI_VDEVICE(VIA, 0x9140), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(VIA, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(VIA, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | 
AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* SIS966 */ { PCI_VDEVICE(SI, 0x7502), .driver_data = AZX_DRIVER_SIS }, /* ULI M5461 */ @@ -2755,6 +2802,15 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI }, /* Zhaoxin */ { PCI_VDEVICE(ZHAOXIN, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN }, + { PCI_VDEVICE(ZHAOXIN, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(ZHAOXIN, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* Loongson HDAudio*/ { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDA), .driver_data = AZX_DRIVER_LOONGSON }, -- Gitee From 1c625052190b7c93a8114fb680f6ad8918c657ec Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 25 Dec 2023 18:19:35 +0800 Subject: [PATCH 207/953] anolis: ALSA: hda: Add support of Zhaoxin NB HDAC codec ANBZ: #7809 Add Zhaoxin NB HDAC codec support. Signed-off-by: leoliu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2683 --- sound/pci/hda/patch_hdmi.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 038db8902c9e..3feafe29bda1 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4494,6 +4494,20 @@ static int patch_via_hdmi(struct hda_codec *codec) return patch_simple_hdmi(codec, VIAHDMI_CVT_NID, VIAHDMI_PIN_NID); } +/* Zhaoxin HDMI Implementation */ +static int patch_zhaoxin_hdmi(struct hda_codec *codec) +{ + int err; + + err = patch_generic_hdmi(codec); + codec->no_sticky_stream = 1; + + if (err) + return err; + + return 0; +} + static int patch_gf_hdmi(struct hda_codec *codec) { int err; @@ -4616,6 +4630,15 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x11069f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8a, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi), HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi), @@ -4648,6 +4671,15 @@ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", 
patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x1d179f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8a, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), /* special ID for generic HDMI */ HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI", patch_generic_hdmi), {} /* terminator */ -- Gitee From 6eb7dd8b5e071f4fcfcc3027cc9f0bbefbeab712 Mon Sep 17 00:00:00 2001 From: Maciej Wieczor-Retman Date: Tue, 10 Oct 2023 12:42:36 +0200 Subject: [PATCH 208/953] x86/resctrl: Rename arch_has_sparse_bitmaps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8085 commit 39c6eed1f61594f737160e498d29673edbd9eefd upstream. Rename arch_has_sparse_bitmaps to arch_has_sparse_bitmasks to ensure consistent terminology throughout resctrl. Intel-SIG: commit 39c6eed1f615 x86/resctrl: Rename arch_has_sparse_bitmaps. Incremental backporting patches for Intel RDT on Intel Xeon platform. Suggested-by: Reinette Chatre Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ilpo Järvinen Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Link: https://lore.kernel.org/r/e330fcdae873ef1a831e707025a4b70fa346666e.1696934091.git.maciej.wieczor-retman@intel.com [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Kun(llfl) Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2740 --- arch/x86/kernel/cpu/resctrl/core.c | 4 ++-- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 ++-- include/linux/resctrl.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index d99584690124..b97bc4bfba16 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -871,7 +871,7 @@ static __init void rdt_init_res_defs_intel(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = false; + r->cache.arch_has_sparse_bitmasks = false; r->cache.arch_has_per_cpu_cfg = false; r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { @@ -891,7 +891,7 @@ static __init void rdt_init_res_defs_amd(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = true; + r->cache.arch_has_sparse_bitmasks = true; r->cache.arch_has_per_cpu_cfg = true; r->cache.min_cbm_bits = 0; } else if (r->rid == RDT_RESOURCE_MBA) { diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index b44c487727d4..ab45012288bb 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -113,8 +113,8 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) first_bit = find_first_bit(&val, cbm_len); zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - /* Are non-contiguous 
bitmaps allowed? */
-	if (!r->cache.arch_has_sparse_bitmaps &&
+	/* Are non-contiguous bitmasks allowed? */
+	if (!r->cache.arch_has_sparse_bitmasks &&
 	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
 		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
 		return false;
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 8334eeacfec5..66942d7fba7f 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -94,7 +94,7 @@ struct rdt_domain {
  *			zero CBM.
  * @shareable_bits:	Bitmask of shareable resource with other
  *			executing entities
- * @arch_has_sparse_bitmaps:	True if a bitmap like f00f is valid.
+ * @arch_has_sparse_bitmasks:	True if a bitmask like f00f is valid.
  * @arch_has_per_cpu_cfg:	True if QOS_CFG register for this cache
  *			level has CPU scope.
  */
@@ -102,7 +102,7 @@ struct resctrl_cache {
 	unsigned int	cbm_len;
 	unsigned int	min_cbm_bits;
 	unsigned int	shareable_bits;
-	bool		arch_has_sparse_bitmaps;
+	bool		arch_has_sparse_bitmasks;
 	bool		arch_has_per_cpu_cfg;
 };
--
Gitee

From e51853dbc12df20c61921b1c8163811260acc13b Mon Sep 17 00:00:00 2001
From: Maciej Wieczor-Retman
Date: Tue, 10 Oct 2023 12:42:37 +0200
Subject: [PATCH 209/953] x86/resctrl: Enable non-contiguous CBMs in Intel CAT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

ANBZ: #8085

commit 0e3cd31f6e9074886dea5a999bfcc563d144e7de upstream.

The setting for non-contiguous 1s support in Intel CAT is hardcoded to
false. On these systems, writing non-contiguous 1s into the schemata
file will fail before resctrl passes the value to the hardware.

In Intel CAT CPUID.0x10.1:ECX[3] and CPUID.0x10.2:ECX[3] stopped being
reserved and now carry information about non-contiguous 1s value support
for L3 and L2 cache respectively. The CAT capacity bitmask (CBM) supports
a non-contiguous 1s value if the bit is set.

The exceptions are Haswell systems, where non-contiguous 1s value support
needs to stay disabled since they can't make use of CPUID for Cache
allocation.

Intel-SIG: commit 0e3cd31f6e90 x86/resctrl: Enable non-contiguous CBMs in
Intel CAT. Incremental backporting patches for Intel RDT on Intel Xeon
platform.
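For reference, the two CPUID bits above can be checked from userspace; a
minimal sketch (assuming GCC/Clang's <cpuid.h> on x86, not part of the
kernel change):

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx;

          /* CPUID.0x10.1:ECX[3] - L3 CAT non-contiguous CBM support */
          if (__get_cpuid_count(0x10, 1, &eax, &ebx, &ecx, &edx))
                  printf("L3 sparse CBM: %u\n", (ecx >> 3) & 1);

          /* CPUID.0x10.2:ECX[3] - L2 CAT non-contiguous CBM support */
          if (__get_cpuid_count(0x10, 2, &eax, &ebx, &ecx, &edx))
                  printf("L2 sparse CBM: %u\n", (ecx >> 3) & 1);

          return 0;
  }

The kernel-side decoding below does the same via the new cpuid_0x10_x_ecx
union, whose noncont field sits after three reserved bits.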
Originally-by: Fenghua Yu Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ilpo Järvinen Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Link: https://lore.kernel.org/r/1849b487256fe4de40b30f88450cba3d9abc9171.1696934091.git.maciej.wieczor-retman@intel.com [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Kun(llfl) Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2740 --- arch/x86/kernel/cpu/resctrl/core.c | 9 ++++++--- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 10 ++++++---- arch/x86/kernel/cpu/resctrl/internal.h | 9 +++++++++ 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index b97bc4bfba16..a8a6c6dd689c 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -152,6 +152,7 @@ static inline void cache_alloc_hsw_probe(void) r->cache.cbm_len = 20; r->cache.shareable_bits = 0xc0000; r->cache.min_cbm_bits = 2; + r->cache.arch_has_sparse_bitmasks = false; r->alloc_capable = true; rdt_alloc_capable = true; @@ -265,15 +266,18 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_1_eax eax; + union cpuid_0x10_x_ecx ecx; union cpuid_0x10_x_edx edx; - u32 ebx, ecx; + u32 ebx; - cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full); + cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full); hw_res->num_closid = edx.split.cos_max + 1; r->cache.cbm_len = eax.split.cbm_len + 1; r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1; r->cache.shareable_bits = ebx & r->default_ctrl; r->data_width = (r->cache.cbm_len + 3) / 4; + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + r->cache.arch_has_sparse_bitmasks = ecx.split.noncont; r->alloc_capable = true; } @@ -871,7 +875,6 @@ static __init void rdt_init_res_defs_intel(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmasks = false; r->cache.arch_has_per_cpu_cfg = false; r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index ab45012288bb..beccb0e87ba7 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -87,10 +87,12 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, /* * Check whether a cache bit mask is valid. - * For Intel the SDM says: - * Please note that all (and only) contiguous '1' combinations - * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). - * Additionally Haswell requires at least two bits set. + * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID: + * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1 + * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1 + * + * Haswell does not support a non-contiguous 1s value and additionally + * requires at least two bits set. * AMD allows non-contiguous bitmasks. 
*/ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 566386abb877..ca86a96e80c2 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -490,6 +490,15 @@ union cpuid_0x10_3_eax { unsigned int full; }; +/* CPUID.(EAX=10H, ECX=ResID).ECX */ +union cpuid_0x10_x_ecx { + struct { + unsigned int reserved:3; + unsigned int noncont:1; + } split; + unsigned int full; +}; + /* CPUID.(EAX=10H, ECX=ResID).EDX */ union cpuid_0x10_x_edx { struct { -- Gitee From 853a29934d47e4c454581707db435c00c47bee12 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 10 Oct 2023 12:42:38 +0200 Subject: [PATCH 210/953] x86/resctrl: Add sparse_masks file in info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8085 commit 4dba8f10b8fef9c5b0f9ed83dd1af91a1795ead1 upstream. Add the interface in resctrl FS to show if sparse cache allocation bit masks are supported on the platform. Reading the file returns either a "1" if non-contiguous 1s are supported and "0" otherwise. The file path is /sys/fs/resctrl/info/{resource}/sparse_masks, where {resource} can be either "L2" or "L3". Intel-SIG: commit 4dba8f10b8fe x86/resctrl: Add sparse_masks file in info. Incremental backporting patches for Intel RDT on Intel Xeon platform. Signed-off-by: Fenghua Yu Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ilpo Järvinen Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Link: https://lore.kernel.org/r/7300535160beba41fd8aa073749ec1ee29b4621f.1696934091.git.maciej.wieczor-retman@intel.com [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Kun(llfl) Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2740 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index d82d5de183b1..1c0f00cd212d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1117,6 +1117,17 @@ static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) } } +static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks); + + return 0; +} + /** * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other * @r: Resource to which domain instance @d belongs. @@ -1841,6 +1852,13 @@ static struct rftype res_common_files[] = { .seq_show = rdtgroup_size_show, .fflags = RF_CTRL_BASE, }, + { + .name = "sparse_masks", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_has_sparse_bitmasks_show, + .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + }, }; -- Gitee From aa4b907785582a478c5a9ae37f4836ec633f5de8 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 10 Oct 2023 12:42:39 +0200 Subject: [PATCH 211/953] Documentation/x86: Document resctrl's new sparse_masks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8085 commit aaa5fa35743ab9f0726568611a85e3e15349b9bf upstream. 
The documentation mentions that non-contiguous bit masks are not
supported in Intel Cache Allocation Technology (CAT).

Update the documentation on how to determine if sparse bit masks are
allowed in L2 and L3 CAT.

Intel-SIG: commit aaa5fa35743a Documentation/x86: Document resctrl's new
sparse_masks. Incremental backporting patches for Intel RDT on Intel Xeon
platform.

Signed-off-by: Fenghua Yu
Signed-off-by: Maciej Wieczor-Retman
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Ilpo Järvinen
Reviewed-by: Peter Newman
Reviewed-by: Reinette Chatre
Reviewed-by: Babu Moger
Tested-by: Peter Newman
Link: https://lore.kernel.org/r/3e9610997164f648e15c5c2e90d4944ce36504fe.1696934091.git.maciej.wieczor-retman@intel.com
[ Xiaochen Shen: amend commit log ]
Signed-off-by: Xiaochen Shen
Reviewed-by: Kun(llfl)
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2740
---
 Documentation/arch/x86/resctrl.rst | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
index cb05d90111b4..4c6421e2aa31 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/arch/x86/resctrl.rst
@@ -124,6 +124,13 @@ related to allocation:
 		"P":
 		      Corresponding region is pseudo-locked. No
 		      sharing allowed.
+"sparse_masks":
+	Indicates if non-contiguous 1s value in CBM is supported.
+
+	"0":
+	      Only contiguous 1s value in CBM is supported.
+	"1":
+	      Non-contiguous 1s value in CBM is supported.
 
 Memory bandwidth(MB) subdirectory contains the following files
 with respect to allocation:
@@ -445,12 +452,13 @@ For cache resources we describe the portion of the cache that is available
 for allocation using a bitmask. The maximum value of the mask is defined
 by each cpu model (and may be different for different cache levels). It
 is found using CPUID, but is also provided in the "info" directory of
-the resctrl file system in "info/{resource}/cbm_mask". Intel hardware
+the resctrl file system in "info/{resource}/cbm_mask". Some Intel hardware
 requires that these masks have all the '1' bits in a contiguous block. So
 0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9
-and 0xA are not. On a system with a 20-bit mask each bit represents 5%
-of the capacity of the cache. You could partition the cache into four
-equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
+and 0xA are not. Check /sys/fs/resctrl/info/{resource}/sparse_masks
+if non-contiguous 1s value is supported. On a system with a 20-bit mask
+each bit represents 5% of the capacity of the cache. You could partition
+the cache into four equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
 
 Memory bandwidth Allocation and monitoring
 ==========================================
--
Gitee

From 50b5dc8c185617fc8199bec6a35779d6124ff606 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Mon, 26 Feb 2024 17:08:46 +0800
Subject: [PATCH 212/953] anolis: fs: record page or bio info while process is
 waiting on it

ANBZ: #8419

If one process context is stuck in wait_on_buffer(), lock_buffer(),
lock_page(), wait_on_page_writeback() or wait_on_bit_io(), it's hard to
tell the true reason, for example, whether this page is under io, or
this page has just been locked too long by another process context.
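The fix is bookkeeping on the task itself, described next. As a
stripped-down model of the state being added (a sketch with illustrative
names; in the diff below the fields live directly in task_struct as
wait_res_type, a wait_folio/wait_bio union and wait_moment):

  struct wait_record {
          int type;               /* TASK_WAIT_FOLIO or TASK_WAIT_BIO */
          void *res;              /* address of the folio or bio waited on */
          unsigned long moment;   /* jiffies when the wait began */
  };

Set it right before blocking, clear it on wakeup, and a reader such as
/proc/<pid>/wait_res can report what a task is stuck on and for how long.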
Normally an io request has multiple bios, and every bio contains multiple
pages which will hold the data to be read from or written to the device.
So here we record the page info or bio info in task_struct while a
process calls lock_page(), lock_buffer(), wait_on_page_writeback(),
wait_on_buffer() or wait_on_bit_io(), and we add a new proc interface:

[lege@localhost linux]$ cat /proc/4516/wait_res
1 ffffd0969f95d3c0 4295369599 4295381596

The above info means that thread 4516 is waiting on a page at address
ffffd0969f95d3c0 and has waited for 11997ms. The first field denotes the
wait type, the second the page address the process is waiting on, the
third the wait moment and the fourth the current moment.

In practice, if we find a process waiting on one page for too long, we
can get the page's address by reading /proc/$pid/wait_res, and search
for this page address in every block device's
/sys/kernel/debug/block/${devname}/rq_hang; if the search hits one, we
can get the request and know why this io request hangs that long.

Signed-off-by: Xiaoguang Wang
Signed-off-by: Joseph Qi
Reviewed-by: Gao Xiang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2806
---
 block/bio.c                 |  3 +++
 fs/jbd2/transaction.c       |  2 ++
 fs/proc/base.c              | 11 +++++++++++
 include/linux/buffer_head.h | 10 ++++++++--
 include/linux/sched.h       | 37 +++++++++++++++++++++++++++++++++++++
 mm/filemap.c                |  2 ++
 6 files changed, 63 insertions(+), 2 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 62419aa09d73..c87160fc8974 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1376,12 +1376,15 @@ int submit_bio_wait(struct bio *bio)
 
 	/* Prevent hang_check timer from firing at us during very long I/O */
 	hang_check = sysctl_hung_task_timeout_secs;
+
+	task_set_wait_res(TASK_WAIT_BIO, bio);
 	if (hang_check)
 		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
 			;
 	else
 		wait_for_completion_io(&done);
+	task_clear_wait_res();
 
 	return blk_status_to_errno(bio->bi_status);
 }
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 773ff69e9bd3..471825347a2f 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1113,7 +1113,9 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 		if (buffer_shadow(bh)) {
 			JBUFFER_TRACE(jh, "on shadow: sleep");
 			spin_unlock(&jh->b_state_lock);
+			task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio);
 			wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
+			task_clear_wait_res();
 			goto repeat;
 		}
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ffd54617c354..f6f5d4a210be 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -568,6 +568,15 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
 	return 0;
 }
 
+static int proc_wait_res(struct seq_file *m, struct pid_namespace *ns,
+			 struct pid *pid, struct task_struct *task)
+{
+	seq_printf(m, "%d %px %lu %lu\n", task->wait_res_type, task->wait_folio,
+		   task->wait_moment, jiffies);
+
+	return 0;
+}
+
 struct limit_names {
 	const char *name;
 	const char *unit;
@@ -3352,6 +3361,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 	ONE("ksm_merging_pages",  S_IRUSR, proc_pid_ksm_merging_pages),
 	ONE("ksm_stat",  S_IRUSR, proc_pid_ksm_stat),
 #endif
+	ONE("wait_res", 0444, proc_wait_res),
 };
 
 static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3691,6 +3701,7 @@ static const struct pid_entry tid_base_stuff[] = {
 	ONE("ksm_merging_pages",  S_IRUSR, proc_pid_ksm_merging_pages),
 	ONE("ksm_stat",  S_IRUSR, proc_pid_ksm_stat),
 #endif
+	ONE("wait_res", 0444, proc_wait_res),
 };
 
 static int proc_tid_base_readdir(struct file *file, struct
dir_context *ctx)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 44e9de51eedf..9711ae81d988 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -369,8 +369,11 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
 static inline void wait_on_buffer(struct buffer_head *bh)
 {
 	might_sleep();
-	if (buffer_locked(bh))
+	if (buffer_locked(bh)) {
+		task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio);
 		__wait_on_buffer(bh);
+		task_clear_wait_res();
+	}
 }
 
 static inline int trylock_buffer(struct buffer_head *bh)
@@ -381,8 +384,11 @@ static inline int trylock_buffer(struct buffer_head *bh)
 static inline void lock_buffer(struct buffer_head *bh)
 {
 	might_sleep();
-	if (!trylock_buffer(bh))
+	if (!trylock_buffer(bh)) {
+		task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio);
 		__lock_buffer(bh);
+		task_clear_wait_res();
+	}
 }
 
 static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385f7..3f90d9ece7bd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1536,6 +1536,13 @@ struct task_struct {
 	struct user_event_mm		*user_event_mm;
 #endif
 
+	int wait_res_type;
+	union {
+		struct folio *wait_folio;
+		struct bio *wait_bio;
+	};
+	unsigned long wait_moment;
+
 	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
@@ -1553,6 +1560,36 @@ struct task_struct {
	 */
 };
 
+enum {
+	TASK_WAIT_FOLIO = 1,
+	TASK_WAIT_BIO,
+};
+
+static inline void task_set_wait_res(int type, void *res)
+{
+	switch (type) {
+	case TASK_WAIT_FOLIO:
+		current->wait_folio = (struct folio *)res;
+		break;
+	case TASK_WAIT_BIO:
+		current->wait_bio = (struct bio *)res;
+		break;
+	default:
+		current->wait_folio = NULL;
+		break;
+	}
+
+	current->wait_res_type = type;
+	current->wait_moment = jiffies;
+}
+
+static inline void task_clear_wait_res(void)
+{
+	current->wait_folio = NULL;
+	current->wait_res_type = 0;
+	current->wait_moment = 0;
+}
+
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->thread_pid;
diff --git a/mm/filemap.c b/mm/filemap.c
index 3dba1792beba..974fc1947333 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1626,8 +1626,10 @@ EXPORT_SYMBOL(folio_end_writeback);
  */
 void __folio_lock(struct folio *folio)
 {
+	task_set_wait_res(TASK_WAIT_FOLIO, folio);
 	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
				EXCLUSIVE);
+	task_clear_wait_res();
 }
 EXPORT_SYMBOL(__folio_lock);
 
--
Gitee

From cbbba8688b425327433eae85e7e92b1203fc86f6 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Mon, 26 Feb 2024 17:21:45 +0800
Subject: [PATCH 213/953] anolis: blk: add iohang check function

ANBZ: #8419

Background:
We do not have a dependable block layer interface to determine whether a
block device has io requests which have not been completed for a
somewhat long time. Currently we have the 'in_flight' interface: it
counts the number of I/O requests that have been issued to the device
driver but have not yet completed, and it does not include I/O requests
that are in the queue but not yet issued to the device driver, which
means it will not count io requests that have been stuck in the block
layer.

Also, if there are steady io requests issued to the device driver,
'in_flight' may always be non-zero, but you could not determine whether
there is an io request which has not been completed for too long.
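A request therefore needs an explicit age check: it counts as hung once
the time since it was started crosses a per-queue threshold. As a
minimal model (a sketch only; the real check below compares
rq->start_time_ns against q->rq_hang_threshold, in milliseconds, while
iterating requests via blk_mq_queue_tag_busy_iter()):

  #define NSEC_PER_MSEC 1000000ULL

  static int rq_is_hung(unsigned long long now_ns,
                        unsigned long long start_ns,
                        unsigned long long threshold_ms)
  {
          return (now_ns - start_ns) / NSEC_PER_MSEC >= threshold_ms;
  }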
Solution:
To find io requests which have not been completed for too long, we add
three new interfaces:

/sys/block/vdb/queue/hang_threshold
If one io request's running time has been greater than this value,
count this io as hung.

/sys/block/vdb/hang
Show read/write io requests' hang counter.

/sys/kernel/debug/block/vdb/rq_hang
Show all hung io requests' detailed info, like below:

ffff97db96301200 {.op=WRITE, .cmd_flags=SYNC, .rq_flags=STARTED|
ELVPRIV|IO_STAT|STATS, .state=in_flight, .tag=30, .internal_tag=169,
.start_time_ns=140634088407, .io_start_time_ns=140634102958,
.current_time=146497371953, .bio = ffff97db91e8e000, .bio_pages =
{ ffffd096a0602540 }, .bio = ffff97db91e8ec00, .bio_pages =
{ ffffd096a070eec0 }, .bio = ffff97db91e8f600, .bio_pages =
{ ffffd096a0424cc0 }, .bio = ffff97db91e8f300, .bio_pages =
{ ffffd096a0600a80 }}

With the above info, we can easily see this request's latency
distribution; see the next patch for bio_pages' usage.

Note this feature needs CONFIG_BLK_DEBUG_FS to be enabled.

Signed-off-by: Xiaoguang Wang
Signed-off-by: Joseph Qi
Reviewed-by: Gao Xiang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2806
---
 block/blk-core.c        |  1 +
 block/blk-mq-debugfs.c  | 81 +++++++++++++++++++++++++++++++++++++++++
 block/blk-mq.c          | 28 ++++++++++++++
 block/blk-mq.h          |  2 +
 block/blk-settings.c    |  7 ++++
 block/blk-sysfs.c       | 22 +++++++++++
 block/blk.h             |  2 +
 block/genhd.c           | 19 ++++++++++
 block/partitions/core.c |  2 +
 include/linux/blkdev.h  |  9 +++++
 10 files changed, 173 insertions(+)

diff --git a/block/blk-core.c b/block/blk-core.c
index 2eca76ccf4ee..c18d1186e10b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -403,6 +403,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 		return NULL;
 
 	q->last_merge = NULL;
+	q->rq_hang_threshold = BLK_REQ_HANG_THRESHOLD;
 
 	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
 	if (q->id < 0)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index c3b5930106b2..271535f56bd2 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -155,12 +155,47 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
 	return count;
 }
 
+static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq);
+
+static bool blk_mq_check_rq_hang(struct request *rq, void *priv)
+{
+	struct seq_file *m = priv;
+	u64 now = ktime_get_ns();
+	u64 duration;
+
+	duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC);
+	if (duration < rq->q->rq_hang_threshold)
+		return true;
+
+	/* See comments in blk_mq_check_expired() */
+	if (!req_ref_inc_not_zero(rq))
+		return true;
+
+	duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC);
+	if (duration >= rq->q->rq_hang_threshold)
+		blk_mq_debugfs_rq_hang_show(m, rq);
+
+	blk_mq_put_rq_ref(rq);
+
+	return true;
+
+}
+
+static int queue_rq_hang_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+
+	blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, m);
+	return 0;
+}
+
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 	{ "poll_stat", 0400, queue_poll_stat_show },
 	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
 	{ "pm_only", 0600, queue_pm_only_show, NULL },
 	{ "state", 0600, queue_state_show, queue_state_write },
 	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
+	{ "rq_hang", 0400, queue_rq_hang_show, NULL },
 	{ },
 };
 
@@ -310,6 +345,52 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 }
 EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
 
+static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq)
+{
+ const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; + const unsigned int op = req_op(rq); + const char *op_str = blk_op_str(op); + struct bio *bio; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + seq_printf(m, "%p {.op=", rq); + if (strcmp(op_str, "UNKNOWN") == 0) + seq_printf(m, "%u", op); + else + seq_printf(m, "%s", op_str); + seq_puts(m, ", .cmd_flags="); + blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name, + ARRAY_SIZE(cmd_flag_name)); + seq_puts(m, ", .rq_flags="); + blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name, + ARRAY_SIZE(rqf_name)); + seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq))); + seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag, + rq->internal_tag); + seq_printf(m, ", .start_time_ns=%llu", rq->start_time_ns); + seq_printf(m, ", .io_start_time_ns=%llu", rq->io_start_time_ns); + seq_printf(m, ", .current_time=%llu", ktime_get_ns()); + + __rq_for_each_bio(bio, rq) { + seq_printf(m, ", .bio = %px", bio); + seq_printf(m, ", .sector = %llu, .len=%u", + bio->bi_iter.bi_sector, bio->bi_iter.bi_size); + seq_puts(m, ", .bio_pages = { "); + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + + if (!page) + continue; + seq_printf(m, "%px ", page); + } + seq_puts(m, "}"); + } + if (mq_ops->show_rq) + mq_ops->show_rq(m, rq); + seq_puts(m, "}\n"); +} + static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos) __acquires(&hctx->lock) { diff --git a/block/blk-mq.c b/block/blk-mq.c index 257b0addd47e..464e07792e8c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -89,6 +89,11 @@ struct mq_inflight { unsigned int inflight[2]; }; +struct mq_hang { + struct block_device *part; + unsigned int hang[2]; +}; + static bool blk_mq_check_inflight(struct request *rq, void *priv) { struct mq_inflight *mi = priv; @@ -121,6 +126,29 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, inflight[1] = mi.inflight[1]; } +static bool blk_mq_check_hang(struct request *rq, void *priv) +{ + struct mq_hang *mh = priv; + u64 now = ktime_get_ns(), duration; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if ((duration >= rq->q->rq_hang_threshold) && + (!mh->part->bd_partno || rq->part == mh->part)) + mh->hang[rq_data_dir(rq)]++; + + return true; +} + +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]) +{ + struct mq_hang mh = { .part = part }; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_hang, &mh); + hang[0] = mh.hang[0]; + hang[1] = mh.hang[1]; +} + void blk_freeze_queue_start(struct request_queue *q) { mutex_lock(&q->mq_freeze_lock); diff --git a/block/blk-mq.h b/block/blk-mq.h index 1743857e0b01..986d6bcbeefa 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -240,6 +240,8 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct block_device *part); void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, unsigned int inflight[2]); +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]); static inline void blk_mq_put_dispatch_budget(struct request_queue *q, int budget_token) diff --git a/block/blk-settings.c b/block/blk-settings.c index 7019b8e204d9..52fa777d2998 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -25,6 +25,13 @@ void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) } EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); +void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int 
hang_threshold) +{ + q->rq_hang_threshold = hang_threshold; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_hang_threshold); + /** * blk_set_default_limits - reset limits to default values * @lim: the queue_limits structure to reset diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 63e481262336..f852ce8b40a4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -438,6 +438,26 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, return count; } +static ssize_t queue_hang_threshold_show(struct request_queue *q, char *page) +{ + return sprintf(page, "%u\n", q->rq_hang_threshold); +} + +static ssize_t queue_hang_threshold_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned int hang_threshold; + int err; + + err = kstrtou32(page, 10, &hang_threshold); + if (err || hang_threshold == 0) + return -EINVAL; + + blk_queue_rq_hang_threshold(q, hang_threshold); + + return count; +} + static ssize_t queue_wc_show(struct request_queue *q, char *page) { if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) @@ -527,6 +547,7 @@ QUEUE_RO_ENTRY(queue_dax, "dax"); QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout"); QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask"); QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment"); +QUEUE_RW_ENTRY(queue_hang_threshold, "hang_threshold"); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time"); @@ -656,6 +677,7 @@ static struct attribute *queue_attrs[] = { #endif &queue_virt_boundary_mask_entry.attr, &queue_dma_alignment_entry.attr, + &queue_hang_threshold_entry.attr, NULL, }; diff --git a/block/blk.h b/block/blk.h index 08a358bc0919..851a8a1a687d 100644 --- a/block/blk.h +++ b/block/blk.h @@ -285,6 +285,8 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf); ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, diff --git a/block/genhd.c b/block/genhd.c index f9b81be6c761..21f8ea8c6c25 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1007,6 +1007,23 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } +ssize_t part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct block_device *bdev = dev_to_bdev(dev); + struct request_queue *q = bdev_get_queue(bdev); + unsigned int hang[2] = {0, 0}; + + /* + * For now, we only support mq devices, since we haven't found a generic method + to track reqs in a single queue device.
+ */ + if (queue_is_mq(q)) + blk_mq_hang_rw(q, bdev, hang); + + return sprintf(buf, "%8u %8u\n", hang[0], hang[1]); +} + static ssize_t disk_capability_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1051,6 +1068,7 @@ static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL); static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store); static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL); @@ -1094,6 +1112,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_capability.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, &dev_attr_badblocks.attr, &dev_attr_events.attr, &dev_attr_events_async.attr, diff --git a/block/partitions/core.c b/block/partitions/core.c index e58c8b50350b..6282450e6436 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -208,6 +208,7 @@ static DEVICE_ATTR(alignment_offset, 0444, part_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, 0444, part_discard_alignment_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store); @@ -222,6 +223,7 @@ static struct attribute *part_attrs[] = { &dev_attr_discard_alignment.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5b6e86b2c37a..cdd8d5e0f4a1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -375,6 +375,12 @@ struct blk_independent_access_ranges { struct blk_independent_access_range ia_range[]; }; +/* + * default request hang threshold, unit is millisecond. If one request does + * not complete in this threshold time, consider this request as hung. + */ +#define BLK_REQ_HANG_THRESHOLD 5000 + struct request_queue { struct request *last_merge; struct elevator_queue *elevator; @@ -451,6 +457,7 @@ struct request_queue { #endif unsigned int rq_timeout; + unsigned int rq_hang_threshold; struct timer_list timeout; struct work_struct timeout_work; @@ -942,6 +949,8 @@ extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev); +extern void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int hang_threshold); bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); -- Gitee From 04dde45e9960f5264b9066a34fbe3912a2099f65 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 26 Feb 2024 19:41:23 +0800 Subject: [PATCH 214/953] anolis: block: add counter to track io request's d2c time ANBZ: #8419 Indeed the iostat tool's await is not good enough: it is somewhat sketchy and cannot show a request's latency on the device driver's side. Here we add a new counter to track an io request's d2c time; with this patch, we can also extend iostat to show this value easily.
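For illustration only, a minimal userspace sketch (not part of this patch): it dumps the three new d2c fields, assuming the /proc/diskstats layout documented below, i.e. exactly 20 stat fields per line with d2c read/write/discard last as fields 18-20:

	#include <stdio.h>

	int main(void)
	{
		unsigned int major, minor;
		char name[32];
		unsigned long long f[20];	/* stat fields 1..20 */
		FILE *fp = fopen("/proc/diskstats", "r");
		int i;

		if (!fp)
			return 1;
		while (fscanf(fp, "%u %u %31s", &major, &minor, name) == 3) {
			for (i = 0; i < 20; i++)
				if (fscanf(fp, "%llu", &f[i]) != 1)
					goto out;
			/* f[17..19]: d2c ms spent reading/writing/discarding */
			printf("%s d2c(ms): read=%llu write=%llu discard=%llu\n",
			       name, f[17], f[18], f[19]);
		}
	out:
		fclose(fp);
		return 0;
	}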
Note: I have checked how iostat is implemented; it only reads the fields it needs, so iostat won't be affected by this change, and neither will tsar. Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2806 --- Documentation/admin-guide/iostats.rst | 6 ++++++ block/blk-mq.c | 4 ++++ block/genhd.c | 18 +++++++++++++++--- include/linux/part_stat.h | 1 + 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst index 609a3201fd4e..f9af03371cc1 100644 --- a/Documentation/admin-guide/iostats.rst +++ b/Documentation/admin-guide/iostats.rst @@ -131,6 +131,12 @@ Field 16 -- # of flush requests completed Field 17 -- # of milliseconds spent flushing This is the total number of milliseconds spent by all flush requests. +Field 18 -- # of milliseconds spent reading on device driver's side + +Field 19 -- # of milliseconds spent writing on device driver's side + +Field 20 -- # of milliseconds spent discarding on device driver's side + To avoid introducing performance bottlenecks, no locks are held while modifying these counters. This implies that minor inaccuracies may be introduced when changes collide, so (for instance) adding up all the diff --git a/block/blk-mq.c b/block/blk-mq.c index 464e07792e8c..aaf1f2d8294b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1022,6 +1022,10 @@ static inline void blk_account_io_done(struct request *req, u64 now) update_io_ticks(req->part, jiffies, true); part_stat_inc(req->part, ios[sgrp]); part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); + if (req->rq_flags & RQF_STATS) { + part_stat_add(req->part, d2c_nsecs[sgrp], + now - req->io_start_time_ns); + } part_stat_unlock(); } } diff --git a/block/genhd.c b/block/genhd.c index 21f8ea8c6c25..4849601923d9 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -109,6 +109,7 @@ static void part_stat_read_all(struct block_device *part, for (group = 0; group < NR_STAT_GROUPS; group++) { stat->nsecs[group] += ptr->nsecs[group]; + stat->d2c_nsecs[group] += ptr->d2c_nsecs[group]; stat->sectors[group] += ptr->sectors[group]; stat->ios[group] += ptr->ios[group]; stat->merges[group] += ptr->merges[group]; @@ -967,7 +968,8 @@ ssize_t part_stat_show(struct device *dev, "%8lu %8lu %8llu %8u " "%8u %8u %8u " "%8lu %8lu %8llu %8u " - "%8lu %8u" + "%8lu %8u " + "%8u %8u %8u" "\n", stat.ios[STAT_READ], stat.merges[STAT_READ], @@ -989,7 +991,10 @@ ssize_t part_stat_show(struct device *dev, (unsigned long long)stat.sectors[STAT_DISCARD], (unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC), stat.ios[STAT_FLUSH], - (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC)); + (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_READ], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC)); } ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, @@ -1279,7 +1284,8 @@ static int diskstats_show(struct seq_file *seqf, void *v) "%lu %lu %lu %u " "%u %u %u " "%lu %lu %lu %u " - "%lu %u" + "%lu %u " + "%u %u %u" "\n", MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd, stat.ios[STAT_READ], @@ -1306,6 +1312,12 @@ static int diskstats_show(struct seq_file *seqf, void *v) NSEC_PER_MSEC), stat.ios[STAT_FLUSH], (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], + NSEC_PER_MSEC), + (unsigned
int)div_u64(stat.d2c_nsecs[STAT_READ], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC) ); } diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index abeba356bc3f..f03f0c0735de 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -7,6 +7,7 @@ struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; + u64 d2c_nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; -- Gitee From 6080f786c2af8ec9059ac690b3e59ceb49bdfb0a Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 26 Aug 2022 15:49:58 +0800 Subject: [PATCH 215/953] anolis: ovl: change pr_warn() to pr_warn_ratelimited() ANBZ: #8457 Change pr_warn() to pr_warn_ratelimited() to avoid softlockups caused by messages printed at high frequency in abnormal cases. Signed-off-by: Joseph Qi Signed-off-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2830 --- fs/overlayfs/super.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 2c056d737c27..a86de37f18c2 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -403,7 +403,8 @@ static int ovl_lower_dir(const char *name, struct path *path, (ofs->config.index && ofs->config.upperdir)) && !fh_type) { ofs->config.index = false; ofs->config.nfs_export = false; - pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n", + pr_warn_ratelimited("fs on '%s' does not support file handles, " + "falling back to index=off,nfs_export=off.\n", name); } ofs->nofh |= !fh_type; @@ -526,11 +527,15 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) { if (ofs->config.index) { - pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", + pr_err("%s is in-use as upperdir/workdir of another mount, " + "mount with '-o index=off' to override exclusive " + "upperdir protection.\n", name); return -EBUSY; } else { - pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", + pr_warn_ratelimited("%s is in-use as upperdir/workdir of " + "another mount, accessing files from both mounts will " + "result in undefined behavior.\n", name); return 0; } -- Gitee From 18793bb23d2f07fa68f5eac1d1e37aa617aa7b45 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:49:08 +0800 Subject: [PATCH 216/953] anolis: io_uring: re-add sqthread percpu polling support ANBZ: #8410 Add the sqthread percpu polling feature to ANCK 6.6. ---- We need to deal with the sqthread creation method 'create_io_thread' (which used to be 'kthread_create') when applying the SQTHREAD_PERCPU feature in ANCK. There are some cases we found and solved in this patch: 1. avoid premature destruction in sqthread_percpu_polling We found a corner case in sqthread_percpu_polling usage: There are two processes (not in the same thread group) sharing the same sqthread. Besides, the first process created the sqthread. If the first process ends its execution, the sqthread ends its life and exits too, halting the second process. This behaviour is not acceptable. The reason is that we want 'sqd->refs' to be the trigger for sqthread exiting, but when the first process ends, it also ends the sqthread.
Thus, we should check sqd->refs to make sure no one else is still using this sqthread. 2. modify create_io_thread to adapt sqthread_percpu_poll. In contrast to the original kthread_create sqthread, create_io_thread is more suitable for threads in the same group sharing one sqthread. But this is not perfect for io_thread sharing between processes (like the sqthread_percpu usage). This patch originates from a corner case: There are two processes sharing one sqthread, and process0 creates the sqthread. If we send a SIGKILL to process0, the sqthread can't end its life normally. The reason is that the sqthread shares io_uring files and mm with process0 by using 'CLONE_VM', 'CLONE_FILES', etc. When we 'kill -9 process0', due to reference counting, the io_uring file will finally be released by the sqthread, not process0. What we want is: process0..1..2 exit -> sqd->refs-- && sqd->refs == 0 ? -> io_sq_thread EOL. But now it changes to: io_sq_thread must get out of its loop and release the io_uring file when it exits. Yet we use the 'sqd->refs' check to avoid its premature exit, which causes a 'deadlock'. In fact, if we make an sqthread shared between processes, it had better not share one specific process's FILES or VM. By deleting these flags, the sqthread runs more like an independent kernel thread. By the way, in the original kthread_create sqthread creation mode, as it's generated by kthreadd, it has no mm and shares 'init_fs' and 'init_files'; this is quite different from the current create_io_thread. Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2822 --- include/linux/sched/task.h | 3 +- include/uapi/linux/io_uring.h | 1 + io_uring/io-wq.c | 4 +- io_uring/io_uring.c | 14 ++++-- io_uring/sqpoll.c | 81 ++++++++++++++++++++++++++++++----- io_uring/sqpoll.h | 2 +- kernel/fork.c | 21 ++++++--- 7 files changed, 102 insertions(+), 24 deletions(-) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index a23af225c898..d2d46728da3e 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -94,7 +94,8 @@ extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *copy_process(struct pid *pid, int trace, int node, struct kernel_clone_args *args); -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, + bool unshare); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name, unsigned long flags); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2ce..46d129dd5d4f 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -165,6 +165,7 @@ enum { * Only one task is allowed to submit requests */ #define IORING_SETUP_SINGLE_ISSUER (1U << 12) +#define IORING_SETUP_SQPOLL_PERCPU (1U << 31) /* percpu SQ poll thread */ /* * Defer running task work to get events.
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 522196dfb0ff..fb7aca49d097 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -771,7 +771,7 @@ static void create_worker_cont(struct callback_head *cb) worker = container_of(cb, struct io_worker, create_work); clear_bit_unlock(0, &worker->create_state); wq = worker->wq; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); io_worker_release(worker); @@ -840,7 +840,7 @@ static bool create_io_worker(struct io_wq *wq, int index) if (index == IO_WQ_ACCT_BOUND) worker->flags |= IO_WORKER_F_BOUND; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); } else if (!io_should_retry_thread(PTR_ERR(tsk))) { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index aed10bae50ac..1e5d59cff589 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -137,6 +137,8 @@ struct io_defer_entry { u32 seq; }; +extern struct io_sq_data __percpu **percpu_sqd; + /* requests with any of those set should undergo io_disarm_next() */ #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK) @@ -4043,7 +4045,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | - IORING_SETUP_NO_SQARRAY)) + IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU)) return -EINVAL; return io_uring_create(entries, &p, params); @@ -4331,7 +4333,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (sqd) { mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); } if (copy_to_user(arg, new_count, sizeof(new_count))) @@ -4357,7 +4359,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, err: if (sqd) { mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); } return ret; } @@ -4578,6 +4580,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, static int __init io_uring_init(void) { + int cpu; + #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \ BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \ @@ -4666,6 +4670,10 @@ static int __init io_uring_init(void) offsetof(struct io_kiocb, cmd.data), sizeof_field(struct io_kiocb, cmd.data), NULL); + percpu_sqd = alloc_percpu(struct io_sq_data *); + for_each_possible_cpu(cpu) + *per_cpu_ptr(percpu_sqd, cpu) = NULL; + #ifdef CONFIG_SYSCTL register_sysctl_init("kernel", kernel_io_uring_disabled_table); #endif diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 65b5dbe3c850..f4428e9071fd 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -24,6 +24,9 @@ enum { IO_SQ_THREAD_SHOULD_PARK, }; +DEFINE_MUTEX(percpu_sqd_lock); +struct io_sq_data __percpu **percpu_sqd; + void io_sq_thread_unpark(struct io_sq_data *sqd) __releases(&sqd->lock) { @@ -64,14 +67,28 @@ void io_sq_thread_stop(struct io_sq_data *sqd) wait_for_completion(&sqd->exited); } -void io_put_sq_data(struct io_sq_data *sqd) +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd) { + int percpu = 0; + + if ((ctx->flags & IORING_SETUP_SQ_AFF) && + 
(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) + percpu = 1; + + if (percpu) + mutex_lock(&percpu_sqd_lock); + if (refcount_dec_and_test(&sqd->refs)) { WARN_ON_ONCE(atomic_read(&sqd->park_pending)); io_sq_thread_stop(sqd); + if (percpu) + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = NULL; kfree(sqd); } + + if (percpu) + mutex_unlock(&percpu_sqd_lock); } static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) @@ -94,7 +111,7 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx) io_sqd_update_thread_idle(sqd); io_sq_thread_unpark(sqd); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); ctx->sq_data = NULL; } } @@ -130,11 +147,11 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) } static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, - bool *attached) + bool *attached, bool *percpu_found) { struct io_sq_data *sqd; - *attached = false; + *attached = *percpu_found = false; if (p->flags & IORING_SETUP_ATTACH_WQ) { sqd = io_attach_sq_data(p); if (!IS_ERR(sqd)) { @@ -146,6 +163,19 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, return sqd; } + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + mutex_lock(&percpu_sqd_lock); + sqd = *per_cpu_ptr(percpu_sqd, p->sq_thread_cpu); + if (sqd) { + refcount_inc(&sqd->refs); + mutex_unlock(&percpu_sqd_lock); + *percpu_found = true; + return sqd; + } + mutex_unlock(&percpu_sqd_lock); + } + sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); if (!sqd) return ERR_PTR(-ENOMEM); @@ -210,8 +240,16 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd) if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || signal_pending(current)) { mutex_unlock(&sqd->lock); - if (signal_pending(current)) + if (signal_pending(current)) { did_sig = get_signal(&ksig); + if (did_sig && sqd->sq_cpu != -1 && + refcount_read(&sqd->refs) != 0) { + mutex_lock(&percpu_sqd_lock); + if (*per_cpu_ptr(percpu_sqd, sqd->sq_cpu) == sqd) + did_sig = false; + mutex_unlock(&percpu_sqd_lock); + } + } cond_resched(); mutex_lock(&sqd->lock); sqd->sq_cpu = raw_smp_processor_id(); @@ -362,13 +400,26 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ctx->flags & IORING_SETUP_SQPOLL) { struct task_struct *tsk; struct io_sq_data *sqd; - bool attached; + bool attached, percpu_found; ret = security_uring_sqpoll(); if (ret) return ret; - sqd = io_get_sq_data(p, &attached); + if ((ctx->flags & IORING_SETUP_ATTACH_WQ) && + (ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + /* ATTACH_WQ and SQPOLL_PERCPU are mutual exclusive */ + ret = -EINVAL; + goto err; + } + if ((ctx->flags & IORING_SETUP_SQPOLL_PERCPU) && + !(ctx->flags & IORING_SETUP_SQ_AFF)) { + /* SQPOLL_PERCPU and SQ_AFF should both exist */ + ret = -EINVAL; + goto err; + } + + sqd = io_get_sq_data(p, &attached, &percpu_found); if (IS_ERR(sqd)) { ret = PTR_ERR(sqd); goto err; @@ -389,7 +440,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ret < 0) goto err; - if (attached) + if (attached || percpu_found) return 0; if (p->flags & IORING_SETUP_SQ_AFF) { @@ -405,19 +456,27 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->task_pid = current->pid; sqd->task_tgid = current->tgid; - tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); + tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE, + !!(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); goto err_sqpoll; } sqd->thread = tsk; + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + 
mutex_lock(&percpu_sqd_lock); + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = sqd; + mutex_unlock(&percpu_sqd_lock); + } ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) goto err; - } else if (p->flags & IORING_SETUP_SQ_AFF) { - /* Can't have SQ_AFF without SQPOLL */ + } else if (p->flags & (IORING_SETUP_SQ_AFF | + IORING_SETUP_SQPOLL_PERCPU)) { + /* Can't have SQ_AFF or SQPOLL_PERCPU without SQPOLL */ ret = -EINVAL; goto err; } diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index 8df37e8c9149..1579a0a2bdc3 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -25,6 +25,6 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx); void io_sq_thread_stop(struct io_sq_data *sqd); void io_sq_thread_park(struct io_sq_data *sqd); void io_sq_thread_unpark(struct io_sq_data *sqd); -void io_put_sq_data(struct io_sq_data *sqd); +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd); void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); diff --git a/kernel/fork.c b/kernel/fork.c index 71b1c8ee328e..daf5c6e2c314 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2849,13 +2849,22 @@ struct task_struct * __init fork_idle(int cpu) * The returned task is inactive, and the caller must fire it up through * wake_up_new_task(p). All signals are blocked in the created task. */ -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) -{ - unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| - CLONE_IO; +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, + bool unshare) +{ + unsigned long flags = unshare ? 0 : (CLONE_FS|CLONE_FILES| + CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO|CLONE_VM); + /* we use the 'unshare' flag to try to create an independent io_thread; + * 'unshare' describes whether the child shares the parent's mm directly (with + * its refcount incremented), or copies mm/files in copy_process(). + * By setting this flag, the io_thread won't share the parent's mm + * directly, but can be shared among different tasks, which looks more + * reasonable. + */ struct kernel_clone_args args = { - .flags = ((lower_32_bits(flags) | CLONE_VM | - CLONE_UNTRACED) & ~CSIGNAL), + .flags = ((lower_32_bits(flags) | CLONE_UNTRACED) + & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, -- Gitee From 09cd20be9d03aa4da5ebb89905aa7d6562a88caf Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:49:31 +0800 Subject: [PATCH 217/953] anolis: io_uring: add support for us granularity of io_sq_thread_idle ANBZ: #8410 Currently the unit of io_sq_thread_idle is milliseconds; the smallest value is 1ms, which means that for IOPS > 1000 the sqthread will very likely take 100% cpu usage. This is not necessary in some cases: users may not care much about latency under low IO pressure (like 1000 < IOPS < 20000), but cpu resources do matter. So we offer an option of microsecond granularity for io_sq_thread_idle.
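For reference, a hypothetical userspace setup sketch (not taken from this series; error handling elided). It requests a per-cpu SQPOLL thread with a 50us idle time; the two flag values mirror the uapi additions in these patches and are redefined locally because stock headers do not carry them:

	#include <linux/io_uring.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define IORING_SETUP_IDLE_US		(1U << 30)
	#define IORING_SETUP_SQPOLL_PERCPU	(1U << 31)

	int setup_percpu_us_ring(unsigned int entries, int cpu)
	{
		struct io_uring_params p;

		memset(&p, 0, sizeof(p));
		/* IDLE_US is only accepted together with SQPOLL_PERCPU + SQ_AFF */
		p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF |
			  IORING_SETUP_SQPOLL_PERCPU | IORING_SETUP_IDLE_US;
		p.sq_thread_cpu = cpu;
		p.sq_thread_idle = 50;	/* microseconds in IDLE_US mode; 0 falls back to the default */

		return syscall(__NR_io_uring_setup, entries, &p);
	}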
Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2822 --- include/uapi/linux/io_uring.h | 1 + io_uring/io_uring.c | 3 +- io_uring/sqpoll.c | 64 ++++++++++++++++++++++++++++------- io_uring/sqpoll.h | 2 ++ 4 files changed, 57 insertions(+), 13 deletions(-) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 46d129dd5d4f..08dad474a82b 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -165,6 +165,7 @@ enum { * Only one task is allowed to submit requests */ #define IORING_SETUP_SINGLE_ISSUER (1U << 12) +#define IORING_SETUP_IDLE_US (1U << 30) /* unit of thread_idle is macrosecond */ #define IORING_SETUP_SQPOLL_PERCPU (1U << 31) /* percpu SQ poll thread */ /* diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 1e5d59cff589..aa4be64d813d 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -4045,7 +4045,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | - IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU)) + IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU | + IORING_SETUP_IDLE_US)) return -EINVAL; return io_uring_create(entries, &p, params); diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index f4428e9071fd..de494bebc84c 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -96,11 +96,36 @@ static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) struct io_ring_ctx *ctx; unsigned sq_thread_idle = 0; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) - sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle); + sqd->idle_mode_us = false; + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { + bool idle_mode_us = ctx->flags & IORING_SETUP_IDLE_US; + unsigned int tmp_idle = idle_mode_us ? ctx->sq_thread_idle : + jiffies_to_usecs(ctx->sq_thread_idle); + + if (idle_mode_us && !sqd->idle_mode_us) + sqd->idle_mode_us = true; + + if (sq_thread_idle < tmp_idle) + sq_thread_idle = tmp_idle; + } + + if (!sqd->idle_mode_us) + sq_thread_idle = usecs_to_jiffies(sq_thread_idle); sqd->sq_thread_idle = sq_thread_idle; } +static inline u64 io_current_time(bool idle_mode_us) +{ + return idle_mode_us ? 
(ktime_get_ns() >> 10) : get_jiffies_64(); +} + +static inline bool io_time_after(bool idle_mode_us, u64 timeout) +{ + u64 now = io_current_time(idle_mode_us); + + return time_after64(now, timeout); +} + void io_sq_thread_finish(struct io_ring_ctx *ctx) { struct io_sq_data *sqd = ctx->sq_data; @@ -261,7 +286,7 @@ static int io_sq_thread(void *data) { struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - unsigned long timeout = 0; + u64 timeout = 0; char buf[TASK_COMM_LEN]; DEFINE_WAIT(wait); @@ -285,7 +310,7 @@ static int io_sq_thread(void *data) if (io_sqd_events_pending(sqd) || signal_pending(current)) { if (io_sqd_handle_event(sqd)) break; - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } cap_entries = !list_is_singular(&sqd->ctx_list); @@ -298,9 +323,9 @@ static int io_sq_thread(void *data) if (io_run_task_work()) sqt_spin = true; - if (sqt_spin || !time_after(jiffies, timeout)) { + if (sqt_spin || !io_time_after(sqd->idle_mode_us, timeout)) { if (sqt_spin) - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; if (unlikely(need_resched())) { mutex_unlock(&sqd->lock); cond_resched(); @@ -347,7 +372,7 @@ static int io_sq_thread(void *data) } finish_wait(&sqd->wait, &wait); - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } io_uring_cancel_generic(true, sqd); @@ -378,6 +403,8 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx) finish_wait(&ctx->sqo_sq_wait, &wait); } +#define DEFAULT_SQ_IDLE_US 10 + __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { @@ -427,9 +454,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, ctx->sq_creds = get_current_cred(); ctx->sq_data = sqd; - ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); - if (!ctx->sq_thread_idle) - ctx->sq_thread_idle = HZ; + if ((ctx->flags & IORING_SETUP_IDLE_US) && + !(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + ret = -EINVAL; + goto err; + } + + /* + * for ms mode: ctx->sq_thread_idle is jiffies + * for us mode: ctx->sq_thread_idle is time in terms of microsecond + */ + if (ctx->flags & IORING_SETUP_IDLE_US) + ctx->sq_thread_idle = p->sq_thread_idle ? + p->sq_thread_idle : DEFAULT_SQ_IDLE_US; + else + ctx->sq_thread_idle = p->sq_thread_idle ? 
+ msecs_to_jiffies(p->sq_thread_idle) : HZ; io_sq_thread_park(sqd); list_add(&ctx->sqd_list, &sqd->ctx_list); @@ -474,9 +514,9 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, wake_up_new_task(tsk); if (ret) goto err; - } else if (p->flags & (IORING_SETUP_SQ_AFF | + } else if (p->flags & (IORING_SETUP_SQ_AFF | IORING_SETUP_IDLE_US | IORING_SETUP_SQPOLL_PERCPU)) { - /* Can't have SQ_AFF or SQPOLL_PERCPU without SQPOLL */ + /* Can't have SQ_AFF or IDLE_US or SQPOLL_PERCPU without SQPOLL */ ret = -EINVAL; goto err; } diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index 1579a0a2bdc3..60eec0c97864 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -18,6 +18,8 @@ struct io_sq_data { unsigned long state; struct completion exited; + + bool idle_mode_us; }; int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p); -- Gitee From 8606b5bbeddb1176d363256004e244b5cb9b5390 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:49:44 +0800 Subject: [PATCH 218/953] anolis: io_uring: submit sqes in the original context when waking up sqthread ANBZ: #8410 sqes are submitted by the sqthread when it is leveraged, which means there is IO latency when waking up the sqthread. To wipe it out, submit a limited number of sqes in the original task context. [Rebase note:] 1. As the sqthread now uses the create_io_thread func and the member var 'sqo_task' is not used anymore, we shouldn't use same_thread_group to judge the relationship between the usermode app and the sqthread. 2. Now the notify type is specified at io_req creation time, and the lowest level is TWA_SIGNAL_NO_IPI (in older versions it was TWA_NONE), so there is no need to additionally specify the notify type for submit_on_idle. Refers to patch (9f010507bb io_uring: set task_work notify method at init time). Signed-off-by: Hao Xu Signed-off-by: Shile Zhang Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2822 --- include/uapi/linux/io_uring.h | 1 + io_uring/io_uring.c | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 08dad474a82b..fdea50f53da3 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -468,6 +468,7 @@ struct io_cqring_offsets { #define IORING_ENTER_SQ_WAIT (1U << 2) #define IORING_ENTER_EXT_ARG (1U << 3) #define IORING_ENTER_REGISTERED_RING (1U << 4) +#define IORING_ENTER_SQ_SUBMIT_ON_IDLE (1U << 31) /* * Passed in for io_uring_setup(2). Copied back with updated info on success diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index aa4be64d813d..7f99dc3a365b 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1716,11 +1716,12 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(needs_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle - * in sq thread task context or in io worker task context. If - * current task context is sq thread, we don't need to check - * whether should wake up sq thread. + * in sq thread task context or in io worker task context or + * in original context. If current task context is sq thread, + * we don't need to check whether we should wake up the sq thread.
*/ if ((ctx->flags & IORING_SETUP_SQPOLL) && + (current != ctx->sq_data->thread) && wq_has_sleeper(&ctx->sq_data->wait)) wake_up(&ctx->sq_data->wait); @@ -3594,6 +3595,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | + IORING_ENTER_SQ_SUBMIT_ON_IDLE | IORING_ENTER_REGISTERED_RING))) return -EINVAL; @@ -3637,8 +3639,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ret = -EOWNERDEAD; goto out; } - if (flags & IORING_ENTER_SQ_WAKEUP) + if (flags & IORING_ENTER_SQ_WAKEUP) { wake_up(&ctx->sq_data->wait); + if (flags & IORING_ENTER_SQ_SUBMIT_ON_IDLE) { + bool has_lock; + + has_lock = mutex_trylock(&ctx->uring_lock); + if (has_lock) { + io_submit_sqes(ctx, min(to_submit, 8U)); + mutex_unlock(&ctx->uring_lock); + } + } + } if (flags & IORING_ENTER_SQ_WAIT) io_sqpoll_wait_sq(ctx); -- Gitee From f7d35fbe39005ef29b4c6da0fd31111f498677f4 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:12 +0800 Subject: [PATCH 219/953] anolis: block-throttle: enable hierarchical throttling even on traditional hierarchy ANBZ: #8411 ECI may have a use case of configuring each device mapper disk's throttling policy just under the root blkio cgroup, while actually using the disks in different containers. Since hierarchical throttling is only supported on cgroup v2 and ECI uses cgroup v1, we have to enable hierarchical throttling on cgroup v1. This is ported from redhat 7u, and a year ago Jiufei already ported it to alikernel 4.9 as well, so I think this change should be acceptable. Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-throttle.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 16f5766620a4..824569637fca 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -405,7 +405,9 @@ static void throtl_pd_init(struct blkg_policy_data *pd) * regardless of the position of the group in the hierarchy. */ sq->parent_sq = &td->service_queue; - if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) + + /* Enable hierarchical throttling even on traditional hierarchy */ + if (blkg->parent) sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; tg->td = td; } -- Gitee From e05cb903e32b1905f9ee409f1012bc560ceac141 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:21 +0800 Subject: [PATCH 220/953] anolis: blk-throttle: support io delay stats ANBZ: #8411 Add blkio.throttle.io_service_time and blkio.throttle.io_wait_time to get per-cgroup io delay statistics. io_service_time represents the time spent from io throttle to io completion, while io_wait_time represents the time spent on the throttle queue. [Merge note:] As bio_init() has been changed, we should initialize the new 'struct bio' member variables.
Also squash patch "anolis: blk-throttle: fix tg NULL pointer dereference" Also squash patch "anolis: block: replace reserved field with extended bio_flags" Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: zhongjiang-ali Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/bio.c | 10 ++++ block/blk-throttle.c | 107 +++++++++++++++++++++++++++++++++++--- block/blk-throttle.h | 4 ++ include/linux/bio.h | 15 ++++++ include/linux/blk_types.h | 40 ++++++++++++++ 5 files changed, 169 insertions(+), 7 deletions(-) diff --git a/block/bio.c b/block/bio.c index c87160fc8974..6784bbe44d16 100644 --- a/block/bio.c +++ b/block/bio.c @@ -263,6 +263,12 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, bio->bi_issue.value = 0; if (bdev) bio_associate_blkg(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + bio->start_time_ns = 0; + bio->io_start_time_ns = 0; + bio->bi_tg_end_io = NULL; + bio->bi_tg_private = NULL; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST bio->bi_iocost_cost = 0; #endif @@ -1605,6 +1611,10 @@ void bio_endio(struct bio *bio) blk_throtl_bio_endio(bio); /* release cgroup info */ bio_uninit(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + if (bio->bi_tg_end_io) + bio->bi_tg_end_io(bio); +#endif if (bio->bi_end_io) bio->bi_end_io(bio); } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 824569637fca..d1952cf949cc 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -345,11 +345,11 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, if (!tg) return NULL; - if (blkg_rwstat_init(&tg->stat_bytes, gfp)) - goto err_free_tg; - - if (blkg_rwstat_init(&tg->stat_ios, gfp)) - goto err_exit_stat_bytes; + if (blkg_rwstat_init(&tg->stat_bytes, gfp) || + blkg_rwstat_init(&tg->stat_ios, gfp) || + blkg_rwstat_init(&tg->service_time, gfp) || + blkg_rwstat_init(&tg->wait_time, gfp)) + goto err; throtl_service_queue_init(&tg->service_queue); @@ -376,9 +376,11 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, return &tg->pd; -err_exit_stat_bytes: +err: blkg_rwstat_exit(&tg->stat_bytes); -err_free_tg: + blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); kfree(tg); return NULL; } @@ -476,6 +478,8 @@ static void throtl_upgrade_state(struct throtl_data *td); static void throtl_pd_offline(struct blkg_policy_data *pd) { struct throtl_grp *tg = pd_to_tg(pd); + struct blkcg_gq *blkg = pd_to_blkg(pd); + struct blkcg_gq *parent = blkg->parent; tg->bps[READ][LIMIT_LOW] = 0; tg->bps[WRITE][LIMIT_LOW] = 0; @@ -486,6 +490,12 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) if (!tg->td->limit_valid[tg->td->limit_index]) throtl_upgrade_state(tg->td); + if (parent) { + blkg_rwstat_add_aux(&blkg_to_tg(parent)->service_time, + &tg->service_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, + &tg->wait_time); + } } static void throtl_pd_free(struct blkg_policy_data *pd) @@ -495,9 +505,19 @@ static void throtl_pd_free(struct blkg_policy_data *pd) del_timer_sync(&tg->service_queue.pending_timer); blkg_rwstat_exit(&tg->stat_bytes); blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); kfree(tg); } +static void throtl_pd_reset(struct blkg_policy_data *pd) +{ + struct throtl_grp *tg = pd_to_tg(pd); + + blkg_rwstat_reset(&tg->service_time); + blkg_rwstat_reset(&tg->wait_time); +} + static struct throtl_grp * throtl_rb_first(struct throtl_service_queue *parent_sq) { @@ -960,6 
+980,64 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, return false; } +static void throtl_stats_update_completion(struct throtl_grp *tg, + uint64_t start_time, + uint64_t io_start_time, + int op) +{ + unsigned long flags; + uint64_t now = sched_clock(); + + local_irq_save(flags); + if (time_after64(now, io_start_time)) + blkg_rwstat_add(&tg->service_time, op, now - io_start_time); + if (time_after64(io_start_time, start_time)) + blkg_rwstat_add(&tg->wait_time, op, io_start_time - start_time); + local_irq_restore(flags); +} + +static void throtl_bio_end_io(struct bio *bio) +{ + struct throtl_grp *tg; + + rcu_read_lock(); + /* see comments in throtl_bio_stats_start() */ + if (!bio_ext_flagged(bio, BIO_THROTL_STATED)) + goto out; + + tg = (struct throtl_grp *)bio->bi_tg_private; + if (!tg) + goto out; + + throtl_stats_update_completion(tg, bio_start_time_ns(bio), + bio_io_start_time_ns(bio), + bio_op(bio)); + blkg_put(tg_to_blkg(tg)); + bio_clear_ext_flag(bio, BIO_THROTL_STATED); +out: + rcu_read_unlock(); +} + +static inline void throtl_bio_stats_start(struct bio *bio, struct throtl_grp *tg) +{ + int op = bio_op(bio); + + /* + * It may happen that end_io will be called twice like dm-thin, + * which will save origin end_io first, and call its overwrite + * end_io and then the saved end_io. We use bio flag + * BIO_THROTL_STATED to do only once statistics. + */ + if ((op == REQ_OP_READ || op == REQ_OP_WRITE) && + !bio_ext_flagged(bio, BIO_THROTL_STATED)) { + blkg_get(tg_to_blkg(tg)); + bio_set_ext_flag(bio, BIO_THROTL_STATED); + bio->bi_tg_end_io = throtl_bio_end_io; + bio->bi_tg_private = tg; + bio_set_start_time_ns(bio); + } +} + static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); @@ -1488,6 +1566,16 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, stat_ios), .seq_show = tg_print_rwstat_recursive, }, + { + .name = "throttle.io_service_time", + .private = offsetof(struct throtl_grp, service_time), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.io_wait_time", + .private = offsetof(struct throtl_grp, wait_time), + .seq_show = tg_print_rwstat, + }, { } /* terminate */ }; @@ -1716,6 +1804,7 @@ struct blkcg_policy blkcg_policy_throtl = { .pd_online_fn = throtl_pd_online, .pd_offline_fn = throtl_pd_offline, .pd_free_fn = throtl_pd_free, + .pd_reset_stats_fn = throtl_pd_reset, }; void blk_throtl_cancel_bios(struct gendisk *disk) @@ -2188,6 +2277,8 @@ bool __blk_throtl_bio(struct bio *bio) rcu_read_lock(); + throtl_bio_stats_start(bio, tg); + spin_lock_irq(&q->queue_lock); throtl_update_latency_buckets(td); @@ -2277,6 +2368,8 @@ bool __blk_throtl_bio(struct bio *bio) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif spin_unlock_irq(&q->queue_lock); + if (!throttled) + bio_set_io_start_time_ns(bio); rcu_read_unlock(); return throttled; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index bffbc9cfc8ab..1e1f6a858571 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -150,6 +150,10 @@ struct throtl_grp { struct blkg_rwstat stat_bytes; struct blkg_rwstat stat_ios; + /* total time spent on lower layer: scheduler, device and others */ + struct blkg_rwstat service_time; + /* total time spent on block throttle */ + struct blkg_rwstat wait_time; }; extern struct blkcg_policy blkcg_policy_throtl; diff --git a/include/linux/bio.h b/include/linux/bio.h index 0286bada25ce..efb40c3282ca 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -242,6 
+242,21 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) bio->bi_flags &= ~(1U << bit); } +static inline bool bio_ext_flagged(struct bio *bio, unsigned int bit) +{ + return (bio->bi_ext_flags & (1U << bit)) != 0; +} + +static inline void bio_set_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags |= (1U << bit); +} + +static inline void bio_clear_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags &= ~(1U << bit); +} + static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 92c8997b1938..ce14257bbf74 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -10,6 +10,7 @@ #include #include #include +#include struct bio_set; struct bio; @@ -287,6 +288,12 @@ struct bio { */ struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; +#ifdef CONFIG_BLK_DEV_THROTTLING + unsigned long long start_time_ns; /* when passed to block throttle */ + unsigned long long io_start_time_ns; /* when no more throttle */ + bio_end_io_t *bi_tg_end_io; + void *bi_tg_private; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST u64 bi_iocost_cost; #endif @@ -316,6 +323,8 @@ struct bio { struct bio_set *bi_pool; + unsigned long bi_ext_flags; /* extend the bi_flags */ + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member @@ -349,6 +358,37 @@ enum { BIO_FLAG_LAST }; +/* + * Extended bio flags should be added here + */ +#define BIO_THROTL_STATED 0 /* bio already stated */ + +#ifdef CONFIG_BLK_DEV_THROTTLING +static inline void bio_set_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline void bio_set_io_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->io_start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline uint64_t bio_start_time_ns(struct bio *bio) +{ + return bio->start_time_ns; +} + +static inline uint64_t bio_io_start_time_ns(struct bio *bio) +{ + return bio->io_start_time_ns; +} +#endif + typedef __u32 __bitwise blk_mq_req_flags_t; #define REQ_OP_BITS 8 -- Gitee From a78e5f728441f252e530bf5897d71c1f8afb9cb6 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:39 +0800 Subject: [PATCH 221/953] anolis: blk-throttle: add throttled io/bytes counter ANBZ: #8411 Add 2 interfaces to expose io throttle statistics: blkio.throttle.total_io_queued blkio.throttle.total_bytes_queued These interfaces are used for monitoring throttled io/bytes and analyzing whether delays are related to io throttling.
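As a usage illustration (assuming a cgroup v1 blkio hierarchy mounted at /sys/fs/cgroup/blkio and a hypothetical group named 'mygrp'), a trivial reader for the two new files:

	#include <stdio.h>

	static void dump(const char *path)
	{
		char line[256];
		FILE *fp = fopen(path, "r");

		if (!fp)
			return;
		/* rwstat output: "MAJ:MIN <op> <value>" lines plus a Total line */
		while (fgets(line, sizeof(line), fp))
			fputs(line, stdout);
		fclose(fp);
	}

	int main(void)
	{
		dump("/sys/fs/cgroup/blkio/mygrp/blkio.throttle.total_io_queued");
		dump("/sys/fs/cgroup/blkio/mygrp/blkio.throttle.total_bytes_queued");
		return 0;
	}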
Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-throttle.c | 27 ++++++++++++++++++++++++++- block/blk-throttle.h | 4 ++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index d1952cf949cc..8c2380c46f8a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -348,7 +348,9 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, if (blkg_rwstat_init(&tg->stat_bytes, gfp) || blkg_rwstat_init(&tg->stat_ios, gfp) || blkg_rwstat_init(&tg->service_time, gfp) || - blkg_rwstat_init(&tg->wait_time, gfp)) + blkg_rwstat_init(&tg->wait_time, gfp) || + blkg_rwstat_init(&tg->total_bytes_queued, gfp) || + blkg_rwstat_init(&tg->total_io_queued, gfp)) goto err; throtl_service_queue_init(&tg->service_queue); @@ -381,6 +383,8 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); return NULL; } @@ -495,6 +499,10 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) &tg->service_time); blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, &tg->wait_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_bytes_queued, + &tg->total_bytes_queued); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_io_queued, + &tg->total_io_queued); } } @@ -507,6 +515,8 @@ static void throtl_pd_free(struct blkg_policy_data *pd) blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); } @@ -516,6 +526,8 @@ static void throtl_pd_reset(struct blkg_policy_data *pd) blkg_rwstat_reset(&tg->service_time); blkg_rwstat_reset(&tg->wait_time); + blkg_rwstat_reset(&tg->total_bytes_queued); + blkg_rwstat_reset(&tg->total_io_queued); } static struct throtl_grp * @@ -1083,6 +1095,9 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq->nr_queued[rw]++; + blkg_rwstat_add(&tg->total_bytes_queued, bio_op(bio), + throtl_bio_data_size(bio)); + blkg_rwstat_add(&tg->total_io_queued, bio_op(bio), 1); throtl_enqueue_tg(tg); } @@ -1576,6 +1591,16 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, wait_time), .seq_show = tg_print_rwstat, }, + { + .name = "throttle.total_bytes_queued", + .private = offsetof(struct throtl_grp, total_bytes_queued), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.total_io_queued", + .private = offsetof(struct throtl_grp, total_io_queued), + .seq_show = tg_print_rwstat, + }, { } /* terminate */ }; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index 1e1f6a858571..2db648bafe76 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -154,6 +154,10 @@ struct throtl_grp { struct blkg_rwstat service_time; /* total time spent on block throttle */ struct blkg_rwstat wait_time; + /* total bytes throttled */ + struct blkg_rwstat total_bytes_queued; + /* total IOs throttled */ + struct blkg_rwstat total_io_queued; }; extern struct blkcg_policy blkcg_policy_throtl; -- Gitee From f1ec47090233df997fb00b7f206d17385a71baad Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:53 +0800 Subject: [PATCH 222/953] anolis: block-throttle: add counters for 
completed io ANBZ: #8411 Now we have counters for wait_time and service_time, but no completed ios, so the average latency can not be measured. Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-throttle.c | 12 ++++++++++++ block/blk-throttle.h | 2 ++ 2 files changed, 14 insertions(+) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8c2380c46f8a..e322512de001 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -349,6 +349,7 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, blkg_rwstat_init(&tg->stat_ios, gfp) || blkg_rwstat_init(&tg->service_time, gfp) || blkg_rwstat_init(&tg->wait_time, gfp) || + blkg_rwstat_init(&tg->completed, gfp) || blkg_rwstat_init(&tg->total_bytes_queued, gfp) || blkg_rwstat_init(&tg->total_io_queued, gfp)) goto err; @@ -383,6 +384,7 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); blkg_rwstat_exit(&tg->total_bytes_queued); blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); @@ -499,6 +501,8 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) &tg->service_time); blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, &tg->wait_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->completed, + &tg->completed); blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_bytes_queued, &tg->total_bytes_queued); blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_io_queued, @@ -515,6 +519,7 @@ static void throtl_pd_free(struct blkg_policy_data *pd) blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); blkg_rwstat_exit(&tg->total_bytes_queued); blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); @@ -526,6 +531,7 @@ static void throtl_pd_reset(struct blkg_policy_data *pd) blkg_rwstat_reset(&tg->service_time); blkg_rwstat_reset(&tg->wait_time); + blkg_rwstat_reset(&tg->completed); blkg_rwstat_reset(&tg->total_bytes_queued); blkg_rwstat_reset(&tg->total_io_queued); } @@ -1005,6 +1011,7 @@ static void throtl_stats_update_completion(struct throtl_grp *tg, blkg_rwstat_add(&tg->service_time, op, now - io_start_time); if (time_after64(io_start_time, start_time)) blkg_rwstat_add(&tg->wait_time, op, io_start_time - start_time); + blkg_rwstat_add(&tg->completed, op, 1); local_irq_restore(flags); } @@ -1591,6 +1598,11 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, wait_time), .seq_show = tg_print_rwstat, }, + { + .name = "throttle.io_completed", + .private = offsetof(struct throtl_grp, completed), + .seq_show = tg_print_rwstat, + }, { .name = "throttle.total_bytes_queued", .private = offsetof(struct throtl_grp, total_bytes_queued), diff --git a/block/blk-throttle.h b/block/blk-throttle.h index 2db648bafe76..a65cdb0cad83 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -154,6 +154,8 @@ struct throtl_grp { struct blkg_rwstat service_time; /* total time spent on block throttle */ struct blkg_rwstat wait_time; + /* total IOs completed */ + struct blkg_rwstat completed; /* total bytes throttled */ struct blkg_rwstat total_bytes_queued; /* total IOs throttled */ -- Gitee From f11bf33434795a4e50284ef9b794fc09f8d81a42 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:59 +0800 Subject: [PATCH 223/953] anolis: blk-throttle: 
limit bios to fix amount of pages entering writeback prematurely ANBZ: #8411 Currently in blk_throtl_bio(), if one bio exceeds its throtl_grp's bps or iops limit, the bio is queued on the throtl_grp's throtl_service_queue, so the mm subsystem keeps submitting more pages even though the underlying device cannot handle these io requests. This makes a large amount of pages enter writeback prematurely, and if some process later writes some of these pages, it will wait for a long time. I have done some tests: one process does buffered writes on a 1GB file while its blkcg max bps limit is set to 10MB/s, and I observe this: #cat /proc/meminfo | grep -i back Writeback: 900024 kB WritebackTmp: 0 kB This Writeback value is far too big: many bios have been queued in the throtl_grp's throtl_service_queue, and if one process tries to write the last bio's page in this queue, it will call wait_on_page_writeback(page), which must wait for the previous bios to finish and can take a long time. We have also seen 120s hung task warnings on our server. INFO: task kworker/u128:0:30072 blocked for more than 120 seconds. Tainted: G E 4.9.147-013.ck3000_015_test.cl7.x86_64 #1 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. kworker/u128:0 D 0 30072 2 0x00000000 Workqueue: writeback wb_workfn (flush-8:16) ffff882ddd066b40 0000000000000000 ffff882e5cad3400 ffff882fbe959e80 ffff882fa50b1a00 ffffc9003a5a3768 ffffffff8173325d ffffc9003a5a3780 00ff882e5cad3400 ffff882fbe959e80 ffffffff81360b49 ffff882e5cad3400 Call Trace: [] ? __schedule+0x23d/0x6d0 [] ? alloc_request_struct+0x19/0x20 [] schedule+0x36/0x80 [] schedule_timeout+0x206/0x4b0 [] ? sched_clock+0x9/0x10 [] ? get_request+0x403/0x810 [] ? ktime_get+0x40/0xb0 [] io_schedule_timeout+0xda/0x170 [] ? bit_wait+0x60/0x60 [] bit_wait_io+0x1b/0x60 [] __wait_on_bit+0x58/0x90 [] ? find_get_pages_tag+0x161/0x2e0 [] wait_on_page_bit+0x82/0xa0 [] ? wake_atomic_t_function+0x60/0x60 [] mpage_prepare_extent_to_map+0x2d1/0x310 [ext4] [] ? kmem_cache_alloc+0x185/0x1a0 [] ? ext4_init_io_end+0x1f/0x40 [ext4] [] ext4_writepages+0x404/0xef0 [ext4] [] ? scsi_init_io+0x44/0x200 [] ? fprop_fraction_percpu+0x2f/0x80 [] do_writepages+0x1e/0x30 [] __writeback_single_inode+0x45/0x320 [] writeback_sb_inodes+0x272/0x600 [] wb_writeback+0x10b/0x300 [] wb_workfn+0xb4/0x380 [] ? try_to_wake_up+0x59/0x3e0 [] process_one_work+0x189/0x420 [] worker_thread+0x4e/0x4b0 [] ? process_one_work+0x420/0x420 [] kthread+0xe6/0x100 [] ? kthread_park+0x60/0x60 [] ret_from_fork+0x39/0x50 To fix this issue, we simply limit throtl_service_queue's max queued bios: currently we limit it to the throtl_grp's bps limit or iops limit, and if that is still exceeded, we just sleep for a while.
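To make the wait/wake scheme concrete: the patch prepares an exclusive wait on the service queue before queueing a write bio once more than half of the bps budget is already queued, sleeps in submit_bio_noacct(), and wakes one waiter per dispatched bio (or all waiters once the queue drains). The standalone userspace sketch below mimics that backpressure pattern with POSIX threads. It is only an analogy of the kernel logic, not part of the patch; every name in it (BPS_LIMIT, queued_bytes, queue_bio, dispatch_bio) is illustrative rather than a kernel symbol.

    /* Userspace analogy of the throttle backpressure above (illustrative only). */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define BPS_LIMIT (10 * 1024 * 1024)    /* 10MB/s budget, as in the test above */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t more_room = PTHREAD_COND_INITIALIZER;
    static long queued_bytes;

    /* like blk_throtl_bio() plus the io_schedule() added to submit_bio_noacct() */
    static void queue_bio(long bytes)
    {
            pthread_mutex_lock(&lock);
            while (queued_bytes > BPS_LIMIT / 2)    /* same threshold: bps_limit / 2 */
                    pthread_cond_wait(&more_room, &lock);
            queued_bytes += bytes;
            pthread_mutex_unlock(&lock);
    }

    /* like tg_dispatch_one_bio() decrementing nr_queued_bytes and waking writers */
    static void dispatch_bio(long bytes)
    {
            pthread_mutex_lock(&lock);
            queued_bytes -= bytes;
            if (queued_bytes < 0)
                    queued_bytes = 0;                   /* demo only: ignore underflow */
            if (queued_bytes > 0)
                    pthread_cond_signal(&more_room);    /* wake_up(): one exclusive waiter */
            else
                    pthread_cond_broadcast(&more_room); /* wake_up_all(): queue drained */
            pthread_mutex_unlock(&lock);
    }

    static void *dispatcher(void *arg)
    {
            for (int i = 0; i < 16; i++) {
                    usleep(100 * 1000);         /* pretend the device drains slowly */
                    dispatch_bio(BPS_LIMIT / 8);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, dispatcher, NULL);
            for (int i = 0; i < 16; i++) {
                    queue_bio(BPS_LIMIT / 8);   /* blocks once > BPS_LIMIT/2 is queued */
                    printf("queued chunk %d\n", i);
            }
            pthread_join(t, NULL);
            return 0;
    }

Built with "gcc -pthread", the writer stalls after roughly half the budget is queued and resumes as the dispatcher drains, which mirrors the cap on prematurely queued writeback that the patch aims for.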
[merge note] Also includes commit "anolis: blk-throttle: fix race bug that loses wakeup event" Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Shile Zhang Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-core.c | 11 ++++++++++- block/blk-throttle.c | 29 ++++++++++++++++++++++++++++- block/blk-throttle.h | 16 ++++++++++++---- 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index c18d1186e10b..20f864976e0d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -732,6 +732,9 @@ void submit_bio_noacct(struct bio *bio) struct block_device *bdev = bio->bi_bdev; struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; + DEFINE_WAIT(wait); + wait_queue_head_t *wait_head = NULL; + bool throtl; might_sleep(); @@ -805,7 +808,13 @@ void submit_bio_noacct(struct bio *bio) break; } - if (blk_throtl_bio(bio)) + throtl = blk_throtl_bio(bio, &wait_head, &wait); + if (wait_head) { + io_schedule(); + finish_wait(wait_head, &wait); + } + + if (throtl) return; submit_bio_noacct_nocheck(bio); return; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index e322512de001..a78657b07ed4 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -331,6 +331,10 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq) { INIT_LIST_HEAD(&sq->queued[READ]); INIT_LIST_HEAD(&sq->queued[WRITE]); + sq->nr_queued_bytes[READ] = 0; + sq->nr_queued_bytes[WRITE] = 0; + init_waitqueue_head(&sq->wait[READ]); + init_waitqueue_head(&sq->wait[WRITE]); sq->pending_tree = RB_ROOT_CACHED; timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0); } @@ -1102,6 +1106,7 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq->nr_queued[rw]++; + sq->nr_queued_bytes[rw] += throtl_bio_data_size(bio); blkg_rwstat_add(&tg->total_bytes_queued, bio_op(bio), throtl_bio_data_size(bio)); blkg_rwstat_add(&tg->total_io_queued, bio_op(bio), 1); @@ -1160,6 +1165,15 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) */ bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); sq->nr_queued[rw]--; + sq->nr_queued_bytes[rw] -= throtl_bio_data_size(bio); + WARN_ON_ONCE(sq->nr_queued_bytes[rw] < 0); + + if (wq_has_sleeper(&sq->wait[rw])) { + if (sq->nr_queued_bytes[rw] > 0) + wake_up(&sq->wait[rw]); + else + wake_up_all(&sq->wait[rw]); + } throtl_charge_bio(tg, bio); @@ -2301,7 +2315,8 @@ static void throtl_upgrade_state(struct throtl_data *td) } #endif -bool __blk_throtl_bio(struct bio *bio) +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blkcg_gq *blkg = bio->bi_blkg; @@ -2385,6 +2400,18 @@ bool __blk_throtl_bio(struct bio *bio) tg->last_low_overflow_time[rw] = jiffies; td->nr_queued[rw]++; + + if (rw == WRITE) { + u64 bps_limit = tg_bps_limit(tg, rw); + + if (bps_limit != U64_MAX && + (wq_has_sleeper(&sq->wait[rw]) || + sq->nr_queued_bytes[rw] > div_u64(bps_limit, 2))) { + *waitq = &sq->wait[rw]; + prepare_to_wait_exclusive(*waitq, wait, TASK_UNINTERRUPTIBLE); + } + } + throtl_add_bio_tg(bio, qn, tg); throttled = true; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index a65cdb0cad83..4b5ce538ca5b 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -41,6 +41,8 @@ struct throtl_service_queue { */ struct list_head queued[2]; /* throtl_qnode 
[READ/WRITE] */ unsigned int nr_queued[2]; /* number of queued bios */ + long nr_queued_bytes[2]; /* number of queued bytes */ + wait_queue_head_t wait[2]; /* * RB tree of active children throtl_grp's, which are sorted by @@ -181,13 +183,18 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) static inline int blk_throtl_init(struct gendisk *disk) { return 0; } static inline void blk_throtl_exit(struct gendisk *disk) { } static inline void blk_throtl_register(struct gendisk *disk) { } -static inline bool blk_throtl_bio(struct bio *bio) { return false; } +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) +{ + return false; +} static inline void blk_throtl_cancel_bios(struct gendisk *disk) { } #else /* CONFIG_BLK_DEV_THROTTLING */ int blk_throtl_init(struct gendisk *disk); void blk_throtl_exit(struct gendisk *disk); void blk_throtl_register(struct gendisk *disk); -bool __blk_throtl_bio(struct bio *bio); +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait); void blk_throtl_cancel_bios(struct gendisk *disk); static inline bool blk_should_throtl(struct bio *bio) @@ -214,13 +221,14 @@ static inline bool blk_should_throtl(struct bio *bio) return false; } -static inline bool blk_throtl_bio(struct bio *bio) +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { if (!blk_should_throtl(bio)) return false; - return __blk_throtl_bio(bio); + return __blk_throtl_bio(bio, waitq, wait); } #endif /* CONFIG_BLK_DEV_THROTTLING */ -- Gitee From e03219887c06bb01722ff8a6fd7c90baba0c53db Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:52:18 +0800 Subject: [PATCH 224/953] anolis: blk-throttle: add io latency indicators in cgroupV2 ANBZ: #8411 We already support io_{wait_time/completed/service_time} and total_{bytes_queued/io_queued} counters in cgroupV1 (blkio cgroup). Now we offer the same interface in cgroupV2 (under the io cgroup). Integrate all indicators into one file, named "io.extstat". Before reading it in a sub-cgroup, remember to enable "io" in the ancestor's "cgroup.subtree_control". Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- Documentation/admin-guide/cgroup-v2.rst | 24 ++++++++++ block/blk-throttle.c | 61 +++++++++++++++++++++++++ 2 files changed, 85 insertions(+) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index b26b5274eaaf..8238711ee842 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1717,6 +1717,30 @@ IO Interface Files 8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0 8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021 + io.extstat + A read-only nested-keyed file. + + Lines are keyed by $MAJ:$MIN device numbers and not ordered. + The following nested keys are defined.
+ + ======== ============================= + rwait IO read wait time + wwait IO write wait time + rserv IO read service time + wserv IO write service time + rcomp Number of completed read IOs + wcomp Number of completed write IOs + rbytesq Bytes of queued read IOs + wbytesq Bytes of queued write IOs + riosq Number of queued read IOs + wiosq Number of queued write IOs + ======== ============================= + + An example read output follows:: + + 253:16 rwait=0 wwait=3300 rserv=0 wserv=414366321956 rcomp=0 wcomp=12 rbytesq=0 wbytesq=40960000 riosq=0 wiosq=12 + 253:0 rwait=0 wwait=0 rserv=0 wserv=0 rcomp=0 wcomp=0 rbytesq=0 wbytesq=0 riosq=0 wiosq=0 + io.cost.qos A read-write nested-keyed file which exists only on the root cgroup. diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a78657b07ed4..8921f61d257a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1693,6 +1693,56 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, return 0; } +static u64 tg_prfill_extstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct throtl_grp *tg = pd_to_tg(pd); + const char *dname = blkg_dev_name(pd->blkg); + char bufs[10][21] = { "0", "0", "0", "0", "0", "0", "0", "0", "0", "0" }; + struct blkg_rwstat_sample tmp = { }; + + if (!dname) + return 0; + + /* read/write IOs wait time */ + blkg_rwstat_read(&tg->wait_time, &tmp); + snprintf(bufs[0], sizeof(bufs[0]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[1], sizeof(bufs[1]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write IOs service time */ + blkg_rwstat_read(&tg->service_time, &tmp); + snprintf(bufs[2], sizeof(bufs[2]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[3], sizeof(bufs[3]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write completed IOs */ + blkg_rwstat_read(&tg->completed, &tmp); + snprintf(bufs[4], sizeof(bufs[4]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[5], sizeof(bufs[5]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued bytes */ + blkg_rwstat_read(&tg->total_bytes_queued, &tmp); + snprintf(bufs[6], sizeof(bufs[6]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[7], sizeof(bufs[7]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued IOs */ + blkg_rwstat_read(&tg->total_io_queued, &tmp); + snprintf(bufs[8], sizeof(bufs[8]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[9], sizeof(bufs[9]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + + seq_printf(sf, "%s rwait=%s wwait=%s rserv=%s wserv=%s rcomp=%s wcomp=%s " + "rbytesq=%s wbytesq=%s riosq=%s wiosq=%s\n", + dname, bufs[0], bufs[1], bufs[2], bufs[3], bufs[4], + bufs[5], bufs[6], bufs[7], bufs[8], bufs[9]); + + return 0; +} + static int tg_print_limit(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit, @@ -1700,6 +1750,13 @@ static int tg_print_limit(struct seq_file *sf, void *v) return 0; } +static int tg_print_extstat(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_extstat, + &blkcg_policy_throtl, 0, false); + return 0; +} + static ssize_t tg_set_limit(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -1836,6 +1893,10 @@ static struct cftype throtl_files[] = { .write = tg_set_limit, .private = LIMIT_MAX, }, + { + .name = "extstat", + .seq_show = tg_print_extstat, + }, { } /* terminate */ }; -- Gitee From 08ae82f8eea5cf43af53e5ccb717bed79c7f06ee Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 11 Mar 2024 14:41:03 +0800 
Subject: [PATCH 225/953] anolis: x86/perf: Add PMU uncore support for Zhaoxin CPU ANBZ: #7809 Add performance monitoring unit support for Zhaoxin processors. Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2695 --- MAINTAINERS | 7 + arch/x86/events/zhaoxin/Makefile | 1 + arch/x86/events/zhaoxin/core.c | 53 +- arch/x86/events/zhaoxin/uncore.c | 2900 ++++++++++++++++++++++++++++++ arch/x86/events/zhaoxin/uncore.h | 371 ++++ 5 files changed, 3317 insertions(+), 15 deletions(-) create mode 100644 arch/x86/events/zhaoxin/uncore.c create mode 100644 arch/x86/events/zhaoxin/uncore.h diff --git a/MAINTAINERS b/MAINTAINERS index 3a3a96e4dd39..81a421b24f42 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23877,6 +23877,13 @@ L: linux-kernel@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/zhaoxin.c +ZHAOXIN PMU UNCORE SUPPORT +M: Leoliu-oc +S: Maintained +F: arch/x86/events/zhaoxin/core.c +F: arch/x86/events/zhaoxin/uncore.c +F: arch/x86/events/zhaoxin/uncore.h + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/arch/x86/events/zhaoxin/Makefile b/arch/x86/events/zhaoxin/Makefile index 642c1174d662..767d6212bac1 100644 --- a/arch/x86/events/zhaoxin/Makefile +++ b/arch/x86/events/zhaoxin/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += core.o +obj-y += uncore.o diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 3e9acdaeed1e..2957b416a6db 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -19,15 +19,15 @@ #include "../perf_event.h" /* - * Zhaoxin PerfMon, used on zxc and later. + * Zhaoxin PerfMon, used on Lujiazui and later. */ static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0082, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515, - [PERF_COUNT_HW_CACHE_MISSES] = 0x051a, [PERF_COUNT_HW_BUS_CYCLES] = 0x0083, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x0029, }; static struct event_constraint zxc_event_constraints[] __read_mostly = { @@ -36,7 +36,7 @@ static struct event_constraint zxc_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static struct event_constraint zxd_event_constraints[] __read_mostly = { +static struct event_constraint wudaokou_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */ FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */ @@ -44,7 +44,7 @@ static struct event_constraint zxd_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static __initconst const u64 zxd_hw_cache_event_ids +static __initconst const u64 wudaokou_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -148,7 +148,7 @@ static __initconst const u64 zxd_hw_cache_event_ids }, }; -static __initconst const u64 zxe_hw_cache_event_ids +static __initconst const u64 lujiazui_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -471,7 +471,7 @@ static const struct x86_pmu zhaoxin_pmu __initconst = { .max_events = ARRAY_SIZE(zx_pmon_event_map), .apic = 1, /* - * For zxd/zxe, read/write operation for PMCx MSR is 48 bits. + * For wudaokou/lujiazui, read/write operation for PMCx MSR is 48 bits. 
*/ .max_period = (1ULL << 47) - 1, .get_event_constraints = zhaoxin_get_event_constraints, @@ -559,6 +559,8 @@ __init int zhaoxin_pmu_init(void) zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0; zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0; pr_cont("ZXC events, "); break; @@ -574,26 +576,47 @@ __init int zhaoxin_pmu_init(void) switch (boot_cpu_data.x86_model) { case 0x1b: - memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids, + memcpy(hw_cache_event_ids, wudaokou_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709; - pr_cont("ZXD events, "); + pr_cont("Wudaokou events, "); break; case 0x3b: - memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids, + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; + + pr_cont("Lujiazui events, "); + break; + case 0x5b: + case 0x6b: + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event = 0x02, .umask = 0x01, .inv = 0x01, + .cmask = 0x01); + + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0029; + if (boot_cpu_data.x86_model == 0x5b) + pr_cont("Yongfeng events, "); - pr_cont("ZXE events, "); break; default: return -ENODEV; diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c new file mode 100644 index 000000000000..8d898a10d953 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.c @@ -0,0 +1,2900 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include "uncore.h" + +static struct zhaoxin_uncore_type *empty_uncore[] = { NULL, }; +static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_pci_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_mmio_uncores = empty_uncore; + + +static bool pcidrv_registered; +static struct pci_driver *uncore_pci_driver; + +/* mask of cpus that collect uncore events */ +static cpumask_t uncore_cpu_mask; +static cpumask_t uncore_cpu_subnode_mask; +static cpumask_t uncore_cpu_cluster_mask; + +/* constraint for the fixed counter */ +static struct event_constraint uncore_constraint_fixed = + EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); + +static int max_packages, max_subnodes, max_clusters; +static int clusters_per_subnode; +static int subnodes_per_die; +static int dies_per_socket; + +#define KH40000_MAX_SUBNODE_NUMBER 8 +static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; + +/* get CPU topology register */ +#define BJ_GLOBAL_STATUS_MSR 0x1610 +#define BJ_HDW_CONFIG_MSR 0X1628 + 
+/* KX5000/KX6000 event control */ +#define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff +#define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 +#define KX5000_UNC_CTL_EDGE_DET (1 << 18) +#define KX5000_UNC_CTL_EN (1 << 22) +#define KX5000_UNC_CTL_INVERT (1 << 23) +#define KX5000_UNC_CTL_CMASK_MASK 0x7000000 +#define KX5000_UNC_FIXED_CTR_CTL_EN (1 << 0) + +#define KX5000_UNC_RAW_EVENT_MASK (KX5000_UNC_CTL_EV_SEL_MASK | \ + KX5000_UNC_CTL_UMASK_MASK | \ + KX5000_UNC_CTL_EDGE_DET | \ + KX5000_UNC_CTL_INVERT | \ + KX5000_UNC_CTL_CMASK_MASK) + +/* KX5000/KX6000 uncore global register */ +#define KX5000_UNC_PERF_GLOBAL_CTL 0x391 +#define KX5000_UNC_FIXED_CTR 0x394 +#define KX5000_UNC_FIXED_CTR_CTRL 0x395 + +/* KX5000/KX6000 uncore global control */ +#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) +#define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) + +/* KX5000/KX6000 uncore register */ +#define KX5000_UNC_PERFEVTSEL0 0x3c0 +#define KX5000_UNC_UNCORE_PMC0 0x3b0 + +/* KH40000 event control */ +#define KH40000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KH40000_PMON_CTL_RST (1 << 17) +#define KH40000_PMON_CTL_EDGE_DET (1 << 18) +#define KH40000_PMON_CTL_EV_SEL_EXT (1 << 21) +#define KH40000_PMON_CTL_EN (1 << 22) +#define KH40000_PMON_CTL_INVERT (1 << 23) +#define KH40000_PMON_CTL_TRESH_MASK 0xff000000 +#define KH40000_PMON_RAW_EVENT_MASK (KH40000_PMON_CTL_EV_SEL_MASK | \ + KH40000_PMON_CTL_UMASK_MASK | \ + KH40000_PMON_CTL_EDGE_DET | \ + KH40000_PMON_CTL_INVERT | \ + KH40000_PMON_CTL_TRESH_MASK) + +/* KH40000 LLC register*/ +#define KH40000_LLC_MSR_PMON_CTL0 0x1660 +#define KH40000_LLC_MSR_PMON_CTR0 0x165c +#define KH40000_LLC_MSR_PMON_BLK_CTL 0x1665 + +/* KH40000 HIF register*/ +#define KH40000_HIF_MSR_PMON_CTL0 0x1656 +#define KH40000_HIF_MSR_PMON_CTR0 0x1651 +#define KH40000_HIF_MSR_PMON_FIXED_CTL 0x1655 +#define KH40000_HIF_MSR_PMON_FIXED_CTR 0x1650 +#define KH40000_HIF_MSR_PMON_BLK_CTL 0x165b + +/* KH40000 ZZI(ZPI+ZOI+INI) register*/ +#define KH40000_ZZI_MSR_PMON_CTL0 0x166A +#define KH40000_ZZI_MSR_PMON_CTR0 0x1666 +#define KH40000_ZZI_MSR_PMON_BLK_CTL 0x166f + +/* KH40000 MC register*/ +#define KH40000_MC0_CHy_PMON_FIXED_CTL 0xf40 +#define KH40000_MC0_CHy_PMON_FIXED_CTR 0xf20 +#define KH40000_MC0_CHy_PMON_CTR0 0xf00 +#define KH40000_MC0_CHy_PMON_CTL0 0xf28 +#define KH40000_MC0_CHy_PMON_BLK_CTL 0xf44 + +#define KH40000_MC1_CHy_PMON_FIXED_CTL 0xf90 +#define KH40000_MC1_CHy_PMON_FIXED_CTR 0xf70 +#define KH40000_MC1_CHy_PMON_CTR0 0xf50 +#define KH40000_MC1_CHy_PMON_CTL0 0xf78 +#define KH40000_MC1_CHy_PMON_BLK_CTL 0xf94 + +/* KH40000 PCI register*/ +#define KH40000_PCI_PMON_CTR0 0xf00 +#define KH40000_PCI_PMON_CTL0 0xf28 +#define KH40000_PCI_PMON_BLK_CTL 0xf44 + +/* KH40000 ZPI_DLL register*/ +#define KH40000_ZPI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZPI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZPI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZPI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZPI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 ZDI_DLL register*/ +#define KH40000_ZDI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZDI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZDI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZDI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZDI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 PXPTRF register*/ +#define KH40000_PXPTRF_PMON_CTR0 0xf00 +#define KH40000_PXPTRF_PMON_CTL0 0xf28 +#define KH40000_PXPTRF_PMON_BLK_CTL 0xf44 + +/* KH40000 Box level control */ +#define KH40000_PMON_BOX_CTL_RST_CTRL (1 << 0) +#define KH40000_PMON_BOX_CTL_RST_CTRS (1 << 1) +#define 
KH40000_PMON_BOX_CTL_FRZ (1 << 8) +#define KH40000_PMON_PCI_BOX_PMON_EN (1 << 31) + +#define KH40000_PMON_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS) + +#define KH40000_PMON_PCI_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS | \ + KH40000_PMON_PCI_BOX_PMON_EN) + +/* KX8000 LLC register*/ +#define KX8000_LLC_MSR_PMON_CTL0 0x1979 +#define KX8000_LLC_MSR_PMON_CTR0 0x1975 +#define KX8000_LLC_MSR_PMON_BLK_CTL 0x197e + +/* KX8000 MESH register*/ +#define KX8000_MESH_MSR_PMON_CTL0 0x1983 +#define KX8000_MESH_MSR_PMON_CTR0 0x197f +#define KX8000_MESH_MSR_PMON_BLK_CTL 0x1987 + +/* KX8000 HOMESTOP register*/ +#define KX8000_HOMESTOP_MSR_PMON_CTL0 0x196a +#define KX8000_HOMESTOP_MSR_PMON_CTR0 0x1966 +#define KX8000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e +#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 +#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 + +/* KX8000 CCDie ZDI_PL register*/ +#define KX8000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 +#define KX8000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c +#define KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 + +/* KX8000 cIODie ZDI_PL register*/ +#define KX8000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 +#define KX8000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 +#define KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 +#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A +#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B + +/* KX8000 MC register*/ +#define KX8000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 +#define KX8000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 +#define KX8000_MC_A0_CHy_PMON_CTR0 0xe00 +#define KX8000_MC_A0_CHy_PMON_CTL0 0xe20 +#define KX8000_MC_A0_CHy_PMON_BLK_CTL 0xe34 + +#define KX8000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 +#define KX8000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 +#define KX8000_MC_A1_CHy_PMON_CTR0 0xe40 +#define KX8000_MC_A1_CHy_PMON_CTL0 0xe60 +#define KX8000_MC_A1_CHy_PMON_BLK_CTL 0xe74 + +#define KX8000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 +#define KX8000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 +#define KX8000_MC_B0_CHy_PMON_CTR0 0xe80 +#define KX8000_MC_B0_CHy_PMON_CTL0 0xea0 +#define KX8000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 + +#define KX8000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 +#define KX8000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 +#define KX8000_MC_B1_CHy_PMON_CTR0 0xec0 +#define KX8000_MC_B1_CHy_PMON_CTL0 0xee0 +#define KX8000_MC_B1_CHy_PMON_BLK_CTL 0xef4 + +#define KX8000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX8000_ZDI_DL_MMIO_PMON_CTL0 0xf28 +#define KX8000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 +#define KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 +#define KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 +#define KX8000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX8000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX8000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 +#define KX8000_ZDI_DL_MMIO_SIZE 0x1000 + + + + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(cmask3, cmask, "config:24-26"); +DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); + +static void get_hdw_config_msr(void *config) +{ + u64 *data = (u64 *)config; + + rdmsrl(BJ_HDW_CONFIG_MSR, *data); +} + +static void get_global_status_msr(void *status) +{ + u64 *data = (u64 *)status; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, *data); +} + +/*topology number : get max packages/subnode/clusters number*/ +static void get_topology_number(void) +{ + int clusters; + int subnodes; + int dies; + int packages; + u64 data; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, data); + + /* 
check packages number */ + packages = data & 0x1; + if (packages) + max_packages = 2; + else + max_packages = 1; + + /* only Yongfeng needs die/subnode/cluster info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + return; + + /* check dies_per_socket */ + dies = (data >> 12) & 0x1; + if (dies) + dies_per_socket = 2; + else + dies_per_socket = 1; + + /* check subnodes_per_die */ + subnodes = (data >> 32) & 0x3; + if (subnodes == 0x3) + subnodes_per_die = 2; + else + subnodes_per_die = 1; + + /* check clusters_per_subnode */ + clusters = (data >> 6) & 0x3; + if (clusters == 0x3) + clusters_per_subnode = 2; + else + clusters_per_subnode = 1; + + max_subnodes = max_packages * dies_per_socket * subnodes_per_die; + max_clusters = clusters_per_subnode * max_subnodes; +} + +static int get_pcibus_limit(void) +{ + struct pci_dev *dev; + u32 val; + int i = 0; + + dev = pci_get_device(0x1D17, 0x31B1, NULL); + if (dev == NULL) + return -ENODEV; + + pci_read_config_dword(dev, 0x94, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + + if (max_packages == 2) { + pci_read_config_dword(dev, 0x9c, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + } + + return 0; +} + +static int uncore_pcibus_to_subnodeid(struct pci_bus *bus) +{ + int i; + + for (i = 0; i < KH40000_MAX_SUBNODE_NUMBER; i++) { + if (bus->number < kh40000_pcibus_limit[i]) + break; + } + + return i; +} + +DEFINE_PER_CPU(int, zx_package_id); +DEFINE_PER_CPU(int, zx_subnode_id); +DEFINE_PER_CPU(int, zx_cluster_id); + +static void get_topology_info(void) +{ + int cpu; + int cluster_id; + int socket_id; + int die_id; + int subnode_id; + + int die_info; + int subnode_info; + int cluster_info; + + u64 config; + + for_each_present_cpu(cpu) { + smp_call_function_single(cpu, get_global_status_msr, &config, 1); + socket_id = (int)((config >> 3) & 0x1); + per_cpu(zx_package_id, cpu) = socket_id; + + /* only kh40000 needs cluster and subnode info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + continue; + + smp_call_function_single(cpu, get_hdw_config_msr, &config, 1); + + die_info = (int)((config >> 21) & 0x3); + die_id = socket_id * dies_per_socket + die_info; + + subnode_info = (int)((config >> 20) & 0x1); + subnode_id = die_id * subnodes_per_die + subnode_info; + per_cpu(zx_subnode_id, cpu) = subnode_id; + + cluster_info = (int)((config >> 18) & 0x3); + cluster_id = subnode_id * clusters_per_subnode + cluster_info; + per_cpu(zx_cluster_id, cpu) = cluster_id; + } +} + +static int zx_topology_cluster_id(int cpu) +{ + return per_cpu(zx_cluster_id, cpu); +} + +static int zx_topology_subnode_id(int cpu) +{ + return per_cpu(zx_subnode_id, cpu); +} + +static int zx_topology_package_id(int cpu) +{ + return per_cpu(zx_package_id, cpu); +} + +DEFINE_PER_CPU(cpumask_t, zx_cluster_core_bits); +DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); + +static void zx_gen_core_map(void) +{ + int i, nr, cpu; + int cluster_id, subnode_id; + + for_each_present_cpu(cpu) { + cluster_id = zx_topology_cluster_id(cpu); + + for (i = 0; i < 4; i++) { + nr = (cluster_id << 2) + i; + cpumask_set_cpu(nr, 
&per_cpu(zx_cluster_core_bits, cpu)); + } + } + + for_each_present_cpu(cpu) { + subnode_id = zx_topology_subnode_id(cpu); + + for (i = 0; i < 8; i++) { + nr = (subnode_id << 3) + i; + cpumask_set_cpu(nr, &per_cpu(zx_subnode_core_bits, cpu)); + } + } +} + +static struct cpumask *topology_cluster_core_cpumask(int cpu) +{ + return &per_cpu(zx_cluster_core_bits, cpu); +} + +static struct cpumask *topology_subnode_core_cpumask(int cpu) +{ + return &per_cpu(zx_subnode_core_bits, cpu); +} + +static void uncore_free_pcibus_map(void) +{ + +} + +static int kh40000_pci2node_map_init(void) +{ + return 0; +} + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct uncore_event_desc *event = + container_of(attr, struct uncore_event_desc, attr); + return sprintf(buf, "%s", event->config); +} + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu) +{ + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + return pmu->boxes[zx_topology_cluster_id(cpu)]; + else + return pmu->boxes[zx_topology_subnode_id(cpu)]; + } else { + return pmu->boxes[zx_topology_package_id(cpu)]; + } +} + +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 count; + + WARN_ON_ONCE(box->cpu != smp_processor_id()); + rdmsrl(event->hw.event_base, count); + return count; +} + +static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, + struct perf_event *event, int idx) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->idx = idx; + hwc->last_tag = ++box->tags[idx]; + + if (uncore_pmc_fixed(hwc->idx)) { + hwc->event_base = uncore_fixed_ctr(box); + hwc->config_base = uncore_fixed_ctl(box); + return; + } + + hwc->config_base = uncore_event_ctl(box, hwc->idx); + hwc->event_base = uncore_perf_ctr(box, hwc->idx); +} + +void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 prev_count, new_count, delta; + int shift; + + if (uncore_pmc_fixed(event->hw.idx)) + shift = 64 - uncore_fixed_ctr_bits(box); + else + shift = 64 - uncore_perf_ctr_bits(box); + + /* the hrtimer might modify the previous event value */ +again: + prev_count = local64_read(&event->hw.prev_count); + new_count = uncore_read_counter(box, event); + if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) + goto again; + + delta = (new_count << shift) - (prev_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); +} + +/*KX5000/KX6000 uncore ops start*/ +static void kx5000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void kx5000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, 0); +} + +static void kx5000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, + KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); +} + +static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | KX5000_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, KX5000_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *kx5000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask3.attr, + NULL, +}; + 
+static struct attribute_group kx5000_uncore_format_group = { + .name = "format", + .attrs = kx5000_uncore_formats_attr, +}; + +static struct uncore_event_desc kx5000_uncore_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kx5000_uncore_msr_ops = { + .disable_box = kx5000_uncore_msr_disable_box, + .enable_box = kx5000_uncore_msr_enable_box, + .disable_event = kx5000_uncore_msr_disable_event, + .enable_event = kx5000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kx5000_uncore_box = { + .name = "", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX5000_UNC_PERFEVTSEL0, + .perf_ctr = KX5000_UNC_UNCORE_PMC0, + .fixed_ctr = KX5000_UNC_FIXED_CTR, + .fixed_ctl = KX5000_UNC_FIXED_CTR_CTRL, + .event_mask = KX5000_UNC_RAW_EVENT_MASK, + .event_descs = kx5000_uncore_events, + .ops = &kx5000_uncore_msr_ops, + .format_group = &kx5000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx5000_msr_uncores[] = { + &kx5000_uncore_box, + NULL, +}; +/*KX5000/KX6000 uncore ops end*/ + +/*KH40000 msr ops start*/ +static void kh40000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config); +} + +static void kh40000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config |= KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_init_box(struct zhaoxin_uncore_box *box) +{ + unsigned int msr = uncore_msr_box_ctl(box); + + if (msr) { + wrmsrl(msr, KH40000_PMON_BOX_CTL_INT); + wrmsrl(msr, 0); + } +} + +static struct attribute *kh40000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group kh40000_uncore_format_group = { + .name = "format", + .attrs = kh40000_uncore_formats_attr, +}; + +static struct uncore_event_desc kh40000_uncore_llc_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_hif_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zzi_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_msr_ops = { + .init_box = kh40000_uncore_msr_init_box, + .disable_box = kh40000_uncore_msr_disable_box, + .enable_box = kh40000_uncore_msr_enable_box, + .disable_event = kh40000_uncore_msr_disable_event, + .enable_event = kh40000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_LLC_MSR_PMON_CTL0, + .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, + .event_mask = 
KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_llc_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_hif_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zzi_box = { + .name = "zzi", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, + .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_zzi_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { + &kh40000_uncore_llc_box, + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; +/*KH40000 msr ops end*/ + +/*KH40000 pci ops start*/ +static void kh40000_uncore_pci_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config); +} + +static void kh40000_uncore_pci_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_pci_disable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config |= KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static void kh40000_uncore_pci_enable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config &= ~KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static u64 kh40000_uncore_pci_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count + 1); + pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count); + + return count; +} + +static void kh40000_uncore_pci_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + pci_write_config_dword(pdev, box_ctl, KH40000_PMON_PCI_BOX_CTL_INT); +} + +static struct uncore_event_desc kh40000_uncore_imc_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pci_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zpi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct 
uncore_event_desc kh40000_uncore_zdi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pxptrf_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_pci_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kh40000_uncore_pci_read_counter +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc0 = { + .name = "mc0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC0_CHy_PMON_CTR0, + .event_ctl = KH40000_MC0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc1 = { + .name = "mc1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, + .event_ctl = KH40000_MC1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 10, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zpi_dll = { + .name = "zpi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zpi_dll_events, + .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zdi_dll = { + .name = "zdi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zdi_dll_events, + .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pxptrf = { + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +enum { + KH40000_PCI_UNCORE_MC0, + KH40000_PCI_UNCORE_MC1, + KH40000_PCI_UNCORE_PCI, + 
KH40000_PCI_UNCORE_ZPI_DLL, + KH40000_PCI_UNCORE_ZDI_DLL, + KH40000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kh40000_pci_uncores[] = { + [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, + [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, + [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, + [KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll, + [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, + [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id kh40000_uncore_pci_ids[] = { + { /* MC Channe0/1 */ + PCI_DEVICE(0x1D17, 0x31b2), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_MC0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x071A), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x0731), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D5F2 */ + PCI_DEVICE(0x1D17, 0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 9), + }, + + { /* ZPI_DLL */ + PCI_DEVICE(0x1D17, 0x91c1), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZPI_DLL, 0), + }, + + { /* ZDI_DLL */ + PCI_DEVICE(0x1D17, 0x3b03), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZDI_DLL, 0), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + +static struct pci_driver kh40000_uncore_pci_driver = { + .name = "kh40000_uncore", + .id_table = kh40000_uncore_pci_ids, +}; +/*KH40000 pci ops end*/ + + +/*KX8000 msr ops start*/ +static unsigned int kx8000_uncore_msr_offsets[] = { + 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mesh_box = { + .name = "mesh", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX8000_MESH_MSR_PMON_CTL0, + .perf_ctr = KX8000_MESH_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MESH_MSR_PMON_BLK_CTL, + .msr_offsets = kx8000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX8000_LLC_MSR_PMON_CTL0, + .perf_ctr = KX8000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_LLC_MSR_PMON_BLK_CTL, + .msr_offsets = kx8000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_homestop = { + .name = 
"homestop", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_HOMESTOP_MSR_PMON_CTL0, + .perf_ctr = KX8000_HOMESTOP_MSR_PMON_CTR0, + .fixed_ctr = KX8000_HOMESTOP_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX8000_HOMESTOP_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_HOMESTOP_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_pl = { + .name = "ccd_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX8000_CCD_ZDI_PL_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_pl = { + .name = "iod_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_CTR0, + .fixed_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + + +static struct zhaoxin_uncore_type *kx8000_msr_uncores[] = { + &kx8000_uncore_llc_box, + &kx8000_uncore_mesh_box, + &kh40000_uncore_hif_box, + &kx8000_uncore_homestop, + &kx8000_uncore_ccd_zdi_pl, + &kx8000_uncore_iod_zdi_pl, + NULL, +}; +/*KX8000 msr ops end*/ + +/*KX8000 pci ops start*/ +static unsigned int kx8000_mc_ctr_lh_offsets[] = { + 0xc, 0xe, 0x10, 0x12, 0x14 +}; + +static u64 kx8000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 3); + pci_read_config_dword(pdev, hwc->event_base + kx8000_mc_ctr_lh_offsets[hwc->idx], + (u32 *)&count); + + return count; +} + +static struct zhaoxin_uncore_ops kx8000_uncore_pci_mc_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kx8000_uncore_pci_mc_read_counter +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_a0 = { + .name = "mc_a0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_A0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_A0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_A0_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_A0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_A0_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_a1 = { + .name = "mc_a1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_A1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_A1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_A1_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_A1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = 
KX8000_MC_A1_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_b0 = { + .name = "mc_b0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_B0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_B0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_B0_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_B0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_B0_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_b1 = { + .name = "mc_b1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_B1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_B1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_B1_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_B1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_B1_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 17, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + + +enum { + KX8000_PCI_UNCORE_MC_A0, + KX8000_PCI_UNCORE_MC_A1, + KX8000_PCI_UNCORE_MC_B0, + KX8000_PCI_UNCORE_MC_B1, + KX8000_PCI_UNCORE_PCI, + KX8000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kx8000_pci_uncores[] = { + [KX8000_PCI_UNCORE_MC_A0] = &kx8000_uncore_mc_a0, + [KX8000_PCI_UNCORE_MC_A1] = &kx8000_uncore_mc_a1, + [KX8000_PCI_UNCORE_MC_B0] = &kx8000_uncore_mc_b0, + [KX8000_PCI_UNCORE_MC_B1] = &kx8000_uncore_mc_b1, + [KX8000_PCI_UNCORE_PCI] = &kx8000_uncore_pci, + [KX8000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id kx8000_uncore_pci_ids[] = { + { /* MC Channe A0/A1/B0/B1 */ + PCI_DEVICE(0x1D17, 0x31B2), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_MC_A0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D2F2 */ + PCI_DEVICE(0x1D17, 0x0733), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D2F3 */ + PCI_DEVICE(0x1D17, 0x0734), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x0735), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x0739), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D3F3 */ + PCI_DEVICE(0x1D17, 0x073A), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 
9), + }, + + { /* PCIE D4F2 */ + PCI_DEVICE(0x1D17, 0x0736), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 10), + }, + + { /* PCIE D4F3 */ + PCI_DEVICE(0x1D17, 0x0737), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 11), + }, + + { /* PCIE D4F4 */ + PCI_DEVICE(0x1D17, 0x0738), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 12), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 13), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 14), + }, + + { /* PCIE D5F2 */ + PCI_DEVICE(0x1D17, 0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 15), + }, + + { /* PCIE D5F3 */ + PCI_DEVICE(0x1D17, 0x073B), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 16), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + + +static struct pci_driver kx8000_uncore_pci_driver = { + .name = "kx8000_uncore", + .id_table = kx8000_uncore_pci_ids, +}; +/*KX8000 pci ops end*/ + +/*KX8000 mmio ops start*/ +static void kx8000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = NULL; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + resource_size_t addr; + u32 pci_dword; + int mmio_base_offset; + + pdev = pci_get_device(0x1d17, 0x31b1, pdev); + if (!pdev) + return; + + if (!strcmp(box->pmu->name, "iod_zdi_dl")) + mmio_base_offset = KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET; + else + mmio_base_offset = KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET; + + pci_read_config_dword(pdev, mmio_base_offset, &pci_dword); + addr = (u64)(pci_dword & KX8000_ZDI_DL_MMIO_BASE_MASK) << 32; + + pci_read_config_dword(pdev, mmio_base_offset + 4, &pci_dword); + addr |= pci_dword & KX8000_ZDI_DL_MMIO_MEM0_MASK; + + box->io_addr = ioremap(addr, KX8000_ZDI_DL_MMIO_SIZE); + if (!box->io_addr) + return; + + writel(KH40000_PMON_PCI_BOX_CTL_INT, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config |= KH40000_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config | KH40000_PMON_CTL_EN, box->io_addr + hwc->config_base); +} + +static void kx8000_uncore_mmio_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config, box->io_addr + hwc->config_base); +} + +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box) +{ + if (box->io_addr) + iounmap(box->io_addr); +} + +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + u64 count = 0; + u64 count_low = 0; + u64 count_high = 0; + + if (!box->io_addr) + return 0; + + count_high = readl(box->io_addr + 
event->hw.event_base) & 0xffff; + count_low = readl(box->io_addr + event->hw.event_base + 4); + count = (count_high << 32) + count_low; + + return count; +} + +static struct zhaoxin_uncore_ops kx8000_uncore_mmio_ops = { + .init_box = kx8000_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = kx8000_uncore_mmio_disable_box, + .enable_box = kx8000_uncore_mmio_enable_box, + .disable_event = kx8000_uncore_mmio_disable_event, + .enable_event = kx8000_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_dl = { + .name = "iod_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX8000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx8000_uncore_mmio_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_dl = { + .name = "ccd_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX8000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx8000_uncore_mmio_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx8000_mmio_uncores[] = { + &kx8000_uncore_iod_zdi_dl, + &kx8000_uncore_ccd_zdi_dl, + NULL, +}; + +/* KX8000 mmio ops end */ + + + +static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) +{ + struct zhaoxin_uncore_box *box; + struct perf_event *event; + unsigned long flags; + int bit; + + box = container_of(hrtimer, struct zhaoxin_uncore_box, hrtimer); + if (!box->n_active || box->cpu != smp_processor_id()) + return HRTIMER_NORESTART; + /* + * disable local interrupts to prevent uncore_pmu_event_start/stop + * from interrupting the update process + */ + local_irq_save(flags); + + /* + * handle boxes with an active event list as opposed to active + * counters + */ + list_for_each_entry(event, &box->active_list, active_entry) { + uncore_perf_event_update(box, event); + } + + for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) + uncore_perf_event_update(box, box->events[bit]); + + local_irq_restore(flags); + + hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); + return HRTIMER_RESTART; +} + +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), + HRTIMER_MODE_REL_PINNED); +} + +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_cancel(&box->hrtimer); +} + +static void uncore_pmu_init_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + box->hrtimer.function = uncore_pmu_hrtimer; +} + +static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, + int node) +{ + int i, size, numshared = type->num_shared_regs; + struct zhaoxin_uncore_box *box; + + size = sizeof(*box) + numshared * sizeof(struct zhaoxin_uncore_extra_reg); + + box = kzalloc_node(size, GFP_KERNEL, node); + if (!box) + return NULL; + + for (i = 0; i < numshared; i++) + raw_spin_lock_init(&box->shared_regs[i].lock); + + uncore_pmu_init_hrtimer(box); + box->cpu = -1; + box->package_id = -1; + box->cluster_id = -1; + box->subnode_id = -1; + + /* set default hrtimer
timeout */ + box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; + + INIT_LIST_HEAD(&box->active_list); + + return box; +} + +static bool is_box_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + return &box->pmu->pmu == event->pmu; +} + +static int +uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, + bool dogrp) +{ + struct perf_event *event; + int n, max_count; + + max_count = box->pmu->type->num_counters; + if (box->pmu->type->fixed_ctl) + max_count++; + + if (box->n_events >= max_count) + return -EINVAL; + + n = box->n_events; + + if (is_box_event(box, leader)) { + box->event_list[n] = leader; + n++; + } + + if (!dogrp) + return n; + + for_each_sibling_event(event, leader) { + if (!is_box_event(box, event) || + event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -EINVAL; + + box->event_list[n] = event; + n++; + } + return n; +} + +static struct event_constraint * +uncore_get_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + struct zhaoxin_uncore_type *type = box->pmu->type; + struct event_constraint *c; + + if (type->ops->get_constraint) { + c = type->ops->get_constraint(box, event); + if (c) + return c; + } + + if (event->attr.config == UNCORE_FIXED_EVENT) + return &uncore_constraint_fixed; + + if (type->constraints) { + for_each_event_constraint(c, type->constraints) { + if ((event->hw.config & c->cmask) == c->code) + return c; + } + } + + return &type->unconstrainted; +} + +static void uncore_put_event_constraint(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + if (box->pmu->type->ops->put_constraint) + box->pmu->type->ops->put_constraint(box, event); +} + +static int uncore_assign_events(struct zhaoxin_uncore_box *box, int assign[], int n) +{ + unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; + struct event_constraint *c; + int i, wmin, wmax, ret = 0; + struct hw_perf_event *hwc; + + bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); + + for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { + c = uncore_get_event_constraint(box, box->event_list[i]); + box->event_constraint[i] = c; + wmin = min(wmin, c->weight); + wmax = max(wmax, c->weight); + } + + /* fastpath, try to reuse previous register */ + for (i = 0; i < n; i++) { + hwc = &box->event_list[i]->hw; + c = box->event_constraint[i]; + + /* never assigned */ + if (hwc->idx == -1) + break; + + /* constraint still honored */ + if (!test_bit(hwc->idx, c->idxmsk)) + break; + + /* not already used */ + if (test_bit(hwc->idx, used_mask)) + break; + + __set_bit(hwc->idx, used_mask); + if (assign) + assign[i] = hwc->idx; + } + /* slow path */ + if (i != n) + ret = perf_assign_events(box->event_constraint, n, + wmin, wmax, n, assign); + + if (!assign || ret) { + for (i = 0; i < n; i++) + uncore_put_event_constraint(box, box->event_list[i]); + } + return ret ? 
-EINVAL : 0; +} + +static void uncore_pmu_event_start(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + int idx = event->hw.idx; + + if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) + return; + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + box->events[idx] = event; + box->n_active++; + __set_bit(idx, box->active_mask); + + local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); + uncore_enable_event(box, event); + + if (box->n_active == 1) + uncore_pmu_start_hrtimer(box); +} + +static void uncore_pmu_event_stop(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + struct hw_perf_event *hwc = &event->hw; + + if (__test_and_clear_bit(hwc->idx, box->active_mask)) { + uncore_disable_event(box, event); + box->n_active--; + box->events[hwc->idx] = NULL; + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (box->n_active == 0) + uncore_pmu_cancel_hrtimer(box); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of an event + * that we are disabling: + */ + uncore_perf_event_update(box, event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +static int uncore_pmu_event_add(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + struct hw_perf_event *hwc = &event->hw; + int assign[UNCORE_PMC_IDX_MAX]; + int i, n, ret; + + if (!box) + return -ENODEV; + + ret = n = uncore_collect_events(box, event, false); + if (ret < 0) + return ret; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (!(flags & PERF_EF_START)) + hwc->state |= PERF_HES_ARCH; + + ret = uncore_assign_events(box, assign, n); + if (ret) + return ret; + + /* save events moving to new counters */ + for (i = 0; i < box->n_events; i++) { + event = box->event_list[i]; + hwc = &event->hw; + + if (hwc->idx == assign[i] && + hwc->last_tag == box->tags[assign[i]]) + continue; + /* + * Ensure we don't accidentally enable a stopped + * counter simply because we rescheduled.
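* (events already stopped are tagged PERF_HES_ARCH so they stay stopped; the rest are stopped here and restarted on their new counters below)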
+ */ + if (hwc->state & PERF_HES_STOPPED) + hwc->state |= PERF_HES_ARCH; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + } + + /* reprogram moved events into new counters */ + for (i = 0; i < n; i++) { + event = box->event_list[i]; + hwc = &event->hw; + + if (hwc->idx != assign[i] || + hwc->last_tag != box->tags[assign[i]]) + uncore_assign_hw_event(box, event, assign[i]); + else if (i < box->n_events) + continue; + + if (hwc->state & PERF_HES_ARCH) + continue; + + uncore_pmu_event_start(event, 0); + } + box->n_events = n; + + return 0; +} + +static void uncore_pmu_event_del(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + int i; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < box->n_events; i++) { + if (event == box->event_list[i]) { + uncore_put_event_constraint(box, event); + + for (++i; i < box->n_events; i++) + box->event_list[i - 1] = box->event_list[i]; + + --box->n_events; + break; + } + } + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; +} + +static void uncore_pmu_event_read(struct perf_event *event) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + + uncore_perf_event_update(box, event); +} + +static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, + struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct zhaoxin_uncore_box *fake_box; + int ret = -EINVAL, n; + + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); + if (!fake_box) + return -ENOMEM; + + fake_box->pmu = pmu; + /* + * the event is not yet connected with its + * siblings therefore we must first collect + * existing siblings, then add the new event + * before we can simulate the scheduling + */ + n = uncore_collect_events(fake_box, leader, true); + if (n < 0) + goto out; + + fake_box->n_events = n; + n = uncore_collect_events(fake_box, event, false); + if (n < 0) + goto out; + + fake_box->n_events = n; + + ret = uncore_assign_events(fake_box, NULL, n); +out: + kfree(fake_box); + return ret; +} + +static int uncore_pmu_event_init(struct perf_event *event) +{ + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct hw_perf_event *hwc = &event->hw; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + pmu = uncore_event_to_pmu(event); + /* no device found for this pmu */ + if (pmu->func_id < 0) + return -ENOENT; + + /* Sampling not supported yet */ + if (hwc->sample_period) + return -EINVAL; + + /* + * Place all uncore events for a particular physical package + * onto a single cpu + */ + if (event->cpu < 0) + return -EINVAL; + box = uncore_pmu_to_box(pmu, event->cpu); + if (!box || box->cpu < 0) + return -EINVAL; + event->cpu = box->cpu; + event->pmu_private = box; + + //event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; + event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; + + if (event->attr.config == UNCORE_FIXED_EVENT) { + /* no fixed counter */ + if (!pmu->type->fixed_ctl) + return -EINVAL; + /* + * if there is only one fixed counter, only the first pmu + * can access the fixed counter + */ + if (pmu->type->single_fixed && pmu->pmu_idx > 0) + return -EINVAL; + + /* fixed counters have event field hardcoded to zero */ + hwc->config = 0ULL; + } else { + hwc->config = event->attr.config & + (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); + if (pmu->type->ops->hw_config) { + ret = pmu->type->ops->hw_config(box, event); + if 
(ret) + return ret; + } + } + + if (event->group_leader != event) + ret = uncore_validate_group(pmu, event); + else + ret = 0; + + return ret; +} + +static void uncore_pmu_enable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->enable_box) + uncore_pmu->type->ops->enable_box(box); +} + +static void uncore_pmu_disable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->disable_box) + uncore_pmu->type->ops->disable_box(box); +} + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + cpumask_t *active_mask; + struct pmu *pmu; + struct zhaoxin_uncore_pmu *uncore_pmu; + + pmu = dev_get_drvdata(dev); + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(uncore_pmu->type->name, "llc")) + active_mask = &uncore_cpu_cluster_mask; + else + active_mask = &uncore_cpu_subnode_mask; + } else { + active_mask = &uncore_cpu_mask; + } + return cpumap_print_to_pagebuf(true, buf, active_mask); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *uncore_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group uncore_pmu_attr_group = { + .attrs = uncore_pmu_attrs, +}; + +static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) +{ + int ret; + + if (!pmu->type->pmu) { + pmu->pmu = (struct pmu) { + .attr_groups = pmu->type->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = uncore_pmu_enable, + .pmu_disable = uncore_pmu_disable, + .event_init = uncore_pmu_event_init, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, + .read = uncore_pmu_event_read, + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + } else { + pmu->pmu = *pmu->type->pmu; + pmu->pmu.attr_groups = pmu->type->attr_groups; + } + + if (pmu->type->num_boxes == 1) { + if (strlen(pmu->type->name) > 0) + sprintf(pmu->name, "uncore_%s", pmu->type->name); + else + sprintf(pmu->name, "uncore"); + } else { + sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, + pmu->pmu_idx); + } + + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); + if (!ret) + pmu->registered = true; + return ret; +} + +static void uncore_pmu_unregister(struct zhaoxin_uncore_pmu *pmu) +{ + if (!pmu->registered) + return; + perf_pmu_unregister(&pmu->pmu); + pmu->registered = false; +} + +static void uncore_free_boxes(struct zhaoxin_uncore_pmu *pmu) +{ + int i, max; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + max = max_clusters; + else + max = max_subnodes; + } else { + max = max_packages; + } + + for (i = 0; i < max; i++) + kfree(pmu->boxes[i]); + kfree(pmu->boxes); +} + +static void uncore_type_exit(struct zhaoxin_uncore_type *type) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + int i; + + if (pmu) { + for (i = 0; i < type->num_boxes; i++, pmu++) { + uncore_pmu_unregister(pmu); + uncore_free_boxes(pmu); + } + kfree(type->pmus); + 
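/* clearing type->pmus below keeps a repeated uncore_type_exit() call harmless */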
type->pmus = NULL; + } + kfree(type->events_group); + type->events_group = NULL; +} + +static void uncore_types_exit(struct zhaoxin_uncore_type **types) +{ + for (; *types; types++) + uncore_type_exit(*types); +} + +static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) +{ + struct zhaoxin_uncore_pmu *pmus; + size_t size; + int i, j; + + pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL); + if (!pmus) + return -ENOMEM; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) + size = max_clusters * sizeof(struct zhaoxin_uncore_box *); + else + size = max_subnodes * sizeof(struct zhaoxin_uncore_box *); + } else { + size = max_packages * sizeof(struct zhaoxin_uncore_box *); + } + + for (i = 0; i < type->num_boxes; i++) { + pmus[i].func_id = setid ? i : -1; + pmus[i].pmu_idx = i; + pmus[i].type = type; + pmus[i].boxes = kzalloc(size, GFP_KERNEL); + if (!pmus[i].boxes) + goto err; + } + + type->pmus = pmus; + type->unconstrainted = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, + 0, type->num_counters, 0, 0); + + if (type->event_descs) { + struct { + struct attribute_group group; + struct attribute *attrs[]; + } *attr_group; + for (i = 0; type->event_descs[i].attr.attr.name; i++) + ; + + attr_group = kzalloc(struct_size(attr_group, attrs, i + 1), GFP_KERNEL); + if (!attr_group) + goto err; + + attr_group->group.name = "events"; + attr_group->group.attrs = attr_group->attrs; + + for (j = 0; j < i; j++) + attr_group->attrs[j] = &type->event_descs[j].attr.attr; + + type->events_group = &attr_group->group; + } + + type->pmu_group = &uncore_pmu_attr_group; + + return 0; + +err: + for (i = 0; i < type->num_boxes; i++) + kfree(pmus[i].boxes); + kfree(pmus); + + return -ENOMEM; +} + +static int __init +uncore_types_init(struct zhaoxin_uncore_type **types, bool setid) +{ + int ret; + + for (; *types; types++) { + ret = uncore_type_init(*types, setid); + if (ret) + return ret; + } + return 0; +} + +/* + * add a pci uncore device + */ +static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_box **boxes; + char mc_dev[10]; + int loop = 1; + int i, j = 0; + int subnode_id = 0; + int ret = 0; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) + subnode_id = uncore_pcibus_to_subnodeid(pdev->bus); + + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + strscpy(mc_dev, "mc0", sizeof("mc0")); + if (!strcmp(type->name, mc_dev)) + loop = 2; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + strscpy(mc_dev, "mc_a0", sizeof("mc_a0")); + if (!strcmp(type->name, mc_dev)) + loop = 4; + } + + boxes = kcalloc(loop, sizeof(struct zhaoxin_uncore_box *), GFP_KERNEL); + if (!boxes) + return -ENOMEM; + + for (i = 0; i < loop; i++) { + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data) + j]; + + if (!type) + continue; + /* + * for performance monitoring unit with multiple boxes, + * each box has a different function id. 
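* (func_id is latched from the devfn of the first PCI device probed for that pmu and cross-checked against later devices)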
+ */ + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + + if (WARN_ON_ONCE(pmu->boxes[subnode_id] != NULL)) + return -EINVAL; + + box = uncore_alloc_box(type, NUMA_NO_NODE); + if (!box) + return -ENOMEM; + + if (pmu->func_id < 0) + pmu->func_id = pdev->devfn; + else + WARN_ON_ONCE(pmu->func_id != pdev->devfn); + + atomic_inc(&box->refcnt); + box->subnode_id = subnode_id; + box->pci_dev = pdev; + box->pmu = pmu; + uncore_box_init(box); + boxes[i] = box; + + pci_set_drvdata(pdev, boxes); + pmu->boxes[subnode_id] = box; + if (atomic_inc_return(&pmu->activeboxes) > 1) { + if (!strcmp(type->name, mc_dev)) + goto next_loop; + else + return 0; + } + /* First active box registers the pmu */ + ret = uncore_pmu_register(pmu); + if (ret) { + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + uncore_box_exit(box); + kfree(box); + } +next_loop: + j++; + } + + return ret; +} + +static void uncore_pci_remove(struct pci_dev *pdev) +{ + struct zhaoxin_uncore_box **boxes; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_pmu *pmu; + int subnode_id = 0; + int i = 0; + int loop = 1; + + boxes = pci_get_drvdata(pdev); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc0")) + loop = 2; + else + loop = 1; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc_a0")) + loop = 4; + else + loop = 1; + } + + + for (i = 0; i < loop; i++) { + box = boxes[i]; + pmu = box->pmu; + if (WARN_ON_ONCE(subnode_id != box->subnode_id)) + return; + + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + if (atomic_dec_return(&pmu->activeboxes) == 0) + uncore_pmu_unregister(pmu); + + uncore_box_exit(box); + kfree(box); + } + + kfree(boxes); +} + +static int __init uncore_pci_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_pci_uncores, false); + if (ret) + goto errtype; + + uncore_pci_driver->probe = uncore_pci_probe; + uncore_pci_driver->remove = uncore_pci_remove; + + ret = pci_register_driver(uncore_pci_driver); + if (ret) + goto errtype; + + pcidrv_registered = true; + return 0; + +errtype: + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + uncore_pci_uncores = empty_uncore; + return ret; +} + +static void uncore_pci_exit(void) +{ + if (pcidrv_registered) { + pcidrv_registered = false; + pci_unregister_driver(uncore_pci_driver); + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + } +} + +static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, + int new_cpu) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + struct zhaoxin_uncore_box *box; + int i, package_id, cluster_id = 0, subnode_id = 0; + + package_id = zx_topology_package_id(old_cpu < 0 ? new_cpu : old_cpu); + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + cluster_id = zx_topology_cluster_id(old_cpu < 0 ? new_cpu : old_cpu); + subnode_id = zx_topology_subnode_id(old_cpu < 0 ? 
new_cpu : old_cpu); + } + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) { + box = pmu->boxes[cluster_id]; + if (!box) + continue; + } else { + box = pmu->boxes[subnode_id]; + if (!box) + continue; + } + } else { + box = pmu->boxes[package_id]; + if (!box) + continue; + } + + if (old_cpu < 0) { + + WARN_ON_ONCE(box->cpu != -1); + box->cpu = new_cpu; + continue; + } + WARN_ON_ONCE(box->cpu != old_cpu); + box->cpu = -1; + if (new_cpu < 0) + continue; + + uncore_pmu_cancel_hrtimer(box); + perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); + box->cpu = new_cpu; + } +} + +static void uncore_change_context(struct zhaoxin_uncore_type **uncores, + int old_cpu, int new_cpu) +{ + for (; *uncores; uncores++) + uncore_change_type_ctx(*uncores, old_cpu, new_cpu); +} + +static void uncore_box_unref(struct zhaoxin_uncore_type **types, int id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } +} + +struct zhaoxin_uncore_type *uncore_msr_cluster_uncores[] = { + &kh40000_uncore_llc_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_msr_subnode_uncores[] = { + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_pci_subnode_uncores[] = { + &kh40000_uncore_mc0, + &kh40000_uncore_mc1, + &kh40000_uncore_pci, + &kh40000_uncore_zpi_dll, + &kh40000_uncore_zdi_dll, + &kh40000_uncore_pxptrf, + NULL, +}; + +static void kx5000_event_cpu_offline(int cpu) +{ + int package, target; + + /* Check if exiting cpu is used for collecting uncore events */ + + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + goto unref_cpu_mask; + + /* Find a new cpu to collect uncore events */ + target = cpumask_any_but(topology_core_cpumask(cpu), cpu); + + /* Migrate uncore events to the new target */ + if (target < nr_cpu_ids) + cpumask_set_cpu(target, &uncore_cpu_mask); + else + target = -1; + + uncore_change_context(uncore_msr_uncores, cpu, target); + uncore_change_context(uncore_mmio_uncores, cpu, target); + uncore_change_context(uncore_pci_uncores, cpu, target); + +unref_cpu_mask: + /*clear the references*/ + package = zx_topology_package_id(cpu); + uncore_box_unref(uncore_msr_uncores, package); + uncore_box_unref(uncore_mmio_uncores, package); +} + +static void kh40000_event_cpu_offline(int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + /* Check if exiting cpu is used for collecting uncore events */ + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_cluster_mask)) { + cluster_target = cpumask_any_but(topology_cluster_core_cpumask(cpu), cpu); + if (cluster_target < nr_cpu_ids) + cpumask_set_cpu(cluster_target, &uncore_cpu_cluster_mask); + else + cluster_target = -1; + uncore_change_context(uncore_msr_cluster_uncores, cpu, cluster_target); + } else { + uncore_box_unref(uncore_msr_cluster_uncores, cluster_id); + } + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_subnode_mask)) { + subnode_target = cpumask_any_but(topology_subnode_core_cpumask(cpu), cpu); + if (subnode_target < nr_cpu_ids) + cpumask_set_cpu(subnode_target, &uncore_cpu_subnode_mask); + else + subnode_target = 
-1; + uncore_change_context(uncore_msr_subnode_uncores, cpu, subnode_target); + uncore_change_context(uncore_pci_subnode_uncores, cpu, subnode_target); + } else { + uncore_box_unref(uncore_msr_subnode_uncores, subnode_id); + } + +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + unsigned int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_event_cpu_offline(cpu); + else + kx5000_event_cpu_offline(cpu); + + return 0; +} + +static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, + unsigned int id, unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + box->package_id = id; + list_add(&box->active_list, &allocated); + } + } + + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, + unsigned int id, unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + if (!strcmp(type->name, "llc")) + box->cluster_id = id; + else + box->subnode_id = id; + list_add(&box->active_list, &allocated); + } + } + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int uncore_box_ref(struct zhaoxin_uncore_type **types, + int id, unsigned int cpu) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i, ret = 0; + + int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + ret = kh40000_allocate_boxes(types, id, cpu); + else + ret = kx5000_allocate_boxes(types, id, cpu); + + if (ret) + return ret; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_inc_return(&box->refcnt) == 1) + uncore_box_init(box); + } + } + return 0; +} + +static int kx5000_event_cpu_online(unsigned int cpu) +{ + int package, target, msr_ret, mmio_ret; + + package = zx_topology_package_id(cpu); + msr_ret = uncore_box_ref(uncore_msr_uncores, package, cpu); + mmio_ret = uncore_box_ref(uncore_mmio_uncores, package, cpu); + if (msr_ret && mmio_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the package + * which collects uncore events 
already. + */ + target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + if (!msr_ret) + uncore_change_context(uncore_msr_uncores, -1, cpu); + if (!mmio_ret) + uncore_change_context(uncore_mmio_uncores, -1, cpu); + uncore_change_context(uncore_pci_uncores, -1, cpu); + + return 0; +} + +static int kh40000_event_cpu_online(unsigned int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + int cluster_ret, subnode_ret; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + cluster_ret = uncore_box_ref(uncore_msr_cluster_uncores, cluster_id, cpu); + subnode_ret = uncore_box_ref(uncore_msr_subnode_uncores, subnode_id, cpu); + + if (cluster_ret && subnode_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the cluster or subnode + * which collects uncore events already. + */ + + cluster_target = + cpumask_any_and(&uncore_cpu_cluster_mask, topology_cluster_core_cpumask(cpu)); + subnode_target = + cpumask_any_and(&uncore_cpu_subnode_mask, topology_subnode_core_cpumask(cpu)); + + if (cluster_target < nr_cpu_ids && subnode_target < nr_cpu_ids) + return 0; + + if (!cluster_ret && cluster_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_cluster_mask); + uncore_change_context(uncore_msr_cluster_uncores, -1, cpu); + } + + if (!subnode_ret && subnode_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_subnode_mask); + uncore_change_context(uncore_msr_subnode_uncores, -1, cpu); + uncore_change_context(uncore_pci_subnode_uncores, -1, cpu); + } + + return 0; +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int x86_model; + int kx5000_ret = 0, kh40000_ret = 0; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_ret = kh40000_event_cpu_online(cpu); + else + kx5000_ret = kx5000_event_cpu_online(cpu); + + if (kx5000_ret || kh40000_ret) + return -ENOMEM; + + return 0; +} + +static int __init type_pmu_register(struct zhaoxin_uncore_type *type) +{ + int i, ret; + + for (i = 0; i < type->num_boxes; i++) { + ret = uncore_pmu_register(&type->pmus[i]); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_msr_pmus_register(void) +{ + struct zhaoxin_uncore_type **types = uncore_msr_uncores; + int ret; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_cpu_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_msr_uncores, true); + if (ret) + goto err; + + ret = uncore_msr_pmus_register(); + if (ret) + goto err; + return 0; +err: + uncore_types_exit(uncore_msr_uncores); + uncore_msr_uncores = empty_uncore; + return ret; +} + +static int __init uncore_mmio_init(void) +{ + struct zhaoxin_uncore_type **types = uncore_mmio_uncores; + int ret; + + ret = uncore_types_init(types, true); + if (ret) + goto err; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + goto err; + } + return 0; +err: + uncore_types_exit(uncore_mmio_uncores); + uncore_mmio_uncores = empty_uncore; + return ret; +} + +struct zhaoxin_uncore_init_fun { + void (*cpu_init)(void); + int (*pci_init)(void); + void (*mmio_init)(void); +}; + +void kx5000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx5000_msr_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx5000_uncore_init __initconst = { + .cpu_init = kx5000_uncore_cpu_init, +}; + +void 
kh40000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kh40000_msr_uncores; +} + +int kh40000_uncore_pci_init(void) +{ + int ret = kh40000_pci2node_map_init();/*pci_bus to package mapping, do nothing*/ + + if (ret) + return ret; + uncore_pci_uncores = kh40000_pci_uncores; + uncore_pci_driver = &kh40000_uncore_pci_driver; + return 0; +} + +static const struct zhaoxin_uncore_init_fun kh40000_uncore_init __initconst = { + .cpu_init = kh40000_uncore_cpu_init, + .pci_init = kh40000_uncore_pci_init, +}; + +void kx8000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx8000_msr_uncores; +} + +int kx8000_uncore_pci_init(void) +{ + uncore_pci_uncores = kx8000_pci_uncores; + uncore_pci_driver = &kx8000_uncore_pci_driver; + + return 0; +} + +void kx8000_uncore_mmio_init(void) +{ + uncore_mmio_uncores = kx8000_mmio_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx8000_uncore_init __initconst = { + .cpu_init = kx8000_uncore_cpu_init, + .pci_init = kx8000_uncore_pci_init, + .mmio_init = kx8000_uncore_mmio_init, +}; + +static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + {}, +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match); + +static int __init zhaoxin_uncore_init(void) +{ + const struct x86_cpu_id *id = NULL; + struct zhaoxin_uncore_init_fun *uncore_init; + int pret = 0, cret = 0, mret = 0, ret; + + id = x86_match_cpu(zhaoxin_uncore_match); + if (!id) + return -ENODEV; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return -ENODEV; + + pr_info("welcome to uncore.\n"); + + get_topology_number(); + get_topology_info(); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + zx_gen_core_map(); + get_pcibus_limit(); + } + + uncore_init = (struct zhaoxin_uncore_init_fun *)id->driver_data; + + if (uncore_init->pci_init) { + pret = uncore_init->pci_init(); + if (!pret) + pret = uncore_pci_init(); + } + + if (uncore_init->cpu_init) { + uncore_init->cpu_init(); + cret = uncore_cpu_init(); + } + + if (uncore_init->mmio_init) { + uncore_init->mmio_init(); + mret = uncore_mmio_init(); + } + + if (cret && pret && mret) + return -ENODEV; + + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, + "perf/x86/zhaoxin/uncore:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret) + goto err; + pr_info("uncore init success!\n"); + + return 0; + +err: + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); + pr_info("uncore init fail!\n"); + + return ret; +} +module_init(zhaoxin_uncore_init); + +static void __exit zhaoxin_uncore_exit(void) +{ + cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); +} +module_exit(zhaoxin_uncore_exit); diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h new file mode 100644 index 
000000000000..5d09696f8bc7 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.h @@ -0,0 +1,371 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#include +#include +#include +#include + +#include +#include "../perf_event.h" + +#define ZHAOXIN_FAM7_KX5000 0x1b +#define ZHAOXIN_FAM7_KX6000 0x3b +#define ZHAOXIN_FAM7_KH40000 0x5b +#define ZHAOXIN_FAM7_KX8000 0x6b + + + +#define UNCORE_PMU_NAME_LEN 32 +#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) + +#define UNCORE_FIXED_EVENT 0xff +#define UNCORE_PMC_IDX_MAX_GENERIC 4 +#define UNCORE_PMC_IDX_MAX_FIXED 1 +#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC + +#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) + +#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) +#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) +#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) + +struct zhaoxin_uncore_ops; +struct zhaoxin_uncore_pmu; +struct zhaoxin_uncore_box; +struct uncore_event_desc; + +struct zhaoxin_uncore_type { + const char *name; + int num_counters; + int num_boxes; + int perf_ctr_bits; + int fixed_ctr_bits; + unsigned int perf_ctr; + unsigned int event_ctl; + unsigned int event_mask; + unsigned int event_mask_ext; + unsigned int fixed_ctr; + unsigned int fixed_ctl; + unsigned int box_ctl; + union { + unsigned int msr_offset; + unsigned int mmio_offset; + }; + unsigned int num_shared_regs:8; + unsigned int single_fixed:1; + unsigned int pair_ctr_ctl:1; + unsigned int *msr_offsets; + struct event_constraint unconstrainted; + struct event_constraint *constraints; + struct zhaoxin_uncore_pmu *pmus; + struct zhaoxin_uncore_ops *ops; + struct uncore_event_desc *event_descs; + const struct attribute_group *attr_groups[4]; + struct pmu *pmu; /* for custom pmu ops */ +}; + +#define pmu_group attr_groups[0] +#define format_group attr_groups[1] +#define events_group attr_groups[2] + +struct zhaoxin_uncore_ops { + void (*init_box)(struct zhaoxin_uncore_box *box); + void (*exit_box)(struct zhaoxin_uncore_box *box); + void (*disable_box)(struct zhaoxin_uncore_box *box); + void (*enable_box)(struct zhaoxin_uncore_box *box); + void (*disable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event); + void (*enable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event); + u64 (*read_counter)(struct zhaoxin_uncore_box *box, struct perf_event *event); + int (*hw_config)(struct zhaoxin_uncore_box *box, struct perf_event *event); + struct event_constraint *(*get_constraint)(struct zhaoxin_uncore_box *box, + struct perf_event *event); + void (*put_constraint)(struct zhaoxin_uncore_box *box, struct perf_event *event); +}; + +struct zhaoxin_uncore_pmu { + struct pmu pmu; + char name[UNCORE_PMU_NAME_LEN]; + int pmu_idx; + int func_id; + bool registered; + atomic_t activeboxes; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_box **boxes; +}; + +struct zhaoxin_uncore_extra_reg { + raw_spinlock_t lock; + u64 config, config1, config2; + atomic_t ref; +}; + +struct zhaoxin_uncore_box { + int pci_phys_id; + int package_id; /*Package ID */ + int cluster_id; + int subnode_id; + int n_active; /* number of active events */ + int n_events; + int cpu; /* cpu to collect events */ + unsigned long flags; + atomic_t refcnt; + struct perf_event *events[UNCORE_PMC_IDX_MAX]; + struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; + struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; + unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; + u64 tags[UNCORE_PMC_IDX_MAX]; + struct pci_dev *pci_dev; + struct zhaoxin_uncore_pmu 
*pmu; + u64 hrtimer_duration; /* hrtimer timeout for this box */ + struct hrtimer hrtimer; + struct list_head list; + struct list_head active_list; + void __iomem *io_addr; + struct zhaoxin_uncore_extra_reg shared_regs[]; +}; + +#define UNCORE_BOX_FLAG_INITIATED 0 + +struct uncore_event_desc { + struct device_attribute attr; + const char *config; +}; + +struct hw_info { + u64 config_info; + u64 active_state; +}; + +ssize_t zx_uncore_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, zx_uncore_event_show, NULL), \ + .config = _config, \ +} + +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +static inline bool uncore_pmc_fixed(int idx) +{ + return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline +unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl + + box->pmu->type->mmio_offset * box->pmu->pmu_idx; +} + +static inline unsigned int uncore_pci_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr; +} + +static inline +unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return idx * 4 + box->pmu->type->event_ctl; +} + +static inline +unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (!strncmp(box->pmu->type->name, "mc_", 3)) + return idx * 2 + box->pmu->type->perf_ctr; + else + return idx * 8 + box->pmu->type->perf_ctr; +} + +static inline unsigned int uncore_msr_box_offset(struct zhaoxin_uncore_box *box) +{ + struct zhaoxin_uncore_pmu *pmu = box->pmu; + + return pmu->type->msr_offsets ? + pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned int uncore_msr_box_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->box_ctl) + return 0; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->fixed_ctl) + return 0; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->event_ctl + + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->perf_ctr + + (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctl(box); + else + return uncore_msr_fixed_ctl(box); +} + +static inline +unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctr(box); + else + return uncore_msr_fixed_ctr(box); +} + +static inline +unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ if (box->pci_dev || box->io_addr) + return uncore_pci_event_ctl(box, idx); + else + return uncore_msr_event_ctl(box, idx); +} + +static inline +unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ if (box->pci_dev || box->io_addr) + return uncore_pci_perf_ctr(box, idx); + else + return uncore_msr_perf_ctr(box, idx); +} + +static inline int uncore_perf_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->perf_ctr_bits; +} + +static inline int uncore_fixed_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr_bits; +} + +static inline int uncore_num_counters(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->num_counters; +} + +static inline void uncore_disable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->disable_box) + box->pmu->type->ops->disable_box(box); +} + +static inline void uncore_enable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->enable_box) + box->pmu->type->ops->enable_box(box); +} + +static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + box->pmu->type->ops->disable_event(box, event); +} + +static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + box->pmu->type->ops->enable_event(box, event); +} + +static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + return box->pmu->type->ops->read_counter(box, event); +} + +static inline void uncore_box_init(struct zhaoxin_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + +static inline void uncore_box_exit(struct zhaoxin_uncore_box *box) +{ + if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->exit_box) + box->pmu->type->ops->exit_box(box); + } +} + +static inline bool uncore_box_is_fake(struct zhaoxin_uncore_box *box) +{ + return (box->package_id < 0); +} + +static inline struct zhaoxin_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct zhaoxin_uncore_pmu, pmu); +} + +static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event *event) +{ + return event->pmu_private; +} + + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu); +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box); +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event); +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_event_start(struct perf_event *event, int flags); +static void uncore_pmu_event_stop(struct perf_event *event, int flags); +static int uncore_pmu_event_add(struct perf_event 
*event, int flags); +static void uncore_pmu_event_del(struct perf_event *event, int flags); +static void uncore_pmu_event_read(struct perf_event *event); +static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event); +struct event_constraint * +uncore_get_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx); -- Gitee From c166e61d726d761f19965c2f6697945cb8611f9f Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 12 Mar 2024 15:05:43 +0800 Subject: [PATCH 226/953] anolis: cgroup: fix compile error when only config CONFIG_CGROUPS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8517 When CONFIG_CGROUPS is enabled without CONFIG_CGROUP_WRITEBACK, it produces the following errors at compile time: ./include/linux/backing-dev.h:394:51: error: ‘struct cgroup_subsys’ declared inside parameter list will not be visible outside of this definition or declaration [-Werror] 394 | static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, | ^~~~~~~~~~~~~ ./include/linux/backing-dev.h:409:51: error: ‘struct cgroup_subsys’ declared inside parameter list will not be visible outside of this definition or declaration [-Werror] 409 | static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss, | ^~~~~~~~~~~~~ Fix it by explicitly declaring "struct cgroup_subsys" in this case. Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2875 --- include/linux/backing-dev.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 506f89a99a6c..81adf07c9637 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -391,6 +391,8 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) } #ifdef CONFIG_CGROUPS +struct cgroup_subsys; + static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, struct list_head *tmp_links, struct css_set *cset) -- Gitee From 665029d8b557394d68a65c3a54c202ec80d38e75 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sat, 21 Oct 2023 10:01:37 +0800 Subject: [PATCH 227/953] erofs: don't warn MicroLZMA format anymore ANBZ: #8524 commit 798eecaea0f0366306cbc76986a83041a7e8669f upstream. LZMA algorithm support landed in Linux 5.16, more than one year ago. Besides, the new XZ Utils 5.4 is now available in most Linux distributions. Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231021020137.1646959-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/Kconfig | 7 ++----- fs/erofs/decompressor_lzma.c | 2 -- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index f6dc961e6c2b..e540648dedc2 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -91,13 +91,10 @@ config EROFS_FS_ZIP_LZMA select XZ_DEC_MICROLZMA help Saying Y here includes support for reading EROFS file systems - containing LZMA compressed data, specifically called microLZMA. it - gives better compression ratios than the LZ4 algorithm, at the + containing LZMA compressed data, specifically called microLZMA. It + gives better compression ratios than the default LZ4 format, at the expense of more CPU overhead.
- LZMA support is an experimental feature for now and so most file - systems will be readable without selecting this option. - If unsure, say N. config EROFS_FS_ZIP_DEFLATE diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index ba4ec73f4aae..852dd8eac5df 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -96,8 +96,6 @@ int z_erofs_load_lzma_config(struct super_block *sb, return -EINVAL; } - erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!"); - /* in case 2 z_erofs_load_lzma_config() race to avoid deadlock */ mutex_lock(&lzma_resize_mutex); -- Gitee From c8685fb7412395ab73fc29416b333176732d1699 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Thu, 26 Oct 2023 10:16:26 +0800 Subject: [PATCH 228/953] erofs: get rid of ROOT_NID() ANBZ: #8524 commit 6b8a113cae6cc517579a33ad484355c3e4b3d8e7 upstream. Let's open code this helper for simplicity. Signed-off-by: Ferry Meng Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231026021627.23284-1-mengferry@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/internal.h | 2 -- fs/erofs/super.c | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 1a4fe9f60295..8f9e5ac1059a 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -228,8 +228,6 @@ struct erofs_buf { }; #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL }) -#define ROOT_NID(sb) ((sb)->root_nid) - #define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits) #define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1)) #define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index c9f9a43197db..bcc62027d4f9 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -661,13 +661,13 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) xa_init(&sbi->managed_pslots); #endif - inode = erofs_iget(sb, ROOT_NID(sbi)); + inode = erofs_iget(sb, sbi->root_nid); if (IS_ERR(inode)) return PTR_ERR(inode); if (!S_ISDIR(inode->i_mode)) { erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)", - ROOT_NID(sbi), inode->i_mode); + sbi->root_nid, inode->i_mode); iput(inode); return -EINVAL; } @@ -697,7 +697,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; - erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi)); + erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid); return 0; } -- Gitee From 7fe71c33a0566589026d6c2aca478e1384d751e7 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Thu, 26 Oct 2023 10:16:27 +0800 Subject: [PATCH 229/953] erofs: tidy up redundant includes ANBZ: #8524 commit f5deddce60b50b55bcafeebaab1408d203b0f204 upstream. - Remove unused includes like and ; - Move common includes into "internal.h". 
Signed-off-by: Ferry Meng Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231026021627.23284-2-mengferry@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/data.c | 2 -- fs/erofs/decompressor.c | 1 - fs/erofs/decompressor_deflate.c | 1 - fs/erofs/decompressor_lzma.c | 1 - fs/erofs/internal.h | 2 ++ fs/erofs/super.c | 3 --- 6 files changed, 2 insertions(+), 8 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index a75c89a9a3d0..2cd747f4dc8f 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -5,9 +5,7 @@ * Copyright (C) 2021, Alibaba Cloud */ #include "internal.h" -#include #include -#include #include void erofs_unmap_metabuf(struct erofs_buf *buf) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index d36b3963c0bf..066ddc03b7b4 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -4,7 +4,6 @@ * https://www.huawei.com/ */ #include "compress.h" -#include #include #ifndef LZ4_DISTANCE_MAX /* history window size */ diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index 0e1946a6bda5..daf3c1bdeab8 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include #include #include "compress.h" diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 852dd8eac5df..2dd14f99c1dc 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include -#include #include "compress.h" struct z_erofs_lzma { diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 8f9e5ac1059a..410f5af62354 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -8,8 +8,10 @@ #define __EROFS_INTERNAL_H #include +#include #include #include +#include #include #include #include diff --git a/fs/erofs/super.c b/fs/erofs/super.c index bcc62027d4f9..4ca9e5e3e539 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -4,14 +4,11 @@ * https://www.huawei.com/ * Copyright (C) 2021, Alibaba Cloud */ -#include #include -#include #include #include #include #include -#include #include #include "xattr.h" -- Gitee From 9c8b18056267ebe21d75611a0e5c6fca133b778e Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Thu, 9 Nov 2023 19:18:22 +0800 Subject: [PATCH 230/953] erofs: simplify erofs_read_inode() ANBZ: #8524 commit 914fa861e3d7803c9bbafc229652c2a69edb8b60 upstream. After commit 1c7f49a76773 ("erofs: tidy up EROFS on-disk naming"), there is a unique `union erofs_inode_i_u` so that we could parse the union directly. Besides, it also replaces `inode->i_sb` with `sb` for simplicity. 
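For reference, the shared tail after this patch is shaped roughly as follows (an illustrative sketch distilled from the diff below, not the verbatim kernel code):

	union erofs_inode_i_u iu;	/* filled by the version-specific parsing above */

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG: case S_IFDIR: case S_IFLNK:
		vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
		break;
	case S_IFCHR: case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev));
		break;
	case S_IFIFO: case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:	/* bogus i_mode */
		err = -EFSCORRUPTED;
		goto err_out;
	}

so the mode and union decoding is written once instead of once per on-disk inode version.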
Signed-off-by: Ferry Meng Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231109111822.17944-1-mengferry@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/inode.c | 98 +++++++++++++++++------------------------------- 1 file changed, 35 insertions(+), 63 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index edc8ec7581b8..7f63e7c01ffc 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -15,11 +15,11 @@ static void *erofs_read_inode(struct erofs_buf *buf, struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_inode *vi = EROFS_I(inode); const erofs_off_t inode_loc = erofs_iloc(inode); - erofs_blk_t blkaddr, nblks = 0; void *kaddr; struct erofs_inode_compact *dic; struct erofs_inode_extended *die, *copied = NULL; + union erofs_inode_i_u iu; unsigned int ifmt; int err; @@ -35,9 +35,8 @@ static void *erofs_read_inode(struct erofs_buf *buf, dic = kaddr + *ofs; ifmt = le16_to_cpu(dic->i_format); - if (ifmt & ~EROFS_I_ALL) { - erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu", + erofs_err(sb, "unsupported i_format %u of nid %llu", ifmt, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -45,7 +44,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { - erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", + erofs_err(sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -82,40 +81,15 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(die->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = die->i_u; i_uid_write(inode, le32_to_cpu(die->i_uid)); i_gid_write(inode, le32_to_cpu(die->i_gid)); set_nlink(inode, le32_to_cpu(die->i_nlink)); - - /* extended inode has its own timestamp */ + /* each extended inode has its own timestamp */ inode_set_ctime(inode, le64_to_cpu(die->i_mtime), le32_to_cpu(die->i_mtime_nsec)); inode->i_size = le64_to_cpu(die->i_size); - - /* total blocks for compressed files */ - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(die->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - /* fill chunked inode summary info */ - vi->chunkformat = le16_to_cpu(die->i_u.c.format); kfree(copied); copied = NULL; break; @@ -125,49 +99,51 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(dic->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = dic->i_u; i_uid_write(inode, le16_to_cpu(dic->i_uid)); i_gid_write(inode, le16_to_cpu(dic->i_gid)); set_nlink(inode, le16_to_cpu(dic->i_nlink)); - /* use build time for compact inodes 
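(compact on-disk inodes carry no timestamp fields of their own)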
*/ inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec); inode->i_size = le32_to_cpu(dic->i_size); - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(dic->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - vi->chunkformat = le16_to_cpu(dic->i_u.c.format); break; default: - erofs_err(inode->i_sb, - "unsupported on-disk inode version %u of nid %llu", + erofs_err(sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); err = -EOPNOTSUPP; goto err_out; } - if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr); + break; + case S_IFCHR: + case S_IFBLK: + inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev)); + break; + case S_IFIFO: + case S_IFSOCK: + inode->i_rdev = 0; + break; + default: + erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, + vi->nid); + err = -EFSCORRUPTED; + goto err_out; + } + + /* total blocks for compressed files */ + if (erofs_inode_is_data_compressed(vi->datalayout)) { + nblks = le32_to_cpu(iu.compressed_blocks); + } else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + /* fill chunked inode summary info */ + vi->chunkformat = le16_to_cpu(iu.c.format); if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) { - erofs_err(inode->i_sb, - "unsupported chunk format %x of nid %llu", + erofs_err(sb, "unsupported chunk format %x of nid %llu", vi->chunkformat, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -190,10 +166,6 @@ static void *erofs_read_inode(struct erofs_buf *buf, inode->i_blocks = nblks << (sb->s_blocksize_bits - 9); return kaddr; -bogusimode: - erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", - inode->i_mode, vi->nid); - err = -EFSCORRUPTED; err_out: DBG_BUGON(1); kfree(copied); -- Gitee From 277e707eb540d6e3038b956ab8134abfffaecb59 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 17 Nov 2023 16:53:29 +0800 Subject: [PATCH 231/953] MAINTAINERS: erofs: add EROFS webpage ANBZ: #8524 commit 62b241efff99fc4d88a86f1c67c7516e31f432a3 upstream. Add a new `W:` field of the EROFS entry points to the documentation site at . In addition, update the in-tree documentation and Kconfig too. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231117085329.1624223-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- Documentation/filesystems/erofs.rst | 4 ++++ MAINTAINERS | 1 + fs/erofs/Kconfig | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst index f200d7874495..445224817823 100644 --- a/Documentation/filesystems/erofs.rst +++ b/Documentation/filesystems/erofs.rst @@ -91,6 +91,10 @@ compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs): - git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git +For more information, please also refer to the documentation site: + +- https://erofs.docs.kernel.org + Bugs and patches are welcome, please kindly help us and send to the following linux-erofs mailing list: diff --git a/MAINTAINERS b/MAINTAINERS index 81a421b24f42..5ecb213b4ef4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7741,6 +7741,7 @@ R: Yue Hu R: Jeffle Xu L: linux-erofs@lists.ozlabs.org S: Maintained +W: https://erofs.docs.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git F: Documentation/ABI/testing/sysfs-fs-erofs F: Documentation/filesystems/erofs.rst diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index e540648dedc2..1d318f85232d 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -21,7 +21,7 @@ config EROFS_FS performance under extremely memory pressure without extra cost. See the documentation at - for more details. + and the web pages at for more details. If unsure, say N. -- Gitee From a9564f9ae531d272a8b88cc5981cd431b2073860 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:53 +0800 Subject: [PATCH 232/953] erofs: support I/O submission for sub-page compressed blocks ANBZ: #8524 commit 192351616a9dde686492bcb9d1e4895a1411a527 upstream. Add a basic I/O submission path first to support sub-page blocks: - Temporary short-lived pages will be used entirely; - In-place I/O pages can be used partially, but compressed pages need to be able to be mapped in contiguous virtual memory. As a start, currently cache decompression is explicitly disabled for sub-page blocks, which will be supported in the future. 
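The partial-use rule for in-place pages reduces to rounding the byte range inside each page to filesystem blocks. A self-contained sketch of that arithmetic (the sample values are hypothetical; in the patch it lives in z_erofs_fill_bio_vec()):

#include <stdio.h>

#define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

/*
 * An in-place file-backed page may only be submitted at block granularity,
 * so the usable range is the block-aligned region covering the compressed
 * bytes held in this page.
 */
static void subpage_bvec(int offset, unsigned int end, unsigned int bs,
                         unsigned int *bv_offset, unsigned int *bv_len)
{
        *bv_offset = offset < 0 ? ROUND_UP((unsigned int)-offset, bs) : 0;
        *bv_len = ROUND_UP(end, bs) - *bv_offset;
}

int main(void)
{
        unsigned int off, len;

        subpage_bvec(-8192, 20480, 4096, &off, &len);   /* 4k blocks, 64k page */
        printf("bv_offset=%u bv_len=%u\n", off, len);   /* 8192 12288 */
        return 0;
}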
Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-2-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 156 ++++++++++++++++++++++------------------------- 1 file changed, 74 insertions(+), 82 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 1c0e6167d8e7..1bbd76e5220c 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1435,86 +1435,85 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, z_erofs_decompressqueue_work(&io->u.work); } -static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, - unsigned int nr, - struct page **pagepool, - struct address_space *mc) +static void z_erofs_fill_bio_vec(struct bio_vec *bvec, + struct z_erofs_decompress_frontend *f, + struct z_erofs_pcluster *pcl, + unsigned int nr, + struct address_space *mc) { - const pgoff_t index = pcl->obj.index; gfp_t gfp = mapping_gfp_mask(mc); bool tocache = false; - + struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr; struct address_space *mapping; - struct page *oldpage, *page; - int justfound; + struct page *page, *oldpage; + int justfound, bs = i_blocksize(f->inode); + /* Except for inplace pages, the entire page can be used for I/Os */ + bvec->bv_offset = 0; + bvec->bv_len = PAGE_SIZE; repeat: - page = READ_ONCE(pcl->compressed_bvecs[nr].page); - oldpage = page; - - if (!page) + oldpage = READ_ONCE(zbv->page); + if (!oldpage) goto out_allocpage; - justfound = (unsigned long)page & 1UL; - page = (struct page *)((unsigned long)page & ~1UL); + justfound = (unsigned long)oldpage & 1UL; + page = (struct page *)((unsigned long)oldpage & ~1UL); + bvec->bv_page = page; + DBG_BUGON(z_erofs_is_shortlived_page(page)); /* - * preallocated cached pages, which is used to avoid direct reclaim - * otherwise, it will go inplace I/O path instead. + * Handle preallocated cached pages. We tried to allocate such pages + * without triggering direct reclaim. If allocation failed, inplace + * file-backed pages will be used instead. */ if (page->private == Z_EROFS_PREALLOCATED_PAGE) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); set_page_private(page, 0); + WRITE_ONCE(zbv->page, page); tocache = true; goto out_tocache; } - mapping = READ_ONCE(page->mapping); + mapping = READ_ONCE(page->mapping); /* - * file-backed online pages in plcuster are all locked steady, - * therefore it is impossible for `mapping' to be NULL. + * File-backed pages for inplace I/Os are all locked steady, + * therefore it is impossible for `mapping` to be NULL. */ - if (mapping && mapping != mc) - /* ought to be unmanaged pages */ - goto out; - - /* directly return for shortlived page as well */ - if (z_erofs_is_shortlived_page(page)) - goto out; + if (mapping && mapping != mc) { + if (zbv->offset < 0) + bvec->bv_offset = round_up(-zbv->offset, bs); + bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset; + return; + } lock_page(page); - /* only true if page reclaim goes wrong, should never happen */ DBG_BUGON(justfound && PagePrivate(page)); - /* the page is still in manage cache */ + /* the cached page is still in managed cache */ if (page->mapping == mc) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); - + WRITE_ONCE(zbv->page, page); + /* + * The cached page is still available but without a valid + * `->private` pcluster hint. Let's reconnect them. 
+ */ if (!PagePrivate(page)) { - /* - * impossible to be !PagePrivate(page) for - * the current restriction as well if - * the page is already in compressed_bvecs[]. - */ DBG_BUGON(!justfound); - - justfound = 0; - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); + /* compressed_bvecs[] already takes a ref */ + attach_page_private(page, pcl); + put_page(page); } - /* no need to submit io if it is already up-to-date */ + /* no need to submit if it is already up-to-date */ if (PageUptodate(page)) { unlock_page(page); - page = NULL; + bvec->bv_page = NULL; } - goto out; + return; } /* - * the managed page has been truncated, it's unsafe to - * reuse this one, let's allocate a new cache-managed page. + * It has been truncated, so it's unsafe to reuse this one. Let's + * allocate a new page for compressed data. */ DBG_BUGON(page->mapping); DBG_BUGON(!justfound); @@ -1523,25 +1522,23 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, unlock_page(page); put_page(page); out_allocpage: - page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL); - if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page, - oldpage, page)) { - erofs_pagepool_add(pagepool, page); + page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); + if (oldpage != cmpxchg(&zbv->page, oldpage, page)) { + erofs_pagepool_add(&f->pagepool, page); cond_resched(); goto repeat; } + bvec->bv_page = page; out_tocache: - if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) { - /* turn into temporary page if fails (1 ref) */ + if (!tocache || bs != PAGE_SIZE || + add_to_page_cache_lru(page, mc, pcl->obj.index + nr, gfp)) { + /* turn into a temporary shortlived page (1 ref) */ set_page_private(page, Z_EROFS_SHORTLIVED_PAGE); - goto out; + return; } attach_page_private(page, pcl); - /* drop a refcount added by allocpage (then we have 2 refs here) */ + /* drop a refcount added by allocpage (then 2 refs in total here) */ put_page(page); - -out: /* the only exit (for tracing and debugging) */ - return page; } static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, @@ -1596,7 +1593,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } -static void z_erofs_decompressqueue_endio(struct bio *bio) +static void z_erofs_submissionqueue_endio(struct bio *bio) { struct z_erofs_decompressqueue *q = bio->bi_private; blk_status_t err = bio->bi_status; @@ -1608,7 +1605,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio) DBG_BUGON(PageUptodate(page)); DBG_BUGON(z_erofs_page_is_invalidated(page)); - if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { if (!err) SetPageUptodate(page); @@ -1631,17 +1627,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; z_erofs_next_pcluster_t owned_head = f->owned_head; /* bio is NULL initially, so no need to initialize last_{index,bdev} */ - pgoff_t last_index; + erofs_off_t last_pa; struct block_device *last_bdev; unsigned int nr_bios = 0; struct bio *bio = NULL; unsigned long pflags; int memstall = 0; - /* - * if managed cache is enabled, bypass jobqueue is needed, - * no need to read from device for all pclusters in this queue. - */ + /* No need to read from device for pclusters in the bypass queue. 
*/ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL); q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg); @@ -1654,7 +1647,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, do { struct erofs_map_dev mdev; struct z_erofs_pcluster *pcl; - pgoff_t cur, end; + erofs_off_t cur, end; + struct bio_vec bvec; unsigned int i = 0; bool bypass = true; @@ -1673,18 +1667,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, }; (void)erofs_map_dev(sb, &mdev); - cur = erofs_blknr(sb, mdev.m_pa); - end = cur + pcl->pclusterpages; - + cur = mdev.m_pa; + end = cur + (pcl->pclusterpages << PAGE_SHIFT); do { - struct page *page; - - page = pickup_page_for_submission(pcl, i++, - &f->pagepool, mc); - if (!page) + z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); + if (!bvec.bv_page) continue; - if (bio && (cur != last_index + 1 || + if (bio && (cur != last_pa || last_bdev != mdev.m_bdev)) { submit_bio_retry: submit_bio(bio); @@ -1695,7 +1685,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, bio = NULL; } - if (unlikely(PageWorkingset(page)) && !memstall) { + if (unlikely(PageWorkingset(bvec.bv_page)) && + !memstall) { psi_memstall_enter(&pflags); memstall = 1; } @@ -1703,23 +1694,24 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, if (!bio) { bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOIO); - bio->bi_end_io = z_erofs_decompressqueue_endio; - - last_bdev = mdev.m_bdev; - bio->bi_iter.bi_sector = (sector_t)cur << - (sb->s_blocksize_bits - 9); + bio->bi_end_io = z_erofs_submissionqueue_endio; + bio->bi_iter.bi_sector = cur >> 9; bio->bi_private = q[JQ_SUBMIT]; if (readahead) bio->bi_opf |= REQ_RAHEAD; ++nr_bios; + last_bdev = mdev.m_bdev; } - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) + if (cur + bvec.bv_len > end) + bvec.bv_len = end - cur; + if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, + bvec.bv_offset)) goto submit_bio_retry; - last_index = cur; + last_pa = cur + bvec.bv_len; bypass = false; - } while (++cur < end); + } while ((cur += bvec.bv_len) < end); if (!bypass) qtail[JQ_SUBMIT] = &pcl->next; -- Gitee From 4407c0e5dda9f35649fe35f4c8fd61a60436496d Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:54 +0800 Subject: [PATCH 233/953] erofs: record `pclustersize` in bytes instead of pages ANBZ: #8524 commit 54ed3fdd66055d073cb1cd2c6c65bbc0683c40cf upstream. Currently, compressed sizes are recorded in pages using `pclusterpages`, However, for tailpacking pclusters, `tailpacking_size` is used instead. This approach doesn't work when dealing with sub-page blocks. To address this, let's switch them to the unified `pclustersize` in bytes. 
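With a byte-based field, the page count becomes a derived quantity, which is what lets one representation cover both regular and tailpacking pclusters. A user-space rendering of the helper this patch ends up with (4k pages assumed):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Pages needed to hold pclustersize bytes of compressed data. */
static unsigned int pclusterpages(unsigned long pclustersize)
{
        return PAGE_ALIGN(pclustersize) >> PAGE_SHIFT;
}

int main(void)
{
        printf("%u\n", pclusterpages(1024));    /* tailpacking -> 1 page */
        printf("%u\n", pclusterpages(65536));   /* 64k pcluster -> 16 pages */
        return 0;
}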
Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-3-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 64 ++++++++++++++++++++---------------------------- 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 1bbd76e5220c..5d5640173412 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -56,6 +56,9 @@ struct z_erofs_pcluster { /* L: total number of bvecs */ unsigned int vcnt; + /* I: pcluster size (compressed size) in bytes */ + unsigned int pclustersize; + /* I: page offset of start position of decompression */ unsigned short pageofs_out; @@ -70,14 +73,6 @@ struct z_erofs_pcluster { struct rcu_head rcu; }; - union { - /* I: physical cluster size in pages */ - unsigned short pclusterpages; - - /* I: tailpacking inline compressed size */ - unsigned short tailpacking_size; - }; - /* I: compression algorithm format */ unsigned char algorithmformat; @@ -115,9 +110,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl) static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) { - if (z_erofs_is_inline_pcluster(pcl)) - return 1; - return pcl->pclusterpages; + return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT; } /* @@ -298,12 +291,12 @@ static int z_erofs_create_pcluster_pool(void) return 0; } -static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) +static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size) { - int i; + unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct z_erofs_pcluster_slab *pcs = pcluster_pool; - for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { - struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; + for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) { struct z_erofs_pcluster *pcl; if (nrpages > pcs->maxpages) @@ -312,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); if (!pcl) return ERR_PTR(-ENOMEM); - pcl->pclusterpages = nrpages; + pcl->pclustersize = size; return pcl; } return ERR_PTR(-EINVAL); @@ -559,6 +552,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) { struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); struct z_erofs_pcluster *pcl = fe->pcl; + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool shouldalloc = z_erofs_should_alloc_cache(fe); bool standalone = true; /* @@ -572,10 +566,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; - for (i = 0; i < pcl->pclusterpages; ++i) { - struct page *page; + for (i = 0; i < pclusterpages; ++i) { + struct page *page, *newpage; void *t; /* mark pages just found for debugging */ - struct page *newpage = NULL; /* the compressed page was loaded before */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) @@ -585,6 +578,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) if (page) { t = (void *)((unsigned long)page | 1); + newpage = NULL; } else { /* I/O is needed, no possible to decompress directly */ standalone = false; @@ -592,9 +586,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) continue; /* - * try to use cached I/O if page allocation - * succeeds or fallback to in-place I/O instead - * to avoid any direct reclaim. 
+ * Try cached I/O if allocation succeeds or fallback to + * in-place I/O instead to avoid any direct reclaim. */ newpage = erofs_allocpage(&fe->pagepool, gfp); if (!newpage) @@ -626,6 +619,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, { struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); int i; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); @@ -633,7 +627,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, * refcount of workgroup is now freezed as 0, * therefore no need to worry about available decompression users. */ - for (i = 0; i < pcl->pclusterpages; ++i) { + for (i = 0; i < pclusterpages; ++i) { struct page *page = pcl->compressed_bvecs[i].page; if (!page) @@ -657,6 +651,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) { struct z_erofs_pcluster *pcl = folio_get_private(folio); + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool ret; int i; @@ -669,7 +664,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) goto out; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - for (i = 0; i < pcl->pclusterpages; ++i) { + for (i = 0; i < pclusterpages; ++i) { if (pcl->compressed_bvecs[i].page == &folio->page) { WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); ret = true; @@ -778,20 +773,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) { struct erofs_map_blocks *map = &fe->map; + struct super_block *sb = fe->inode->i_sb; bool ztailpacking = map->m_flags & EROFS_MAP_META; struct z_erofs_pcluster *pcl; struct erofs_workgroup *grp; int err; if (!(map->m_flags & EROFS_MAP_ENCODED) || - (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) { + (!ztailpacking && !erofs_blknr(sb, map->m_pa))) { DBG_BUGON(1); return -EFSCORRUPTED; } /* no available pcluster, let's allocate one */ - pcl = z_erofs_alloc_pcluster(ztailpacking ? 
1 : - map->m_plen >> PAGE_SHIFT); + pcl = z_erofs_alloc_pcluster(map->m_plen); if (IS_ERR(pcl)) return PTR_ERR(pcl); @@ -815,9 +810,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) if (ztailpacking) { pcl->obj.index = 0; /* which indicates ztailpacking */ - pcl->tailpacking_size = map->m_plen; } else { - pcl->obj.index = map->m_pa >> PAGE_SHIFT; + pcl->obj.index = erofs_blknr(sb, map->m_pa); grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj); if (IS_ERR(grp)) { @@ -1244,8 +1238,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, unsigned int pclusterpages = z_erofs_pclusterpages(pcl); const struct z_erofs_decompressor *decompressor = &erofs_decompressors[pcl->algorithmformat]; - unsigned int i, inputsize; - int err2; + int i, err2; struct page *page; bool overlapped; @@ -1282,18 +1275,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, if (err) goto out; - if (z_erofs_is_inline_pcluster(pcl)) - inputsize = pcl->tailpacking_size; - else - inputsize = pclusterpages * PAGE_SIZE; - err = decompressor->decompress(&(struct z_erofs_decompress_req) { .sb = be->sb, .in = be->compressed_pages, .out = be->decompressed_pages, .pageofs_in = pcl->pageofs_in, .pageofs_out = pcl->pageofs_out, - .inputsize = inputsize, + .inputsize = pcl->pclustersize, .outputsize = pcl->length, .alg = pcl->algorithmformat, .inplace_io = overlapped, @@ -1668,7 +1656,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, (void)erofs_map_dev(sb, &mdev); cur = mdev.m_pa; - end = cur + (pcl->pclusterpages << PAGE_SHIFT); + end = cur + pcl->pclustersize; do { z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); if (!bvec.bv_page) -- Gitee From d760dc9037e894bfcea62e53884dbc33e5aef500 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:56 +0800 Subject: [PATCH 234/953] erofs: refine z_erofs_transform_plain() for sub-page block support ANBZ: #8524 commit 1ca01520148af399899ed66af5c78330bb9ecaf2 upstream. Sub-page block support is still unusable even with previous commits if interlaced PLAIN pclusters exist. Such pclusters can be found if the fragment feature is enabled. This commit tries to handle "the head part" of interlaced PLAIN pclusters first: it was once explained in commit fdffc091e6f9 ("erofs: support interlaced uncompressed data for compressed files"). It uses a unique way for both shifted and interlaced PLAIN pclusters. As an added bonus, PLAIN pclusters larger than the block size is also supported now for the upcoming large lclusters. Reviewed-by: Yue Hu Reviewed-by: Chao Yu [ Gao Xiang: min_t() will be used instead of min() for 6.6 LTS. 
] Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-5-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/decompressor.c | 81 ++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 33 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 066ddc03b7b4..f67d26f1a5bc 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -320,43 +320,58 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, struct page **pagepool) { - const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; - const unsigned int outpages = + const unsigned int nrpages_in = + PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT; + const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int righthalf = min_t(unsigned int, rq->outputsize, - PAGE_SIZE - rq->pageofs_out); - const unsigned int lefthalf = rq->outputsize - righthalf; - const unsigned int interlaced_offset = - rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out; - u8 *src; - - if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) { - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - if (rq->out[0] == *rq->in) { - DBG_BUGON(rq->pageofs_out); - return 0; + const unsigned int bs = rq->sb->s_blocksize; + unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt; + u8 *kin; + + DBG_BUGON(rq->outputsize > rq->inputsize); + if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) { + cur = bs - (rq->pageofs_out & (bs - 1)); + pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK; + cur = min(cur, rq->outputsize); + if (cur && rq->out[0]) { + kin = kmap_local_page(rq->in[nrpages_in - 1]); + if (rq->out[0] == rq->in[nrpages_in - 1]) { + memmove(kin + rq->pageofs_out, kin + pi, cur); + flush_dcache_page(rq->out[0]); + } else { + memcpy_to_page(rq->out[0], rq->pageofs_out, + kin + pi, cur); + } + kunmap_local(kin); + } + rq->outputsize -= cur; } - src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in; - if (rq->out[0]) - memcpy_to_page(rq->out[0], rq->pageofs_out, - src + interlaced_offset, righthalf); - - if (outpages > inpages) { - DBG_BUGON(!rq->out[outpages - 1]); - if (rq->out[outpages - 1] != rq->in[inpages - 1]) { - memcpy_to_page(rq->out[outpages - 1], 0, src + - (interlaced_offset ? 
0 : righthalf), - lefthalf); - } else if (!interlaced_offset) { - memmove(src, src + righthalf, lefthalf); - flush_dcache_page(rq->in[inpages - 1]); - } + for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) { + insz = min_t(unsigned int, PAGE_SIZE - rq->pageofs_in, rq->outputsize); + rq->outputsize -= insz; + if (!rq->in[ni]) + continue; + kin = kmap_local_page(rq->in[ni]); + pi = 0; + do { + no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT; + po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK; + DBG_BUGON(no >= nrpages_out); + cnt = min_t(unsigned int, insz - pi, PAGE_SIZE - po); + if (rq->out[no] == rq->in[ni]) { + memmove(kin + po, + kin + rq->pageofs_in + pi, cnt); + flush_dcache_page(rq->out[no]); + } else if (rq->out[no]) { + memcpy_to_page(rq->out[no], po, + kin + rq->pageofs_in + pi, cnt); + } + pi += cnt; + } while (pi < insz); + kunmap_local(kin); } - kunmap_local(src); + DBG_BUGON(ni > nrpages_in); return 0; } -- Gitee From 19eba65d90c8a4907cbffd5dbb677724408615ee Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:57 +0800 Subject: [PATCH 235/953] erofs: enable sub-page compressed block support ANBZ: #8524 commit 0ee3a0d59e007320167a2e9f4b8bf1304ada7771 upstream. Let's just disable cached decompression and inplace I/Os for partial pages as the first step in order to enable sub-page block initial support. In other words, currently it works primarily based on temporary short-lived pages. Don't expect too much in terms of performance. Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-6-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/inode.c | 6 ++++-- fs/erofs/zdata.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 7f63e7c01ffc..c52f1bb5a597 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -258,8 +258,10 @@ static int erofs_fill_inode(struct inode *inode) if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP - if (!erofs_is_fscache_mode(inode->i_sb) && - inode->i_sb->s_blocksize_bits == PAGE_SHIFT) { + if (!erofs_is_fscache_mode(inode->i_sb)) { + DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE, + erofs_info, inode->i_sb, + "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!"); inode->i_mapping->a_ops = &z_erofs_aops; err = 0; goto out_unlock; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 5d5640173412..8264936b8612 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -563,6 +563,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; unsigned int i; + if (i_blocksize(fe->inode) != PAGE_SIZE) + return; if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; @@ -967,12 +969,12 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, struct inode *const inode = fe->inode; struct erofs_map_blocks *const map = &fe->map; const loff_t offset = page_offset(page); + const unsigned int bs = i_blocksize(inode); bool tight = true, exclusive; unsigned int cur, end, len, split; int err = 0; z_erofs_onlinepage_init(page); - split = 0; end = PAGE_SIZE; repeat: @@ -1021,7 +1023,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, * for inplace I/O or bvpage (should be processed in a strict order.) 
*/ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); - exclusive = (!cur && ((split <= 1) || tight)); + exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE))); if (cur) tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); -- Gitee From a1340e8f954329a59355494d94e95b5ac86fe3ee Mon Sep 17 00:00:00 2001 From: Yue Hu Date: Thu, 21 Dec 2023 14:23:41 +0800 Subject: [PATCH 236/953] erofs: allow partially filled compressed bvecs ANBZ: #8524 commit 652cdaa886e3ad1d051e5aef733c5a546171362f upstream. In order to reduce memory footprints even further, let's allow partially filled compressed bvecs for readahead to bail out later. Signed-off-by: Yue Hu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20231221062341.23901-1-zbestahu@gmail.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 8264936b8612..692c0c39be63 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1202,34 +1202,27 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; struct page *page = bvec->page; - /* compressed pages ought to be present before decompressing */ + /* compressed data ought to be valid before decompressing */ if (!page) { - DBG_BUGON(1); + err = -EIO; continue; } be->compressed_pages[i] = page; - if (z_erofs_is_inline_pcluster(pcl)) { + if (z_erofs_is_inline_pcluster(pcl) || + erofs_page_is_managed(EROFS_SB(be->sb), page)) { if (!PageUptodate(page)) err = -EIO; continue; } DBG_BUGON(z_erofs_page_is_invalidated(page)); - if (!z_erofs_is_shortlived_page(page)) { - if (erofs_page_is_managed(EROFS_SB(be->sb), page)) { - if (!PageUptodate(page)) - err = -EIO; - continue; - } - z_erofs_do_decompressed_bvec(be, bvec); - *overlapped = true; - } + if (z_erofs_is_shortlived_page(page)) + continue; + z_erofs_do_decompressed_bvec(be, bvec); + *overlapped = true; } - - if (err) - return err; - return 0; + return err; } static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, @@ -1238,7 +1231,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); - const struct z_erofs_decompressor *decompressor = + const struct z_erofs_decompressor *decomp = &erofs_decompressors[pcl->algorithmformat]; int i, err2; struct page *page; @@ -1274,10 +1267,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, err2 = z_erofs_parse_in_bvecs(be, &overlapped); if (err2) err = err2; - if (err) - goto out; - - err = decompressor->decompress(&(struct z_erofs_decompress_req) { + if (!err) + err = decomp->decompress(&(struct z_erofs_decompress_req) { .sb = be->sb, .in = be->compressed_pages, .out = be->decompressed_pages, @@ -1291,7 +1282,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, .fillgaps = pcl->multibases, }, be->pagepool); -out: /* must handle all compressed pages before actual file pages */ if (z_erofs_is_inline_pcluster(pcl)) { page = pcl->compressed_bvecs[0].page; @@ -1302,7 +1292,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, /* consider shortlived pages added when decompressing */ page = be->compressed_pages[i]; - if 
(erofs_page_is_managed(sbi, page)) + if (!page || erofs_page_is_managed(sbi, page)) continue; (void)z_erofs_put_shortlivedpage(be->pagepool, page); WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); -- Gitee From 3117e1bb29346874d42a52e6a8d46c556f4071a4 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 27 Dec 2023 23:19:03 +0800 Subject: [PATCH 237/953] erofs: avoid debugging output for (de)compressed data ANBZ: #8524 commit 496530c7c1dfc159d59a75ae00b572f570710c53 upstream. Syzbot reported a KMSAN warning, erofs: (device loop0): z_erofs_lz4_decompress_mem: failed to decompress -12 in[46, 4050] out[917] ===================================================== BUG: KMSAN: uninit-value in hex_dump_to_buffer+0xae9/0x10f0 lib/hexdump.c:194 .. print_hex_dump+0x13d/0x3e0 lib/hexdump.c:276 z_erofs_lz4_decompress_mem fs/erofs/decompressor.c:252 [inline] z_erofs_lz4_decompress+0x257e/0x2a70 fs/erofs/decompressor.c:311 z_erofs_decompress_pcluster fs/erofs/zdata.c:1290 [inline] z_erofs_decompress_queue+0x338c/0x6460 fs/erofs/zdata.c:1372 z_erofs_runqueue+0x36cd/0x3830 z_erofs_read_folio+0x435/0x810 fs/erofs/zdata.c:1843 The root cause is that the printed decompressed buffer may be filled incompletely due to decompression failure. Since they were once only used for debugging, get rid of them now. Reported-and-tested-by: syzbot+6c746eea496f34b3161d@syzkaller.appspotmail.com Closes: https://lore.kernel.org/r/000000000000321c24060d7cfa1c@google.com Reviewed-by: Yue Hu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231227151903.2900413-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/decompressor.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index f67d26f1a5bc..662d550256cf 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -247,15 +247,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, if (ret != rq->outputsize) { erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]", ret, rq->inputsize, inputmargin, rq->outputsize); - - print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, - 16, 1, src + inputmargin, rq->inputsize, true); - print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, - 16, 1, out, rq->outputsize, true); - if (ret >= 0) memset(out + ret, 0, rq->outputsize - ret); - ret = -EIO; + ret = -EFSCORRUPTED; } else { ret = 0; } -- Gitee From 7b02e5f9b0e1a6b4c83a193e6c14c4cc94646403 Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Wed, 3 Jan 2024 05:32:02 -0700 Subject: [PATCH 238/953] erofs: make erofs_{err,info}() support NULL sb parameter ANBZ: #8524 commit aa12a790d31be14b289d5a2c6f41ca535fcc7841 upstream. Make erofs_err() and erofs_info() support NULL sb parameter for more general usage. 
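This is the usual optional-context pattern for printf-style log helpers; a user-space analogue (the names here are illustrative, not the kernel API):

#include <stdarg.h>
#include <stdio.h>

struct super_block { const char *s_id; };

/* Accept a NULL sb so early paths such as module init can log too. */
static void fs_err(const struct super_block *sb, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (sb)
                fprintf(stderr, "(device %s): ", sb->s_id);
        vfprintf(stderr, fmt, args);
        fputc('\n', stderr);
        va_end(args);
}

int main(void)
{
        fs_err(NULL, "failed to allocate %s workspace", "zlib");
        return 0;
}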
Suggested-by: Gao Xiang Signed-off-by: Chunhai Guo Link: https://lore.kernel.org/r/20240103123202.3054718-1-guochunhai@vivo.com Reviewed-by: Jingbo Xu Reviewed-by: Gao Xiang Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/decompressor_deflate.c | 2 +- fs/erofs/super.c | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index daf3c1bdeab8..4a64a9c91dd3 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -70,7 +70,7 @@ int __init z_erofs_deflate_init(void) return 0; out_failed: - pr_err("failed to allocate zlib workspace\n"); + erofs_err(NULL, "failed to allocate zlib workspace"); z_erofs_deflate_exit(); return -ENOMEM; } diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 4ca9e5e3e539..02d6bdb95085 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -27,7 +27,10 @@ void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &args; - pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); + if (sb) + pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); + else + pr_err("%s: %pV", func, &vaf); va_end(args); } @@ -41,7 +44,10 @@ void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &args; - pr_info("(device %s): %pV", sb->s_id, &vaf); + if (sb) + pr_info("(device %s): %pV", sb->s_id, &vaf); + else + pr_info("%pV", &vaf); va_end(args); } -- Gitee From 8ace0fe8ea99252b0d783a33cfc11a1f1c52af4a Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 15 Jan 2024 22:46:35 +0800 Subject: [PATCH 239/953] erofs: Don't use certain unnecessary folio_*() functions ANBZ: #8524 commit 2b872b0f466d2acb4491da845c66b49246d5cdf9 upstream. Filesystems should use folio->index and folio->mapping, instead of folio_index(folio), folio_mapping() and folio_file_mapping() since they know that it's in the pagecache. 
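The generic helpers exist because an arbitrary folio may be anonymous or sit in the swap cache; a filesystem's own pagecache folios never are, so the plain field read is safe and cheaper. A rough sketch of the distinction (simplified; not the real mm definitions):

#include <stddef.h>

struct address_space;

/* Simplified folio; the real struct and flag encoding differ. */
struct folio {
        unsigned long flags;
        struct address_space *mapping;
};

#define PF_SWAPCACHE_SKETCH     0x1UL   /* hypothetical flag bit */

/* The generic helper must consider anon/swap-cache folios... */
static struct address_space *generic_folio_mapping(struct folio *folio)
{
        if (folio->flags & PF_SWAPCACHE_SKETCH)
                return NULL;    /* would consult the swap cache instead */
        return folio->mapping;
}

/* ...while a filesystem reading back its own pagecache can simply use: */
static struct address_space *fs_folio_mapping(struct folio *folio)
{
        return folio->mapping;
}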
Change this automagically with: perl -p -i -e 's/folio_mapping[(]([^)]*)[)]/\1->mapping/g' fs/erofs/*.c perl -p -i -e 's/folio_file_mapping[(]([^)]*)[)]/\1->mapping/g' fs/erofs/*.c perl -p -i -e 's/folio_index[(]([^)]*)[)]/\1->index/g' fs/erofs/*.c Reported-by: Matthew Wilcox Signed-off-by: David Howells Reviewed-by: Jeff Layton Cc: Chao Yu Cc: Yue Hu Cc: Jeffle Xu Cc: linux-erofs@lists.ozlabs.org Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240115144635.1931422-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/fscache.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index afc37c9029ce..943ce796c0fc 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -178,10 +178,10 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie, static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) { int ret; - struct erofs_fscache *ctx = folio_mapping(folio)->host->i_private; + struct erofs_fscache *ctx = folio->mapping->host->i_private; struct erofs_fscache_request *req; - req = erofs_fscache_req_alloc(folio_mapping(folio), + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); if (IS_ERR(req)) { folio_unlock(folio); @@ -289,7 +289,7 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio) struct erofs_fscache_request *req; int ret; - req = erofs_fscache_req_alloc(folio_mapping(folio), + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); if (IS_ERR(req)) { folio_unlock(folio); -- Gitee From 37d70cda557d3b24e00fba16dbe9045a8190a657 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Wed, 24 Jan 2024 11:19:45 +0800 Subject: [PATCH 240/953] erofs: get rid of unneeded GFP_NOFS ANBZ: #8524 commit 97cf5d53b4812dcb52c13fda700dad5aa8d3446c upstream. Clean up some leftovers since there is no way for EROFS to be called again from a reclaim context. 
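Where recursion protection is genuinely needed, the preferred modern replacement for hard-coded GFP_NOFS is the scoped API, which lets callees stay on plain GFP_KERNEL — a kernel-context sketch, not part of this patch:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/*
 * Inside a memalloc_nofs_save() section, GFP_KERNEL allocations implicitly
 * behave as GFP_NOFS, so direct reclaim cannot re-enter the filesystem.
 */
static void *alloc_in_fs_context(size_t size)
{
        unsigned int nofs = memalloc_nofs_save();
        void *p = kmalloc(size, GFP_KERNEL);

        memalloc_nofs_restore(nofs);
        return p;
}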
Signed-off-by: Jingbo Xu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240124031945.130782-1-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/fscache.c | 2 +- fs/erofs/inode.c | 2 +- fs/erofs/utils.c | 2 +- fs/erofs/zdata.c | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 943ce796c0fc..122a4753ecea 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -473,7 +473,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &erofs_fscache_meta_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); inode->i_blkbits = EROFS_SB(sb)->blkszbits; inode->i_private = ctx; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index c52f1bb5a597..efe439a961c0 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -60,7 +60,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, } else { const unsigned int gotten = sb->s_blocksize - *ofs; - copied = kmalloc(vi->inode_isize, GFP_NOFS); + copied = kmalloc(vi->inode_isize, GFP_KERNEL); if (!copied) { err = -ENOMEM; goto err_out; diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index 4256a85719a1..603ded4db58e 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -81,7 +81,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb, repeat: xa_lock(&sbi->managed_pslots); pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index, - NULL, grp, GFP_NOFS); + NULL, grp, GFP_KERNEL); if (pre) { if (xa_is_err(pre)) { pre = ERR_PTR(xa_err(pre)); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 692c0c39be63..583c062cd0e4 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -230,7 +230,7 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, struct page *nextpage = *candidate_bvpage; if (!nextpage) { - nextpage = erofs_allocpage(pagepool, GFP_NOFS); + nextpage = erofs_allocpage(pagepool, GFP_KERNEL); if (!nextpage) return -ENOMEM; set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE); @@ -302,7 +302,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size) if (nrpages > pcs->maxpages) continue; - pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); + pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL); if (!pcl) return ERR_PTR(-ENOMEM); pcl->pclustersize = size; @@ -694,7 +694,7 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio, DBG_BUGON(stop > folio_size(folio) || stop < length); if (offset == 0 && stop == folio_size(folio)) - while (!z_erofs_cache_release_folio(folio, GFP_NOFS)) + while (!z_erofs_cache_release_folio(folio, 0)) cond_resched(); } @@ -713,7 +713,7 @@ int erofs_init_managed_cache(struct super_block *sb) set_nlink(inode, 1); inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &z_erofs_cache_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); EROFS_SB(sb)->managed_cache = inode; return 0; } -- Gitee From cfd03b3b0b13260d43ebaecf34eb6ae191991833 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 25 Jan 2024 20:00:39 +0800 Subject: [PATCH 241/953] erofs: fix infinite loop due to a race of filling compressed_bvecs ANBZ: #8524 commit cc4b2dd95f0d1eba8c691b36e8f4d1795582f1ff upstream. I encountered a race issue after lengthy (~594647 secs) stress tests on a 64k-page arm64 VM with several 4k-block EROFS images. 
The timing is like below: z_erofs_try_inplace_io z_erofs_fill_bio_vec cmpxchg(&compressed_bvecs[].page, NULL, ..) [access bufvec] compressed_bvecs[] = *bvec; Previously, z_erofs_submit_queue() just accessed bufvec->page only, so other fields in bufvec didn't matter. After the subpage block support is landed, .offset and .end can be used too, but filling bufvec isn't an atomic operation which can cause inconsistency. Let's use a spinlock to keep the atomicity of each bufvec. More specifically, just reuse the existing spinlock `pcl->obj.lockref.lock` since it's rarely used (also it takes a short time if even used) as long as the pcluster has a reference. Fixes: 192351616a9d ("erofs: support I/O submission for sub-page compressed blocks") Signed-off-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Sandeep Dhavale Link: https://lore.kernel.org/r/20240125120039.3228103-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 74 +++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 583c062cd0e4..c1c77166b30f 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -563,21 +563,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; unsigned int i; - if (i_blocksize(fe->inode) != PAGE_SIZE) - return; - if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) + if (i_blocksize(fe->inode) != PAGE_SIZE || + fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; for (i = 0; i < pclusterpages; ++i) { struct page *page, *newpage; void *t; /* mark pages just found for debugging */ - /* the compressed page was loaded before */ + /* Inaccurate check w/o locking to avoid unneeded lookups */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) continue; page = find_get_page(mc, pcl->obj.index + i); - if (page) { t = (void *)((unsigned long)page | 1); newpage = NULL; @@ -597,9 +595,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); t = (void *)((unsigned long)newpage | 1); } - - if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t)) + spin_lock(&pcl->obj.lockref.lock); + if (!pcl->compressed_bvecs[i].page) { + pcl->compressed_bvecs[i].page = t; + spin_unlock(&pcl->obj.lockref.lock); continue; + } + spin_unlock(&pcl->obj.lockref.lock); if (page) put_page(page); @@ -718,31 +720,25 @@ int erofs_init_managed_cache(struct super_block *sb) return 0; } -static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe, - struct z_erofs_bvec *bvec) -{ - struct z_erofs_pcluster *const pcl = fe->pcl; - - while (fe->icur > 0) { - if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page, - NULL, bvec->page)) { - pcl->compressed_bvecs[fe->icur] = *bvec; - return true; - } - } - return false; -} - /* callers must be with pcluster lock held */ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, struct z_erofs_bvec *bvec, bool exclusive) { + struct z_erofs_pcluster *pcl = fe->pcl; int ret; if (exclusive) { /* give priority for inplaceio to use file pages first */ - if (z_erofs_try_inplace_io(fe, bvec)) + spin_lock(&pcl->obj.lockref.lock); + while (fe->icur > 0) { + if (pcl->compressed_bvecs[--fe->icur].page) + continue; + pcl->compressed_bvecs[fe->icur] = *bvec; + spin_unlock(&pcl->obj.lockref.lock); return 0; + } + spin_unlock(&pcl->obj.lockref.lock); + /* otherwise, check if it can be used as a bvpage */ if 
(fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED && !fe->candidate_bvpage) @@ -1423,23 +1419,26 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, { gfp_t gfp = mapping_gfp_mask(mc); bool tocache = false; - struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr; + struct z_erofs_bvec zbv; struct address_space *mapping; - struct page *page, *oldpage; + struct page *page; int justfound, bs = i_blocksize(f->inode); /* Except for inplace pages, the entire page can be used for I/Os */ bvec->bv_offset = 0; bvec->bv_len = PAGE_SIZE; repeat: - oldpage = READ_ONCE(zbv->page); - if (!oldpage) + spin_lock(&pcl->obj.lockref.lock); + zbv = pcl->compressed_bvecs[nr]; + page = zbv.page; + justfound = (unsigned long)page & 1UL; + page = (struct page *)((unsigned long)page & ~1UL); + pcl->compressed_bvecs[nr].page = page; + spin_unlock(&pcl->obj.lockref.lock); + if (!page) goto out_allocpage; - justfound = (unsigned long)oldpage & 1UL; - page = (struct page *)((unsigned long)oldpage & ~1UL); bvec->bv_page = page; - DBG_BUGON(z_erofs_is_shortlived_page(page)); /* * Handle preallocated cached pages. We tried to allocate such pages @@ -1448,7 +1447,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, */ if (page->private == Z_EROFS_PREALLOCATED_PAGE) { set_page_private(page, 0); - WRITE_ONCE(zbv->page, page); tocache = true; goto out_tocache; } @@ -1459,9 +1457,9 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * therefore it is impossible for `mapping` to be NULL. */ if (mapping && mapping != mc) { - if (zbv->offset < 0) - bvec->bv_offset = round_up(-zbv->offset, bs); - bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset; + if (zbv.offset < 0) + bvec->bv_offset = round_up(-zbv.offset, bs); + bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset; return; } @@ -1471,7 +1469,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, /* the cached page is still in managed cache */ if (page->mapping == mc) { - WRITE_ONCE(zbv->page, page); /* * The cached page is still available but without a valid * `->private` pcluster hint. Let's reconnect them. @@ -1503,11 +1500,15 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, put_page(page); out_allocpage: page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); - if (oldpage != cmpxchg(&zbv->page, oldpage, page)) { + spin_lock(&pcl->obj.lockref.lock); + if (pcl->compressed_bvecs[nr].page) { erofs_pagepool_add(&f->pagepool, page); + spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } + pcl->compressed_bvecs[nr].page = page; + spin_unlock(&pcl->obj.lockref.lock); bvec->bv_page = page; out_tocache: if (!tocache || bs != PAGE_SIZE || @@ -1685,6 +1686,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, if (cur + bvec.bv_len > end) bvec.bv_len = end - cur; + DBG_BUGON(bvec.bv_len < sb->s_blocksize); if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset)) goto submit_bio_retry; -- Gitee From 03f82e38156e320f91f68246285aa84b14157b34 Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Fri, 26 Jan 2024 22:01:42 +0800 Subject: [PATCH 242/953] erofs: relaxed temporary buffers allocation on readahead ANBZ: #8524 commit d9281660ff3ffb4a05302b485cc59a87e709aefc upstream. Even with inplace decompression, sometimes very few temporary buffers may be still needed for a single decompression shot (e.g. 16 pages for 64k sliding window or 4 pages for 16k sliding window). In low-memory scenarios, it would be better to try to allocate with GFP_NOWAIT on readahead first. 
That can help reduce the time spent on page allocation under durative memory pressure. Here are detailed performance numbers under multi-app launch benchmark workload [1] on ARM64 Android devices (8-core CPU and 8GB of memory) running a 5.15 LTS kernel with EROFS of 4k pclusters: +----------------------------------------------+ | LZ4 | vanilla | patched | diff | |----------------+---------+---------+---------| | Average (ms) | 3364 | 2684 | -20.21% | [64k sliding window] |----------------+---------+---------+---------| | Average (ms) | 2079 | 1610 | -22.56% | [16k sliding window] +----------------------------------------------+ The total size of system images for 4k pclusters is almost unchanged: (64k sliding window) 9,117,044 KB (16k sliding window) 9,113,096 KB Therefore, in addition to switch the sliding window from 64k to 16k, after applying this patch, it can eventually save 52.14% (3364 -> 1610) on average with no memory reservation. That is particularly useful for embedded devices with limited resources. [1] https://lore.kernel.org/r/20240109074143.4138783-1-guochunhai@vivo.com Suggested-by: Gao Xiang Signed-off-by: Chunhai Guo Signed-off-by: Gao Xiang Reviewed-by: Yue Hu Link: https://lore.kernel.org/r/20240126140142.201718-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/compress.h | 5 ++--- fs/erofs/decompressor.c | 5 +++-- fs/erofs/decompressor_deflate.c | 19 +++++++++++++------ fs/erofs/decompressor_lzma.c | 17 ++++++++++++----- fs/erofs/zdata.c | 16 ++++++++++++---- 5 files changed, 42 insertions(+), 20 deletions(-) diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 279933e007d2..7cc5841577b2 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -11,13 +11,12 @@ struct z_erofs_decompress_req { struct super_block *sb; struct page **in, **out; - unsigned short pageofs_in, pageofs_out; unsigned int inputsize, outputsize; - /* indicate the algorithm will be used for decompression */ - unsigned int alg; + unsigned int alg; /* the algorithm for decompression */ bool inplace_io, partial_decoding, fillgaps; + gfp_t gfp; /* allocation flags for extra temporary buffers */ }; struct z_erofs_decompressor { diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 662d550256cf..5203f399bb4e 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -111,8 +111,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, victim = availables[--top]; get_page(victim); } else { - victim = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + victim = erofs_allocpage(pagepool, rq->gfp); + if (!victim) + return -ENOMEM; set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE); } rq->out[i] = victim; diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index 4a64a9c91dd3..b98872058abe 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -95,7 +95,7 @@ int z_erofs_load_deflate_config(struct super_block *sb, } int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -158,8 +158,12 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs); outsz -= strm->z.avail_out; if (!rq->out[no]) { - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + rq->out[no] = erofs_allocpage(pgpl, 
rq->gfp); + if (!rq->out[no]) { + kout = NULL; + err = -ENOMEM; + break; + } set_page_private(rq->out[no], Z_EROFS_SHORTLIVED_PAGE); } @@ -211,8 +215,11 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb), rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) { + err = -ENOMEM; + goto failed; + } set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); copy_highpage(tmppage, rq->in[j]); rq->in[j] = tmppage; @@ -230,7 +237,7 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, break; } } - +failed: if (zlib_inflateEnd(&strm->z) != Z_OK && !err) err = -EIO; if (kout) diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 2dd14f99c1dc..6ca357d83cfa 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -148,7 +148,7 @@ int z_erofs_load_lzma_config(struct super_block *sb, } int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -215,8 +215,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, PAGE_SIZE - pageofs); outlen -= strm->buf.out_size; if (!rq->out[no] && rq->fillgaps) { /* deduped */ - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + rq->out[no] = erofs_allocpage(pgpl, rq->gfp); + if (!rq->out[no]) { + err = -ENOMEM; + break; + } set_page_private(rq->out[no], Z_EROFS_SHORTLIVED_PAGE); } @@ -258,8 +261,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb), rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) { + err = -ENOMEM; + goto failed; + } set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); copy_highpage(tmppage, rq->in[j]); rq->in[j] = tmppage; @@ -277,6 +283,7 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, break; } } +failed: if (no < nrpages_out && strm->buf.out) kunmap(rq->out[no]); if (ni < nrpages_in) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index c1c77166b30f..ff0aa72b0db3 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -82,6 +82,9 @@ struct z_erofs_pcluster { /* L: indicate several pageofs_outs or not */ bool multibases; + /* L: whether extra buffer allocations are best-effort */ + bool besteffort; + /* A: compressed bvecs (can be cached or inplaced pages) */ struct z_erofs_bvec compressed_bvecs[]; }; @@ -960,7 +963,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, } static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page) + struct page *page, bool ra) { struct inode *const inode = fe->inode; struct erofs_map_blocks *const map = &fe->map; @@ -1010,6 +1013,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, err = z_erofs_pcluster_begin(fe); if (err) goto out; + fe->pcl->besteffort |= !ra; } /* @@ -1276,6 +1280,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, .inplace_io = overlapped, .partial_decoding = pcl->partial, .fillgaps = pcl->multibases, + .gfp = pcl->besteffort ? 
+ GFP_KERNEL | __GFP_NOFAIL : + GFP_NOWAIT | __GFP_NORETRY }, be->pagepool); /* must handle all compressed pages before actual file pages */ @@ -1318,6 +1325,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, pcl->length = 0; pcl->partial = true; pcl->multibases = false; + pcl->besteffort = false; pcl->bvset.nextpage = NULL; pcl->vcnt = 0; @@ -1787,7 +1795,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, if (PageUptodate(page)) unlock_page(page); else - (void)z_erofs_do_read_page(f, page); + (void)z_erofs_do_read_page(f, page, !!rac); put_page(page); } @@ -1808,7 +1816,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT; z_erofs_pcluster_readmore(&f, NULL, true); - err = z_erofs_do_read_page(&f, &folio->page); + err = z_erofs_do_read_page(&f, &folio->page, false); z_erofs_pcluster_readmore(&f, NULL, false); z_erofs_pcluster_end(&f); @@ -1849,7 +1857,7 @@ static void z_erofs_readahead(struct readahead_control *rac) folio = head; head = folio_get_private(folio); - err = z_erofs_do_read_page(&f, &folio->page); + err = z_erofs_do_read_page(&f, &folio->page, true); if (err && err != -EINTR) erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu", folio->index, EROFS_I(inode)->nid); -- Gitee From 835425d24f9afe1748727f0d23a69033a75360ea Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 4 Mar 2024 11:53:39 +0800 Subject: [PATCH 243/953] erofs: fix uninitialized page cache reported by KMSAN ANBZ: #8524 commit 893e5e9b7369a02e7ceaa6d98db6739162005b03 upstream. syzbot reports a KMSAN reproducer [1] which generates a crafted filesystem image and causes IMA to read uninitialized page cache. Later, (rq->outputsize > rq->inputsize) will be formally supported after either large uncompressed pclusters (> block size) or big lclusters are landed. However, currently there is no way to generate such filesystems by using mkfs.erofs. Thus, let's mark this condition as unsupported for now. [1] https://lore.kernel.org/r/0000000000002be12a0611ca7ff8@google.com Reported-and-tested-by: syzbot+7bc44a489f0ef0670bd5@syzkaller.appspotmail.com Fixes: 1ca01520148a ("erofs: refine z_erofs_transform_plain() for sub-page block support") Reviewed-by: Sandeep Dhavale Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240304035339.425857-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/decompressor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 5203f399bb4e..fce41d4875bf 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -323,7 +323,8 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt; u8 *kin; - DBG_BUGON(rq->outputsize > rq->inputsize); + if (rq->outputsize > rq->inputsize) + return -EOPNOTSUPP; if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) { cur = bs - (rq->pageofs_out & (bs - 1)); pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK; -- Gitee From 1f90219f413c9edbaf67bf93098bbe23f25f9db9 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Tue, 31 Jan 2023 17:33:28 +0800 Subject: [PATCH 244/953] anolis: mm: introduce vm_insert_page(s)_mkspecial ANBZ: #8525 This adds the ability to insert anonymous pages or file pages, used for direct IO or buffer IO respectively, to a user VM. 
The intention behind this is to facilitate mapping pages in IO requests to user space, which is usually the backend of remote block device. This integrates the advantage of vm_insert_pages (batching the pmd lock), and eliminates the overhead of remap_pfn_range (track_pfn_remap), since the pages to be inserted should always be ram. NOTE that it is the caller's responsibility to ensure the validity of pages to be inserted, i.e., that such pages are used for IO requests. Depending on this premise, such pages can be inserted as special PTE, without increasing the page refcount and mapcount. On the other hand, the special mapping should be carefully managed (e.g., zapped) when the IO request is done. Signed-off-by: Xu Yu Reviewed-by: Gang Deng Acked-by: Joseph Qi Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- include/linux/mm.h | 4 ++ mm/memory.c | 170 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index bf5d0b1b16f4..6193311fae5b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3461,6 +3461,10 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num); +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page *page); +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num); int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num); int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, diff --git a/mm/memory.c b/mm/memory.c index 78e05d3e9e4a..039a87fcf4cd 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2558,6 +2558,176 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long } EXPORT_SYMBOL(vm_iomap_memory); +#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL +static int insert_page_into_pte_locked_mkspecial(struct mm_struct *mm, pte_t *pte, + unsigned long addr, struct page *page, pgprot_t prot) +{ + /* + * The page to be inserted should be either anonymous page or file page. + * + * In general, the anonymous page used in dio should be pinned, while + * the file page used in buffer IO is either locked (read) or writeback + * (sync). On the other hand, file page used in IO metadata read (e.g., + * ext4_get_inode_loc) can be unlocked, and the buffer_head is locked + * instead. + * + * Finally, it is the caller's responsibility to ensure the validity of + * pages to be inserted, i.e., such pages are used for IO requests. 
+ */ + if (!PageAnon(page) && !page_is_file_lru(page)) + return -EINVAL; + + flush_dcache_page(page); + + if (!pte_none(*pte)) + return -EBUSY; + set_pte_at(mm, addr, pte, pte_mkspecial(mk_pte(page, prot))); + return 0; +} + +static int insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page *page, pgprot_t prot) +{ + struct mm_struct *mm = vma->vm_mm; + int retval; + pte_t *pte; + spinlock_t *ptl; + + retval = -ENOMEM; + pte = get_locked_pte(mm, addr, &ptl); + if (!pte) + goto out; + retval = insert_page_into_pte_locked_mkspecial(mm, pte, addr, page, prot); + pte_unmap_unlock(pte, ptl); +out: + return retval; +} + +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page) +{ + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; + if (!(vma->vm_flags & VM_MIXEDMAP)) { + BUG_ON(mmap_read_trylock(vma->vm_mm)); + BUG_ON(vma->vm_flags & VM_PFNMAP); + vm_flags_set(vma, VM_MIXEDMAP); + } + return insert_page_mkspecial(vma, addr, page, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_insert_page_mkspecial); + +#ifdef pte_index +/* + * insert_pages_mkspecial() amortizes the cost of spinlock operations + * when inserting pages in a loop. Arch *must* define pte_index. + */ +static int insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num, pgprot_t prot) +{ + pmd_t *pmd = NULL; + pte_t *start_pte, *pte; + spinlock_t *pte_lock; + struct mm_struct *const mm = vma->vm_mm; + unsigned long curr_page_idx = 0; + unsigned long remaining_pages_total = *num; + unsigned long pages_to_write_in_pmd; + int ret; +more: + ret = -EFAULT; + pmd = walk_to_pmd(mm, addr); + if (!pmd) + goto out; + + pages_to_write_in_pmd = min_t(unsigned long, + remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); + + /* Allocate the PTE if necessary; takes PMD lock once only. */ + ret = -ENOMEM; + if (pte_alloc(mm, pmd)) + goto out; + + while (pages_to_write_in_pmd) { + int pte_idx = 0; + const int batch_size = min_t(int, pages_to_write_in_pmd, 8); + + start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); + for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { + int err = insert_page_into_pte_locked_mkspecial(mm, pte, + addr, pages[curr_page_idx], prot); + if (unlikely(err)) { + pte_unmap_unlock(start_pte, pte_lock); + ret = err; + remaining_pages_total -= pte_idx; + goto out; + } + addr += PAGE_SIZE; + ++curr_page_idx; + } + pte_unmap_unlock(start_pte, pte_lock); + pages_to_write_in_pmd -= batch_size; + remaining_pages_total -= batch_size; + } + if (remaining_pages_total) + goto more; + ret = 0; +out: + *num = remaining_pages_total; + return ret; +} +#endif /* pte_index */ + +/* + * vm_insert_pages_mkspecial - variant of vm_insert_pages using insert_pfn. + * + * The main purpose of vm_insert_pages_mkspecial is to combine the advantages of + * vm_insert_pages (batching the pmd lock) and remap_pfn_range_notrack (skipping + * track_pfn_insert). + * + * The caller should ensure the isolation (refcounted, PG_locked, PG_writeback, etc.) + * of @pages, and account for error case where a subset of @pages are mapped. 
+ */
+int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr,
+ struct page **pages, unsigned long *num)
+{
+#ifdef pte_index
+ const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
+
+ if (addr < vma->vm_start || end_addr >= vma->vm_end)
+ return -EFAULT;
+ if (!(vma->vm_flags & VM_MIXEDMAP)) {
+ BUG_ON(mmap_read_trylock(vma->vm_mm));
+ BUG_ON(vma->vm_flags & VM_PFNMAP);
+ vm_flags_set(vma, VM_MIXEDMAP);
+ }
+ return insert_pages_mkspecial(vma, addr, pages, num, vma->vm_page_prot);
+#else
+ unsigned long idx = 0, pgcount = *num;
+ int err = -EINVAL;
+
+ for (; idx < pgcount; ++idx) {
+ err = vm_insert_page_mkspecial(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+ if (err)
+ break;
+ }
+ *num = pgcount - idx;
+ return err;
+#endif /* pte_index */
+}
+EXPORT_SYMBOL(vm_insert_pages_mkspecial);
+#else
+int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_page_mkspecial);
+int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr,
+ struct page **pages, unsigned long *num)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_pages_mkspecial);
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
 unsigned long end, pte_fn_t fn, void *data, bool create,
-- Gitee

From 293c5f8981f0a5f79e8dc8d46957e41318c8a500 Mon Sep 17 00:00:00 2001
From: Guixin Liu
Date: Mon, 14 Feb 2022 15:38:29 +0800
Subject: [PATCH 245/953] anolis: uio: add ioctl to uio

ANBZ: #8525

In TCMU, if the backstore holds its own userspace buffer, the data for a read command first needs to be copied from the userspace buffer to the tcmu data area, and then copied again from the tcmu data area to the SCSI sgl pages. To solve this problem, add an ioctl to uio so that the userspace backstore can copy data between the SCSI sgl pages and its own buffer directly, as the sketch below illustrates.
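For example, a userspace backstore completing a SCSI read could end up doing roughly the following against the uio fd (a hypothetical sketch; the concrete command TCMU_IOCTL_CMD_COPY_TO_SGL and struct tcmu_data_xfer shown here are only introduced by the following tcmu patch in this series):

    struct iovec iov = { .iov_base = buf, .iov_len = len }; /* backstore's own buffer */
    struct tcmu_data_xfer xfer = {
            .cmd_id  = cmd_id,
            .iov_cnt = 1,
            .iovec   = &iov,
    };

    /* read path: push data from the backstore buffer straight into
     * the scsi sgl pages, skipping the tcmu data area */
    if (ioctl(uio_fd, TCMU_IOCTL_CMD_COPY_TO_SGL, &xfer) < 0)
            perror("TCMU_IOCTL_CMD_COPY_TO_SGL");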
Reviewed-by: Joseph Qi
Reviewed-by: Xiaoguang Wang
Signed-off-by: Guixin Liu
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2876
---
 drivers/uio/uio.c | 21 +++++++++++++++++++++
 include/linux/uio_driver.h | 1 +
 2 files changed, 22 insertions(+)

diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 2d572f6c8ec8..caf9caa4ee73 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -815,6 +815,25 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
 return ret;
 }
+static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct uio_listener *listener = filp->private_data;
+ struct uio_device *idev = listener->dev;
+ long retval = 0;
+
+ mutex_lock(&idev->info_lock);
+
+ if (!idev->info || !idev->info->ioctl) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ retval = idev->info->ioctl(idev->info, cmd, arg);
+out:
+ mutex_unlock(&idev->info_lock);
+ return retval;
+}
+
 static const struct file_operations uio_fops = {
 .owner = THIS_MODULE,
 .open = uio_open,
@@ -825,6 +844,8 @@ static const struct file_operations uio_fops = {
 .poll = uio_poll,
 .fasync = uio_fasync,
 .llseek = noop_llseek,
+ .unlocked_ioctl = uio_ioctl,
+ .compat_ioctl = uio_ioctl,
 };
 static int uio_major_init(void)
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 47c5962b876b..971d172b442f 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -109,6 +109,7 @@ struct uio_info {
 int (*open)(struct uio_info *info, struct inode *inode);
 int (*release)(struct uio_info *info, struct inode *inode);
 int (*irqcontrol)(struct uio_info *info, s32 irq_on);
+ long (*ioctl)(struct uio_info *info, unsigned int cmd, unsigned long arg);
 };
 extern int __must_check
-- Gitee

From dba5fbd305aa7f6364e6b756316a58c90f7a2bcc Mon Sep 17 00:00:00 2001
From: Guixin Liu
Date: Mon, 14 Feb 2022 15:59:11 +0800
Subject: [PATCH 246/953] anolis: scsi: target: reduce one copy by using uio ioctl

ANBZ: #8525

Currently there are two copies among the sg pages, the tcmu data area, and the userspace buffer. If the backstore holds its own userspace buffer, we can use the uio ioctl to copy between the sg pages and the userspace buffer directly to improve performance.

Use tcm_loop and tcmu (backstore is a file) to evaluate performance, fio job:
fio -filename=/dev/sdb -ioengine=libaio -direct=1 -size=2G -name=1 -thread -runtime=60 -time_based -rw=randread -numjobs=16 -iodepth=16 -bs=128k

Without this patch:
READ: bw=2511MiB/s (2633MB/s), 154MiB/s-158MiB/s (162MB/s-166MB/s), io=147GiB (158GB), run=60006-60008msec

With this patch:
READ: bw=2965MiB/s (3110MB/s), 183MiB/s-188MiB/s (192MB/s-197MB/s), io=174GiB (187GB), run=60005-60007msec

There is about a 20% performance improvement in this case; the schematic below shows where the saved copy comes from.
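The read-path data flow changes roughly as follows (schematic only, not literal code):

    before: backstore buffer -> tcmu data area -> scsi sgl pages   (two copies)
    after:  backstore buffer -> scsi sgl pages                     (one copy,
            via TCMU_IOCTL_CMD_COPY_TO_SGL)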
Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 189 +++++++++++++++++++++++--- include/uapi/linux/target_core_user.h | 10 ++ 2 files changed, 177 insertions(+), 22 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 22cc6cac0ba2..cb1890dc5851 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -123,6 +123,7 @@ struct tcmu_dev { #define TCMU_DEV_BIT_BLOCKED 2 #define TCMU_DEV_BIT_TMR_NOTIFY 3 #define TCMU_DEV_BIT_PLUGGED 4 +#define TCMU_DEV_BIT_BYPASS_DATA_AREA 5 unsigned long flags; struct uio_info uio_info; @@ -644,12 +645,17 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; - tcmu_cmd_set_block_cnts(tcmu_cmd); - tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), - GFP_NOIO); - if (!tcmu_cmd->dbi) { - kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); - return NULL; + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + tcmu_cmd_set_block_cnts(tcmu_cmd); + tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), + GFP_NOIO); + if (!tcmu_cmd->dbi) { + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); + return NULL; + } + } else { + tcmu_cmd->dbi_cnt = 0; + tcmu_cmd->dbi = NULL; } return tcmu_cmd; @@ -1095,16 +1101,19 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_cmd_reset_dbi_cur(tcmu_cmd); iov = &entry->req.iov[0]; - if (se_cmd->data_direction == DMA_TO_DEVICE || - se_cmd->se_cmd_flags & SCF_BIDI) - scatter_data_area(udev, tcmu_cmd, &iov); - else - tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if (se_cmd->data_direction == DMA_TO_DEVICE || + se_cmd->se_cmd_flags & SCF_BIDI) + scatter_data_area(udev, tcmu_cmd, &iov); + else + tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + } entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; /* Handle BIDI commands */ - if (se_cmd->se_cmd_flags & SCF_BIDI) { + if ((se_cmd->se_cmd_flags & SCF_BIDI) + && !test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { iov++; tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); entry->req.iov_bidi_cnt = iov_bidi_cnt; @@ -1368,16 +1377,18 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, else se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } - if (se_cmd->se_cmd_flags & SCF_BIDI) { - /* Get Data-In buffer before clean up */ - gather_data_area(udev, cmd, true, read_len); - } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - gather_data_area(udev, cmd, false, read_len); - } else if (se_cmd->data_direction == DMA_TO_DEVICE) { - /* TODO: */ - } else if (se_cmd->data_direction != DMA_NONE) { - pr_warn("TCMU: data direction was %d!\n", - se_cmd->data_direction); + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if (se_cmd->se_cmd_flags & SCF_BIDI) { + /* Get Data-In buffer before clean up */ + gather_data_area(udev, cmd, true, read_len); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + gather_data_area(udev, cmd, false, read_len); + } else if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* TODO: */ + } else if (se_cmd->data_direction != DMA_NONE) { + pr_warn("TCMU: data direction was %d!\n", + se_cmd->data_direction); + } } done: @@ -2000,6 +2011,104 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) return 0; } 
+static long tcmu_do_copy_data(struct tcmu_cmd *tcmu_cmd, + struct iovec __user *uiovec, + unsigned int vcnt, + bool is_copy_to_sgl) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + ssize_t ret; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg, *sg; + int i; + unsigned int data_nents; + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + data_sg = se_cmd->t_bidi_data_sg; + data_nents = se_cmd->t_bidi_data_nents; + } else { + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + } + + ret = import_iovec(is_copy_to_sgl ? ITER_SOURCE : ITER_DEST, + uiovec, vcnt, ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) { + pr_err("import iovec failed.\n"); + return -EFAULT; + } + + for_each_sg(data_sg, sg, data_nents, i) { + if (is_copy_to_sgl) + ret = copy_page_from_iter(sg_page(sg), sg->offset, sg->length, &iter); + else + ret = copy_page_to_iter(sg_page(sg), sg->offset, sg->length, &iter); + if (ret < 0) { + pr_err("copy failed.\n"); + break; + } + } + kfree(iov); + return ret < 0 ? -EFAULT : 0; +} + +static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, + unsigned long arg, + bool is_copy_to_sgl) +{ + struct tcmu_data_xfer __user *uxfer = (struct tcmu_data_xfer __user *)arg; + struct tcmu_data_xfer xfer; + struct tcmu_cmd *tcmu_cmd; + long ret; + + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) + return -EINVAL; + + if (copy_from_user(&xfer, uxfer, sizeof(xfer))) + return -EFAULT; + + mutex_lock(&udev->cmdr_lock); + tcmu_cmd = xa_load(&udev->commands, xfer.cmd_id); + if (!tcmu_cmd) { + pr_err("Can not find tcmu command, cmd_id:%d\n", xfer.cmd_id); + set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + ret = -EFAULT; + goto out; + } + + if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { + pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); + ret = -EFAULT; + goto out; + } + + ret = tcmu_do_copy_data(tcmu_cmd, xfer.iovec, + xfer.iov_cnt, is_copy_to_sgl); +out: + mutex_unlock(&udev->cmdr_lock); + return ret; +} + +static long tcmu_ioctl(struct uio_info *info, unsigned int cmd, unsigned long arg) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + long ret; + + switch (cmd) { + case TCMU_IOCTL_CMD_COPY_TO_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, true); + break; + case TCMU_IOCTL_CMD_COPY_FROM_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, false); + break; + default: + ret = -EINVAL; + } + return ret; +} + static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) { struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; @@ -2256,6 +2365,7 @@ static int tcmu_configure_device(struct se_device *dev) info->mmap = tcmu_mmap; info->open = tcmu_open; info->release = tcmu_release; + info->ioctl = tcmu_ioctl; ret = uio_register_device(tcmu_root_device, info); if (ret) @@ -3137,6 +3247,40 @@ static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *pa } CONFIGFS_ATTR_WO(tcmu_, free_kept_buf); +static ssize_t tcmu_bypass_data_area_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if (test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = 
container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ bool bypass_data_area;
+ int ret;
+
+ ret = strtobool(page, &bypass_data_area);
+ if (ret < 0)
+ return ret;
+
+ if (bypass_data_area)
+ set_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags);
+ else
+ clear_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags);
+
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, bypass_data_area);
+
 static struct configfs_attribute *tcmu_attrib_attrs[] = {
 &tcmu_attr_cmd_time_out,
 &tcmu_attr_qfull_time_out,
@@ -3148,6 +3292,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
 &tcmu_attr_emulate_write_cache,
 &tcmu_attr_tmr_notification,
 &tcmu_attr_nl_reply_supported,
+ &tcmu_attr_bypass_data_area,
 NULL,
 };
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index f925a77f19ed..2ce13568f196 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -185,4 +185,14 @@ enum tcmu_genl_attr {
 };
 #define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
+struct tcmu_data_xfer {
+ __u16 cmd_id;
+ __u16 __pad1;
+ __u32 iov_cnt;
+ struct iovec __user *iovec;
+};
+
+#define TCMU_IOCTL_CMD_COPY_TO_SGL _IOW('T', 0xe0, struct tcmu_data_xfer)
+#define TCMU_IOCTL_CMD_COPY_FROM_SGL _IOR('T', 0xe1, struct tcmu_data_xfer)
+
 #endif
-- Gitee

From 537380edf857518fbf87d4a57562ddb4485e204b Mon Sep 17 00:00:00 2001
From: Guixin Liu
Date: Tue, 31 May 2022 14:33:16 +0800
Subject: [PATCH 247/953] anolis: scsi: target: tcmu: Introduce cmd_lock to tcmu_cmd

ANBZ: #8525

Currently the uio framework's info_lock and tcmu's cmdr_lock force the bypass-data-area ioctl commands to copy data sequentially, which greatly impacts IO throughput. The locking is needed because the tcmu timeout handler may run concurrently, handle a command and mark it expired, after which the sg pages in se_cmd can no longer be accessed; tcmu's cmdr_lock therefore had to be held to avoid races between the timeout handler and the bypass-data-area ioctls.

To improve this a bit, introduce a per-tcmu_cmd cmd_lock: multiple bypass-data-area ioctl commands can then run concurrently (of course, uio's info_lock needs to disappear too, which a later patch handles), while races against the tcmu timeout handler are still prevented, as sketched below.
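The resulting locking scheme looks roughly like this (condensed from the diff below):

    /* timeout handler */
    mutex_lock(&cmd->cmd_lock);
    set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
    cmd->se_cmd = NULL;                      /* sgl pages go away */
    mutex_unlock(&cmd->cmd_lock);

    /* bypass-data-area ioctl */
    mutex_lock(&tcmu_cmd->cmd_lock);
    if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags))
            goto out;                        /* must not touch se_cmd */
    ret = tcmu_do_copy_data(tcmu_cmd, xfer.iovec, xfer.iov_cnt, is_copy_to_sgl);
    out:
    mutex_unlock(&tcmu_cmd->cmd_lock);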
Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index cb1890dc5851..bb5969997438 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -195,6 +195,8 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 #define TCMU_CMD_BIT_KEEP_BUF 1 unsigned long flags; + + struct mutex cmd_lock; }; struct tcmu_tmr { @@ -644,6 +646,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) INIT_LIST_HEAD(&tcmu_cmd->queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; + mutex_init(&tcmu_cmd->cmd_lock); if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { tcmu_cmd_set_block_cnts(tcmu_cmd); @@ -1533,11 +1536,13 @@ static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) if (!time_after_eq(jiffies, cmd->deadline)) return; + mutex_lock(&cmd->cmd_lock); set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; se_cmd->priv = NULL; cmd->se_cmd = NULL; + mutex_unlock(&cmd->cmd_lock); pr_debug("Timing out inflight cmd %u on dev %s.\n", cmd->cmd_id, cmd->tcmu_dev->name); @@ -2069,15 +2074,14 @@ static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, if (copy_from_user(&xfer, uxfer, sizeof(xfer))) return -EFAULT; - mutex_lock(&udev->cmdr_lock); tcmu_cmd = xa_load(&udev->commands, xfer.cmd_id); if (!tcmu_cmd) { pr_err("Can not find tcmu command, cmd_id:%d\n", xfer.cmd_id); set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); - ret = -EFAULT; - goto out; + return -EFAULT; } + mutex_lock(&tcmu_cmd->cmd_lock); if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); ret = -EFAULT; @@ -2087,7 +2091,7 @@ static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, ret = tcmu_do_copy_data(tcmu_cmd, xfer.iovec, xfer.iov_cnt, is_copy_to_sgl); out: - mutex_unlock(&udev->cmdr_lock); + mutex_unlock(&tcmu_cmd->cmd_lock); return ret; } -- Gitee From 5a915b1f4f2139f62ca873581280d5c9314a1143 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Mon, 14 Feb 2022 17:15:07 +0800 Subject: [PATCH 248/953] anolis: uio: Replace mutex info_lock with percpu_ref to improve performance ANBZ: #8525 The mutex info_lock was introduced to fix crash after the device is unregistered in commit 57c5f4df0a5a ("uio: fix crash after the device is unregistered"), we can replace it with more powerful percpu-ref to improve performance. Use tcm_loop and tcmu(backstore is file) to evaluate performance, fio job: fio -filename=/dev/sdb -ioengine=libaio -direct=1 -size=2G -name=1 -thread -runtime=60 -time_based -rw=randread -numjobs=16 -iodepth=16 -bs=128k Without this patch: READ: bw=2965MiB/s (3110MB/s), 183MiB/s-188MiB/s (192MB/s-197MB/s), io=174GiB (187GB), run=60005-60007msec With this patch: READ: bw=5823MiB/s (6106MB/s), 338MiB/s-379MiB/s (354MB/s-397MB/s), io=341GiB (366GB), run=60002-60005msec There is about a 100% performance improvement in this case. 
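The conversion follows the usual percpu_ref pattern (condensed from the diff below):

    /* readers: every file operation, replacing mutex_lock(&idev->info_lock) */
    if (!percpu_ref_tryget_live(&idev->info_ref))
            return -EINVAL;
    /* ... safely dereference idev->info ... */
    percpu_ref_put(&idev->info_ref);

    /* unregister path: kill the ref, wait for all readers to drain,
     * then it is safe to clear idev->info */
    percpu_ref_kill_and_confirm(&idev->info_ref, uio_confirm_info);
    wait_for_completion(&idev->confirm_done);
    wait_for_completion(&idev->free_done);
    idev->info = NULL;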
Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/uio/uio.c | 101 +++++++++++++++++++++++++++---------- include/linux/uio_driver.h | 5 +- 2 files changed, 79 insertions(+), 27 deletions(-) diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index caf9caa4ee73..ed942097ee33 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -12,6 +12,8 @@ * Base Functions */ +#include +#include #include #include #include @@ -218,7 +220,9 @@ static ssize_t name_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -228,7 +232,7 @@ static ssize_t name_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->name); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(name); @@ -239,7 +243,9 @@ static ssize_t version_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -249,7 +255,7 @@ static ssize_t version_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->version); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(version); @@ -489,16 +495,20 @@ static int uio_open(struct inode *inode, struct file *filep) listener->event_count = atomic_read(&idev->event); filep->private_data = listener; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + ret = -EINVAL; + goto err_infoopen; + } + if (!idev->info) { - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); ret = -EINVAL; goto err_infoopen; } if (idev->info->open) ret = idev->info->open(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) goto err_infoopen; @@ -531,10 +541,12 @@ static int uio_release(struct inode *inode, struct file *filep) struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); module_put(idev->owner); kfree(listener); @@ -548,10 +560,12 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) struct uio_device *idev = listener->dev; __poll_t ret = 0; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info || !idev->info->irq) ret = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) return ret; @@ -577,13 +591,17 @@ static ssize_t uio_read(struct file *filep, char __user *buf, add_wait_queue(&idev->wait, &wait); do { - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + retval = -EINVAL; + break; + } + if (!idev->info || !idev->info->irq) { retval = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); break; } - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); set_current_state(TASK_INTERRUPTIBLE); @@ -631,7 +649,9 @@ 
static ssize_t uio_write(struct file *filep, const char __user *buf, if (copy_from_user(&irq_on, buf, count)) return -EFAULT; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { retval = -EINVAL; goto out; @@ -650,7 +670,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, retval = idev->info->irqcontrol(idev->info, irq_on); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return retval ? retval : sizeof(s32); } @@ -675,7 +695,9 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vm_fault_t ret = 0; int mi; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return VM_FAULT_SIGBUS; + if (!idev->info) { ret = VM_FAULT_SIGBUS; goto out; @@ -702,8 +724,7 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vmf->page = page; out: - mutex_unlock(&idev->info_lock); - + percpu_ref_put(&idev->info_ref); return ret; } @@ -772,7 +793,9 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) vma->vm_private_data = idev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; goto out; @@ -811,7 +834,7 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) } out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } @@ -821,7 +844,8 @@ static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct uio_device *idev = listener->dev; long retval = 0; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; if (!idev->info || !idev->info->ioctl) { retval = -EINVAL; @@ -830,7 +854,7 @@ static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = idev->info->ioctl(idev->info, cmd, arg); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return retval; } @@ -928,6 +952,14 @@ static void uio_device_release(struct device *dev) kfree(idev); } +static void uio_info_free(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->free_done); +} + + /** * __uio_register_device - register a new userspace IO device * @owner: module that creates the new device @@ -958,10 +990,18 @@ int __uio_register_device(struct module *owner, idev->owner = owner; idev->info = info; - mutex_init(&idev->info_lock); init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); + ret = percpu_ref_init(&idev->info_ref, uio_info_free, 0, GFP_KERNEL); + if (ret) { + pr_err("percpu_ref init failed!\n"); + kfree(idev); + return ret; + } + init_completion(&idev->confirm_done); + init_completion(&idev->free_done); + ret = uio_get_minor(idev); if (ret) { kfree(idev); @@ -1057,6 +1097,13 @@ int __devm_uio_register_device(struct module *owner, } EXPORT_SYMBOL_GPL(__devm_uio_register_device); +static void uio_confirm_info(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->confirm_done); +} + /** * uio_unregister_device - unregister a industrial IO device * @info: UIO device capabilities @@ -1073,14 +1120,16 @@ void uio_unregister_device(struct uio_info *info) idev = info->uio_dev; minor = idev->minor; - mutex_lock(&idev->info_lock); + percpu_ref_kill_and_confirm(&idev->info_ref, uio_confirm_info); + wait_for_completion(&idev->confirm_done); + wait_for_completion(&idev->free_done); + /* now, we can set info to NULL */ 
uio_dev_del_attributes(idev);
 if (info->irq && info->irq != UIO_IRQ_CUSTOM)
 free_irq(info->irq, idev);
 idev->info = NULL;
- mutex_unlock(&idev->info_lock);
 wake_up_interruptible(&idev->wait);
 kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 971d172b442f..46e2710985e6 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 struct module;
 struct uio_map;
@@ -74,9 +75,11 @@ struct uio_device {
 struct fasync_struct *async_queue;
 wait_queue_head_t wait;
 struct uio_info *info;
- struct mutex info_lock;
 struct kobject *map_dir;
 struct kobject *portio_dir;
+ struct percpu_ref info_ref;
+ struct completion confirm_done;
+ struct completion free_done;
 };
 /**
-- Gitee

From b1d5981bb769272cc523b834112d64a4a81d49e6 Mon Sep 17 00:00:00 2001
From: Xiaoguang Wang
Date: Wed, 16 Mar 2022 13:40:00 +0800
Subject: [PATCH 249/953] anolis: mm: export zap_page_range_single()

ANBZ: #8525

The target_core_user module will use it to implement the zero copy feature.

Reviewed-by: Guixin Liu
Reviewed-by: Joseph Qi
Signed-off-by: Xiaoguang Wang
Signed-off-by: Guixin Liu
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2876
---
 mm/memory.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mm/memory.c b/mm/memory.c
index 039a87fcf4cd..5bc7632b4cc7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1770,6 +1770,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 tlb_finish_mmu(&tlb);
 hugetlb_zap_end(vma, details);
 }
+EXPORT_SYMBOL_GPL(zap_page_range_single);
 /**
 * zap_vma_ptes - remove ptes mapping the vma
-- Gitee

From b1503797f891317ff1c88ddb16ca4f2c1d70ba95 Mon Sep 17 00:00:00 2001
From: Xiaoguang Wang
Date: Tue, 15 Mar 2022 14:40:19 +0800
Subject: [PATCH 250/953] anolis: scsi: target: tcmu: Support zero copy

ANBZ: #8525

Currently, for READ commands, tcmu copies the userspace backstore's data buffer to the tcmu internal data area, then copies the data from the data area to the READ command's sgl pages. For WRITE commands, tcmu copies the sgl pages to the tcmu internal data area, then copies the data from the data area to the userspace backstore. In both cases there is obvious copy overhead, which hurts IO throughput, especially for large IO sizes.

To mitigate this issue, we implement a zero copy feature in tcmu, which maps the sgl pages into the userspace backstore's address space. Currently a command can only take the tcmu zero copy path if the offset and length of every one of its sgl pages are page aligned, as the condensed check below shows.
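Eligibility is decided in queue_cmd_ring() (condensed from its change in the diff below; declarations omitted): the command must not be bidirectional, its total length must be page aligned, and so must the offset and length of every sgl segment:

    if (!(se_cmd->se_cmd_flags & SCF_BIDI) && se_cmd->data_length &&
        IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) {
            for_each_sg(data_sg, sg, data_nents, i) {
                    if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) ||
                        !IS_ALIGNED(sg->length, PAGE_SIZE))
                            break;          /* not eligible, fall back to copying */
            }
            if (i == data_nents)
                    zero_copy = true;
    }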
Reviewed-by: Guixin Liu Reviewed-by: Joseph Qi Signed-off-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 285 ++++++++++++++++++++++++-- include/uapi/linux/target_core_user.h | 8 + 2 files changed, 271 insertions(+), 22 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index bb5969997438..545faa0e7a2c 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include #include @@ -73,6 +75,7 @@ */ #define DATA_PAGES_PER_BLK_DEF 1 #define DATA_AREA_PAGES_DEF (256 * 1024) +#define ZC_DATA_AREA_PAGES_DEF (256 * 1024) #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT)) #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT)) @@ -140,6 +143,7 @@ struct tcmu_dev { /* Must add data_off and mb_addr to get the address */ size_t data_off; int data_area_mb; + uint32_t zc_max_blocks; uint32_t max_blocks; size_t mmap_pages; @@ -154,6 +158,10 @@ struct tcmu_dev { uint32_t data_pages_per_blk; uint32_t data_blk_size; + uint32_t zc_dbi_max; + uint32_t zc_dbi_thresh; + unsigned long *zc_data_bitmap; + struct xarray commands; struct timer_list cmd_timer; @@ -179,6 +187,12 @@ struct tcmu_cmd { struct tcmu_dev *tcmu_dev; struct list_head queue_entry; + /* for zero_copy */ + struct mm_struct *vma_vm_mm; + struct vm_area_struct *vma; + struct iovec *iov; + int iov_cnt; + uint16_t cmd_id; /* Can't use se_cmd when cleaning up expired cmds, because if @@ -194,6 +208,7 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 #define TCMU_CMD_BIT_KEEP_BUF 1 +#define TCMU_CMD_BIT_ZEROCOPY 2 unsigned long flags; struct mutex cmd_lock; @@ -500,10 +515,38 @@ static struct genl_family tcmu_genl_family __ro_after_init = { static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + unsigned long *data_bitmap; uint32_t i; + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + data_bitmap = udev->zc_data_bitmap; + else + data_bitmap = udev->data_bitmap; + for (i = 0; i < len; i++) - clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); + clear_bit(tcmu_cmd->dbi[i], data_bitmap); +} + +static inline int tcmu_get_zc_empty_block(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, + int prev_dbi, int *iov_cnt) +{ + int dbi; + + dbi = find_first_zero_bit(udev->zc_data_bitmap, udev->zc_dbi_thresh); + if (dbi == udev->zc_dbi_thresh) + return -1; + + if (dbi > udev->zc_dbi_max) + udev->zc_dbi_max = dbi; + + set_bit(dbi, udev->zc_data_bitmap); + tcmu_cmd_set_dbi(tcmu_cmd, dbi); + + if (dbi != prev_dbi + 1) + *iov_cnt += 1; + + return dbi; } static inline int tcmu_get_empty_block(struct tcmu_dev *udev, @@ -555,7 +598,8 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, } static int tcmu_get_empty_blocks(struct tcmu_dev *udev, - struct tcmu_cmd *tcmu_cmd, int length) + struct tcmu_cmd *tcmu_cmd, int length, + bool zero_copy) { /* start value of dbi + 1 must not be a valid dbi */ int dbi = -2; @@ -564,7 +608,10 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, for (; length > 0; length -= blk_size) { blk_data_len = min_t(uint32_t, length, blk_size); - dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, + if (zero_copy) + dbi = tcmu_get_zc_empty_block(udev, tcmu_cmd, dbi, &iov_cnt); + else + dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, &iov_cnt); if (dbi < 0) 
return -1; @@ -572,8 +619,40 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, return iov_cnt; } +static void tcmu_cmd_zerocopy_unmap(struct tcmu_cmd *cmd) +{ + struct mm_struct *mm; + struct vm_area_struct *vma; + struct iovec *iov = cmd->iov; + unsigned long address; + int i; + + mm = cmd->vma_vm_mm; + vma = cmd->vma; + if (!mm) + return; + + if (mmget_not_zero(mm)) { + mmap_read_lock(mm); + for (i = 0; i < cmd->iov_cnt; i++) { + address = (unsigned long)iov->iov_base; + zap_page_range_single(vma, address, iov->iov_len, NULL); + iov++; + } + mmap_read_unlock(mm); + mmput(mm); + } + + cmd->vma_vm_mm = NULL; + cmd->vma = NULL; + mmdrop(mm); + kfree(cmd->iov); +} + static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) { + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + tcmu_cmd_zerocopy_unmap(tcmu_cmd); kfree(tcmu_cmd->dbi); kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); } @@ -861,37 +940,51 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) * Called with ring lock held. */ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, - int *iov_bidi_cnt) + int *iov_bidi_cnt, bool zero_copy) { int space, iov_cnt = 0, ret = 0; + unsigned long *data_bitmap; + uint32_t *dbi_thresh, max_blocks; if (!cmd->dbi_cnt) goto wr_iov_cnts; + if (zero_copy) { + data_bitmap = udev->zc_data_bitmap; + dbi_thresh = &udev->zc_dbi_thresh; + max_blocks = udev->zc_max_blocks; + } else { + data_bitmap = udev->data_bitmap; + dbi_thresh = &udev->dbi_thresh; + max_blocks = udev->max_blocks; + } + /* try to check and get the data blocks as needed */ - space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); + space = spc_bitmap_free(data_bitmap, *dbi_thresh); if (space < cmd->dbi_cnt) { - unsigned long blocks_left = - (udev->max_blocks - udev->dbi_thresh) + space; + unsigned long blocks_left = max_blocks - *dbi_thresh + space; if (blocks_left < cmd->dbi_cnt) { - pr_debug("no data space: only %lu available, but ask for %u\n", + pr_debug("no data space[%s]: only %lu available, but ask for %u\n", ++ zero_copy ? 
"zero copy" : "non zero copy", blocks_left * udev->data_blk_size, cmd->dbi_cnt * udev->data_blk_size); return -1; } - udev->dbi_thresh += cmd->dbi_cnt; - if (udev->dbi_thresh > udev->max_blocks) - udev->dbi_thresh = udev->max_blocks; + *dbi_thresh += cmd->dbi_cnt; + if (*dbi_thresh > max_blocks) + *dbi_thresh = max_blocks; } - iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); + iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length, + zero_copy); if (iov_cnt < 0) return -1; if (cmd->dbi_bidi_cnt) { - ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); + ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi, + zero_copy); if (ret < 0) return -1; } @@ -1032,6 +1125,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) uint32_t blk_size = udev->data_blk_size; /* size of data buffer needed */ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; + bool zero_copy = false; *scsi_err = TCM_NO_SENSE; @@ -1055,7 +1149,22 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) return -1; } - iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); + if (!(se_cmd->se_cmd_flags & SCF_BIDI) && se_cmd->data_length && + IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) { + struct scatterlist *data_sg = se_cmd->t_data_sg, *sg; + unsigned int data_nents = se_cmd->t_data_nents; + int i; + + for_each_sg(data_sg, sg, data_nents, i) { + if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || + !IS_ALIGNED(sg->length, PAGE_SIZE)) + break; + } + if (i == data_nents) + zero_copy = true; + } + + iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt, zero_copy); if (iov_cnt < 0) goto free_and_queue; @@ -1105,7 +1214,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) iov = &entry->req.iov[0]; if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { - if (se_cmd->data_direction == DMA_TO_DEVICE || + if (((se_cmd->data_direction == DMA_TO_DEVICE) && !zero_copy) || se_cmd->se_cmd_flags & SCF_BIDI) scatter_data_area(udev, tcmu_cmd, &iov); else @@ -1125,6 +1234,19 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); entry->hdr.cmd_id = tcmu_cmd->cmd_id; + if (zero_copy) { + int i; + struct iovec *tiov; + + tiov = &entry->req.iov[0]; + for (i = 0; i < entry->req.iov_cnt; i++) { + tiov->iov_base = tiov->iov_base + + (TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT); + tiov++; + } + entry->hdr.kflags |= TCMU_KFLAG_ZERO_COPY; + set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + } tcmu_hdr_set_len(&entry->hdr.len_op, command_size); @@ -1381,7 +1503,9 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { - if (se_cmd->se_cmd_flags & SCF_BIDI) { + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { + tcmu_cmd_zerocopy_unmap(cmd); + } else if (se_cmd->se_cmd_flags & SCF_BIDI) { /* Get Data-In buffer before clean up */ gather_data_area(udev, cmd, true, read_len); } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { @@ -1537,6 +1661,8 @@ static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) return; mutex_lock(&cmd->cmd_lock); + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) + tcmu_cmd_zerocopy_unmap(cmd); set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; @@ -1635,6 +1761,7 @@ static struct 
se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; + udev->zc_max_blocks = ZC_DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; udev->cmdr_size = CMDR_SIZE_DEF; udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); @@ -1756,6 +1883,7 @@ static void tcmu_dev_kref_release(struct kref *kref) tcmu_blocks_release(udev, 0, udev->dbi_max); bitmap_free(udev->data_bitmap); + bitmap_free(udev->zc_data_bitmap); mutex_unlock(&udev->cmdr_lock); pr_debug("dev_kref_release\n"); @@ -1944,7 +2072,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); vma->vm_ops = &tcmu_vm_ops; vma->vm_private_data = udev; @@ -1958,6 +2086,109 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) return 0; } +#define TCMU_ZEROCOPY_PAGE_BATCH 32 + +static inline int tcmu_zerocopy_one_seg(struct iovec *iov, + struct vm_area_struct *vma, + struct sg_page_iter *sgiter) +{ + struct page *pages[TCMU_ZEROCOPY_PAGE_BATCH]; + unsigned int len = iov->iov_len; + unsigned long address = (unsigned long)iov->iov_base; + unsigned long pages_remaining, pg_index = 0; + struct page *page; + int ret; + + while (len > 0) { + __sg_page_iter_next(sgiter); + page = sg_page_iter_page(sgiter); + pages[pg_index++] = page; + len -= PAGE_SIZE; + if (pg_index == TCMU_ZEROCOPY_PAGE_BATCH || !len) { + pages_remaining = pg_index; + ret = vm_insert_pages_mkspecial(vma, address, pages, + &pages_remaining); + if (ret < 0) { + pr_err("vm insert pages failed, error code: %d\n", ret); + return ret; + } + address = address + pg_index * PAGE_SIZE; + pg_index = 0; + } + } + + return 0; +} + +long tcmu_ioctl_cmd_zerocopy(struct tcmu_dev *udev, unsigned long arg) +{ + struct tcmu_cmd *cmd; + struct se_cmd *se_cmd; + struct scatterlist *data_sg; + unsigned int data_nents; + struct tcmu_cmd_zerocopy zc; + struct iovec *iov, *tiov; + struct sg_page_iter sgiter; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int i, ret = 0; + + if (copy_from_user(&zc, (struct tcmu_cmd_zerocopy __user *)arg, sizeof(zc))) + return -EFAULT; + + if (zc.iov_cnt <= 0) + return -EINVAL; + + iov = kmalloc_array(zc.iov_cnt, sizeof(struct iovec), GFP_KERNEL); + if (!iov) + return -ENOMEM; + if (copy_from_user(iov, zc.iov, sizeof(struct iovec) * zc.iov_cnt)) { + kfree(iov); + return -EFAULT; + } + + mutex_lock(&udev->cmdr_lock); + mmap_read_lock(mm); + cmd = xa_load(&udev->commands, zc.cmd_id); + if (!cmd) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: cmd_id %d not found\n", zc.cmd_id); + goto out; + } + se_cmd = cmd->se_cmd; + + vma = find_vma(current->mm, (unsigned long)iov->iov_base); + if (!vma) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: invalid iov_base\n"); + goto out; + } + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + __sg_page_iter_start(&sgiter, data_sg, data_nents, 0); + tiov = iov; + for (i = 0; i < zc.iov_cnt; i++) { + ret = tcmu_zerocopy_one_seg(tiov, vma, &sgiter); + if (ret < 0) { + kfree(iov); + goto out; + } + tiov++; + } + + cmd->iov = iov; + cmd->iov_cnt = zc.iov_cnt; + cmd->vma_vm_mm = vma->vm_mm; + cmd->vma = vma; + mmgrab(cmd->vma_vm_mm); +out: + mmap_read_unlock(mm); + mutex_unlock(&udev->cmdr_lock); + return ret; +} + static int 
tcmu_open(struct uio_info *info, struct inode *inode) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); @@ -2107,6 +2338,9 @@ static long tcmu_ioctl(struct uio_info *info, unsigned int cmd, unsigned long ar case TCMU_IOCTL_CMD_COPY_FROM_SGL: ret = tcmu_bypass_data_area_copy_data(udev, arg, false); break; + case TCMU_IOCTL_CMD_ZEROCOPY: + ret = tcmu_ioctl_cmd_zerocopy(udev, arg); + break; default: ret = -EINVAL; } @@ -2313,6 +2547,7 @@ static int tcmu_configure_device(struct se_device *dev) struct uio_info *info; struct tcmu_mailbox *mb; size_t data_size; + size_t zc_data_size; int ret = 0; ret = tcmu_update_uio_info(udev); @@ -2323,10 +2558,11 @@ static int tcmu_configure_device(struct se_device *dev) mutex_lock(&udev->cmdr_lock); udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); + udev->zc_data_bitmap = bitmap_zalloc(udev->zc_max_blocks, GFP_KERNEL); mutex_unlock(&udev->cmdr_lock); - if (!udev->data_bitmap) { + if (!udev->data_bitmap || !udev->zc_data_bitmap) { ret = -ENOMEM; - goto err_bitmap_alloc; + goto err_vzalloc; } mb = vzalloc(udev->cmdr_size + CMDR_OFF); @@ -2340,9 +2576,12 @@ static int tcmu_configure_device(struct se_device *dev) udev->cmdr = (void *)mb + CMDR_OFF; udev->data_off = udev->cmdr_size + CMDR_OFF; data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; - udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT; + zc_data_size = (udev->zc_max_blocks * udev->data_pages_per_blk) << PAGE_SHIFT; + udev->mmap_pages = (data_size + zc_data_size + udev->cmdr_size + + CMDR_OFF) >> PAGE_SHIFT; udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; udev->dbi_thresh = 0; /* Default in Idle state */ + udev->zc_dbi_thresh = 0; /* Default in Idle state */ /* Initialise the mailbox of the ring buffer */ mb->version = TCMU_MAILBOX_VERSION; @@ -2360,7 +2599,8 @@ static int tcmu_configure_device(struct se_device *dev) info->mem[0].name = "tcm-user command & data buffer"; info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; - info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF; + info->mem[0].size = data_size + zc_data_size + + udev->cmdr_size + CMDR_OFF; info->mem[0].memtype = UIO_MEM_NONE; info->irqcontrol = tcmu_irqcontrol; @@ -2416,7 +2656,8 @@ static int tcmu_configure_device(struct se_device *dev) err_vzalloc: bitmap_free(udev->data_bitmap); udev->data_bitmap = NULL; -err_bitmap_alloc: + kfree(udev->zc_data_bitmap); + udev->zc_data_bitmap = NULL; kfree(info->name); info->name = NULL; diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index 2ce13568f196..eba0cac0c8d2 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -73,6 +73,7 @@ enum tcmu_opcode { struct tcmu_cmd_entry_hdr { __u32 len_op; __u16 cmd_id; +#define TCMU_KFLAG_ZERO_COPY 0x1 __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 #define TCMU_UFLAG_READ_LEN 0x2 @@ -194,5 +195,12 @@ struct tcmu_data_xfer { #define TCMU_IOCTL_CMD_COPY_TO_SGL _IOW('T', 0xe0, struct tcmu_data_xfer) #define TCMU_IOCTL_CMD_COPY_FROM_SGL _IOR('T', 0xe1, struct tcmu_data_xfer) +#define TCMU_IOCTL_CMD_ZEROCOPY _IOW('T', 0xe2, struct tcmu_cmd_zerocopy) + +struct tcmu_cmd_zerocopy { + struct iovec __user *iov; + __u32 iov_cnt; + __u16 cmd_id; +}; #endif -- Gitee From 9716bd8ef3aa4c5c874853468c677baa01dfff83 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Wed, 30 Mar 2022 13:42:06 +0800 Subject: [PATCH 251/953] anolis: scsi: target: tcmu: make zero copy and bypass data area configurable 
ANBZ: #8525

Add configfs files read_zc_size, write_zc_size, read_bypass_data_area, and write_bypass_data_area to control which commands bypass the data area or take the zero copy path.

Reviewed-by: Joseph Qi
Reviewed-by: Xiaoguang Wang
Signed-off-by: Guixin Liu
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2876
---
 drivers/target/target_core_user.c | 248 +++++++++++++++++++++-----
 include/uapi/linux/target_core_user.h | 1 +
 2 files changed, 202 insertions(+), 47 deletions(-)

diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 545faa0e7a2c..81e29f8907c1 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -126,7 +126,8 @@ struct tcmu_dev {
 #define TCMU_DEV_BIT_BLOCKED 2
 #define TCMU_DEV_BIT_TMR_NOTIFY 3
 #define TCMU_DEV_BIT_PLUGGED 4
-#define TCMU_DEV_BIT_BYPASS_DATA_AREA 5
+#define TCMU_DEV_BIT_READ_BYPASS_DATA_AREA 5
+#define TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA 6
 unsigned long flags;
 struct uio_info uio_info;
@@ -161,6 +162,8 @@ struct tcmu_dev {
 uint32_t zc_dbi_max;
 uint32_t zc_dbi_thresh;
 unsigned long *zc_data_bitmap;
+ uint32_t read_zc_size;
+ uint32_t write_zc_size;
 struct xarray commands;
@@ -209,6 +212,7 @@ struct tcmu_cmd {
 #define TCMU_CMD_BIT_EXPIRED 0
 #define TCMU_CMD_BIT_KEEP_BUF 1
 #define TCMU_CMD_BIT_ZEROCOPY 2
+#define TCMU_CMD_BIT_BYPASS_DATA_AREA 3
 unsigned long flags;
 struct mutex cmd_lock;
@@ -712,11 +716,67 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
 }
+static void tcmu_set_cmd_bypass_data_area(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+
+ /*
+ * Zero copy maps sg pages to userspace, while bypass data area
+ * copies data between sg pages and the userspace buffer, so they
+ * are completely different.
+ */
+ if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags))
+ return;
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+ test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags))
+ set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags);
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE &&
+ test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags))
+ set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags);
+}
+
+static void tcmu_set_cmd_do_zero_copy(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ struct scatterlist *data_sg = se_cmd->t_data_sg, *sg;
+ unsigned int data_nents = se_cmd->t_data_nents;
+ int i;
+
+ if ((se_cmd->se_cmd_flags & SCF_BIDI) || !se_cmd->data_length ||
+ !IS_ALIGNED(se_cmd->data_length, PAGE_SIZE))
+ return;
+
+ if ((se_cmd->data_direction == DMA_FROM_DEVICE) &&
+ (!udev->read_zc_size ||
+ se_cmd->data_length < (udev->read_zc_size << 10)))
+ return;
+
+ if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+ (!udev->write_zc_size ||
+ se_cmd->data_length < (udev->write_zc_size << 10)))
+ return;
+
+ /* Now, check that every sg page is aligned.
*/ + for_each_sg(data_sg, sg, data_nents, i) { + if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || + !IS_ALIGNED(sg->length, PAGE_SIZE)) + break; + } + if (i == data_nents) + set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); +} + static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) { struct se_device *se_dev = se_cmd->se_dev; struct tcmu_dev *udev = TCMU_DEV(se_dev); struct tcmu_cmd *tcmu_cmd; + bool zero_copy; + bool bypass_data_area; tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); if (!tcmu_cmd) @@ -727,7 +787,12 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) tcmu_cmd->tcmu_dev = udev; mutex_init(&tcmu_cmd->cmd_lock); - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + tcmu_set_cmd_do_zero_copy(tcmu_cmd); + tcmu_set_cmd_bypass_data_area(tcmu_cmd); + + zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); + if (zero_copy || !bypass_data_area) { tcmu_cmd_set_block_cnts(tcmu_cmd); tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), GFP_NOIO); @@ -946,7 +1011,7 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, unsigned long *data_bitmap; uint32_t *dbi_thresh, max_blocks; - if (!cmd->dbi_cnt) + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) goto wr_iov_cnts; if (zero_copy) { @@ -1125,7 +1190,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) uint32_t blk_size = udev->data_blk_size; /* size of data buffer needed */ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; - bool zero_copy = false; + bool zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bool bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); *scsi_err = TCM_NO_SENSE; @@ -1149,21 +1215,6 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) return -1; } - if (!(se_cmd->se_cmd_flags & SCF_BIDI) && se_cmd->data_length && - IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) { - struct scatterlist *data_sg = se_cmd->t_data_sg, *sg; - unsigned int data_nents = se_cmd->t_data_nents; - int i; - - for_each_sg(data_sg, sg, data_nents, i) { - if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || - !IS_ALIGNED(sg->length, PAGE_SIZE)) - break; - } - if (i == data_nents) - zero_copy = true; - } - iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt, zero_copy); if (iov_cnt < 0) goto free_and_queue; @@ -1213,7 +1264,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_cmd_reset_dbi_cur(tcmu_cmd); iov = &entry->req.iov[0]; - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if (zero_copy || !bypass_data_area) { if (((se_cmd->data_direction == DMA_TO_DEVICE) && !zero_copy) || se_cmd->se_cmd_flags & SCF_BIDI) scatter_data_area(udev, tcmu_cmd, &iov); @@ -1224,8 +1275,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; /* Handle BIDI commands */ - if ((se_cmd->se_cmd_flags & SCF_BIDI) - && !test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if ((se_cmd->se_cmd_flags & SCF_BIDI) && !bypass_data_area) { iov++; tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); entry->req.iov_bidi_cnt = iov_bidi_cnt; @@ -1245,9 +1295,11 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tiov++; } entry->hdr.kflags |= TCMU_KFLAG_ZERO_COPY; - set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); 
} + if (bypass_data_area) + entry->hdr.kflags |= TCMU_KFLAG_BYPASS_DATA_AREA; + tcmu_hdr_set_len(&entry->hdr.len_op, command_size); /* All offsets relative to mb_addr, not start of entry! */ @@ -1502,20 +1554,25 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, else se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { - if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { - tcmu_cmd_zerocopy_unmap(cmd); - } else if (se_cmd->se_cmd_flags & SCF_BIDI) { - /* Get Data-In buffer before clean up */ - gather_data_area(udev, cmd, true, read_len); - } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - gather_data_area(udev, cmd, false, read_len); - } else if (se_cmd->data_direction == DMA_TO_DEVICE) { - /* TODO: */ - } else if (se_cmd->data_direction != DMA_NONE) { - pr_warn("TCMU: data direction was %d!\n", - se_cmd->data_direction); - } + + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { + tcmu_cmd_zerocopy_unmap(cmd); + goto done; + } + + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) + goto done; + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + /* Get Data-In buffer before clean up */ + gather_data_area(udev, cmd, true, read_len); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + gather_data_area(udev, cmd, false, read_len); + } else if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* TODO: */ + } else if (se_cmd->data_direction != DMA_NONE) { + pr_warn("TCMU: data direction was %d!\n", + se_cmd->data_direction); } done: @@ -1766,6 +1823,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); mutex_init(&udev->cmdr_lock); + udev->read_zc_size = 0; + udev->write_zc_size = 0; INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); @@ -2299,9 +2358,6 @@ static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd; long ret; - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) - return -EINVAL; - if (copy_from_user(&xfer, uxfer, sizeof(xfer))) return -EFAULT; @@ -2313,6 +2369,11 @@ static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, } mutex_lock(&tcmu_cmd->cmd_lock); + if (!test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags)) { + ret = -EINVAL; + goto out; + } + if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); ret = -EFAULT; @@ -3492,19 +3553,19 @@ static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *pa } CONFIGFS_ATTR_WO(tcmu_, free_kept_buf); -static ssize_t tcmu_bypass_data_area_show(struct config_item *item, char *page) +static ssize_t tcmu_read_bypass_data_area_show(struct config_item *item, char *page) { struct se_dev_attrib *da = container_of(to_config_group(item), struct se_dev_attrib, da_group); struct tcmu_dev *udev = TCMU_DEV(da->da_dev); - if (test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) + if (test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags)) return snprintf(page, PAGE_SIZE, "%s\n", "true"); else return snprintf(page, PAGE_SIZE, "%s\n", "false"); } -static ssize_t tcmu_bypass_data_area_store(struct config_item *item, const char *page, +static ssize_t tcmu_read_bypass_data_area_store(struct config_item *item, const char *page, size_t count) { struct se_dev_attrib *da = container_of(to_config_group(item), @@ -3518,13 +3579,103 @@ static ssize_t tcmu_bypass_data_area_store(struct config_item *item, const char return ret; if 
(bypass_data_area) - set_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags); + set_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); else - clear_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags); + clear_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, read_bypass_data_area); + +static ssize_t tcmu_write_bypass_data_area_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if (test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_write_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + bool bypass_data_area; + int ret; + + ret = strtobool(page, &bypass_data_area); + if (ret < 0) + return ret; + + if (bypass_data_area) + set_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, write_bypass_data_area); + +static ssize_t tcmu_read_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->read_zc_size); +} + +static ssize_t tcmu_read_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t read_zc_size; + int ret; + + ret = kstrtou32(page, 0, &read_zc_size); + if (ret < 0) + return ret; + + udev->read_zc_size = read_zc_size; + + return count; +} +CONFIGFS_ATTR(tcmu_, read_zc_size); + +static ssize_t tcmu_write_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->write_zc_size); +} + +static ssize_t tcmu_write_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t write_zc_size; + int ret; + + ret = kstrtou32(page, 0, &write_zc_size); + if (ret < 0) + return ret; + + udev->write_zc_size = write_zc_size; return count; } -CONFIGFS_ATTR(tcmu_, bypass_data_area); +CONFIGFS_ATTR(tcmu_, write_zc_size); static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_cmd_time_out, @@ -3537,7 +3688,10 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_emulate_write_cache, &tcmu_attr_tmr_notification, &tcmu_attr_nl_reply_supported, - &tcmu_attr_bypass_data_area, + &tcmu_attr_read_bypass_data_area, + &tcmu_attr_write_bypass_data_area, + &tcmu_attr_read_zc_size, + &tcmu_attr_write_zc_size, NULL, }; diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index eba0cac0c8d2..8931c2bb0afe 100644 --- a/include/uapi/linux/target_core_user.h +++ 
b/include/uapi/linux/target_core_user.h
@@ -74,6 +74,7 @@ struct tcmu_cmd_entry_hdr {
 	__u32 len_op;
 	__u16 cmd_id;
 #define TCMU_KFLAG_ZERO_COPY	0x1
+#define TCMU_KFLAG_BYPASS_DATA_AREA	0x2
 	__u8 kflags;
 #define TCMU_UFLAG_UNKNOWN_OP	0x1
 #define TCMU_UFLAG_READ_LEN	0x2
--
Gitee

From ea79ab1428deb4e1d4c4b96e5a822c6430786372 Mon Sep 17 00:00:00 2001
From: Xiaoguang Wang
Date: Tue, 15 Mar 2022 15:57:57 +0800
Subject: [PATCH 252/953] anolis: scsi: target: tcmu: use new rw_semaphore to protect truncate

ANBZ: #8525

Currently tcmu_vma_fault() uses udev->cmdr_lock to avoid racing with
concurrent find_free_blocks(), which unmaps idle pages and truncates
them. This work is much like a filesystem's truncate operation, which
is normally protected against such races by the inode's i_mmap_sem.

This patch replaces cmdr_lock with a new rw_semaphore in the tcmu
fault path, which also allows page faults to run concurrently.

Reviewed-by: Guixin Liu
Reviewed-by: Joseph Qi
Signed-off-by: Xiaoguang Wang
Signed-off-by: Guixin Liu
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2876
---
 drivers/target/target_core_user.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 81e29f8907c1..16e0b27d08ea 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -149,6 +149,7 @@ struct tcmu_dev {
 	size_t mmap_pages;

 	struct mutex cmdr_lock;
+	struct rw_semaphore i_mmap_sem;

 	struct list_head qfull_queue;
 	struct list_head tmr_queue;
@@ -1825,6 +1826,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	mutex_init(&udev->cmdr_lock);
 	udev->read_zc_size = 0;
 	udev->write_zc_size = 0;
+	init_rwsem(&udev->i_mmap_sem);

 	INIT_LIST_HEAD(&udev->node);
 	INIT_LIST_HEAD(&udev->timedout_entry);
@@ -2043,12 +2045,12 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
 {
 	struct page *page;

-	mutex_lock(&udev->cmdr_lock);
+	down_read(&udev->i_mmap_sem);
 	page = xa_load(&udev->data_pages, dpi);
 	if (likely(page)) {
 		get_page(page);
 		lock_page(page);
-		mutex_unlock(&udev->cmdr_lock);
+		up_read(&udev->i_mmap_sem);
 		return page;
 	}

@@ -2058,7 +2060,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
 	 */
 	pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
 	       dpi, udev->name);
-	mutex_unlock(&udev->cmdr_lock);
+	up_read(&udev->i_mmap_sem);

 	return NULL;
 }
@@ -3756,6 +3758,7 @@ static void find_free_blocks(void)
 			continue;
 		}

+		down_write(&udev->i_mmap_sem);
 		end = udev->dbi_max + 1;
 		block = find_last_bit(udev->data_bitmap, end);
 		if (block == udev->dbi_max) {
@@ -3763,6 +3766,7 @@ static void find_free_blocks(void)
 			 * The last bit is dbi_max, so it is not possible
 			 * reclaim any blocks.
 			 */
+			up_write(&udev->i_mmap_sem);
 			mutex_unlock(&udev->cmdr_lock);
 			continue;
 		} else if (block == end) {
@@ -3790,6 +3794,7 @@ static void find_free_blocks(void)

 		off = udev->data_off + (loff_t)start * udev->data_blk_size;
 		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
+		up_write(&udev->i_mmap_sem);
 		mutex_unlock(&udev->cmdr_lock);

 		total_pages_freed += pages_freed;
--
Gitee

From cd4d54e587242d3598808bc3381517976dcd91a4 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Wed, 21 Feb 2024 17:24:49 +0800
Subject: [PATCH 253/953] anolis: tcm_loop: allow sg_tablesize to be settable

ANBZ: #8523

Currently the tcm_loop default sg_tablesize is 256, which can only
support a maximum 1M IO in the worst case, e.g. when each segment
takes only 4k.
This won't fulfill requirements in some user scenarios.

Just like "scsi: tcm_loop: Allow queues, can_queue and cmd_per_lun to
be settable", make 'sg_tablesize' settable by the user as well.

Signed-off-by: Joseph Qi
Reviewed-by: Jingbo Xu
Reviewed-by: Gao Xiang
Signed-off-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2879
---
 drivers/target/loopback/tcm_loop.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4ec99a55ac30..8ec4667c48d4 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -54,6 +54,9 @@ module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);
 static unsigned int tcm_loop_cmd_per_lun = 1024;
 module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);

+static unsigned short tcm_loop_sg_tablesize = 256;
+module_param_named(sg_tablesize, tcm_loop_sg_tablesize, ushort, 0644);
+
 /*
  * Called from struct target_core_fabric_ops->check_stop_free()
  */
@@ -301,7 +304,6 @@ static const struct scsi_host_template tcm_loop_driver_template = {
 	.eh_device_reset_handler = tcm_loop_device_reset,
 	.eh_target_reset_handler = tcm_loop_target_reset,
 	.this_id = -1,
-	.sg_tablesize = 256,
 	.max_sectors = 0xFFFF,
 	.dma_boundary = PAGE_SIZE - 1,
 	.module = THIS_MODULE,
@@ -339,6 +341,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 	sh->nr_hw_queues = tcm_loop_nr_hw_queues;
 	sh->can_queue = tcm_loop_can_queue;
 	sh->cmd_per_lun = tcm_loop_cmd_per_lun;
+	sh->sg_tablesize = tcm_loop_sg_tablesize;

 	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
 		SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
--
Gitee

From 4e898cbe7bbbc4ce459a6ac9acb85bdce2768502 Mon Sep 17 00:00:00 2001
From: Joseph Qi
Date: Wed, 17 Jan 2024 11:14:39 +0800
Subject: [PATCH 254/953] anolis: Revert "block: always define BIO_MAX_PAGES as 256"

ANBZ: #8523

This reverts commit 6861428921b51113520cd47897be6c2774e4fc58.

In some user scenarios, we want to write a big IO (e.g. 2M) and also
expect it not to be split. But in the worst case each bio_vec only
contains a single page, so a 256-entry bio can actually only support
a 1M IO.

Revert BIO_MAX_PAGES back to 512 to fulfill the above user scenarios.
This also keeps it consistent with kernel 4.19.

Signed-off-by: Joseph Qi
Reviewed-by: Jingbo Xu
Signed-off-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2879
---
 include/linux/bio.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/include/linux/bio.h b/include/linux/bio.h
index efb40c3282ca..797e17573e71 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -10,7 +10,15 @@
 #include
 #include

+#ifdef CONFIG_THP_SWAP
+#if HPAGE_PMD_NR > 256
+#define BIO_MAX_VECS (HPAGE_PMD_NR * 1U)
+#else
 #define BIO_MAX_VECS 256U
+#endif
+#else
+#define BIO_MAX_VECS 256U
+#endif

 struct queue_limits;
--
Gitee

From 4d87592753a182cda334c257ac7425cd14b22637 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Thu, 7 Mar 2024 16:10:15 +0800
Subject: [PATCH 255/953] anolis: Add support for Zhaoxin Serial ATA IDE.

ANBZ: #7809

With this driver, Serial ATA devices can run in IDE mode on Zhaoxin
CPUs.
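The main chipset-specific wrinkle is in taskfile loading: on these
chipsets, changing the IEN bit in the ctl register resets the device
register, so the driver overrides sff_tf_load. A minimal sketch of
that workaround, mirroring zx_tf_load() in the diff below (no new
names introduced):

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		/* Force a device-register rewrite after the ctl change. */
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);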
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2684 --- drivers/ata/Kconfig | 9 + drivers/ata/Makefile | 1 + drivers/ata/sata_zhaoxin.c | 390 +++++++++++++++++++++++++++++++++++++ 3 files changed, 400 insertions(+) create mode 100644 drivers/ata/sata_zhaoxin.c diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 42b51c9812a0..0fd5a5bce3e4 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -553,6 +553,15 @@ config SATA_VITESSE If unsure, say N. +config SATA_ZHAOXIN + tristate "ZhaoXin SATA support" + depends on PCI + select SATA_HOST + help + This option enables support for ZhaoXin Serial ATA. + + If unsure, say N. + comment "PATA SFF controllers with BMDMA" config PATA_ALI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 20e6645ab737..4b846692e365 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SATA_SIL) += sata_sil.o obj-$(CONFIG_SATA_SIS) += sata_sis.o obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o +obj-$(CONFIG_SATA_ZHAOXIN) += sata_zhaoxin.o obj-$(CONFIG_SATA_VIA) += sata_via.o obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o diff --git a/drivers/ata/sata_zhaoxin.c b/drivers/ata/sata_zhaoxin.c new file mode 100644 index 000000000000..53c3e2ab6095 --- /dev/null +++ b/drivers/ata/sata_zhaoxin.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sata_zhaoxin.c - ZhaoXin Serial ATA controllers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "sata_zx" +#define DRV_VERSION "2.6.1" + +#define PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL 9002 +#define PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL 9003 + +enum board_ids_enum { + zx100s, +}; + +enum { + SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ + SATA_INT_GATE = 0x41, /* SATA interrupt gating */ + SATA_NATIVE_MODE = 0x42, /* Native mode enable */ + PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ + PATA_PIO_TIMING = 0xAB, /* PATA timing register */ + + PORT0 = (1 << 1), + PORT1 = (1 << 0), + ALL_PORTS = PORT0 | PORT1, + + NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), + + SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ +}; + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val); +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val); +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); + +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); + +static const struct pci_device_id zx_pci_tbl[] = { + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL), zx100s }, + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL), zx100s }, + + { } /* terminate list */ +}; + +static struct pci_driver zx_pci_driver = { + .name = DRV_NAME, + .id_table = zx_pci_tbl, + .probe = zx_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif + .remove = ata_pci_remove_one, +}; + +static struct scsi_host_template zx_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations zx_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = zx_tf_load, +}; + +static struct ata_port_operations zx_ops = { + .inherits = &zx_base_ops, + .hardreset = zx_hardreset, + .scr_read = zx_scr_read, + .scr_write = zx_scr_write, +}; + +static struct 
ata_port_info zx100s_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &zx_ops, +}; + + +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int rc; + + rc = sata_std_hardreset(link, class, deadline); + if (!rc || rc == -EAGAIN) { + struct ata_port *ap = link->ap; + int pmp = link->pmp; + int tmprc; + + if (pmp) { + ap->ops->sff_dev_select(ap, pmp); + tmprc = ata_sff_wait_ready(&ap->link, deadline); + } else { + tmprc = ata_sff_wait_ready(link, deadline); + } + if (tmprc) + ata_link_err(link, "COMRESET failed for wait (errno=%d)\n", + rc); + else + ata_link_err(link, "wait for bsy success\n"); + + ata_link_err(link, "COMRESET success (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } else { + ata_link_err(link, "COMRESET failed (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } + return rc; +} + +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + static const u8 ipm_tbl[] = { 1, 2, 6, 0 }; + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + u8 raw; + + switch (scr) { + case SCR_STATUS: + pci_read_config_byte(pdev, 0xA0 + slot, &raw); + + /* read the DET field, bit0 and 1 of the config byte */ + v |= raw & 0x03; + + /* read the SPD field, bit4 of the configure byte */ + v |= raw & 0x30; + + /* read the IPM field, bit2 and 3 of the config byte */ + v |= ((ipm_tbl[(raw >> 2) & 0x3])<<8); + break; + + case SCR_ERROR: + /* devices other than 5287 uses 0xA8 as base */ + WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL && + pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL); + pci_write_config_byte(pdev, 0x42, slot); + pci_read_config_dword(pdev, 0xA8, &v); + break; + + case SCR_CONTROL: + pci_read_config_byte(pdev, 0xA4 + slot, &raw); + + /* read the DET field, bit0 and bit1 */ + v |= ((raw & 0x02) << 1) | (raw & 0x01); + + /* read the IPM field, bit2 and bit3 */ + v |= ((raw >> 2) & 0x03) << 8; + + break; + + default: + return -EINVAL; + } + + *val = v; + return 0; +} + +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + + WARN_ON(pdev == NULL); + + switch (scr) { + case SCR_ERROR: + /* devices PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL uses 0xA8 as base */ + WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL && + pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL); + pci_write_config_byte(pdev, 0x42, slot); + pci_write_config_dword(pdev, 0xA8, val); + return 0; + + case SCR_CONTROL: + /* set the DET field */ + v |= ((val & 0x4) >> 1) | (val & 0x1); + + /* set the IPM field */ + v |= ((val >> 8) & 0x3) << 2; + + + pci_write_config_byte(pdev, 0xA4 + slot, v); + + + return 0; + + default: + return -EINVAL; + } +} + + +/** + * zx_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. + * + * This is to fix the internal bug of zx chipsets, which will + * reset the device register after changing the IEN bit on ctl + * register. 
+ */ +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_taskfile ttf; + + if (tf->ctl != ap->last_ctl) { + ttf = *tf; + ttf.flags |= ATA_TFLAG_DEVICE; + tf = &ttf; + } + ata_sff_tf_load(ap, tf); +} + +static const unsigned int zx_bar_sizes[] = { + 8, 4, 8, 4, 16, 256 +}; + +static const unsigned int zx100s_bar_sizes0[] = { + 8, 4, 8, 4, 16, 0 +}; + +static const unsigned int zx100s_bar_sizes1[] = { + 8, 4, 0, 0, 16, 0 +}; + +static int zx_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) +{ + const struct ata_port_info *ppi0[] = { + &zx100s_port_info, NULL + }; + const struct ata_port_info *ppi1[] = { + &zx100s_port_info, &ata_dummy_port_info + }; + struct ata_host *host; + int i, rc; + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi0, &host); + else if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi1, &host); + else + rc = -EINVAL; + + if (rc) + return rc; + + *r_host = host; + + /* 9002 hosts four sata ports as M/S of the two channels */ + /* 9003 hosts two sata ports as M/S of the one channel */ + for (i = 0; i < host->n_ports; i++) + ata_slave_link_init(host->ports[i]); + + return 0; +} + +static void zx_configure(struct pci_dev *pdev, int board_id) +{ + u8 tmp8; + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); + dev_info(&pdev->dev, "routed to hard irq line %d\n", + (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); + + /* make sure SATA channels are enabled */ + pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n", + (int)tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); + } + + /* make sure interrupts for each channel sent to us */ + pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n", + (int) tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); + } + + /* make sure native mode is enabled */ + pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); + if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { + dev_dbg(&pdev->dev, + "enabling SATA channel native mode (0x%x)\n", + (int) tmp8); + tmp8 |= NATIVE_MODE_ALL; + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); + } +} + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int i; + int rc; + struct ata_host *host = NULL; + int board_id = (int) ent->driver_data; + const unsigned int *bar_sizes; + int legacy_mode = 0; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL || + pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) { + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u8 tmp8, mask; + + /* TODO: What if one channel is in native mode ... 
*/ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; + } + if (legacy_mode) + return -EINVAL; + } + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + bar_sizes = &zx100s_bar_sizes0[0]; + else if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + bar_sizes = &zx100s_bar_sizes1[0]; + else + bar_sizes = &zx_bar_sizes[0]; + + for (i = 0; i < ARRAY_SIZE(zx_bar_sizes); i++) { + if ((pci_resource_start(pdev, i) == 0) || + (pci_resource_len(pdev, i) < bar_sizes[i])) { + if (bar_sizes[i] == 0) + continue; + + dev_err(&pdev->dev, + "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", + i, + (unsigned long long)pci_resource_start(pdev, i), + (unsigned long long)pci_resource_len(pdev, i)); + + return -ENODEV; + } + } + + switch (board_id) { + case zx100s: + rc = zx_prepare_host(pdev, &host); + break; + default: + rc = -EINVAL; + } + if (rc) + return rc; + + zx_configure(pdev, board_id); + + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &zx_sht); +} + +module_pci_driver(zx_pci_driver); + +MODULE_AUTHOR("Yanchen:YanchenSun@zhaoxin.com"); +MODULE_DESCRIPTION("SCSI low-level driver for ZX SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, zx_pci_tbl); +MODULE_VERSION(DRV_VERSION); -- Gitee From 80330884b37127a5d25606dad1a164d693c54db4 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 7 Mar 2024 16:58:15 +0800 Subject: [PATCH 256/953] anolis: Add support for Zhaoxin HW Random Number Generator ANBZ: #7809 This driver provides kernel-side support for the Random Number Generator hardware found on Zhaoxin based motherboards. Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2685 --- drivers/char/hw_random/Kconfig | 13 ++++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/via-rng.c | 10 +-- drivers/char/hw_random/zhaoxin-rng.c | 98 ++++++++++++++++++++++++++++ 4 files changed, 117 insertions(+), 5 deletions(-) create mode 100644 drivers/char/hw_random/zhaoxin-rng.c diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 8de74dcfa18c..7c486989dd04 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -152,6 +152,19 @@ config HW_RANDOM_VIA If unsure, say Y. +config HW_RANDOM_ZHAOXIN + tristate "Zhaoxin HW Random Number Generator support" + depends on X86 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Zhaoxin based motherboards. + + To compile this driver as a module, choose M here: the + module will be called zhaoxin-rng. + + If unsure, say Y. 
+ config HW_RANDOM_IXP4XX tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support" depends on ARCH_IXP4XX || COMPILE_TEST diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 32549a1186dc..ef5b3ae0794d 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o n2-rng-y := n2-drv.o n2-asm.o obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o +obj-$(CONFIG_HW_RANDOM_ZHAOXIN) += zhaoxin-rng.o obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index a9a0a3b09c8b..4288e1114fc9 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -35,7 +35,7 @@ #include #include - +static struct x86_cpu_id via_rng_cpu_id[]; enum { @@ -135,7 +135,7 @@ static int via_rng_init(struct hwrng *rng) * is always enabled if CPUID rng_en is set. There is no * RNG configuration like it used to be the case in this * register */ - if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){ + if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { pr_err(PFX "can't enable hardware RNG " "if XSTORE is not enabled\n"); @@ -196,7 +196,7 @@ static int __init via_rng_mod_init(void) { int err; - if (!boot_cpu_has(X86_FEATURE_XSTORE)) + if (!x86_match_cpu(via_rng_cpu_id)) return -ENODEV; pr_info("VIA RNG detected\n"); @@ -217,8 +217,8 @@ static void __exit via_rng_mod_exit(void) } module_exit(via_rng_mod_exit); -static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL), +static struct x86_cpu_id via_rng_cpu_id[] = { + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XSTORE, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); diff --git a/drivers/char/hw_random/zhaoxin-rng.c b/drivers/char/hw_random/zhaoxin-rng.c new file mode 100644 index 000000000000..f0bfda78fea1 --- /dev/null +++ b/drivers/char/hw_random/zhaoxin-rng.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RNG driver for Zhaoxin RNGs + * + * Copyright 2023 (c) Zhaoxin Semiconductor Co., Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "2.0.0" + +enum { + ZHAOXIN_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */ + ZHAOXIN_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */ + ZHAOXIN_RNG_MAX_SIZE = (128 * 1024), +}; + +static int zhaoxin_rng_init(struct hwrng *rng) +{ + if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { + pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n"); + return -ENODEV; + } + + return 0; +} + +static inline int rep_xstore(size_t size, size_t factor, void *result) +{ + asm(".byte 0xf3, 0x0f, 0xa7, 0xc0" + : "=m"(*(size_t *)result), "+c"(size), "+d"(factor), "+D"(result)); + + return 0; +} + +static int zhaoxin_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + if (max > ZHAOXIN_RNG_MAX_SIZE) + max = ZHAOXIN_RNG_MAX_SIZE; + + rep_xstore(max, ZHAOXIN_RNG_CHUNK_1, data); + + return max; +} + +static struct hwrng zhaoxin_rng = { + .name = "zhaoxin", + .init = zhaoxin_rng_init, + .read = zhaoxin_rng_read, +}; + +static struct x86_cpu_id 
zhaoxin_rng_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_XSTORE, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_ids); + +static int __init zhaoxin_rng_mod_init(void) +{ + int err; + + if (!x86_match_cpu(zhaoxin_rng_cpu_ids)) { + pr_err(PFX "The CPU isn't support XSTORE.\n"); + return -ENODEV; + } + + pr_info("Zhaoxin RNG detected\n"); + + err = hwrng_register(&zhaoxin_rng); + if (err) + pr_err(PFX "RNG registering failed (%d)\n", err); + + return err; +} +module_init(zhaoxin_rng_mod_init); + +static void __exit zhaoxin_rng_mod_exit(void) +{ + hwrng_unregister(&zhaoxin_rng); +} +module_exit(zhaoxin_rng_mod_exit); + +MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPUs"); +MODULE_AUTHOR("YunShen@zhaoxin.com"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); -- Gitee From 361b57653ce18babb29d9e57fb7bac11d7df7902 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 15 Mar 2024 11:10:04 +0800 Subject: [PATCH 257/953] anolis: hwmon: Add support for Zhaoxin core temperature monitoring ANBZ: #8437 Add support for the temperature sensor inside CPU. Supported are all known variants of the Zhaoxin processors. Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2821 --- MAINTAINERS | 6 + drivers/hwmon/Kconfig | 13 ++ drivers/hwmon/Makefile | 1 + drivers/hwmon/via-cputemp.c | 1 - drivers/hwmon/zhaoxin-cputemp.c | 305 ++++++++++++++++++++++++++++++++ 5 files changed, 325 insertions(+), 1 deletion(-) create mode 100644 drivers/hwmon/zhaoxin-cputemp.c diff --git a/MAINTAINERS b/MAINTAINERS index 5ecb213b4ef4..f3f654630e1d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23885,6 +23885,12 @@ F: arch/x86/events/zhaoxin/core.c F: arch/x86/events/zhaoxin/uncore.c F: arch/x86/events/zhaoxin/uncore.h +ZHAOXIN TEMPERATURE MONITORING DRIVERS +M: Leoliu-oc +L: linux-hwmon@vger.kernel.org +S: Maintained +F: drivers/hwmon/zhaoxin-cputemp.c + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index ec38c8892158..b9f3e18a3fda 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -2161,6 +2161,19 @@ config SENSORS_VIA_CPUTEMP sensor inside your CPU. Supported are all known variants of the VIA C7 and Nano. +config SENSORS_ZHAOXIN_CPUTEMP + tristate "Zhaoxin CPU temperature sensor" + depends on X86 + default m + select HWMON_VID + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of + the Zhaoxin processors. + + This driver can also be built as a module. If so, the module + will be called zhaoxin-cputemp. 
+ config SENSORS_VIA686A tristate "VIA686A" depends on PCI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 4ac9452b5430..cab312e74d3c 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -211,6 +211,7 @@ obj-$(CONFIG_SENSORS_TMP464) += tmp464.o obj-$(CONFIG_SENSORS_TMP513) += tmp513.o obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o +obj-$(CONFIG_SENSORS_ZHAOXIN_CPUTEMP)+= zhaoxin-cputemp.o obj-$(CONFIG_SENSORS_VIA686A) += via686a.o obj-$(CONFIG_SENSORS_VT1211) += vt1211.o obj-$(CONFIG_SENSORS_VT8231) += vt8231.o diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index e5d18dac8ee7..0a5057dbe51a 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -273,7 +273,6 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = { X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL), - X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c new file mode 100644 index 000000000000..751d2c5a868a --- /dev/null +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * zhaoxin-cputemp.c - Driver for Zhaoxin CPU core temperature monitoring + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "zhaoxin_cputemp" + +enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME, SHOW_CRIT, SHOW_MAX }; + +/* Functions declaration */ + +struct zhaoxin_cputemp_data { + struct device *hwmon_dev; + const char *name; + u32 id; + u32 msr_temp; + u32 msr_crit; + u32 msr_max; +}; + +/* Sysfs stuff */ + +static ssize_t name_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + int ret; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + + if (attr->index == SHOW_NAME) + ret = sprintf(buf, "%s\n", data->name); + else /* show label */ + ret = sprintf(buf, "Core %d\n", data->id); + return ret; +} + +static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); +} + +static ssize_t crit_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_crit, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static ssize_t max_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_max, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, SHOW_TEMP); +static 
SENSOR_DEVICE_ATTR_RO(temp1_label, name, SHOW_LABEL); +static SENSOR_DEVICE_ATTR_RO(name, name, SHOW_NAME); +static SENSOR_DEVICE_ATTR_RO(temp1_crit, crit, SHOW_CRIT); +static SENSOR_DEVICE_ATTR_RO(temp1_max, max, SHOW_MAX); + +static struct attribute *zhaoxin_cputemp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group zhaoxin_cputemp_group = { + .attrs = zhaoxin_cputemp_attributes, +}; + +static int zhaoxin_cputemp_probe(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data; + int err; + u32 eax, edx; + struct cpuinfo_x86 *c = &cpu_data(pdev->id); + + data = devm_kzalloc(&pdev->dev, sizeof(struct zhaoxin_cputemp_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = pdev->id; + data->name = "zhaoxin_cputemp"; + data->msr_temp = 0x1423; + if (c->x86_model == 0x6b) { + data->msr_crit = 0x175b; + data->msr_max = 0x175a; + } else { + data->msr_crit = 0x1416; + data->msr_max = 0x1415; + } + + /* test if we can access the TEMPERATURE MSR */ + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) { + dev_err(&pdev->dev, "Unable to access TEMPERATURE MSR, giving up\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + if (err) + return err; + + data->hwmon_dev = hwmon_device_register_for_thermal(&pdev->dev, data->name, data); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit_remove; + } + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return err; +} + +static int zhaoxin_cputemp_remove(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return 0; +} + +static struct platform_driver zhaoxin_cputemp_driver = { + .driver = { + .name = DRVNAME, + }, + .probe = zhaoxin_cputemp_probe, + .remove = zhaoxin_cputemp_remove, +}; + +struct pdev_entry { + struct list_head list; + struct platform_device *pdev; + unsigned int cpu; +}; + +static LIST_HEAD(pdev_list); +static DEFINE_MUTEX(pdev_list_mutex); + +static int zhaoxin_cputemp_online(unsigned int cpu) +{ + int err; + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + pr_err("Device allocation failed\n"); + goto exit; + } + + pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); + if (!pdev_entry) { + err = -ENOMEM; + goto exit_device_put; + } + + err = platform_device_add(pdev); + if (err) { + pr_err("Device addition failed (%d)\n", err); + goto exit_device_free; + } + + pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; + mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + + return 0; + +exit_device_free: + kfree(pdev_entry); +exit_device_put: + platform_device_put(pdev); +exit: + return err; +} + +static int zhaoxin_cputemp_down_prep(unsigned int cpu) +{ + struct pdev_entry *p; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); + 
mutex_unlock(&pdev_list_mutex);
+			kfree(p);
+			return 0;
+		}
+	}
+	mutex_unlock(&pdev_list_mutex);
+	return 0;
+}
+
+static const struct x86_cpu_id cputemp_ids[] __initconst = {
+	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x3b, NULL),
+	X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x3b, NULL),
+	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x5b, NULL),
+	X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x5b, NULL),
+	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x6b, NULL),
+	X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x6b, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
+
+static enum cpuhp_state zhaoxin_temp_online;
+
+static int __init zhaoxin_cputemp_init(void)
+{
+	int err;
+
+	if (!x86_match_cpu(cputemp_ids))
+		return -ENODEV;
+
+	err = platform_driver_register(&zhaoxin_cputemp_driver);
+	if (err)
+		goto exit;
+
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/zhaoxin:online",
+				zhaoxin_cputemp_online, zhaoxin_cputemp_down_prep);
+	if (err < 0)
+		goto exit_driver_unreg;
+
+	zhaoxin_temp_online = err;
+
+#ifndef CONFIG_HOTPLUG_CPU
+	if (list_empty(&pdev_list)) {
+		err = -ENODEV;
+		goto exit_hp_unreg;
+	}
+#endif
+	return 0;
+
+#ifndef CONFIG_HOTPLUG_CPU
+exit_hp_unreg:
+	cpuhp_remove_state_nocalls(zhaoxin_temp_online);
+#endif
+exit_driver_unreg:
+	platform_driver_unregister(&zhaoxin_cputemp_driver);
+exit:
+	return err;
+}
+
+static void __exit zhaoxin_cputemp_exit(void)
+{
+	cpuhp_remove_state(zhaoxin_temp_online);
+	platform_driver_unregister(&zhaoxin_cputemp_driver);
+}
+
+MODULE_DESCRIPTION("Zhaoxin CPU temperature monitor")
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(HWMON_THERMAL);
+
+module_init(zhaoxin_cputemp_init)
+module_exit(zhaoxin_cputemp_exit)
--
Gitee

From 82e26b01d99a5e4836592bfdcbac90b732e888fa Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Tue, 5 Mar 2024 18:21:30 +0800
Subject: [PATCH 258/953] anolis: configs: enable zhaoxin-cputemp as module

ANBZ: #8437

Set the zhaoxin cputemp driver as a module:
CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m

Signed-off-by: leoliu-oc
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2821
---
 arch/x86/configs/anolis_defconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index 3bafc6bbc1fe..38b641e976d0 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -4232,6 +4232,7 @@ CONFIG_SENSORS_TMP421=m
 # CONFIG_SENSORS_TMP464 is not set
 # CONFIG_SENSORS_TMP513 is not set
 CONFIG_SENSORS_VIA_CPUTEMP=m
+CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m
 CONFIG_SENSORS_VIA686A=m
 CONFIG_SENSORS_VT1211=m
 CONFIG_SENSORS_VT8231=m
--
Gitee

From ae9fd8a3f373dade2cb8b04532013f4d3d37053c Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Mon, 4 Mar 2024 10:12:17 +0800
Subject: [PATCH 259/953] anolis: USB: Fix kernel NULL pointer when unbinding UHCI from vfio-pci

ANBZ: #7809

This bug was found on a Zhaoxin platform, but it is a common code bug.

Fail sequence:

step1: Unbind UHCI controller from native driver;
step2: Bind UHCI controller to vfio-pci, which will put UHCI controller
       in one vfio group's device list and set UHCI's dev->driver_data
       to struct vfio-pci (for UHCI);
step3: Unbind EHCI controller from native driver, which will try to
       tell the UHCI native driver "I'm removed" by setting
       companion_hcd->self.hs_companion to NULL. However, companion_hcd
       is obtained from UHCI's dev->driver_data, which has already been
       modified by vfio-pci. So, the vfio-pci structure will be damaged!
step4: Bind EHCI controller to vfio-pci driver, which will put EHCI
       controller in the same vfio group as UHCI controller;
... ...
step5: Unbind UHCI controller from vfio-pci, which will delete UHCI
       from the vfio group device list that was damaged in step 3. So,
       the delete operation can randomly result in a NULL pointer
       dereference with the below stack dump;
step6: Bind UHCI controller to native driver;
step7: Unbind EHCI controller from vfio-pci, which will try to remove
       EHCI controller from the vfio group;
step8: Bind EHCI controller to native driver;

[  929.114641] uhci_hcd 0000:00:10.0: remove, state 1
[  929.114652] usb usb1: USB disconnect, device number 1
[  929.114655] usb 1-1: USB disconnect, device number 2
[  929.270313] usb 1-2: USB disconnect, device number 3
[  929.318404] uhci_hcd 0000:00:10.0: USB bus 1 deregistered
[  929.343029] uhci_hcd 0000:00:10.1: remove, state 4
[  929.343045] usb usb3: USB disconnect, device number 1
[  929.343685] uhci_hcd 0000:00:10.1: USB bus 3 deregistered
[  929.369087] ehci-pci 0000:00:10.7: remove, state 4
[  929.369102] usb usb4: USB disconnect, device number 1
[  929.370325] ehci-pci 0000:00:10.7: USB bus 4 deregistered
[  932.398494] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
[  932.398496] PGD 42a67d067 P4D 42a67d067 PUD 42a65f067 PMD 0
[  932.398502] Oops: 0002 [#2] SMP NOPTI
[  932.398505] CPU: 2 PID: 7824 Comm: vfio_unbind.sh Tainted: P D 4.19.65-2020051917-rainos #1
[  932.398506] Hardware name: Shanghai Zhaoxin Semiconductor Co., Ltd. HX002EH/HX002EH, BIOS HX002EH0_01_R480_R_200408 04/08/2020
[  932.398513] RIP: 0010:vfio_device_put+0x31/0xa0 [vfio]
[  932.398515] Code: 89 e5 41 54 53 4c 8b 67 18 48 89 fb 49 8d 74 24 30 e8 e3 0e f3 de 84 c0 74 67 48 8b 53 20 48 8b 43 28 48 8b 7b 18 48 89 42 08 <48> 89 10 48 b8 00 01 00 00 00 00 ad de 48 89 43 20 48 b8 00 02 00
[  932.398516] RSP: 0018:ffffbbfd04cffc18 EFLAGS: 00010202
[  932.398518] RAX: 0000000000000000 RBX: ffff92c7ea717880 RCX: 0000000000000000
[  932.398519] RDX: ffff92c7ea713620 RSI: ffff92c7ea713630 RDI: ffff92c7ea713600
[  932.398521] RBP: ffffbbfd04cffc28 R08: ffff92c7f02a8080 R09: ffff92c7efc03980
[  932.398522] R10: ffffbbfd04cff9a8 R11: 0000000000000000 R12: ffff92c7ea713600
[  932.398523] R13: ffff92c7ed8bb0a8 R14: ffff92c7ea717880 R15: 0000000000000000
[  932.398525] FS:  00007f3031500740(0000) GS:ffff92c7f0280000(0000) knlGS:0000000000000000
[  932.398526] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  932.398527] CR2: 0000000000000000 CR3: 0000000428626004 CR4: 0000000000160ee0
[  932.398528] Call Trace:
[  932.398534]  vfio_del_group_dev+0xe8/0x2a0 [vfio]
[  932.398539]  ? __blocking_notifier_call_chain+0x52/0x60
[  932.398542]  ? do_wait_intr_irq+0x90/0x90
[  932.398546]  ? iommu_bus_notifier+0x75/0x100
[  932.398551]  vfio_pci_remove+0x20/0xa0 [vfio_pci]
[  932.398554]  pci_device_remove+0x3e/0xc0
[  932.398557]  device_release_driver_internal+0x17a/0x240
[  932.398560]  device_release_driver+0x12/0x20
[  932.398561]  unbind_store+0xee/0x180
[  932.398564]  drv_attr_store+0x27/0x40
[  932.398567]  sysfs_kf_write+0x3c/0x50
[  932.398568]  kernfs_fop_write+0x125/0x1a0
[  932.398572]  __vfs_write+0x3a/0x190
[  932.398575]  ? apparmor_file_permission+0x1a/0x20
[  932.398577]  ? security_file_permission+0x3b/0xc0
[  932.398581]  ? _cond_resched+0x1a/0x50
[  932.398582]  vfs_write+0xb8/0x1b0
[  932.398584]  ksys_write+0x5c/0xe0
[  932.398586]  __x64_sys_write+0x1a/0x20
[  932.398589]  do_syscall_64+0x5a/0x110
[  932.398592]  entry_SYSCALL_64_after_hwframe+0x44/0xa9

Using virt-manager/qemu to boot a guest OS, we can see the same fail
sequence!

Fix this by determining whether the PCI driver of the USB controller is
a kernel native driver.
If not, do not let it modify UHCI's dev->driver_data.

Signed-off-by: leoliu-oc
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2693
---
 drivers/usb/core/hcd-pci.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 990280688b25..df8f91e6a2c7 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -48,6 +48,9 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
 	struct pci_dev		*companion;
 	struct usb_hcd		*companion_hcd;
 	unsigned int		slot = PCI_SLOT(pdev->devfn);
+#if IS_ENABLED(CONFIG_X86)
+	struct pci_driver	*drv;
+#endif

 	/*
	 * Iterate through other PCI functions in the same slot.
@@ -60,6 +63,18 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
 				PCI_SLOT(companion->devfn) != slot)
 			continue;

+#if IS_ENABLED(CONFIG_X86)
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
+		    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
+			drv = companion->driver;
+			if (drv &&
+			    strncmp(drv->name, "uhci_hcd", sizeof("uhci_hcd") - 1) &&
+			    strncmp(drv->name, "ohci-pci", sizeof("ohci-pci") - 1) &&
+			    strncmp(drv->name, "ehci-pci", sizeof("ehci-pci") - 1))
+				continue;
+		}
+#endif
+
 		/*
 		 * Companion device should be either UHCI,OHCI or EHCI host
 		 * controller, otherwise skip.
--
Gitee

From d1899f790399f4fa43685ba795b17f55973fa475 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Tue, 2 Jan 2024 15:09:56 +0800
Subject: [PATCH 260/953] anolis: rtc: Fix 500ms set-time delay on some Zhaoxin SOCs

ANBZ: #7809

When the RTC divider is changed from reset to an operating time base,
the first update cycle should begin 500ms later. But on some Zhaoxin
SOCs, this first update cycle begins one second later, so setting the
RTC time on these SOCs incurs a 500ms delay.

Skip setting up the RTC divider on these SOCs in mc146818_set_time()
to fix it.
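For context, the sequence being skipped is the divider reset that
mc146818_set_time() normally performs around the time-register update;
a simplified sketch of that existing flow (all names are from the code
touched below, nothing new is introduced):

	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	/* Hold the divider in reset while the time registers are written. */
	CMOS_WRITE(save_freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);
	/* ... write seconds/minutes/hours ... */
	/* Restoring the divider normally starts the first update 500ms later. */
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);

On the affected SOCs that first update comes one second after the
restore instead, which is where the extra 500ms comes from; leaving
the divider untouched avoids it.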
Signed-off-by: leoliu-oc
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2696
---
 drivers/rtc/rtc-mc146818-lib.c | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 651bf3c279c7..6b5947ec6e55 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -11,6 +11,21 @@
 #define UIP_RECHECK_DELAY	100	/* usec */
 #define UIP_RECHECK_DELAY_MS	(USEC_PER_MSEC / UIP_RECHECK_DELAY)
 #define UIP_RECHECK_LOOPS_MS(x)	(x / UIP_RECHECK_DELAY_MS)
+#ifdef CONFIG_X86
+static inline bool follow_mc146818_divider_reset(void)
+{
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) &&
+	    (boot_cpu_data.x86 <= 7 && boot_cpu_data.x86_model <= 59))
+		return false;
+	return true;
+}
+#else
+static inline bool follow_mc146818_divider_reset(void)
+{
+	return true;
+}
+#endif

 /*
  * Execute a function while the UIP (Update-in-progress) bit of the RTC is
@@ -280,12 +295,13 @@ int mc146818_set_time(struct rtc_time *time)
 	spin_lock_irqsave(&rtc_lock, flags);
 	save_control = CMOS_READ(RTC_CONTROL);
 	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-	if (apply_amd_register_a_behavior())
-		CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
-	else
-		CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-
+	if (follow_mc146818_divider_reset()) {
+		save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+		if (apply_amd_register_a_behavior())
+			CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+		else
+			CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+	}
 #ifdef CONFIG_MACH_DECSTATION
 	CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
 #endif
@@ -302,7 +318,8 @@ int mc146818_set_time(struct rtc_time *time)
 #endif

 	CMOS_WRITE(save_control, RTC_CONTROL);
-	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+	if (follow_mc146818_divider_reset())
+		CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);

 	spin_unlock_irqrestore(&rtc_lock, flags);
--
Gitee

From 4c8e6e864cb8580ad0dd375898d2880b9236d0ff Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Wed, 27 Dec 2023 21:05:05 +0800
Subject: [PATCH 261/953] anolis: Turn off Zhaoxin ahci controller runtime pm from sysfs

ANBZ: #7809

There exist some problems with enabling ahci controller runtime pm on
various Zhaoxin platforms.

On some Zhaoxin CPUs, the link goes into listen mode and the PHY into
P2, which causes the device to continuously send COMINIT. When the
controller is in D3, the COMINIT keeps waking the controller up, so
the controller can never settle in D3.

On other Zhaoxin CPUs, after entering and exiting runtime pm a certain
number of times, ahci controller resets accumulate until ahci and the
PXPTRF P2CW each believe the other side can no longer receive requests
and the P2CW path is blocked, so the device is no longer recognized.

There are also Zhaoxin CPUs on which the test is normal, but the
server platform is not sensitive to power consumption, and this
feature has very little impact on power consumption anyway.

In summary, it is recommended to disable runtime pm for the Zhaoxin
ahci controller.
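Concretely, the change below rejects userspace attempts to re-enable
runtime pm through the power/control sysfs attribute for the affected
controller (PCI device 0x9083, revision <= 0x20). A sketch of the gate
added to control_store(), using only the names from the diff below:

	if (pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN &&
	    pdev->device == 0x9083 && pdev->revision <= 0x20) {
		device_unlock(dev);
		return -EPERM;	/* runtime pm stays off for this AHCI controller */
	}

so a write such as 'echo auto > /sys/bus/pci/devices/<BDF>/power/control'
(path shown for illustration only) fails with EPERM on these parts.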
Signed-off-by: leoliu-oc
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2698
---
 drivers/base/power/sysfs.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a1474fb67db9..525574c312d3 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include "power.h"

 /*
@@ -108,7 +109,19 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
 static ssize_t control_store(struct device * dev, struct device_attribute *attr,
 			     const char * buf, size_t n)
 {
+	struct pci_dev *pdev = (!dev || !dev_is_pci(dev)) ? NULL : to_pci_dev(dev);
+
 	device_lock(dev);
+
+	/* The Zhaoxin sata controller may hit errors when resuming from runtime pm, so disable it */
+	if (pdev &&
+	    pdev->vendor == PCI_VENDOR_ID_ZHAOXIN &&
+	    pdev->device == 0x9083 &&
+	    pdev->revision <= 0x20) {
+		device_unlock(dev);
+		return -EPERM;
+	}
+
 	if (sysfs_streq(buf, ctrl_auto))
 		pm_runtime_allow(dev);
 	else if (sysfs_streq(buf, ctrl_on))
--
Gitee

From a72f18f52449b91d42c447dcf3df605612f36aae Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Mon, 5 Feb 2024 16:09:33 +0800
Subject: [PATCH 262/953] anolis: Add support for Zhaoxin GPIO pinctrl

ANBZ: #7809

Implement GPIO interrupt and GPIO management functions and provide
standard pinctrl and GPIO interfaces.

Signed-off-by: leoliu-oc
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2709
---
 drivers/pinctrl/Kconfig                   |   1 +
 drivers/pinctrl/Makefile                  |   1 +
 drivers/pinctrl/zhaoxin/Kconfig           |  28 +
 drivers/pinctrl/zhaoxin/Makefile          |   4 +
 drivers/pinctrl/zhaoxin/pinctrl-kx7000.c  | 354 ++++++++++
 drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c | 758 ++++++++++++++++++++++
 drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h | 136 ++++
 7 files changed, 1282 insertions(+)
 create mode 100644 drivers/pinctrl/zhaoxin/Kconfig
 create mode 100644 drivers/pinctrl/zhaoxin/Makefile
 create mode 100644 drivers/pinctrl/zhaoxin/pinctrl-kx7000.c
 create mode 100644 drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c
 create mode 100644 drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h

diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 7dfb7190580e..79753411b778 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -512,6 +512,7 @@ source "drivers/pinctrl/berlin/Kconfig"
 source "drivers/pinctrl/cirrus/Kconfig"
 source "drivers/pinctrl/freescale/Kconfig"
 source "drivers/pinctrl/intel/Kconfig"
+source "drivers/pinctrl/zhaoxin/Kconfig"
 source "drivers/pinctrl/mediatek/Kconfig"
 source "drivers/pinctrl/meson/Kconfig"
 source "drivers/pinctrl/mvebu/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index dd6cda270294..4275eca92488 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_PINCTRL_BERLIN)	+= berlin/
 obj-y				+= cirrus/
 obj-y				+= freescale/
 obj-$(CONFIG_X86)		+= intel/
+obj-$(CONFIG_X86)		+= zhaoxin/
 obj-y				+= mediatek/
 obj-$(CONFIG_PINCTRL_MESON)	+= meson/
 obj-y				+= mvebu/
diff --git a/drivers/pinctrl/zhaoxin/Kconfig b/drivers/pinctrl/zhaoxin/Kconfig
new file mode 100644
index 000000000000..65f95ca80d5c
--- /dev/null
+++ b/drivers/pinctrl/zhaoxin/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+# Zhaoxin pin control drivers
+
+if (X86 || COMPILE_TEST)
+
+config PINCTRL_ZHAOXIN
+	tristate
+	select PINMUX
+	select PINCONF
+	select GENERIC_PINCONF
+	select GPIOLIB
+	select GPIOLIB_IRQCHIP
+
+config PINCTRL_KX7000
+	tristate "Zhaoxin KX7000 pinctrl and
GPIO driver" + depends on ACPI && X86 + default m + select PINCTRL_ZHAOXIN + help + This pinctrl driver provides an interface that allows configuring + of Zhaoxin KX7000 chipset pins and using them as GPIOs. + + To compile this driver as a module, choose M here: the + module will be called pinctrl-kx7000. + + If unsure, say Y. + +endif diff --git a/drivers/pinctrl/zhaoxin/Makefile b/drivers/pinctrl/zhaoxin/Makefile new file mode 100644 index 000000000000..a3acfa66f196 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Makefile @@ -0,0 +1,4 @@ +# zhaoxin pin control drivers + +obj-$(CONFIG_PINCTRL_ZHAOXIN) += pinctrl-zhaoxin.o +obj-$(CONFIG_PINCTRL_KX7000) += pinctrl-kx7000.o diff --git a/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c new file mode 100644 index 000000000000..f249dd369e7c --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin KX7000 pinctrl/GPIO driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. + * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include + +#include + +#include "pinctrl-zhaoxin.h" + +#define ZX_CAL_ARRAY(a, b) \ +{ \ + .pmio_offset = (a), \ + .size = (b), \ +} + +#define PMIO_RX90 100 +#define PMIO_RX8C 200 + +#define ZX_CAL_INDEX_ARRAY(a, b, c) \ +{ \ + .reg_port_base = (PMIO_RX90), \ + .reg_data_base = (PMIO_RX8C), \ + .index = (a), \ + .cal_array = (b), \ + .size = (c), \ +} + +/* kx7000 pin define */ +static const struct pinctrl_pin_desc kx7000_pins[] = { + + PINCTRL_PIN(0, "IOD_CPUTCK"), + PINCTRL_PIN(1, "IOD_CPUTMS"), + PINCTRL_PIN(2, "IOD_CPUTRST"), + PINCTRL_PIN(3, "IOD_CPUTDO"), + PINCTRL_PIN(4, "IOD_CPUTDI"), + PINCTRL_PIN(5, "IOD_ZLSCLK0"), + PINCTRL_PIN(6, "IOD_ZLDATA0"), + PINCTRL_PIN(7, "IOD_ZLSCLK1"), + PINCTRL_PIN(8, "IOD_ZLDATA1"), + PINCTRL_PIN(9, "IOD_CLK27M"), + PINCTRL_PIN(10, "IOD_CPURST"), + PINCTRL_PIN(11, "IOD_PWORK"), + PINCTRL_PIN(12, "IOD_RSMRST"), + PINCTRL_PIN(13, "IOD_THRMTRIP"), + //GPIO range 0 + PINCTRL_PIN(14, "USBHOC0"), + PINCTRL_PIN(15, "USBHOC1"), + PINCTRL_PIN(16, "USBHOC2"), + PINCTRL_PIN(17, "USBHOC3"), + PINCTRL_PIN(18, "USBHOC4"), + PINCTRL_PIN(19, "USBHOC5"), + PINCTRL_PIN(20, "USBHOC6"), + PINCTRL_PIN(21, "USBHOC7"), + //gpio range 1 + PINCTRL_PIN(22, "USB4SBTX0"), + PINCTRL_PIN(23, "USB4SBRX0"), + PINCTRL_PIN(24, "USB4SBTX1"), + PINCTRL_PIN(25, "USB4SBRX1"), + //gpio range 2 + PINCTRL_PIN(26, "I2C1DT"), + PINCTRL_PIN(27, "I2C1CK"), + PINCTRL_PIN(28, "I2C1INT"), + //gpio range 3 + PINCTRL_PIN(29, "I2C2DT"), + PINCTRL_PIN(30, "I2C2CK"), + //gpio range 4 + PINCTRL_PIN(31, "I2C2INT"), + //gpio range 5 + PINCTRL_PIN(32, "SMBDT1"), + PINCTRL_PIN(33, "SMBCK1"), + PINCTRL_PIN(34, "SMBDT2"), + PINCTRL_PIN(35, "SMBCK2"), + PINCTRL_PIN(36, "SMBALRT"), + //gpio range 6 + PINCTRL_PIN(37, "SME_I2CDT"), + PINCTRL_PIN(38, "SME_I2CCK"), + //gpio range 7 + PINCTRL_PIN(39, "PWM"), + PINCTRL_PIN(40, "TACH"), + //gpio range 8 + PINCTRL_PIN(41, "GPIO0"), + PINCTRL_PIN(42, "GPIO1"), + PINCTRL_PIN(43, "GPIO2"), + PINCTRL_PIN(44, "GPIO3"), + PINCTRL_PIN(45, "GPIO4"), + PINCTRL_PIN(46, "GPIO5"), + PINCTRL_PIN(47, "GPIO6"), + PINCTRL_PIN(48, "GPIO7"), + PINCTRL_PIN(49, "GPIO8"), + PINCTRL_PIN(50, "GPIO9"), + PINCTRL_PIN(51, "LPCCLK"), + PINCTRL_PIN(52, "LPCDRQ1"), + //gpio range 9 + PINCTRL_PIN(53, "LPCDRQ0"), + PINCTRL_PIN(54, "LPCFRAME"), + PINCTRL_PIN(55, "LPCAD3"), + PINCTRL_PIN(56, "LPCAD2"), + PINCTRL_PIN(57, "LPCAD1"), + PINCTRL_PIN(58, "LPCAD0"), + //gpio range 10 + 
PINCTRL_PIN(59, "SERIRQ"), + PINCTRL_PIN(60, "AZRST"), + PINCTRL_PIN(61, "AZBITCLK"), + PINCTRL_PIN(62, "AZSDIN0"), + PINCTRL_PIN(63, "AZSDIN1"), + PINCTRL_PIN(64, "AZSDOUT"), + PINCTRL_PIN(65, "AZSYNC"), + //gpio range 11 + PINCTRL_PIN(66, "I2S1_SCLK"), + PINCTRL_PIN(67, "I2S1_TXD"), + PINCTRL_PIN(68, "I2S1_WS"), + PINCTRL_PIN(69, "I2S1_MCLK"), + //gpio range 12 + PINCTRL_PIN(70, "I2S1_RXD"), + //gpio range 13 + PINCTRL_PIN(71, "I2S1_INT"), + PINCTRL_PIN(72, "MSPIDI"), + PINCTRL_PIN(73, "MSPIDO"), + PINCTRL_PIN(74, "MSPIIO2"), + PINCTRL_PIN(75, "MSPIIO3"), + PINCTRL_PIN(76, "MSPICLK"), + PINCTRL_PIN(77, "MSPISS0"), + //gpio range 14 + PINCTRL_PIN(78, "MSPISS1"), + PINCTRL_PIN(79, "MSPISS2"), + //gpio range 15 + PINCTRL_PIN(80, "SPIDEVINT"), + PINCTRL_PIN(81, "BIOSSEL"), + //gpio range 16 + PINCTRL_PIN(82, "THRM"), + PINCTRL_PIN(83, "PEXWAKE"), + PINCTRL_PIN(84, "PWRBTN"), + //gpio range 17 + PINCTRL_PIN(85, "SPKR"), + PINCTRL_PIN(86, "PME"), + //gpio range 18 + PINCTRL_PIN(87, "BATLOW"), + PINCTRL_PIN(88, "EXTSMI"), + PINCTRL_PIN(89, "SUSA"), + PINCTRL_PIN(90, "SUSB"), + PINCTRL_PIN(91, "SUSC"), + PINCTRL_PIN(92, "GPWAKE"), + PINCTRL_PIN(93, "RING"), + PINCTRL_PIN(94, "LID"), + PINCTRL_PIN(95, "SLPS0"), + PINCTRL_PIN(96, "PCIRST"), + PINCTRL_PIN(97, "SVID_VREN"), + //gpio range 19 + PINCTRL_PIN(98, "INTRUDER"), + //gpio range 20 + PINCTRL_PIN(99, "GFX_I2CCLK0"), + PINCTRL_PIN(100, "GFX_I2CDAT0"), + PINCTRL_PIN(101, "GFX_I2CCLK1"), + PINCTRL_PIN(102, "GFX_I2CDAT1"), + PINCTRL_PIN(103, "GFX_I2CCLK2"), + PINCTRL_PIN(104, "GFX_I2CDAT2"), + PINCTRL_PIN(105, "GFX_I2CCLK3"), + PINCTRL_PIN(106, "GFX_I2CDAT3"), + PINCTRL_PIN(107, "GFX_GPIO0"), + PINCTRL_PIN(108, "GFX_GPIO1"), + PINCTRL_PIN(109, "GFX_GPIO2"), + PINCTRL_PIN(110, "GFX_GPIO3"), + PINCTRL_PIN(111, "CRTHSYNC"), + PINCTRL_PIN(112, "CRTVSYNC"), +}; + +#define NOT_DEFINE -30000 + +static int calibrate_int[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static int calibrate_sattus[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static const struct reg_cal_array kx7000_int_cal[] = { + ZX_CAL_ARRAY(0x58, 16), + ZX_CAL_ARRAY(0x5A, 2), + ZX_CAL_ARRAY(0xDA, 16), + ZX_CAL_ARRAY(0xDE, 16), +}; + +static const struct reg_calibrate int_cal[] = { + { + .reg = kx7000_int_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_int_cal), + .cal_array = calibrate_int, + .size = ARRAY_SIZE(calibrate_int), + } +}; + +static const struct reg_cal_array kx7000_status_cal[] = { + ZX_CAL_ARRAY((0x8), 16), + ZX_CAL_ARRAY((0xE), 2), + ZX_CAL_ARRAY((0xA), 16), + ZX_CAL_ARRAY((0xC), 16), +}; + +static const struct reg_calibrate status_cal[] = { + { + .reg = kx7000_status_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_status_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct reg_cal_array kx7000_mod_sel_cal[] = { + ZX_CAL_ARRAY((0x0), 16), + ZX_CAL_ARRAY((0x6), 2), + ZX_CAL_ARRAY((0x2), 16), + ZX_CAL_ARRAY((0x4), 16), +}; + +static const struct reg_calibrate mod_sel_cal[] = { + { + .reg = kx7000_mod_sel_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_mod_sel_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct index_cal_array kx7000_gpio_in_cal[] = { + 
ZX_CAL_INDEX_ARRAY(0x98, NULL, 71), +}; + +static const struct index_cal_array kx7000_gpio_out_cal[] = { + ZX_CAL_INDEX_ARRAY(0x90, NULL, 71), +}; + +static int calibrate_trigger[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 18, 19, + 20, 21, 22, 23, + 24, 25, 26, 27, + 28, 29, 30, 31, + 32, 33, 34, 35, + 36, 50, 51, 52, + 53, 54, 55, 56, + 57, 58, 59, 60, + 61, 62, 63, 64, + 65, 66, 67, 68, + 69, 70 +}; + +static const struct index_cal_array kx7000_trigger_cal[] = { + ZX_CAL_INDEX_ARRAY(0xA0, calibrate_trigger, 50), +}; + +static const struct zhaoxin_pin_topology kx7000_pin_topologys[] = { + { + .int_cal = int_cal, + .status_cal = status_cal, + .mod_sel_cal = mod_sel_cal, + .gpio_in_cal = kx7000_gpio_in_cal, + .gpio_out_cal = kx7000_gpio_out_cal, + .trigger_cal = kx7000_trigger_cal, + } +}; + +#define KX7000_GPP(s, e, g) \ +{ \ + .zhaoxin_range_pin_base = (s), \ + .zhaoxin_range_pin_size = ((e) - (s) + 1), \ + .zhaoxin_range_gpio_base = (g), \ +} + +static const struct zhaoxin_pin_map2_gpio kx7000_pinmap_gpps[] = { + KX7000_GPP(0, 13, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(14, 19, 10), + KX7000_GPP(20, 21, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(22, 25, 65), + KX7000_GPP(26, 28, 43), + KX7000_GPP(29, 30, 41), + KX7000_GPP(31, 31, 49), + KX7000_GPP(32, 36, 16), + KX7000_GPP(37, 38, 69), + KX7000_GPP(39, 40, 67), + KX7000_GPP(41, 50, 0), + KX7000_GPP(51, 52, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(53, 53, 39), + KX7000_GPP(54, 58, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(59, 59, 40), + KX7000_GPP(60, 65, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(66, 69, 35), + KX7000_GPP(70, 70, 46), + KX7000_GPP(71, 71, 64), + KX7000_GPP(72, 77, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(78, 78, 50), + KX7000_GPP(79, 79, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(80, 80, 51), + KX7000_GPP(81, 81, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(82, 82, 52), + KX7000_GPP(83, 84, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(85, 85, 53), + KX7000_GPP(86, 86, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(87, 95, 54), + KX7000_GPP(96, 97, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(98, 98, 63), + KX7000_GPP(99, 112, 21), +}; + +static const struct zhaoxin_pinctrl_soc_data kx7000_soc_data = { + .pins = kx7000_pins, + .npins = ARRAY_SIZE(kx7000_pins), + .pin_topologys = kx7000_pin_topologys, + .zhaoxin_pin_maps = kx7000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kx7000_pinmap_gpps), +}; + +static const struct acpi_device_id kx7000_pinctrl_acpi_match[] = { + { "KX8344B", (kernel_ulong_t)&kx7000_soc_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, kx7000_pinctrl_acpi_match); + +static const struct dev_pm_ops kx7000_pinctrl_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(zhaoxin_pinctrl_suspend_noirq, zhaoxin_pinctrl_resume_noirq) +}; + +static struct platform_driver kx7000_pinctrl_driver = { + .probe = zhaoxin_pinctrl_probe_by_hid, + .driver = { + .name = "kx7000-pinctrl", + .acpi_match_table = kx7000_pinctrl_acpi_match, + .pm = &kx7000_pinctrl_pm_ops, + }, +}; + +module_platform_driver(kx7000_pinctrl_driver); + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c new file mode 100644 index 000000000000..1e434869d3dd --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c @@ -0,0 +1,758 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * zhaoxin pinctrl common code + * + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
+ * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "../core.h" +#include "pinctrl-zhaoxin.h" + +static int pin_to_hwgpio(struct pinctrl_gpio_range *range, unsigned int pin) +{ + int offset = 0; + + if (range->pins) { + for (offset = 0; offset < range->npins; offset++) + if (pin == range->pins[offset]) + break; + return range->base+offset-range->gc->base; + } else + return pin-range->pin_base+range->base-range->gc->base; +} + +static u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + return inw(pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + outw(value, pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static int zhaoxin_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->ngroups; +} + +static const char *zhaoxin_get_group_name(struct pinctrl_dev *pctldev, unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->groups[group].name; +} + +static int zhaoxin_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, + const unsigned int **pins, unsigned int *npins) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *pins = pctrl->soc->groups[group].pins; + *npins = pctrl->soc->groups[group].npins; + + return 0; +} + +static void zhaoxin_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int pin) +{ + +} + +static const struct pinctrl_ops zhaoxin_pinctrl_ops = { + .get_groups_count = zhaoxin_get_groups_count, + .get_group_name = zhaoxin_get_group_name, + .get_group_pins = zhaoxin_get_group_pins, + .pin_dbg_show = zhaoxin_pin_dbg_show, +}; + +static int zhaoxin_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->nfunctions; +} + +static const char *zhaoxin_get_function_name(struct pinctrl_dev *pctldev, unsigned int function) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->functions[function].name; +} + +static int zhaoxin_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function, + const char * const **groups, unsigned int *const ngroups) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *groups = pctrl->soc->functions[function].groups; + *ngroups = pctrl->soc->functions[function].ngroups; + + return 0; +} + +static int zhaoxin_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, + unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + dev_dbg(pctrl->dev, "%s,group=%d,func=%d\n", __func__, group, function); + return 0; +} + +#define ZHAOXIN_PULL_UP_20K 0x80 +#define ZHAOXIN_PULL_UP_10K 0x40 +#define ZHAOXIN_PULL_UP_47K 0x20 +#define ZHAOXIN_PULL_DOWN 0x10 + +#define ZHAOXIN_PULL_UP 0xe0 + +static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, unsigned int pin, + bool isup) +{ + u16 tmp = 0; + u16 value; + u16 value_back = 0; + + if (isup) + tmp = ZHAOXIN_PULL_UP_10K|1; + else + tmp = ZHAOXIN_PULL_DOWN|1; + value = zx_pad_read16(pctrl, pin); + + /* for gpio */ + if (pin <= 0x32 && pin >= 0x29) { + if (isup) { + value &= 
(~(ZHAOXIN_PULL_DOWN));
+			value |= tmp;
+		} else {
+			value &= (~(ZHAOXIN_PULL_UP));
+			value |= tmp;
+		}
+		value &= ~(0x1);
+		zx_pad_write16(pctrl, pin, value);
+		value_back = zx_pad_read16(pctrl, pin);
+	} else {/* for pgpio */
+		if (isup) {
+			value &= (~(ZHAOXIN_PULL_DOWN));
+			value |= tmp;
+		} else {
+			value &= (~(ZHAOXIN_PULL_UP));
+			value |= tmp;
+		}
+		value |= 0x1;
+		zx_pad_write16(pctrl, pin, value);
+		value_back = zx_pad_read16(pctrl, pin);
+	}
+}
+
+static int zhaoxin_gpio_request_enable(struct pinctrl_dev *pctldev,
+		struct pinctrl_gpio_range *range, unsigned int pin)
+{
+	struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	int hwgpio = pin_to_hwgpio(range, pin);
+
+	dev_dbg(pctrl->dev, "%s, hwgpio=%d, pin=%d\n", __func__, hwgpio, pin);
+	zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, true);
+	return 0;
+}
+
+static const struct pinmux_ops zhaoxin_pinmux_ops = {
+	.get_functions_count = zhaoxin_get_functions_count,
+	.get_function_name = zhaoxin_get_function_name,
+	.get_function_groups = zhaoxin_get_function_groups,
+	.set_mux = zhaoxin_pinmux_set_mux,
+	.gpio_request_enable = zhaoxin_gpio_request_enable,
+};
+
+static int zhaoxin_config_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config)
+{
+	return 0;
+}
+
+static int zhaoxin_config_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs,
+		unsigned int nconfigs)
+{
+	return 0;
+}
+
+static const struct pinconf_ops zhaoxin_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_get = zhaoxin_config_get,
+	.pin_config_set = zhaoxin_config_set,
+};
+
+static const struct pinctrl_desc zhaoxin_pinctrl_desc = {
+	.pctlops = &zhaoxin_pinctrl_ops,
+	.pmxops = &zhaoxin_pinmux_ops,
+	.confops = &zhaoxin_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int zhaoxin_gpio_to_pin(struct zhaoxin_pinctrl *pctrl, unsigned int offset,
+		const struct zhaoxin_pin_topology **community,
+		const struct zhaoxin_pin_map2_gpio **padgrp)
+{
+	int i;
+
+	for (i = 0; i < pctrl->pin_map_size; i++) {
+		const struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i];
+
+		if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP)
+			continue;
+		if (offset >= map->zhaoxin_range_gpio_base &&
+		    offset < map->zhaoxin_range_gpio_base + map->zhaoxin_range_pin_size) {
+			int pin;
+
+			pin = map->zhaoxin_range_pin_base + offset - map->zhaoxin_range_gpio_base;
+			if (padgrp)
+				*padgrp = map;
+			return pin;
+		}
+	}
+	return -EINVAL;
+}
+
+static __maybe_unused int zhaoxin_pin_to_gpio(struct zhaoxin_pinctrl *pctrl, int pin)
+{
+	const struct zhaoxin_pin_map2_gpio *pin_maps;
+
+	pin_maps = pctrl->pin_maps;
+	if (!pin_maps)
+		return -EINVAL;
+
+	return pin - pin_maps->zhaoxin_range_pin_base + pin_maps->zhaoxin_range_gpio_base;
+}
+
+static int zhaoxin_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip);
+	const struct index_cal_array *gpio_in_cal;
+	int gap = offset/16;
+	int bit = offset%16;
+	int pin;
+	int value;
+
+	gpio_in_cal = pctrl->pin_topologys->gpio_in_cal;
+	pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL);
+	value = zx_pad_read16(pctrl, gpio_in_cal->index+gap);
+	value &= (1<<bit);
+
+	return !!value;
+}
+
+static void zhaoxin_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip);
+	const struct index_cal_array *gpio_out_cal;
+	unsigned long flags;
+	int gap = offset/16;
+	int bit = offset%16;
+	int pin;
+	u16 org;
+
+	gpio_out_cal = pctrl->pin_topologys->gpio_out_cal;
+	pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL);
+
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+	org = zx_pad_read16(pctrl, gpio_out_cal->index+gap);
+	if (value)
+		org |= (1<<bit);
+	else
+		org &= ~(1<<bit);
+	zx_pad_write16(pctrl, gpio_out_cal->index+gap, org);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
+static int zhaoxin_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+	return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int zhaoxin_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+	return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int zhaoxin_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+	return gpiochip_generic_request(gc, offset);
+}
+
+static void zhaoxin_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+	gpiochip_generic_free(gc, offset);
+}
+
+static int zhaoxin_gpio_config(struct gpio_chip *gc, unsigned int offset, unsigned long config)
+{
+	return gpiochip_generic_config(gc, offset, config);
+}
+
+static const struct gpio_chip zhaoxin_gpio_chip = {
+	.owner = THIS_MODULE,
+	.request = zhaoxin_gpio_request,
+	.free = zhaoxin_gpio_free,
+	.direction_input = zhaoxin_gpio_direction_input,
+	.direction_output = zhaoxin_gpio_direction_output,
+	.get = zhaoxin_gpio_get,
+	.set = zhaoxin_gpio_set,
+	.set_config = zhaoxin_gpio_config,
+};
+
+static void zhaoxin_gpio_irq_ack(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct reg_calibrate *status_cal;
+	const struct reg_cal_array *reg_off;
+	int gpio = irqd_to_hwirq(d);
+	int i, j;
+	int offset = 0;
+	int base_offset = 0;
+	int bit_off = 0;
+	u16 value;
+	u16 value_read;
+
+	status_cal = pctrl->pin_topologys->status_cal;
+	if (gpio >= 0) {
+		for (i = 0; i < status_cal->size; i++)
+			if (gpio == status_cal->cal_array[i])
+				break;
+		for (j = 0; j < status_cal->reg_cal_size; j++) {
+			if (offset > i)
+				break;
+			offset += status_cal->reg[j].size;
+		}
+		reg_off = &status_cal->reg[j-1];
+		bit_off = i-(offset-reg_off->size);
+		base_offset = reg_off->pmio_offset;
+		value = readw(pctrl->pm_pmio_base+reg_off->pmio_offset);
+		value_read = value;
+		value |= (1<<bit_off);
+		writew(value, pctrl->pm_pmio_base+reg_off->pmio_offset);
+	}
+}
+
+static void zhaoxin_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct reg_calibrate *int_cal;
+	const struct reg_calibrate *mod_sel_cal;
+	int gpio = irqd_to_hwirq(d);
+	int i, j;
+	int offset = 0;
+	int base_offset = 0;
+	const struct reg_cal_array *reg_off, *mod;
+	int bit_off = 0;
+	u16 value;
+	u16 value1;
+
+	int_cal = pctrl->pin_topologys->int_cal;
+	mod_sel_cal = pctrl->pin_topologys->mod_sel_cal;
+
+	if (gpio >= 0) {
+		for (i = 0; i < int_cal->size; i++)
+			if (gpio == int_cal->cal_array[i])
+				break;
+		for (j = 0; j < int_cal->reg_cal_size; j++) {
+			if (offset > i)
+				break;
+			offset += int_cal->reg[j].size;
+		}
+		reg_off = &(int_cal->reg[j-1]);
+		mod = &(mod_sel_cal->reg[j-1]);
+		bit_off = i-(offset-reg_off->size);
+		base_offset = reg_off->pmio_offset;
+		value = inw(pctrl->pmio_base+reg_off->pmio_offset);
+		if (mask)
+			value &= (~(1<<bit_off));
+		else
+			value |= (1<<bit_off);
+		outw(value, pctrl->pmio_base+reg_off->pmio_offset);
+		if (mask) {
+			value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset);
+			value1 |= (1<<bit_off);
+			writew(value1, pctrl->pm_pmio_base+mod->pmio_offset);
+		} else {
+			value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset);
+			value1 |= (1<<bit_off);
+			writew(value1, pctrl->pm_pmio_base+mod->pmio_offset);
+		}
+	}
+}
+
+static void zhaoxin_gpio_irq_mask(struct irq_data *d)
+{
+	zhaoxin_gpio_irq_mask_unmask(d, true);
+}
+
+static void zhaoxin_gpio_irq_unmask(struct irq_data *d)
+{
+	zhaoxin_gpio_irq_mask_unmask(d, false);
+}
+
+/*
+ * parent-domain IRQ handler
+ */
+static irqreturn_t zhaoxin_gpio_irq(int irq, void *data)
+{
+	struct zhaoxin_pinctrl *pctrl = data;
+	struct gpio_chip *gc = &pctrl->chip;
+	const struct reg_calibrate *init;
+	const struct reg_calibrate *stat_cal;
+	unsigned int i, bit_offset;
+	u16 status, enable;
+	unsigned long pending;
+	int index = 0;
+	int ret = 0;
+	int subirq;
+	unsigned int hwirq;
+
+	init = pctrl->pin_topologys->int_cal;
+	stat_cal = pctrl->pin_topologys->status_cal;
+	for (i = 0; i < init->reg_cal_size; i++) {
+		pending = 0;
+		status = readw(pctrl->pm_pmio_base + stat_cal->reg[i].pmio_offset);
+		enable = inw(pctrl->pmio_base + init->reg[i].pmio_offset);
+		enable &= status;
+		pending = enable;
+		for_each_set_bit(bit_offset, &pending, init->reg[i].size) {
+			hwirq = init->cal_array[index + bit_offset];
+			subirq = irq_find_mapping(gc->irq.domain, hwirq);
+			generic_handle_irq(subirq);
+		}
+
+		ret += pending ? 1 : 0;
+		index += init->reg[i].size;
+	}
+
+	return IRQ_RETVAL(ret);
+}
+
+static int zhaoxin_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	unsigned int gpio = irqd_to_hwirq(d);
+	const struct index_cal_array *trigger_cal;
+	unsigned int pin;
+	unsigned long flags;
+	u8 index;
+	int position, point;
+	u16 value;
+	bool isup = true;
+
+	trigger_cal = pctrl->pin_topologys->trigger_cal;
+	pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		isup = true;
+	else if (type & IRQ_TYPE_EDGE_RISING)
+		isup = true;
+	else if (type & IRQ_TYPE_LEVEL_LOW)
+		isup = true;
+	else if (type & IRQ_TYPE_LEVEL_HIGH)
+		isup = false;
+
+	zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, isup);
+
+	for (position = 0; position < trigger_cal->size; position++)
+		if (trigger_cal->cal_array[position] == gpio)
+			break;
+
+	index = trigger_cal->index + ALIGN(position+1, 4)/4-1;
+	point = position % 4;
+
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+	value = zx_pad_read16(pctrl, index);
+
+	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+		value |= TRIGGER_BOTH_EDGE << (point*4);
+	else if (type & IRQ_TYPE_EDGE_FALLING)
+		value |= TRIGGER_FALL_EDGE << (point*4);
+	else if (type & IRQ_TYPE_EDGE_RISING)
+		value |= TRIGGER_RISE_EDGE << (point*4);
+	else if (type & IRQ_TYPE_LEVEL_LOW)
+		value |= TRIGGER_LOW_LEVEL << (point*4);
+	else if (type & IRQ_TYPE_LEVEL_HIGH)
+		value |= TRIGGER_HIGH_LEVEL << (point*4);
+	else
+		dev_dbg(pctrl->dev, "%s wrong type\n", __func__);
+
+	zx_pad_write16(pctrl, index, value);
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else if (type & IRQ_TYPE_LEVEL_MASK)
+		irq_set_handler_locked(d, handle_level_irq);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+	return 0;
+}
+
+static int zhaoxin_gpio_irq_wake(struct irq_data *d, unsigned int on)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	unsigned int pin;
+
+	pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+	if (pin) {
+		if (on)
+			enable_irq_wake(pctrl->irq);
+		else
+			disable_irq_wake(pctrl->irq);
+	}
+
+	return 0;
+}
+
+static int zhaoxin_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	int ret, i;
+
+	for (i = 0; i < pctrl->pin_map_size; i++) {
+		struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i];
+
+		if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP)
+			continue;
+		ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
+				map->zhaoxin_range_gpio_base, map->zhaoxin_range_pin_base,
+				map->zhaoxin_range_pin_size);
+		if (ret) {
+ dev_err(pctrl->dev, "failed to add GPIO pin range\n"); + return ret; + } + } + + return 0; +} + +static unsigned int zhaoxin_gpio_ngpio(const struct zhaoxin_pinctrl *pctrl) +{ + const struct zhaoxin_pin_map2_gpio *pin_maps; + unsigned int ngpio = 0; + int i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + pin_maps = &pctrl->pin_maps[i]; + if (pin_maps->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + if (pin_maps->zhaoxin_range_gpio_base + pin_maps->zhaoxin_range_pin_size > ngpio) + ngpio = pin_maps->zhaoxin_range_gpio_base + + pin_maps->zhaoxin_range_pin_size; + } + + return ngpio; +} + +static int zhaoxin_gpio_probe(struct zhaoxin_pinctrl *pctrl, int irq) +{ + int ret; + struct gpio_irq_chip *girq; + + pctrl->chip = zhaoxin_gpio_chip; + + pctrl->chip.ngpio = zhaoxin_gpio_ngpio(pctrl); + pctrl->chip.label = dev_name(pctrl->dev); + pctrl->chip.parent = pctrl->dev; + pctrl->chip.base = -1; + pctrl->chip.add_pin_ranges = zhaoxin_gpio_add_pin_ranges; + + pctrl->irq = irq; + + pctrl->irqchip.name = dev_name(pctrl->dev); + pctrl->irqchip.irq_ack = zhaoxin_gpio_irq_ack; + pctrl->irqchip.irq_mask = zhaoxin_gpio_irq_mask; + pctrl->irqchip.irq_unmask = zhaoxin_gpio_irq_unmask; + pctrl->irqchip.irq_set_type = zhaoxin_gpio_irq_type; + pctrl->irqchip.irq_set_wake = zhaoxin_gpio_irq_wake; + pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND; + + ret = devm_request_irq(pctrl->dev, irq, zhaoxin_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD, + dev_name(pctrl->dev), pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to request interrupt\n"); + return ret; + } + girq = &pctrl->chip.irq; + girq->chip = &pctrl->irqchip; + /* This will let us handle the IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to register gpiochip\n"); + return ret; + } + + return 0; +} + +static int zhaoxin_pinctrl_pm_init(struct zhaoxin_pinctrl *pctrl) +{ + return 0; +} + +static int zhaoxin_pinctrl_probe(struct platform_device *pdev, + const struct zhaoxin_pinctrl_soc_data *soc_data) +{ + struct zhaoxin_pinctrl *pctrl; + int ret, i, irq; + struct resource *res; + void __iomem *regs; + + pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + pctrl->dev = &pdev->dev; + pctrl->soc = soc_data; + raw_spin_lock_init(&pctrl->lock); + pctrl->pin_topologys = pctrl->soc->pin_topologys; + pctrl->pin_map_size = pctrl->soc->pin_map_size; + pctrl->pin_maps = devm_kcalloc(&pdev->dev, pctrl->pin_map_size, + sizeof(*pctrl->pin_maps), GFP_KERNEL); + if (!pctrl->pin_maps) + return -ENOMEM; + for (i = 0; i < pctrl->pin_map_size; i++) { + struct zhaoxin_pin_map2_gpio *community = &pctrl->pin_maps[i]; + *community = pctrl->soc->zhaoxin_pin_maps[i]; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + pctrl->pm_pmio_base = regs; + pctrl->pmio_base = 0x800; + pctrl->pmio_rx90 = 0x90; + pctrl->pmio_rx8c = 0x8c; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = zhaoxin_pinctrl_pm_init(pctrl); + if (ret) + return ret; + pctrl->pctldesc = zhaoxin_pinctrl_desc; + pctrl->pctldesc.name = dev_name(&pdev->dev); + pctrl->pctldesc.pins = pctrl->soc->pins; + pctrl->pctldesc.npins = pctrl->soc->npins; + pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc, pctrl); + if 
(IS_ERR(pctrl->pctldev)) { + dev_err(&pdev->dev, "failed to register pinctrl driver\n"); + return PTR_ERR(pctrl->pctldev); + } + ret = zhaoxin_gpio_probe(pctrl, irq); + + if (ret) + return ret; + platform_set_drvdata(pdev, pctrl); + return 0; +} + +int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = device_get_match_data(&pdev->dev); + if (!data) + return -ENODATA; + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_hid); + +int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = zhaoxin_pinctrl_get_soc_data(pdev); + if (IS_ERR(data)) + return PTR_ERR(data); + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_uid); + +const struct zhaoxin_pinctrl_soc_data *zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data = NULL; + const struct zhaoxin_pinctrl_soc_data **table; + struct acpi_device *adev; + unsigned int i; + + adev = ACPI_COMPANION(&pdev->dev); + if (adev) { + const void *match = device_get_match_data(&pdev->dev); + + table = (const struct zhaoxin_pinctrl_soc_data **)match; + for (i = 0; table[i]; i++) { + if (!strcmp(adev->pnp.unique_id, table[i]->uid)) { + data = table[i]; + break; + } + } + } else { + const struct platform_device_id *id; + + id = platform_get_device_id(pdev); + if (!id) + return ERR_PTR(-ENODEV); + + table = (const struct zhaoxin_pinctrl_soc_data **)id->driver_data; + data = table[pdev->id]; + } + + return data ?: ERR_PTR(-ENODATA); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_get_soc_data); + +#ifdef CONFIG_PM_SLEEP + +int zhaoxin_pinctrl_suspend_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_suspend_noirq); + +int zhaoxin_pinctrl_resume_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_resume_noirq); +#endif + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Zhaoxin pinctrl/GPIO core driver"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h new file mode 100644 index 000000000000..cebea382dbe9 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * zhaoxin pinctrl common code + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. 
+ */
+
+#ifndef PINCTRL_zhaoxin_H
+#define PINCTRL_zhaoxin_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+struct platform_device;
+struct device;
+
+/**
+ * struct zhaoxin_pingroup - describes a Zhaoxin pin group
+ * @name: name of the group
+ * @pins: pins in this group
+ * @npins: number of pins in this group
+ * @mode: native mode the group is muxed to, if identical for all pins
+ * @modes: list of per-pin modes, if not identical for all pins
+ */
+struct zhaoxin_pingroup {
+	const char *name;
+	const unsigned int *pins;
+	size_t npins;
+	unsigned short mode;
+	const unsigned int *modes;
+};
+
+/**
+ * struct zhaoxin_function - describes a Zhaoxin pinmux function
+ * @name: name of the function
+ * @groups: groups that can provide this function
+ * @ngroups: number of groups
+ */
+struct zhaoxin_function {
+	const char *name;
+	const char * const *groups;
+	size_t ngroups;
+};
+
+/**
+ * struct zhaoxin_pin_map2_gpio - maps a pin range to a GPIO range
+ * @zhaoxin_range_pin_base: first pin number of this range
+ * @zhaoxin_range_pin_size: number of pins in this range
+ * @zhaoxin_range_gpio_base: first GPIO number, or ZHAOXIN_GPIO_BASE_NOMAP
+ */
+struct zhaoxin_pin_map2_gpio {
+	unsigned int zhaoxin_range_pin_base;
+	unsigned int zhaoxin_range_pin_size;
+	int zhaoxin_range_gpio_base;
+};
+
+#define MAX_GPIO 256
+
+struct reg_cal_array {
+	int pmio_offset;
+	int size;
+};
+
+struct reg_calibrate {
+	const struct reg_cal_array *reg;
+	const int reg_cal_size;
+	const int *cal_array;
+	const int size;
+};
+
+struct index_cal_array {
+	int reg_port_base;
+	int reg_data_base;
+	int index;
+	int *cal_array;
+	int size;
+};
+
+struct zhaoxin_pin_topology {
+	const struct reg_calibrate *int_cal;
+	const struct reg_calibrate *mod_sel_cal;
+	const struct reg_calibrate *status_cal;
+	const struct index_cal_array *gpio_in_cal;
+	const struct index_cal_array *gpio_out_cal;
+	const struct index_cal_array *gpio_dir_cal;
+	const struct index_cal_array *trigger_cal;
+};
+
+#define TRIGGER_FALL_EDGE	0
+#define TRIGGER_RISE_EDGE	1
+#define TRIGGER_BOTH_EDGE	2
+#define TRIGGER_LOW_LEVEL	3
+#define TRIGGER_HIGH_LEVEL	4
+
+#define ZHAOXIN_GPIO_BASE_NOMAP -1
+
+struct zhaoxin_pinctrl_soc_data {
+	const char *uid;
+	const struct pinctrl_pin_desc *pins;
+	size_t npins;
+	const struct zhaoxin_pingroup *groups;
+	size_t ngroups;
+	const struct zhaoxin_function *functions;
+	size_t nfunctions;
+	const struct zhaoxin_pin_topology *pin_topologys;
+	const struct zhaoxin_pin_map2_gpio *zhaoxin_pin_maps;
+	size_t pin_map_size;
+};
+
+const struct zhaoxin_pinctrl_soc_data *
+	zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev);
+
+struct zhaoxin_pinctrl {
+	struct device *dev;
+	raw_spinlock_t lock;
+	struct pinctrl_desc pctldesc;
+	struct pinctrl_dev *pctldev;
+	struct gpio_chip chip;
+	struct irq_chip irqchip;
+	const struct zhaoxin_pinctrl_soc_data *soc;
+	const struct zhaoxin_pin_topology *pin_topologys;
+	struct zhaoxin_pin_map2_gpio *pin_maps;
+	size_t pin_map_size;
+	int irq;
+	int pmio_base;
+	void __iomem *pm_pmio_base;
+	int pmio_rx90;
+	int pmio_rx8c;
+};
+
+int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev);
+int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev);
+
+#ifdef CONFIG_PM_SLEEP
+int zhaoxin_pinctrl_suspend_noirq(struct device *dev);
+int zhaoxin_pinctrl_resume_noirq(struct device *dev);
+#endif
+
+#endif /* PINCTRL_zhaoxin_H */
--
Gitee

From 9c8ac63d2512b669f89a78bb37afcb7b695d1552 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Mon, 5 Feb 2024 16:19:37 +0800
Subject: [PATCH 263/953] anolis: configs: add CONFIG_PINCTRL_ZHAOXIN and
 KX7000

ANBZ: #7809

Set CONFIG_PINCTRL_ZHAOXIN and CONFIG_PINCTRL_KX7000 to m in
anolis_defconfig.
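(Editor's illustration; not part of the patches above or below. The pinctrl
driver just added reaches every pad register through an index/data port pair,
as its zx_pad_read16()/zx_pad_write16() helpers do. Below is a minimal,
self-contained sketch of that access pattern; the demo_* names and DEMO_*
offsets are placeholders for this example only, not documented Zhaoxin
registers.)

	#include <linux/io.h>
	#include <linux/types.h>

	#define DEMO_PMIO_BASE	0x800	/* placeholder PMIO I/O base */
	#define DEMO_INDEX_PORT	0x90	/* write the pad index here... */
	#define DEMO_DATA_PORT	0x8c	/* ...then move 16-bit data here */

	static u16 demo_pad_read16(u8 index)
	{
		/* Select the pad register, then fetch its 16-bit value. */
		outb(index, DEMO_PMIO_BASE + DEMO_INDEX_PORT);
		return inw(DEMO_PMIO_BASE + DEMO_DATA_PORT);
	}

	static void demo_pad_write16(u8 index, u16 value)
	{
		/* Same selection step, followed by a 16-bit data write. */
		outb(index, DEMO_PMIO_BASE + DEMO_INDEX_PORT);
		outw(value, DEMO_PMIO_BASE + DEMO_DATA_PORT);
	}

(The value of the pattern is that one narrow I/O window addresses a large
pad-register file; the raw spinlock in the driver above exists to keep each
index write paired with its data access.)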
Signed-off-by: leoliu-oc
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2709
---
 arch/x86/configs/anolis_defconfig | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index 38b641e976d0..4abe45995548 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -3901,6 +3901,9 @@ CONFIG_PINCTRL_SUNRISEPOINT=m
 # CONFIG_PINCTRL_TIGERLAKE is not set
 # end of Intel pinctrl drivers
 
+CONFIG_PINCTRL_ZHAOXIN=m
+CONFIG_PINCTRL_KX7000=m
+
 #
 # Renesas pinctrl drivers
 #
--
Gitee

From 9986a8240030e2ea60793f613931dec3c5ebb58b Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Tue, 2 Jan 2024 19:00:15 +0800
Subject: [PATCH 264/953] anolis: i2c: smbus: Add support for Zhaoxin SMBUS
 controller

ANBZ: #7809

The Zhaoxin platform implements an SMBus controller in hardware, enabling
devices to exchange information with each other over the SMBus protocol.

Signed-off-by: leoliu-oc
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2711
---
 drivers/i2c/busses/Kconfig             |  10 +
 drivers/i2c/busses/Makefile            |   1 +
 drivers/i2c/busses/i2c-zhaoxin-smbus.c | 385 +++++++++++++++++++++++++
 3 files changed, 396 insertions(+)
 create mode 100644 drivers/i2c/busses/i2c-zhaoxin-smbus.c

diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1ddff0ec93fc..ac2c09df6189 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -359,6 +359,16 @@ config I2C_SCMI
 	  To compile this driver as a module, choose M here:
 	  the module will be called i2c-scmi.
 
+config I2C_ZHAOXIN_SMBUS
+	tristate "Zhaoxin SMBus Interface"
+	depends on PCI || COMPILE_TEST
+	help
+	  If you say yes to this option, support will be included for the
+	  Zhaoxin SMBus interface.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called i2c-zhaoxin-smbus.
+
 endif # ACPI
 
 comment "Mac SMBus host controller drivers"
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index bef7c205433b..f8c8a3554427 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -139,6 +139,7 @@ obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
 obj-$(CONFIG_I2C_TAOS_EVM)	+= i2c-taos-evm.o
 obj-$(CONFIG_I2C_TINY_USB)	+= i2c-tiny-usb.o
 obj-$(CONFIG_I2C_VIPERBOARD)	+= i2c-viperboard.o
+obj-$(CONFIG_I2C_ZHAOXIN_SMBUS)	+= i2c-zhaoxin-smbus.o
 
 # Other I2C/SMBus bus drivers
 obj-$(CONFIG_I2C_ACORN)	+= i2c-acorn.o
diff --git a/drivers/i2c/busses/i2c-zhaoxin-smbus.c b/drivers/i2c/busses/i2c-zhaoxin-smbus.c
new file mode 100644
index 000000000000..52c689e928af
--- /dev/null
+++ b/drivers/i2c/busses/i2c-zhaoxin-smbus.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zhaoxin SMBus controller driver
+ *
+ * Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DRIVER_VERSION "3.1.0"
+
+#define ZXSMB_NAME "smbus_zhaoxin"
+
+/*
+ * registers
+ */
+/* SMBus MMIO address offsets */
+#define ZXSMB_STS	0x00
+#define ZXSMB_BUSY	BIT(0)
+#define ZXSMB_CMD_CMPLET	BIT(1)
+#define ZXSMB_DEV_ERR	BIT(2)
+#define ZXSMB_BUS_CLSI	BIT(3)
+#define ZXSMB_FAIL_TRANS	BIT(4)
+#define ZXSMB_STS_MASK	GENMASK(4, 0)
+#define ZXSMB_NSMBSRST	BIT(5)
+#define ZXSMB_CTL	0x02
+#define ZXSMB_CMPLT_EN	BIT(0)
+#define ZXSMB_KILL_PRG	BIT(1)
+#define ZXSMB_START	BIT(6)
+#define ZXSMB_PEC_EN	BIT(7)
+#define ZXSMB_CMD	0x03
+#define ZXSMB_ADD	0x04
+#define ZXSMB_DAT0	0x05
+#define ZXSMB_DAT1	0x06
+#define ZXSMB_BLKDAT	0x07
+
+/*
+ * platform related information
+ */
+ /* protocol cmd constants */
+#define ZXSMB_QUICK	0x00
+#define ZXSMB_BYTE	0x04
+#define ZXSMB_BYTE_DATA	0x08
+#define ZXSMB_WORD_DATA	0x0C
+#define ZXSMB_PROC_CALL	0x10
+#define ZXSMB_BLOCK_DATA	0x14
+#define ZXSMB_I2C_10_BIT_ADDR	0x18
+#define ZXSMB_I2C_PROC_CALL	0x30
+#define ZXSMB_I2C_BLOCK_DATA	0x34
+#define ZXSMB_I2C_7_BIT_ADDR	0x38
+#define ZXSMB_UNIVERSAL	0x3C
+
+#define ZXSMB_TIMEOUT	500
+
+struct zxsmb {
+	struct device *dev;
+	struct i2c_adapter adap;
+	struct completion complete;
+	u16 base;
+	int irq;
+	u8 status;
+	int size;
+	u8 pec;
+};
+
+static irqreturn_t zxsmb_irq_handle(int irq, void *dev_id)
+{
+	struct zxsmb *smb = (struct zxsmb *)dev_id;
+
+	smb->status = inb(smb->base + ZXSMB_STS);
+	if ((smb->status & ZXSMB_STS_MASK) == 0)
+		return IRQ_NONE;
+
+	/* clear status */
+	outb(smb->status, smb->base + ZXSMB_STS);
+	complete(&smb->complete);
+
+	return IRQ_HANDLED;
+}
+
+static int zxsmb_status_check(struct zxsmb *smb)
+{
+	if (smb->status & ZXSMB_CMD_CMPLET)
+		return 0;
+
+	if (smb->status & ZXSMB_BUS_CLSI) {
+		dev_err(smb->dev, "Lost arbitration\n");
+		outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL);
+		return -EAGAIN;
+	}
+
+	dev_dbg(smb->dev, "Trans failed, status = 0x%X\n", smb->status);
+
+	return -EIO;
+}
+
+static int zxsmb_wait_interrput_finish(struct zxsmb *smb)
+{
+	int time_left;
+
+	time_left = wait_for_completion_timeout(&smb->complete, msecs_to_jiffies(ZXSMB_TIMEOUT));
+	if (time_left == 0) {
+		u8 status = inb(smb->base + ZXSMB_STS);
+
+		/* some hosts' IRQ configuration does not work well */
+		if (status & ZXSMB_STS_MASK) {
+			outb(status, smb->base + ZXSMB_STS);
+			outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL);
+			devm_free_irq(smb->dev, smb->irq, smb);
+			smb->irq = 0;
+			dev_warn(smb->dev, "change to polling mode\n");
+
+			return -EAGAIN;
+		}
+		dev_dbg(smb->dev, "interrupt timeout\n");
+		return -EIO;
+	}
+
+	return zxsmb_status_check(smb);
+}
+
+static int zxsmb_wait_polling_finish(struct zxsmb *smb)
+{
+	int status;
+	int time_left = ZXSMB_TIMEOUT * 10;
+
+	do {
+		usleep_range(100, 200);
+		status = inb(smb->base + ZXSMB_STS);
+	} while ((status & ZXSMB_BUSY) && (--time_left));
+
+	if (time_left == 0) {
+		dev_dbg(smb->dev, "polling timeout\n");
+		return -EIO;
+	}
+
+	/* clear status */
+	outb(status, smb->base + ZXSMB_STS);
+	smb->status = status;
+
+	return zxsmb_status_check(smb);
+}
+
+static int zxsmb_trans_start(struct zxsmb *smb)
+{
+	u16 base = smb->base;
+	int tmp;
+
+	/* Make sure the SMBus host is ready to start transmitting */
+	tmp = inb(base + ZXSMB_STS);
+	if (tmp & ZXSMB_BUSY) {
+		outb(tmp, base + ZXSMB_STS);
+		usleep_range(1000, 5000);
+		tmp = inb(base + ZXSMB_STS);
+		if (tmp & ZXSMB_BUSY) {
+			dev_err(smb->dev, "SMBus reset failed! 
(0x%02x)\n", tmp); + return -EIO; + } + } + + tmp = ZXSMB_START | smb->size; + + if (smb->pec) + tmp |= ZXSMB_PEC_EN; + else + tmp &= (~ZXSMB_PEC_EN); + + if (smb->irq) + tmp |= ZXSMB_CMPLT_EN; + + reinit_completion(&smb->complete); + smb->status = 0; + outb(tmp, base + ZXSMB_CTL); + return 0; +} + +static int zxsmb_transaction(struct zxsmb *smb) +{ + int err; + + err = zxsmb_trans_start(smb); + if (err) + return err; + + if (smb->irq) + err = zxsmb_wait_interrput_finish(smb); + else + err = zxsmb_wait_polling_finish(smb); + + outb(0, smb->base + ZXSMB_CTL); + return err; +} + +static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char read, u8 command, + int size, union i2c_smbus_data *data) +{ + int i; + int err; + u8 len; + struct zxsmb *smb = (struct zxsmb *)i2c_get_adapdata(adap); + u16 base = smb->base; + + switch (size) { + case I2C_SMBUS_QUICK: + size = ZXSMB_QUICK; + break; + case I2C_SMBUS_BYTE: + size = ZXSMB_BYTE; + if (!read) + outb(command, base + ZXSMB_CMD); + break; + case I2C_SMBUS_BYTE_DATA: + outb(command, base + ZXSMB_CMD); + if (!read) + outb(data->byte, base + ZXSMB_DAT0); + size = ZXSMB_BYTE_DATA; + break; + case I2C_SMBUS_PROC_CALL: + case I2C_SMBUS_WORD_DATA: + if (read && size == I2C_SMBUS_PROC_CALL) + goto exit_unsupported; + outb(command, base + ZXSMB_CMD); + if (!read) { + outb(data->word & 0xff, base + ZXSMB_DAT0); + outb((data->word & 0xff00) >> 8, base + ZXSMB_DAT1); + } + size = (size == I2C_SMBUS_PROC_CALL) ? + ZXSMB_PROC_CALL : ZXSMB_WORD_DATA; + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + case I2C_SMBUS_BLOCK_DATA: + len = data->block[0]; + if (read && size == I2C_SMBUS_I2C_BLOCK_DATA) + outb(len, base + ZXSMB_DAT1); + outb(command, base + ZXSMB_CMD); + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + if (!read) { + outb(len, base + ZXSMB_DAT0); + outb(0, base + ZXSMB_DAT1); + for (i = 1; i <= len; i++) + outb(data->block[i], base + ZXSMB_BLKDAT); + } + size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ? + ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA; + break; + default: + goto exit_unsupported; + } + + outb(((addr & 0x7f) << 1) | read, base + ZXSMB_ADD); + smb->size = size; + smb->pec = flags & I2C_CLIENT_PEC; + err = zxsmb_transaction(smb); + if (err) + return err; + + if ((read == I2C_SMBUS_WRITE) || (size == ZXSMB_QUICK)) { + if (unlikely(size == ZXSMB_PROC_CALL)) + goto prepare_read; + return 0; + } + +prepare_read: + switch (size) { + case ZXSMB_BYTE: + case ZXSMB_BYTE_DATA: + data->byte = inb(base + ZXSMB_DAT0); + break; + case ZXSMB_PROC_CALL: + case ZXSMB_WORD_DATA: + data->word = inb(base + ZXSMB_DAT0) + (inb(base + ZXSMB_DAT1) << 8); + break; + case ZXSMB_I2C_BLOCK_DATA: + case ZXSMB_BLOCK_DATA: + data->block[0] = inb(base + ZXSMB_DAT0); + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + for (i = 1; i <= data->block[0]; i++) + data->block[i] = inb(base + ZXSMB_BLKDAT); + break; + } + + return 0; + +exit_unsupported: + dev_err(smb->dev, "unsupported access, size:%x, dir:%s", size, read ? 
"read" : "write"); + return -EOPNOTSUPP; +} + +static u32 zxsmb_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smbus_algorithm = { + .smbus_xfer = zxsmb_smbus_xfer, + .functionality = zxsmb_func, +}; + +static int zxsmb_probe(struct platform_device *pdev) +{ + struct zxsmb *smb; + struct resource *res; + struct i2c_adapter *adap; + + smb = devm_kzalloc(&pdev->dev, sizeof(*smb), GFP_KERNEL); + if (!smb) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (IS_ERR(res)) + return -ENODEV; + smb->base = res->start; + if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "Can't get I/O resource\n"); + return -EBUSY; + } + + smb->irq = platform_get_irq(pdev, 0); + if (smb->irq < 0 || devm_request_irq(&pdev->dev, smb->irq, zxsmb_irq_handle, IRQF_SHARED, + pdev->name, smb)) { + dev_warn(&pdev->dev, "failed to request irq %d\n", smb->irq); + smb->irq = 0; + } else + init_completion(&smb->complete); + + smb->dev = &pdev->dev; + platform_set_drvdata(pdev, (void *)smb); + + adap = &smb->adap; + adap->algo = &smbus_algorithm; + adap->retries = 2; + adap->owner = THIS_MODULE; + adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(smb->dev)); + i2c_set_adapdata(&smb->adap, smb); + + return i2c_add_adapter(&smb->adap); +} + +static int zxsmb_remove(struct platform_device *pdev) +{ + struct zxsmb *smb = platform_get_drvdata(pdev); + + i2c_del_adapter(&(smb->adap)); + platform_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, smb); + + return 0; +} + +static const struct acpi_device_id zxsmb_acpi_match[] = { + {"SMB3324", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, zxsmb_acpi_match); + +static struct platform_driver zxsmb_driver = { + .probe = zxsmb_probe, + .remove = zxsmb_remove, + .driver = { + .name = ZXSMB_NAME, + .acpi_match_table = ACPI_PTR(zxsmb_acpi_match), + }, +}; + +module_platform_driver(zxsmb_driver); + +MODULE_AUTHOR("hanshu@zhaoxin.com"); +MODULE_DESCRIPTION("Zhaoxin SMBus driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); -- Gitee From 5f0704452b441e7faf5aea0d8a357b1e7b67ed1d Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 15 Mar 2024 11:36:57 +0800 Subject: [PATCH 265/953] fs/address_space: move i_mmap_rwsem to mitigate a false sharing with i_mmap. ANBZ: #8544 commit d3b1a9a778e1a014c5331d1e8d4863fd999eb0b5 upstream. In the struct address_space, there is a 32-byte gap between i_mmap and i_mmap_rwsem. Due to the alignment of struct address_space variables to 8 bytes, in certain situations, i_mmap and i_mmap_rwsem may end up in the same CACHE line. While running Unixbench/execl, we observe high false sharing issues when accessing i_mmap against i_mmap_rwsem. We move i_mmap_rwsem after i_private_list, ensuring a 64-byte gap between i_mmap and i_mmap_rwsem. For Intel Silver machines (2 sockets) using kernel v6.8 rc-2, the score of Unixbench/execl improves by ~3.94%, and the score of Unixbench/shell improves by ~3.26%. 
Baseline: ------------------------------------------------------------- 162 546 748 11374 21 0xffff92e266af90c0 ------------------------------------------------------------- 46.89% 44.65% 0.00% 0.00% 0x0 1 1 0xffffffff86d5fb96 460 258 271 1069 32 [k] __handle_mm_fault [kernel.vmlinux] memory.c:2940 0 1 4.21% 4.41% 0.00% 0.00% 0x4 1 1 0xffffffff86d0ed54 473 311 288 95 28 [k] filemap_read [kernel.vmlinux] atomic.h:23 0 1 0.00% 0.00% 0.04% 4.76% 0x8 1 1 0xffffffff86d4bcf1 0 0 0 5 4 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:204 0 1 6.41% 6.02% 0.00% 0.00% 0x8 1 1 0xffffffff86d4ba85 411 271 339 210 32 [k] vma_interval_tree_insert [kernel.vmlinux] interval_tree.c:23 0 1 0.00% 0.00% 0.47% 95.24% 0x10 1 1 0xffffffff86d4bd34 0 0 0 74 32 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:339 0 1 0.37% 0.13% 0.00% 0.00% 0x10 1 1 0xffffffff86d4bb4f 328 212 380 7 5 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:338 0 1 5.13% 5.08% 0.00% 0.00% 0x10 1 1 0xffffffff86d4bb4b 416 255 357 197 32 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:338 0 1 1.10% 0.53% 0.00% 0.00% 0x28 1 1 0xffffffff86e06eb8 395 228 351 24 14 [k] do_dentry_open [kernel.vmlinux] open.c:966 0 1 1.10% 2.14% 57.07% 0.00% 0x38 1 1 0xffffffff878c9225 1364 792 462 7003 32 [k] down_write [kernel.vmlinux] atomic64_64.h:109 0 1 0.00% 0.00% 0.01% 0.00% 0x38 1 1 0xffffffff878c8e75 0 0 252 3 2 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:109 0 1 0.00% 0.13% 0.00% 0.00% 0x38 1 1 0xffffffff878c8e23 0 596 63 2 2 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:15 0 1 2.38% 2.94% 6.53% 0.00% 0x38 1 1 0xffffffff878c8ccb 1150 818 570 1197 32 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:109 0 1 30.59% 32.22% 0.00% 0.00% 0x38 1 1 0xffffffff878c8cb4 423 251 380 648 32 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:15 0 1 1.83% 1.74% 35.88% 0.00% 0x38 1 1 0xffffffff86b4f833 1217 1112 565 4586 32 [k] up_write [kernel.vmlinux] atomic64_64.h:91 0 1 with this change: ------------------------------------------------------------- 360 12 300 57 35 0xffff982cdae76400 ------------------------------------------------------------- 50.00% 59.67% 0.00% 0.00% 0x0 1 1 0xffffffff8215fb86 352 200 191 558 32 [k] __handle_mm_fault [kernel.vmlinux] memory.c:2940 0 1 8.33% 5.00% 0.00% 0.00% 0x4 1 1 0xffffffff8210ed44 370 284 263 42 24 [k] filemap_read [kernel.vmlinux] atomic.h:23 0 1 0.00% 0.00% 5.26% 2.86% 0x8 1 1 0xffffffff8214bce1 0 0 0 4 4 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:204 0 1 33.33% 14.33% 0.00% 0.00% 0x8 1 1 0xffffffff8214ba75 344 186 219 140 32 [k] vma_interval_tree_insert [kernel.vmlinux] interval_tree.c:23 0 1 0.00% 0.00% 94.74% 97.14% 0x10 1 1 0xffffffff8214bd24 0 0 0 88 29 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:339 0 1 8.33% 20.00% 0.00% 0.00% 0x10 1 1 0xffffffff8214bb3b 296 209 226 167 31 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:338 0 1 0.00% 0.67% 0.00% 0.00% 0x28 1 1 0xffffffff82206f45 0 140 334 4 3 [k] do_dentry_open [kernel.vmlinux] open.c:966 0 1 0.00% 0.33% 0.00% 0.00% 0x38 1 1 0xffffffff8250a6c4 0 286 126 5 5 [k] errseq_sample [kernel.vmlinux] errseq.c:125 0 Signed-off-by: JonasZhou Link: https://lore.kernel.org/r/20240202083304.10995-1-JonasZhou-oc@zhaoxin.com Signed-off-by: Christian Brauner Signed-off-by: leoliu-oc Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2888 --- include/linux/fs.h | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-)

diff --git a/include/linux/fs.h b/include/linux/fs.h
index ee5efad0d780..59b0fbaf6eb0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -484,10 +484,10 @@ struct address_space {
 	pgoff_t writeback_index;
 	const struct address_space_operations *a_ops;
 	unsigned long flags;
-	struct rw_semaphore i_mmap_rwsem;
 	errseq_t wb_err;
 	spinlock_t private_lock;
 	struct list_head private_list;
+	struct rw_semaphore i_mmap_rwsem;
 	void *private_data;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
 	/*
--
Gitee

From 2d88fcb87207b9feb8380e6d60d6eb8b5f6dbfb0 Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Fri, 8 Mar 2024 14:50:42 +0800
Subject: [PATCH 266/953] anolis: kfence: enhance kfence for 6.6

ANBZ: #8499

Port the Anolis kfence features from ANCK 5.10. They enhance memory
debugging for slub objects.

Signed-off-by: Tianchen Ding
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2874
---
 arch/arm64/include/asm/kfence.h |    9 +-
 arch/arm64/mm/mmu.c             |   13 +-
 arch/x86/include/asm/kfence.h   |   50 +-
 include/linux/kfence.h          |  110 +-
 include/linux/page-flags.h      |   13 +
 include/trace/events/mmflags.h  |    9 +-
 mm/kfence/core.c                | 1880 ++++++++++++++++++++++++++-----
 mm/kfence/kfence.h              |   77 +-
 mm/kfence/kfence_test.c         |   32 +-
 mm/kfence/report.c              |   17 +-
 mm/slab.c                       |    2 +-
 mm/slub.c                       |    2 +-
 12 files changed, 1845 insertions(+), 369 deletions(-)

diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index a81937fae9f6..e5f86bbf4348 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,9 +8,14 @@
 #ifndef __ASM_KFENCE_H
 #define __ASM_KFENCE_H
 
+#include
+
 #include
 
-static inline bool arch_kfence_init_pool(void) { return true; }
+static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa)
+{
+	return can_set_direct_map();
+}
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
@@ -19,6 +24,8 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	return true;
 }
 
+static inline bool arch_kfence_free_pool(unsigned long addr) { return false; }
+
 #ifdef CONFIG_KFENCE
 extern bool kfence_early_init;
 static inline bool arm64_kfence_can_set_direct_map(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 47781bec6171..3e26d444569e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -511,6 +511,9 @@ void __init mark_linear_text_alias_ro(void)
 
 #ifdef CONFIG_KFENCE
 
+static unsigned long __ro_after_init
+kfence_pool_size = ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE);
+
 bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
 
 /* early_param() will be parsed before map_mem() below. */
@@ -531,7 +534,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void)
 	if (!kfence_early_init)
 		return 0;
 
-	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	kfence_pool = memblock_phys_alloc(kfence_pool_size, PAGE_SIZE);
 	if (!kfence_pool) {
 		pr_err("failed to allocate kfence pool\n");
 		kfence_early_init = false;
@@ -539,7 +542,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void)
 	}
 
 	/* Temporarily mark as NOMAP. */
-	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	memblock_mark_nomap(kfence_pool, kfence_pool_size);
 
 	return kfence_pool;
 }
@@ -550,11 +553,11 @@ static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
 		return;
 
 	/* KFENCE pool needs page-level mapping. 
*/ - __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, + __map_memblock(pgdp, kfence_pool, kfence_pool + kfence_pool_size, pgprot_tagged(PAGE_KERNEL), NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); - __kfence_pool = phys_to_virt(kfence_pool); + memblock_clear_nomap(kfence_pool, kfence_pool_size); + __kfence_pool_early_init = phys_to_virt(kfence_pool); } #else /* CONFIG_KFENCE */ diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index ff5c7134a37a..ba344b416ac5 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -19,11 +19,12 @@ #include /* Force 4K pages for __kfence_pool. */ -static inline bool arch_kfence_init_pool(void) +static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { + char *__kfence_pool = kpa->addr; unsigned long addr; - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); + for (addr = (unsigned long)__kfence_pool; is_kfence_address_area((void *)addr, kpa); addr += PAGE_SIZE) { unsigned int level; @@ -68,6 +69,51 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } +/* + * This function is used to recover TLB to 1G kernel mapping. + * The caller MUST make sure there're no other active kfence + * pools in this 1G area. + */ +static inline bool arch_kfence_free_pool(unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud, new_pud, old_pud; + + addr = ALIGN_DOWN(addr, PUD_SIZE); + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return false; + + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return false; + + if (p4d_large(*p4d) || !p4d_present(*p4d)) + return false; + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + return false; + + if (pud_large(*pud) || !pud_present(*pud)) + return false; + + new_pud = pfn_pud((unsigned long)__phys_to_pfn(__pa(addr)), + __pgprot(__PAGE_KERNEL_LARGE)); + + old_pud = xchg(pud, new_pud); + + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + if (!pud_free_pmd_page(&old_pud, addr)) { + pr_warn("free old TLB error at 0x%p-0x%p\n", + (void *)addr, (void *)(addr + PUD_SIZE)); + } + + return true; +} + #endif /* !MODULE */ #endif /* _ASM_X86_KFENCE_H */ diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 401af4757514..bb24956fefd8 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -16,19 +16,45 @@ #include #include +#include +#include -extern unsigned long kfence_sample_interval; +extern long kfence_sample_interval; -/* - * We allocate an even number of pages, as it simplifies calculations to map - * address to metadata indices; effectively, the very first page serves as an - * extended guard page, but otherwise has no special purpose. - */ -#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE) -extern char *__kfence_pool; +struct kfence_pool_area { + struct rb_node rb_node; /* binary tree linked to root */ + struct kfence_metadata *meta; /* metadata per area */ + char *addr; /* start kfence pool address */ + unsigned long pool_size; /* size of kfence pool of this area */ + unsigned long nr_objects; /* max object number of this area, 0 marked as zombie area */ + int node; /* the numa node (freelist) this area belongs to, likely from phy mem node */ + atomic_t _ref; /* count kpa ref, to protect kpa itself */ + struct list_head list; /* ready to be added to kfence_pool_root */ + struct percpu_ref refcnt; /* count in-use objects, to protect pool, meta, etc... 
*/ + struct work_struct work; /* use workqueue to free unused area */ +}; DECLARE_STATIC_KEY_FALSE(kfence_allocation_key); +DECLARE_STATIC_KEY_FALSE(kfence_skip_interval); extern atomic_t kfence_allocation_gate; +extern unsigned long kfence_num_objects; +extern char *__kfence_pool_early_init; + +/** + * is_kfence_address_area() - check if an address belongs to KFENCE pool in given area + * @addr: address to check + * @kpa: area to check + * + * Return: true or false depending on whether the address is within the KFENCE + * object range in given area. + * + * This function is used when you already know the nearest leftside area. + */ +static __always_inline bool is_kfence_address_area(const void *addr, + const struct kfence_pool_area *kpa) +{ + return unlikely(kpa && (unsigned long)((char *)addr - kpa->addr) < kpa->pool_size); +} /** * is_kfence_address() - check if an address belongs to KFENCE pool @@ -50,12 +76,17 @@ extern atomic_t kfence_allocation_gate; */ static __always_inline bool is_kfence_address(const void *addr) { +#if defined(CONFIG_KASAN) || defined(CONFIG_DEBUG_KMEMLEAK) /* - * The __kfence_pool != NULL check is required to deal with the case - * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in - * the slow-path after the range-check! + * KASAN functions such as kasan_record_aux_stack(), + * kasan_poison_shadow(), or kasan_unpoison_shadow() + * may give an invalid kaddr (direct mapping kernel address). + * We must add a check here. */ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); + return virt_addr_valid(addr) && PageKfence(virt_to_page(addr)); +#else + return PageKfence(virt_to_page(addr)); +#endif } /** @@ -72,6 +103,17 @@ void __init kfence_alloc_pool_and_metadata(void); */ void __init kfence_init(void); +/** + * update_kfence_booting_max() - analyse the max num_objects from cmdline + * + * Read the config from boot cmdline and limit kfence pool size. + * This function is called by kfence itself (e.g., kfence_alloc_pool()), or, + * by specific arch alloc (e.g., arm64_kfence_alloc_pool()). + * + * Return: 1 if kfence_num_objects is changed, otherwise 0. + */ +int __init update_kfence_booting_max(void); + /** * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects * @s: cache being shut down @@ -97,7 +139,7 @@ void kfence_shutdown_cache(struct kmem_cache *s); * Allocate a KFENCE object. Allocators must not call this function directly, * use kfence_alloc() instead. */ -void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags); +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node); /** * kfence_alloc() - allocate a KFENCE object with a low probability @@ -124,9 +166,43 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp if (!static_branch_likely(&kfence_allocation_key)) return NULL; #endif - if (likely(atomic_read(&kfence_allocation_gate))) + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) return NULL; - return __kfence_alloc(s, size, flags); + return __kfence_alloc(s, size, flags, NUMA_NO_NODE); +} + +/** + * kfence_alloc_node() - allocate a KFENCE object with a low probability + * @s: struct kmem_cache with object requirements + * @size: exact size of the object to allocate (can be less than @s->size + * e.g. 
for kmalloc caches) + * @flags: GFP flags + * @node: alloc from kfence pool on which node + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE object. + * + * kfence_alloc_node() should be inserted into the heap allocation fast path, + * allowing it to transparently return KFENCE-allocated objects with a low + * probability using a static branch (the probability is controlled by the + * kfence.sample_interval boot parameter). + */ +static __always_inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags, + int node) +{ +#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0 + if (!static_branch_unlikely(&kfence_allocation_key)) + return NULL; +#else + if (!static_branch_likely(&kfence_allocation_key)) + return NULL; +#endif + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) + return NULL; + return __kfence_alloc(s, size, flags, node); } /** @@ -228,6 +304,10 @@ static inline void kfence_alloc_pool_and_metadata(void) { } static inline void kfence_init(void) { } static inline void kfence_shutdown_cache(struct kmem_cache *s) { } static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; } +static inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags, int node) +{ + return NULL; +} static inline size_t kfence_ksize(const void *addr) { return 0; } static inline void *kfence_object_start(const void *addr) { return NULL; } static inline void __kfence_free(void *addr) { } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5c02720c53a5..5f4b2f18d8d5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -135,6 +135,9 @@ enum pageflags { #ifdef CONFIG_ARCH_USES_PG_ARCH_X PG_arch_2, PG_arch_3, +#endif +#ifdef CONFIG_KFENCE + PG_kfence, /* Page in kfence pool */ #endif __NR_PAGEFLAGS, @@ -603,6 +606,10 @@ PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #endif +#ifdef CONFIG_KFENCE +__PAGEFLAG(Kfence, kfence, PF_ANY) +#endif + /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; @@ -1050,6 +1057,12 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) #define __PG_MLOCKED 0 #endif +#ifdef CONFIG_KFENCE +#define __PG_KFENCE (1UL << PG_kfence) +#else +#define __PG_KFENCE 0 +#endif + /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. If they are, there is a problem. diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 1478b9dd05fa..08fd0425f478 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -95,6 +95,12 @@ #define IF_HAVE_PG_ARCH_X(_name) #endif +#ifdef CONFIG_KFENCE +#define IF_HAVE_PG_KFENCE(_name) ,{1UL << PG_##_name, __stringify(_name)} +#else +#define IF_HAVE_PG_KFENCE(_name) +#endif + #define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) } #define __def_pageflag_names \ @@ -125,7 +131,8 @@ IF_HAVE_PG_HWPOISON(hwpoison) \ IF_HAVE_PG_IDLE(idle) \ IF_HAVE_PG_IDLE(young) \ IF_HAVE_PG_ARCH_X(arch_2) \ -IF_HAVE_PG_ARCH_X(arch_3) +IF_HAVE_PG_ARCH_X(arch_3) \ +IF_HAVE_PG_KFENCE(kfence) #define show_page_flags(flags) \ (flags) ? 
	__print_flags(flags, "|",					\

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 3872528d0963..01d5945e1351 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -9,6 +9,7 @@
 
 #include
 #include
+#include
 #include
 #include
 #include
@@ -29,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -40,8 +42,9 @@
 ({							\
 	const bool __cond = WARN_ON(cond);		\
 	if (unlikely(__cond)) {				\
-		WRITE_ONCE(kfence_enabled, false);	\
 		disabled_by_warn = true;		\
+		WRITE_ONCE(kfence_enabled, false);	\
+		static_branch_disable(&kfence_allocation_key);	\
 	}						\
 	__cond;						\
 })
@@ -50,8 +53,27 @@
 static bool kfence_enabled __read_mostly;
 static bool disabled_by_warn __read_mostly;
 
+/* true = node mode, false = global mode. */
+static bool kfence_pool_node_mode __read_mostly;
+static DEFINE_MUTEX(kfence_mutex);
+unsigned long kfence_num_objects __read_mostly = CONFIG_KFENCE_NUM_OBJECTS;
+EXPORT_SYMBOL_GPL(kfence_num_objects);
+static unsigned long kfence_num_objects_snap __read_mostly; /* Used to record the upstream-mode value. */
+static int *kfence_node_map __read_mostly; /* Map real node to "virtual kfence node". */
+bool kfence_panic_on_fault __read_mostly;
+struct kfence_alloc_node_cond {
+	long need;
+	long allocated;
+};
+/*
+ * An array recording how many objects need to be allocated
+ * and how many have been allocated on each node.
+ */
+static struct kfence_alloc_node_cond *kfence_num_objects_stat;
+/* Only used during boot; records how __kfence_pool_area[] is partitioned. */
+static unsigned long kfence_nr_areas_per_node;
 
-unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
 
 #ifdef MODULE_PARAM_PREFIX
@@ -59,25 +81,40 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
 #endif
 #define MODULE_PARAM_PREFIX "kfence."
 
-static int kfence_enable_late(void);
+DEFINE_STATIC_KEY_FALSE(kfence_short_canary);
+DEFINE_STATIC_KEY_FALSE(kfence_skip_interval);
+static DEFINE_STATIC_KEY_FALSE(kfence_once_enabled);
+
+#define KFENCE_MAX_OBJECTS_PER_AREA (PUD_SIZE / PAGE_SIZE / 2 - 1)
+
+static void kfence_enable_late(void);
 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
 {
-	unsigned long num;
-	int ret = kstrtoul(val, 0, &num);
+	long num;
+	int ret = kstrtol(val, 0, &num);
 
 	if (ret < 0)
 		return ret;
 
-	/* Using 0 to indicate KFENCE is disabled. */
-	if (!num && READ_ONCE(kfence_enabled)) {
-		pr_info("disabled\n");
-		WRITE_ONCE(kfence_enabled, false);
+	if (system_state == SYSTEM_BOOTING) {
+		*((long *)kp->arg) = num;
+		return 0;
 	}
 
-	*((unsigned long *)kp->arg) = num;
+	/* Don't allow the sample interval to switch between positive and negative. */
+	if ((kfence_sample_interval > 0 && num < 0) ||
+	    (kfence_sample_interval < 0 && num > 0)) {
+		return -EINVAL;
+	}
+
+	if (!num) /* Using 0 to indicate KFENCE is disabled. */
+		kfence_disable();
+
+	*((long *)kp->arg) = num;
+
+	if (num && !READ_ONCE(kfence_enabled))
+		return disabled_by_warn ? -EINVAL : (kfence_enable_late(), 0);
 
-	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
-		return disabled_by_warn ? -EINVAL : kfence_enable_late();
 
 	return 0;
 }
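
The setter above reduces to a small set of transitions: values are only recorded during boot, 0 disables KFENCE, and a non-zero interval may never flip sign (positive selects sampling, negative the always-on mode). A standalone model of just that validation logic, with simplified stand-ins for the enable/disable plumbing:

    #include <stdio.h>

    /* Model of the sample_interval transition rules above; the names and
     * the enable/disable plumbing are simplified stand-ins. */
    static long interval = 100;   /* current kfence.sample_interval */
    static int  enabled  = 1;

    static int set_sample_interval(long num)
    {
        /* The sign may not flip between sampling (>0) and always-on (<0). */
        if ((interval > 0 && num < 0) || (interval < 0 && num > 0))
            return -22;                     /* -EINVAL */
        if (!num)
            enabled = 0;                    /* 0 disables KFENCE */
        interval = num;
        if (num && !enabled)
            enabled = 1;                    /* late re-enable path */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", set_sample_interval(0));    /* 0   (disables)   */
        printf("%d\n", set_sample_interval(500));  /* 0   (re-enables) */
        printf("%d\n", set_sample_interval(-1));   /* -22 (sign flip)  */
        return 0;
    }
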
@@ -86,7 +123,7 @@ static int param_get_sample_interval(char *buffer, const struct kernel_param *kp
 {
 	if (!READ_ONCE(kfence_enabled))
 		return sprintf(buffer, "0\n");
 
-	return param_get_ulong(buffer, kp);
+	return param_get_long(buffer, kp);
 }
 
 static const struct kernel_param_ops sample_interval_param_ops = {
@@ -95,6 +132,107 @@
 };
 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
 
+static int param_set_num_objects(const char *val, const struct kernel_param *kp)
+{
+	unsigned long num;
+	int ret = kstrtoul(val, 0, &num);
+
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_ARM64
+	if (system_state == SYSTEM_BOOTING)
+		return 0;
+#endif
+
+	if (!num)
+		return -EINVAL;
+
+	mutex_lock(&kfence_mutex);
+
+	if (READ_ONCE(kfence_enabled)) {
+		ret = -EBUSY; /* cannot change num_objects while enabled */
+		goto out_unlock;
+	}
+
+	*((unsigned long *)kp->arg) = num;
+	ret = 0;
+
+out_unlock:
+	mutex_unlock(&kfence_mutex);
+	return ret;
+}
+
+static int param_get_num_objects(char *buffer, const struct kernel_param *kp)
+{
+	return param_get_ulong(buffer, kp);
+}
+
+static const struct kernel_param_ops num_objects_param_ops = {
+	.set = param_set_num_objects,
+	.get = param_get_num_objects,
+};
+module_param_cb(num_objects, &num_objects_param_ops, &kfence_num_objects, 0600);
+
+static int param_set_pool_mode(const char *val, const struct kernel_param *kp)
+{
+	bool mode;
+	char *s = strstrip((char *)val);
+
+	if (READ_ONCE(kfence_enabled))
+		return -EINVAL; /* cannot change mode while enabled */
+
+	if (!strcmp(s, "global"))
+		mode = false;
+	else if (!strcmp(s, "node"))
+		mode = true;
+	else
+		return -EINVAL;
+
+	*((bool *)kp->arg) = mode;
+
+	return 0;
+}
+
+static int param_get_pool_mode(char *buffer, const struct kernel_param *kp)
+{
+	return sprintf(buffer, "%s\n", *(bool *)kp->arg ? "node" : "global");
+}
+
+static const struct kernel_param_ops pool_mode_param_ops = {
+	.set = param_set_pool_mode,
+	.get = param_get_pool_mode,
+};
+module_param_cb(pool_mode, &pool_mode_param_ops, &kfence_pool_node_mode, 0600);
+
+static int param_set_fault(const char *val, const struct kernel_param *kp)
+{
+	bool mode;
+	char *s = strstrip((char *)val);
+
+	if (!strcmp(s, "report"))
+		mode = false;
+	else if (!strcmp(s, "panic"))
+		mode = true;
+	else
+		return -EINVAL;
+
+	*((bool *)kp->arg) = mode;
+
+	return 0;
+}
+
+static int param_get_fault(char *buffer, const struct kernel_param *kp)
+{
+	return sprintf(buffer, "%s\n", *(bool *)kp->arg ? "panic" : "report");
}
+
+static const struct kernel_param_ops fault_param_ops = {
+	.set = param_set_fault,
+	.get = param_get_fault,
+};
+module_param_cb(fault, &fault_param_ops, &kfence_panic_on_fault, 0600);
+
 /* Pool usage% threshold when currently covered allocations are skipped. */
 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
@@ -107,28 +245,39 @@ module_param_named(deferrable, kfence_deferrable, bool, 0444);
 static bool kfence_check_on_panic __read_mostly;
 module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
 
-/* The pool of pages used for guard pages and objects. */
-char *__kfence_pool __read_mostly;
-EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
-
 /*
- * Per-object metadata, with one-to-one mapping of object metadata to
- * backing pages (in __kfence_pool).
+ * The pool of pages used for guard pages and objects. + * Only used in booting init state. Will be cleared after that. */ -static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0); -struct kfence_metadata *kfence_metadata __read_mostly; +char **__kfence_pool_area; /* - * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache(). - * So introduce kfence_metadata_init to initialize metadata, and then make - * kfence_metadata visible after initialization is successful. This prevents - * potential UAF or access to uninitialized metadata. + * The pool of pages should be reserved earlier than kfence initialization. It's + * only assigned in arm64 architecture. */ -static struct kfence_metadata *kfence_metadata_init __read_mostly; +char *__kfence_pool_early_init; + +/* The binary tree maintaining all kfence pool areas */ +struct rb_root kfence_pool_root = RB_ROOT; +EXPORT_SYMBOL_GPL(kfence_pool_root); /* Freelist with available objects. */ -static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist); -static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */ +struct kfence_freelist_node { + struct list_head freelist; + raw_spinlock_t lock; +}; + +struct kfence_freelist_cpu { + struct list_head freelist; + unsigned long count; +}; + +struct kfence_freelist { + struct kfence_freelist_node *node; + struct kfence_freelist_cpu __percpu *cpu; +}; +static struct kfence_freelist freelist; +static atomic_t kfence_flush_res, kfence_refkill_res; /* * The static key to set up a KFENCE allocation; or if static keys are not used @@ -150,11 +299,11 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1); * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM */ #define ALLOC_COVERED_HNUM 2 -#define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2) -#define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER) -#define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER) +static unsigned long alloc_covered_order __ro_after_init; +#define ALLOC_COVERED_HNEXT(h) hash_32(h, alloc_covered_order) +#define ALLOC_COVERED_SIZE (1 << alloc_covered_order) #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1) -static atomic_t alloc_covered[ALLOC_COVERED_SIZE]; +static atomic_t *alloc_covered __read_mostly; /* Stack depth used to determine uniqueness of an allocation. */ #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8) @@ -177,7 +326,10 @@ enum kfence_counter_id { KFENCE_COUNTER_SKIP_COVERED, KFENCE_COUNTER_COUNT, }; -static atomic_long_t counters[KFENCE_COUNTER_COUNT]; +struct kfence_counter { + s64 counter[KFENCE_COUNTER_COUNT]; +}; +static struct kfence_counter __percpu *counters; static const char *const counter_names[] = { [KFENCE_COUNTER_ALLOCATED] = "currently allocated", [KFENCE_COUNTER_ALLOCS] = "total allocations", @@ -194,13 +346,28 @@ static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT); static inline bool should_skip_covered(void) { - unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100; + unsigned long thresh; + s64 sum; + int cpu; + + /* Only use this feature in upstream mode */ + if (!kfence_num_objects_snap) + return false; - return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh; + thresh = (kfence_num_objects_snap * kfence_skip_covered_thresh) / 100; + sum = 0; + /* This may take some time but should be acceptable in sampling mode. 
 */
+	for_each_possible_cpu(cpu)
+		sum += per_cpu_ptr(counters, cpu)->counter[KFENCE_COUNTER_ALLOCATED];
+
+	return sum > thresh;
 }
 
 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
 {
+	if (!kfence_num_objects_snap)
+		return 0;
+
 	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
 	num_entries = filter_irq_stacks(stack_entries, num_entries);
 	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
@@ -210,10 +377,14 @@
  * Adds (or subtracts) count @val for allocation stack trace hash
  * @alloc_stack_hash from Counting Bloom filter.
  */
-static void alloc_covered_add(u32 alloc_stack_hash, int val)
+static inline void alloc_covered_add(u32 alloc_stack_hash, int val)
 {
 	int i;
 
+	/* Only use this feature in upstream mode */
+	if (!kfence_num_objects_snap)
+		return;
+
 	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
 		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
 		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
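
For reference, the filter that alloc_covered_add() maintains is a counting Bloom filter: ALLOC_COVERED_HNUM (2) slots per stack hash, chained through hash_32(). A self-contained userspace sketch of the same idea; the multiplier mirrors the kernel's GOLDEN_RATIO_32, and the fixed table order stands in for the patch's ilog2(kfence_num_objects_snap) + 2:

    #include <stdint.h>
    #include <stdio.h>

    #define HNUM  2                       /* ALLOC_COVERED_HNUM */
    #define ORDER 9                       /* e.g. ilog2(255) + 2 */
    #define SIZE  (1u << ORDER)
    #define MASK  (SIZE - 1)

    static int covered[SIZE];

    static uint32_t hnext(uint32_t h)     /* stand-in for hash_32(h, ORDER) */
    {
        return (h * 0x61C88647u) >> (32 - ORDER);
    }

    static void covered_add(uint32_t h, int val)
    {
        for (int i = 0; i < HNUM; i++) {
            covered[h & MASK] += val;
            h = hnext(h);
        }
    }

    static int is_covered(uint32_t h)     /* true only if all slots nonzero */
    {
        for (int i = 0; i < HNUM; i++) {
            if (!covered[h & MASK])
                return 0;
            h = hnext(h);
        }
        return 1;
    }

    int main(void)
    {
        covered_add(0xdeadbeef, 1);
        printf("%d %d\n", is_covered(0xdeadbeef), is_covered(0x12345678));
        covered_add(0xdeadbeef, -1);      /* freeing decrements the counters */
        printf("%d\n", is_covered(0xdeadbeef));
        return 0;
    }
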
@@ -249,14 +420,14 @@
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
-	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
-	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
+	struct kfence_pool_area *kpa = meta->kpa;
+	unsigned long offset = (meta - kpa->meta + 1) * PAGE_SIZE * 2;
+	unsigned long pageaddr = (unsigned long)&kpa->addr[offset];
 
 	/* The checks do not affect performance; only called from slow-paths. */
 
 	/* Only call with a pointer into kfence_metadata. */
-	if (KFENCE_WARN_ON(meta < kfence_metadata ||
-			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
+	if (KFENCE_WARN_ON(meta < kpa->meta || meta >= kpa->meta + kpa->nr_objects))
 		return 0;
 
 	/*
@@ -280,8 +451,6 @@
 	struct kfence_track *track =
 		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
 
-	lockdep_assert_held(&meta->lock);
-
 	if (stack_entries) {
 		memcpy(track->stack_entries, stack_entries,
 		       num_stack_entries * sizeof(stack_entries[0]));
@@ -314,7 +483,7 @@
 	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
 		return true;
 
-	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
+	raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++;
 
 	meta = addr_to_metadata((unsigned long)addr);
 	raw_spin_lock_irqsave(&meta->lock, flags);
@@ -327,24 +496,36 @@
 static inline void set_canary(const struct kfence_metadata *meta)
 {
 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
-	unsigned long addr = pageaddr;
+	unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE;
+
+	/* Canary writes dominate the cost, so shrink the range when there is no interval limit. */
+	if (static_branch_likely(&kfence_short_canary)) {
+		start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start);
+		end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end);
+	}
 
 	/*
	 * The canary may be written to part of the object memory, but it does
	 * not affect it. The user should initialize the object before using it.
	 */
-	for (; addr < meta->addr; addr += sizeof(u64))
+	for (addr = start; addr < meta->addr; addr += sizeof(u64))
 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
 
 	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
-	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
+	for (; addr < end; addr += sizeof(u64))
 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
 }
 
 static inline void check_canary(const struct kfence_metadata *meta)
 {
 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
-	unsigned long addr = pageaddr;
+	unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE;
+
+	/* Canary checks dominate the cost, so shrink the range when there is no interval limit. */
+	if (static_branch_likely(&kfence_short_canary)) {
+		start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start);
+		end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end);
+	}
 
 	/*
	 * We'll iterate over each canary byte per-side until a corrupted byte
@@ -356,7 +537,7 @@
	 */
 
 	/* Apply to left of object. */
-	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
+	for (addr = start; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
 			break;
 	}
@@ -376,7 +557,7 @@
 		if (unlikely(!check_canary_byte((u8 *)addr)))
 			return;
 	}
-	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
+	for (; addr < end; addr += sizeof(u64)) {
 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
 			for (; addr - pageaddr < PAGE_SIZE; addr++) {
@@ -387,48 +568,103 @@
 		}
 	}
 }
 
-static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
-				  unsigned long *stack_entries, size_t num_stack_entries,
-				  u32 alloc_stack_hash)
+static inline struct kfence_metadata *
+get_free_meta_from_node(struct kfence_freelist_node *kfence_freelist)
 {
-	struct kfence_metadata *meta = NULL;
+	struct kfence_metadata *object = NULL;
 	unsigned long flags;
-	struct slab *slab;
-	void *addr;
-	const bool random_right_allocate = get_random_u32_below(2);
-	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
-				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
-	/* Try to obtain a free object. */
-	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
-	if (!list_empty(&kfence_freelist)) {
-		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
-		list_del_init(&meta->list);
-	}
-	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
-	if (!meta) {
-		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
-		return NULL;
+	raw_spin_lock_irqsave(&kfence_freelist->lock, flags);
+	if (!list_empty(&kfence_freelist->freelist)) {
+		object = list_entry(kfence_freelist->freelist.next, struct kfence_metadata, list);
+		list_del_init(&object->list);
+		percpu_ref_get(&object->kpa->refcnt);
 	}
+	raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags);
 
-	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
-		/*
-		 * This is extremely unlikely -- we are reporting on a
-		 * use-after-free, which locked meta->lock, and the reporting
-		 * code via printk calls kmalloc() which ends up in
-		 * kfence_alloc() and tries to grab the same object that we're
-		 * reporting on. While it has never been observed, lockdep does
-		 * report that there is a possibility of deadlock. Fix it by
-		 * using trylock and bailing out gracefully.
-		 */
-		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
-		/* Put the object back on the freelist. */
-		list_add_tail(&meta->list, &kfence_freelist);
-		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
+	return object;
+}
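
The node freelist above is the shared, lock-protected slow path; the code that follows puts a per-cpu cache in front of it. A compilable sketch of the batching policy (refill up to KFENCE_FREELIST_PERCPU_SIZE entries when the cache runs dry, drain half when it doubles), using plain arrays in place of list_head:

    #include <stdio.h>

    /* Model of the per-CPU freelist batching; locking elided, names mine. */
    #define BATCH 100                        /* KFENCE_FREELIST_PERCPU_SIZE */

    static int node_list[1024], node_top;        /* shared per-node pool  */
    static int cpu_cache[2 * BATCH], cpu_count;  /* this CPU's cache      */

    static int get_object(void)
    {
        if (!cpu_count) {                    /* slow path: batch refill   */
            if (!node_top)
                return -1;                   /* pool exhausted            */
            while (cpu_count < BATCH && node_top)
                cpu_cache[cpu_count++] = node_list[--node_top];
        }
        return cpu_cache[--cpu_count];       /* fast path                 */
    }

    static void put_object(int obj)
    {
        cpu_cache[cpu_count++] = obj;
        if (cpu_count == 2 * BATCH)          /* overfull: drain half back */
            while (cpu_count > BATCH)
                node_list[node_top++] = cpu_cache[--cpu_count];
    }

    int main(void)
    {
        for (int i = 0; i < 300; i++)
            node_list[node_top++] = i;
        int a = get_object(), b = get_object();
        put_object(a);
        put_object(b);
        printf("cached=%d shared=%d\n", cpu_count, node_top);
        return 0;
    }
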
 
-		return NULL;
+
+#define KFENCE_FREELIST_PERCPU_SIZE	100
+
+static struct kfence_metadata *
+get_free_meta_slowpath(struct kfence_freelist_cpu *c,
+		       struct kfence_freelist_node *kfence_freelist)
+{
+	struct kfence_metadata *object = NULL;
+	struct list_head *entry = &kfence_freelist->freelist;
+
+	KFENCE_WARN_ON(!list_empty(&c->freelist));
+
+	raw_spin_lock(&kfence_freelist->lock);
+
+	if (list_empty(&kfence_freelist->freelist))
+		goto out;
+
+	object = list_first_entry(entry, struct kfence_metadata, list);
+	list_del_init(&object->list);
+
+	do {
+		entry = READ_ONCE(entry->next);
+
+		if (entry == &kfence_freelist->freelist) {
+			entry = entry->prev;
+			break;
+		}
+
+		c->count++;
+	} while (c->count < KFENCE_FREELIST_PERCPU_SIZE);
+
+	list_cut_position(&c->freelist, &kfence_freelist->freelist, entry);
+
+out:
+	raw_spin_unlock(&kfence_freelist->lock);
+
+	return object;
+}
+
+static struct kfence_metadata *get_free_meta(int real_node)
+{
+	unsigned long flags;
+	struct kfence_freelist_cpu *c;
+	struct kfence_freelist_node *kfence_freelist;
+	struct kfence_metadata *object;
+	int node = kfence_node_map[real_node];
+
+	if (node >= 0)
+		kfence_freelist = &freelist.node[node];
+	else
+		kfence_freelist = &freelist.node[real_node];
+
+	/* If the target page is not on the current node, get it directly from its node list. */
+	if (unlikely(node != kfence_node_map[numa_node_id()] || kfence_num_objects_snap))
+		return get_free_meta_from_node(kfence_freelist);
+
+	local_irq_save(flags);
+	c = get_cpu_ptr(freelist.cpu);
+
+	if (unlikely(!c->count)) {
+		object = get_free_meta_slowpath(c, kfence_freelist);
+		if (!object)
+			goto out;
+	} else {
+		object = list_first_entry(&c->freelist, struct kfence_metadata, list);
+		list_del_init(&object->list);
+		c->count--;
+	}
+	percpu_ref_get(&object->kpa->refcnt);
+
+out:
+	put_cpu_ptr(c);
+	local_irq_restore(flags);
+
+	return object;
+}
+
+static inline void __init_meta(struct kfence_metadata *meta, size_t size, struct kmem_cache *cache,
+			       unsigned long *stack_entries, size_t num_stack_entries,
+			       u32 alloc_stack_hash)
+{
+	struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters);
+
 	meta->addr = metadata_to_pageaddr(meta);
 
 	/* Unprotect if we're reusing this page. */
 	if (meta->state == KFENCE_OBJECT_FREED)
@@ -442,27 +678,72 @@
	 * is that the out-of-bounds accesses detected are deterministic for
	 * such allocations.
	 */
-	if (random_right_allocate) {
+	if (cache && this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS] % 2) {
 		/* Allocate on the "right" side, re-calculate address. */
 		meta->addr += PAGE_SIZE - size;
 		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
 	}
 
-	addr = (void *)meta->addr;
-
 	/* Update remaining metadata. */
 	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
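
__init_meta() above parks every other allocation flush against the right edge of its data page, so an out-of-bounds access past the object lands on the guard page deterministically. A worked example of that address math, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul

    static unsigned long place(unsigned long page, unsigned long size,
                               unsigned long align, int right)
    {
        unsigned long addr = page;

        if (right) {
            addr += PAGE_SIZE - size;    /* flush right, next page is a guard */
            addr &= ~(align - 1);        /* ALIGN_DOWN(addr, cache->align)    */
        }
        return addr;
    }

    int main(void)
    {
        /* a 100-byte object with 64-byte alignment, data page at 0x10000 */
        printf("left  = %#lx\n", place(0x10000, 100, 64, 0)); /* 0x10000 */
        printf("right = %#lx\n", place(0x10000, 100, 64, 1)); /* 0x10f80 */
        return 0;
    }

 	/* Pairs with READ_ONCE() in kfence_shutdown_cache().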
*/ WRITE_ONCE(meta->cache, cache); meta->size = size; meta->alloc_stack_hash = alloc_stack_hash; +} + +static void put_free_meta(struct kfence_metadata *object); +static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, + unsigned long *stack_entries, size_t num_stack_entries, + u32 alloc_stack_hash, int node) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; + unsigned long flags; + struct page *page; + struct slab *slab; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); + + /* Try to obtain a free object. */ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; + } + + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + */ + /* Put the object back on the freelist. */ + put_free_meta(meta); + + return NULL; + } + + __init_meta(meta, size, cache, stack_entries, num_stack_entries, alloc_stack_hash); + raw_spin_unlock_irqrestore(&meta->lock, flags); + addr = (void *)meta->addr; alloc_covered_add(alloc_stack_hash, 1); /* Set required slab fields. */ - slab = virt_to_slab((void *)meta->addr); + page = virt_to_page(addr); + slab = page_slab(page); + __SetPageSlab(page); slab->slab_cache = cache; +#ifdef CONFIG_MEMCG + slab->memcg_data = (unsigned long)meta->objcg | MEMCG_DATA_OBJCGS; +#endif #if defined(CONFIG_SLUB) slab->objects = 1; #elif defined(CONFIG_SLAB) @@ -485,15 +766,74 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g if (random_fault) kfence_protect(meta->addr); /* Random "faults" by protecting the object. 
*/ - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]); - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]); + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS]++; return addr; } -static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) +static inline void put_free_meta_to_node(struct kfence_metadata *object, + struct kfence_freelist_node *kfence_freelist) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + list_add_tail(&object->list, &kfence_freelist->freelist); + percpu_ref_put(&object->kpa->refcnt); + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); +} + +static void put_free_meta_slowpath(struct kfence_freelist_cpu *c, + struct kfence_freelist_node *kfence_freelist) +{ + struct list_head *entry = &c->freelist, new_list; + + do { + entry = entry->next; + c->count--; + } while (c->count > KFENCE_FREELIST_PERCPU_SIZE); + + list_cut_position(&new_list, &c->freelist, entry); + raw_spin_lock(&kfence_freelist->lock); + list_splice_tail(&new_list, &kfence_freelist->freelist); + raw_spin_unlock(&kfence_freelist->lock); +} + +static void put_free_meta(struct kfence_metadata *object) +{ + int node = object->kpa->node; + unsigned long flags; + struct kfence_freelist_cpu *c; + struct kfence_freelist_node *kfence_freelist = &freelist.node[node]; + + KFENCE_WARN_ON(!list_empty(&object->list)); + + /* If meta not on current node, just return it to its own nodelist */ + if (unlikely(!kfence_node_map || node != kfence_node_map[numa_node_id()] || + kfence_num_objects_snap)) { + put_free_meta_to_node(object, kfence_freelist); + return; + } + + local_irq_save(flags); + c = get_cpu_ptr(freelist.cpu); + + list_add_tail(&object->list, &c->freelist); + c->count++; + + if (unlikely(c->count == KFENCE_FREELIST_PERCPU_SIZE * 2)) + put_free_meta_slowpath(c, kfence_freelist); + + percpu_ref_put(&object->kpa->refcnt); + + put_cpu_ptr(c); + local_irq_restore(flags); +} + +static inline bool __free_meta(void *addr, struct kfence_metadata *meta, bool zombie, bool is_page) { struct kcsan_scoped_access assert_page_exclusive; + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); unsigned long flags; bool init; @@ -501,11 +841,11 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { /* Invalid or double-free, bail out. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + this_cpu_counter->counter[KFENCE_COUNTER_BUGS]++; kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_INVALID_FREE); raw_spin_unlock_irqrestore(&meta->lock, flags); - return; + return false; } /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ @@ -525,38 +865,50 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z /* Mark the object as freed. */ metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); - init = slab_want_init_on_free(meta->cache); + if (!is_page) + init = slab_want_init_on_free(meta->cache); + raw_spin_unlock_irqrestore(&meta->lock, flags); alloc_covered_add(meta->alloc_stack_hash, -1); - /* Check canary bytes for memory corruption. */ - check_canary(meta); + if (!is_page) { + /* Check canary bytes for memory corruption. */ + check_canary(meta); - /* - * Clear memory if init-on-free is set. 
While we protect the page, the
-	 * data is still there, and after a use-after-free is detected, we
-	 * unprotect the page, so the data is still accessible.
-	 */
-	if (!zombie && unlikely(init))
-		memzero_explicit(addr, meta->size);
+		/*
+		 * Clear memory if init-on-free is set. While we protect the page, the
+		 * data is still there, and after a use-after-free is detected, we
+		 * unprotect the page, so the data is still accessible.
+		 */
+		if (!zombie && unlikely(init))
+			memzero_explicit(addr, meta->size);
+	}
 
 	/* Protect to detect use-after-frees. */
 	kfence_protect((unsigned long)addr);
 
 	kcsan_end_scoped_access(&assert_page_exclusive);
+
+	return true;
+}
+
+static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
+{
+	struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters);
+
+	if (!__free_meta(addr, meta, zombie, false))
+		return;
+
 	if (!zombie) {
 		/* Add it to the tail of the freelist for reuse. */
-		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
-		KFENCE_WARN_ON(!list_empty(&meta->list));
-		list_add_tail(&meta->list, &kfence_freelist);
-		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
+		put_free_meta(meta);
 
-		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
-		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
+		this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]--;
+		this_cpu_counter->counter[KFENCE_COUNTER_FREES]++;
 	} else {
 		/* See kfence_shutdown_cache(). */
-		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
+		this_cpu_counter->counter[KFENCE_COUNTER_ZOMBIES]++;
 	}
 }
 
@@ -567,22 +919,39 @@ static void rcu_guarded_free(struct rcu_head *h)
 	kfence_guarded_free((void *)meta->addr, meta, false);
 }
 
-/*
- * Initialization of the KFENCE pool after its allocation.
- * Returns 0 on success; otherwise returns the address up to
- * which partial initialization succeeded.
- */
-static unsigned long kfence_init_pool(void)
+static void kfence_clear_page_info(unsigned long addr, unsigned long size)
+{
+	unsigned long i;
+
+	for (i = addr; i < addr + size; i += PAGE_SIZE) {
+		struct page *page = virt_to_page((void *)i);
+
+		if (PageSlab(page)) {
+#ifdef CONFIG_MEMCG
+			page->memcg_data = 0;
+#endif
+			__ClearPageSlab(page);
+		}
+		__ClearPageKfence(page);
+		page->mapping = NULL;
+		atomic_set(&page->_refcount, 1);
+		kfence_unprotect(i);
+	}
+}
+
+static bool __kfence_init_pool_area(struct kfence_pool_area *kpa)
 {
-	unsigned long addr;
+	char *__kfence_pool = kpa->addr;
+	struct kfence_metadata *kfence_metadata = kpa->meta;
+	struct kfence_freelist_node *kfence_freelist = &freelist.node[kpa->node];
+	unsigned long addr = (unsigned long)__kfence_pool, flags;
 	struct page *pages;
 	int i;
 
-	if (!arch_kfence_init_pool())
-		return (unsigned long)__kfence_pool;
+	if (!__kfence_pool_early_init && !arch_kfence_init_pool(kpa))
+		goto err;
 
-	addr = (unsigned long)__kfence_pool;
-	pages = virt_to_page(__kfence_pool);
+	pages = virt_to_page((void *)addr);
 
 	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
@@ -592,17 +961,10 @@ static unsigned long kfence_init_pool(void)
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
 */
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
-
-		if (!i || (i % 2))
-			continue;
+	for (i = 0; i < kpa->pool_size / PAGE_SIZE; i++) {
+		struct page *page = nth_page(pages, i);
 
-		__folio_set_slab(slab_folio(slab));
-#ifdef CONFIG_MEMCG
-		slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg |
-				   MEMCG_DATA_OBJCGS;
-#endif
+		__SetPageKfence(page);
 	}
 
 	/*
@@ -613,96 +975,676 @@
	 */
 	for (i = 0; i < 2; i++) {
 		if (unlikely(!kfence_protect(addr)))
-			return addr;
+			goto err;
 
 		addr += PAGE_SIZE;
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
-		struct kfence_metadata *meta = &kfence_metadata_init[i];
+	/* Protect the right redzone. */
+	for (i = 0; i < kpa->nr_objects; i++) {
+		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+			goto err;
+		addr += 2 * PAGE_SIZE;
+	}
+
+	addr = (unsigned long)__kfence_pool + 2 * PAGE_SIZE;
+	raw_spin_lock_irqsave(&kfence_freelist->lock, flags);
+	for (i = 0; i < kpa->nr_objects; i++) {
+		struct kfence_metadata *meta = &kfence_metadata[i];
 
 		/* Initialize metadata. */
 		INIT_LIST_HEAD(&meta->list);
 		raw_spin_lock_init(&meta->lock);
 		meta->state = KFENCE_OBJECT_UNUSED;
 		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
-		list_add_tail(&meta->list, &kfence_freelist);
-
-		/* Protect the right redzone. */
-		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-			goto reset_slab;
+		meta->kpa = kpa;
+		list_add_tail(&meta->list, &kfence_freelist->freelist);
+		/* No failure is allowed after this point, since this pool is now on the freelist. */
 
 		addr += 2 * PAGE_SIZE;
 	}
+	raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags);
 
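The loops above fix the pool geometry: a pool with nr_objects objects spans (nr_objects + 1) * 2 pages, pages 0 and 1 are leading guards, and object i's data page sits (i + 1) * 2 pages into the pool, which is exactly what metadata_to_pageaddr() computes. A small standalone check of those numbers for the 4 KiB page / 1 GiB PUD case assumed elsewhere in the patch:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul
    #define PUD_SIZE  (1ul << 30)

    int main(void)
    {
        /* KFENCE_MAX_OBJECTS_PER_AREA from the patch */
        unsigned long nr_objects = PUD_SIZE / PAGE_SIZE / 2 - 1;  /* 131071 */
        unsigned long pool_size  = (nr_objects + 1) * 2 * PAGE_SIZE;

        printf("objects per area: %lu\n", nr_objects);
        printf("pool size: %lu MiB\n", pool_size >> 20);          /* 1024 */

        for (unsigned long i = 0; i < 2; i++)
            printf("obj %lu: data at +%lu pages, redzone at +%lu pages\n",
                   i, (i + 1) * 2, (i + 1) * 2 + 1);
        return 0;
    }

 	/*
-	 * Make kfence_metadata visible only when initialization is successful.
-	 * Otherwise, if the initialization fails and kfence_metadata is freed,
-	 * it may cause UAF in kfence_shutdown_cache().
+	 * The pool is live and will never be deallocated from this point on.
+	 * Remove the pool object from the kmemleak object tree, as it would
+	 * otherwise overlap with allocations returned by kfence_alloc(), which
+	 * are registered with kmemleak through the slab post-alloc hook.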
*/ - smp_store_release(&kfence_metadata, kfence_metadata_init); - return 0; + if (PageReserved(pages)) + kmemleak_ignore_phys(__pa(__kfence_pool)); -reset_slab: - for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { - struct slab *slab = page_slab(nth_page(pages, i)); + return true; - if (!i || (i % 2)) - continue; -#ifdef CONFIG_MEMCG - slab->memcg_data = 0; -#endif - __folio_clear_slab(slab_folio(slab)); - } +err: + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + return false; +} - return addr; +static bool kfence_rb_less(struct rb_node *a, const struct rb_node *b) +{ + return (unsigned long)kfence_rbentry(a)->addr < (unsigned long)kfence_rbentry(b)->addr; } -static bool __init kfence_init_pool_early(void) +static void __init kfence_alloc_pool_node(int node) { - unsigned long addr; + unsigned long nr_need = kfence_num_objects_stat[node].need; + unsigned long nr_request = min(nr_need, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long index = kfence_nr_areas_per_node * node; + + while (nr_need) { + unsigned long kfence_pool_size = (nr_request + 1) * 2 * PAGE_SIZE; + + __kfence_pool_area[index] = memblock_alloc_node(kfence_pool_size, PUD_SIZE, node); + if (!__kfence_pool_area[index]) { + pr_err("kfence alloc pool on node %d failed\n", node); + break; + } + index++; + nr_need -= nr_request; + nr_request = min(nr_request, nr_need); + } +} + +static void kpa_release(struct percpu_ref *ref); +static void kfence_free_area(struct work_struct *work); +static inline bool init_kpa(struct kfence_pool_area *kpa, char *__kfence_pool, int node, + unsigned long nr_objects, unsigned long pool_size) +{ + kpa->meta = vzalloc_node(sizeof(struct kfence_metadata) * nr_objects, node); + if (!kpa->meta) + goto fail; + if (percpu_ref_init(&kpa->refcnt, kpa_release, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) + goto fail; + INIT_WORK(&kpa->work, kfence_free_area); + kpa->addr = __kfence_pool; + kpa->pool_size = pool_size; + kpa->nr_objects = nr_objects; + kpa->node = node; + atomic_set(&kpa->_ref, 1); /* held by rb tree */ + + if (!__kfence_init_pool_area(kpa)) + goto fail; + + return true; + +fail: + vfree(kpa->meta); + percpu_ref_exit(&kpa->refcnt); + + return false; +} + +static bool __init kfence_init_pool_area(int node, int area) +{ + int index = node * kfence_nr_areas_per_node + area; + char *__kfence_pool = __kfence_pool_area[index]; + struct kfence_pool_area *kpa; + unsigned long nr_objects, pool_size; if (!__kfence_pool) return false; - addr = kfence_init_pool(); + nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; - if (!addr) { - /* - * The pool is live and will never be deallocated from this point on. - * Ignore the pool object from the kmemleak phys object tree, as it would - * otherwise overlap with allocations returned by kfence_alloc(), which - * are registered with kmemleak through the slab post-alloc hook. 
- */ - kmemleak_ignore_phys(__pa(__kfence_pool)); - return true; + kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node); + if (!kpa) + goto fail; + + if (!init_kpa(kpa, __kfence_pool, node, nr_objects, pool_size)) + goto fail; + + rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less); + __kfence_pool_area[index] = NULL; + kfence_num_objects_stat[node].allocated += nr_objects; + + return true; + +fail: + memblock_free_late(__pa(__kfence_pool), pool_size); + __kfence_pool_area[index] = NULL; + kfree(kpa); + + return false; +} + +static bool __init kfence_init_pool(void) +{ + int area, node; + bool success_once = false; + + for_each_node(node) { + for (area = 0; area < kfence_nr_areas_per_node; area++) { + if (kfence_init_pool_area(node, area)) + success_once = true; + } + } + + return success_once; +} + +static void kfence_alloc_pool_late_node(int node, struct list_head *ready, bool fallback) +{ + unsigned long nr_need, nr_request; + struct kfence_alloc_node_cond *knos = &kfence_num_objects_stat[node]; + gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO; + + if (knos->allocated >= knos->need) + return; + + nr_need = roundup(knos->need - knos->allocated, KFENCE_MAX_OBJECTS_PER_AREA); + nr_request = KFENCE_MAX_OBJECTS_PER_AREA; + if (!fallback) + gfp_mask |= __GFP_THISNODE; + + while (nr_need) { + struct page *page; + struct kfence_pool_area *kpa; + unsigned long nr_pages = (nr_request + 1) * 2; +#ifdef CONFIG_CONTIG_ALLOC + page = alloc_contig_pages(nr_pages, gfp_mask, node, NULL); +#else + pr_warn("anolis kfence only supports enabled later with CONFIG_CONTIG_ALLOC\n"); + page = NULL; +#endif + if (!page) { + pr_err("kfence alloc pool on node %d failed\n", node); + return; + } + kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node); + if (!kpa) + goto fail; + + if (!init_kpa(kpa, page_to_virt(page), node, nr_request, nr_pages * PAGE_SIZE)) + goto fail; + + list_add(&kpa->list, ready); + nr_need -= nr_request; + knos->allocated += nr_request; + nr_request = min(nr_request, nr_need); + + continue; + +fail: +#ifdef CONFIG_CONTIG_ALLOC + free_contig_range(page_to_pfn(page), nr_pages); +#endif + kfree(kpa); + + return; + } +} + +static void kfence_free_pool_area(struct kfence_pool_area *kpa) +{ + phys_addr_t base = __pa(kpa->addr), size = kpa->pool_size; + phys_addr_t cursor = PFN_UP(base); + phys_addr_t end = PFN_DOWN(base + size); + + kmemleak_free_part_phys(base, size); + for (; cursor < end; cursor++) { + __free_pages_core(pfn_to_page(cursor), 0); + totalram_pages_inc(); + } +} + +static void kfence_free_pool_late_area(struct kfence_pool_area *kpa) +{ +#ifdef CONFIG_CONTIG_ALLOC + free_contig_range(page_to_pfn(virt_to_page(kpa->addr)), kpa->pool_size / PAGE_SIZE); +#endif +} + +static void get_kpa(struct kfence_pool_area *kpa) +{ + atomic_inc(&kpa->_ref); +} + +static void put_kpa(struct kfence_pool_area *kpa) +{ + if (atomic_dec_and_test(&kpa->_ref)) + kfree(kpa); +} + +static int kfence_update_pool_root(void *info) +{ + struct list_head *ready_list = info; + struct kfence_pool_area *kpa; + struct rb_node *cur, *next; + + for (cur = rb_first(&kfence_pool_root); cur; cur = next) { + kpa = kfence_rbentry(cur); + next = rb_next(cur); + if (!kpa->nr_objects) { + rb_erase(&kpa->rb_node, &kfence_pool_root); + put_kpa(kpa); + } else { + percpu_ref_resurrect(&kpa->refcnt); + } + } + + while (!list_empty(ready_list)) { + kpa = list_first_entry(ready_list, struct kfence_pool_area, list); + rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less); + list_del(&kpa->list); } + 
+/*
+ * Flush this cpu's per-cpu freelist to the per-node freelist.
+ *
+ * We don't need additional synchronization to prevent races, because
+ * this can only be reached via two routes (and in both of them KFENCE
+ * is disabled, so no new allocations will occur):
+ *
+ * 1) from update_kfence_node_map() when enabling kfence.
+ *    Since kfence_node_map is set to NULL, the objects
+ *    will be directly freed to the per-node freelist.
+ *
+ * 2) from kfence_free_area() when a kpa is being released.
+ *    Since the refcnt of this kpa has dropped to 0, no objects
+ *    from this kpa will be freed to a per-cpu freelist.
+ *    If some objects from other kpas are freed after this
+ *    check, that is fine: we only free the space of our
+ *    target kpa, so objects from other kpas may simply
+ *    remain in the per-cpu freelist.
+ */
+static void kfence_flush(struct kfence_freelist_cpu *c)
+{
+	struct kfence_freelist_node *kfence_freelist;
+	struct kfence_metadata *meta;
+	unsigned long flags;
+
+	if (list_empty(&c->freelist)) {
+		if (KFENCE_WARN_ON(c->count))
+			c->count = 0;
+		return;
+	}
+
+	meta = list_first_entry(&c->freelist, struct kfence_metadata, list);
+	kfence_freelist = &freelist.node[meta->kpa->node];
+
+	raw_spin_lock_irqsave(&kfence_freelist->lock, flags);
+	list_splice_tail_init(&c->freelist, &kfence_freelist->freelist);
+	c->count = 0;
+	raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(kfence_flush_wait);
+static void kfence_flush_call(void *info)
+{
+	struct kfence_freelist_cpu *c = get_cpu_ptr(freelist.cpu);
+
+	kfence_flush(c);
+	put_cpu_ptr(c);
+
+	if (!atomic_dec_return(&kfence_flush_res))
+		wake_up(&kfence_flush_wait);
+}
+
+/* Flush percpu freelists on all cpus and wait for completion. */
+static void kfence_flush_all_and_wait(void)
+{
+	int cpu;
+
+	cpus_read_lock();
+	atomic_set(&kfence_flush_res, num_online_cpus());
+	on_each_cpu(kfence_flush_call, NULL, 0);
+
+	/* Flush offline cpus. */
+	preempt_disable();
+	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask) {
+		kfence_flush(per_cpu_ptr(freelist.cpu, cpu));
+	}
+	preempt_enable();
+	cpus_read_unlock();
+
+	wait_event_idle(kfence_flush_wait, !atomic_read(&kfence_flush_res));
+}
+
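kfence_flush_all_and_wait() above is a broadcast-and-countdown rendezvous: arm an atomic counter with the number of online CPUs, run the flush everywhere, and sleep until the last CPU drops the counter to zero. A compilable pthreads model of the same pattern (the kernel uses on_each_cpu() plus a waitqueue instead of threads and condition variables):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPU 4

    static atomic_int pending = NCPU;
    static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;

    static void *flush_cpu(void *arg)
    {
        (void)arg;
        /* ... splice this "CPU"'s freelist back to its node list here ... */
        if (atomic_fetch_sub(&pending, 1) == 1) {   /* last one wakes up */
            pthread_mutex_lock(&mu);
            pthread_cond_signal(&cv);
            pthread_mutex_unlock(&mu);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NCPU];

        for (int i = 0; i < NCPU; i++)
            pthread_create(&t[i], NULL, flush_cpu, NULL);

        pthread_mutex_lock(&mu);
        while (atomic_load(&pending))               /* wait_event(...) */
            pthread_cond_wait(&cv, &mu);
        pthread_mutex_unlock(&mu);

        for (int i = 0; i < NCPU; i++)
            pthread_join(t[i], NULL);
        puts("all per-cpu freelists flushed");
        return 0;
    }

+static bool kfence_can_recover_tlb(struct kfence_pool_area *kpa)
+{
+#ifdef CONFIG_X86_64
+	/* only recover 1GiB-aligned TLB mappings */
+	return kpa->pool_size == PUD_SIZE;
+#else
+	/*
+	 * On arm64, the direct mapping area is already split to page
+	 * granularity with CONFIG_RODATA_FULL_DEFAULT_ENABLED=y or
+	 * CONFIG_KFENCE=y, so we do not recover the TLB to PUD-size huge
+	 * mappings. See upstream commit 840b23986344
+	 * ("arm64, kfence: enable KFENCE for ARM64") for details.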
*/ - memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); - __kfence_pool = NULL; + return false; +#endif +} + +static inline void __kfence_recover_tlb(unsigned long addr) +{ + if (!arch_kfence_free_pool(addr)) + pr_warn("fail to recover tlb to 1G at 0x%p-0x%p\n", + (void *)addr, (void *)(addr + PUD_SIZE)); +} + +static inline void kfence_recover_tlb(struct kfence_pool_area *kpa) +{ + unsigned long base = ALIGN_DOWN((unsigned long)kpa->addr, PUD_SIZE); + + if (kfence_can_recover_tlb(kpa)) + __kfence_recover_tlb(base); +} + +/* Free a specific area. The refcnt has been down to 0. */ +static void kfence_free_area(struct work_struct *work) +{ + unsigned long flags, i; + struct page *page; + struct kfence_pool_area *kpa = container_of(work, struct kfence_pool_area, work); + struct kfence_freelist_node *kfence_freelist; + + mutex_lock(&kfence_mutex); + if (!kpa->nr_objects || !percpu_ref_is_zero(&kpa->refcnt)) + goto out_unlock; + + kfence_flush_all_and_wait(); + + kfence_freelist = &freelist.node[kpa->node]; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + for (i = 0; i < kpa->nr_objects; i++) + list_del(&kpa->meta[i].list); + + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); + + pr_info("freed %lu bytes for %lu objects on node %d at 0x%p-0x%p\n", + kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr, + (void *)(kpa->addr + kpa->pool_size)); - memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE); - kfence_metadata_init = NULL; + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + kfence_recover_tlb(kpa); + page = virt_to_page(kpa->addr); + + if (PageReserved(page)) + kfence_free_pool_area(kpa); + else + kfence_free_pool_late_area(kpa); + + vfree(kpa->meta); + kpa->meta = NULL; + percpu_ref_exit(&kpa->refcnt); + kpa->nr_objects = 0; + kpa->pool_size = 0; + +out_unlock: + mutex_unlock(&kfence_mutex); + put_kpa(kpa); +} + +static void kpa_release(struct percpu_ref *ref) +{ + struct kfence_pool_area *kpa = container_of(ref, struct kfence_pool_area, refcnt); + + get_kpa(kpa); + if (!queue_work(system_long_wq, &kpa->work)) + put_kpa(kpa); +} + +static void calculate_need_alloc(void) +{ + int node, nr_kpas, base, remain, nr_node_has_cpu; + enum node_states node_stat = N_CPU; + + if (!kfence_num_objects_stat) + return; + + if (kfence_pool_node_mode) { + for_each_node(node) { + kfence_num_objects_stat[node].need = kfence_num_objects; + } + return; + } + + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) { + kfence_num_objects_stat[first_online_node].need = kfence_num_objects; + return; + } + + /* In global mode, we only alloc on nodes with cpus (i.e., not on pmem nodes) */ + nr_node_has_cpu = num_node_state(node_stat); + if (!nr_node_has_cpu) { + node_stat = N_ONLINE; + nr_node_has_cpu = num_node_state(node_stat); + } + nr_kpas = kfence_num_objects / KFENCE_MAX_OBJECTS_PER_AREA; + base = nr_kpas / nr_node_has_cpu; + remain = nr_kpas - base * nr_node_has_cpu; + for_each_node_state(node, node_stat) { + kfence_num_objects_stat[node].need = (base + (!!remain)) * + KFENCE_MAX_OBJECTS_PER_AREA; + if (remain) + remain--; + } +} + +static inline bool __check_map_change(int *new_node_map) +{ + int node; + + for_each_node(node) { + if (kfence_node_map[node] != new_node_map[node]) + return true; + } return false; } +static void update_kfence_node_map(int *new_node_map) +{ + int *old_node_map; + int node; + enum node_states node_stat = N_CPU; + struct zonelist *zonelist; + struct zone *zone; + struct zoneref *z; 
+
+	memset(new_node_map, -1, sizeof(int) * nr_node_ids);
+
+	if (!num_node_state(node_stat))
+		node_stat = N_ONLINE;
+
+	for_each_node_state(node, node_stat) {
+		if (kfence_num_objects_stat[node].allocated) {
+			new_node_map[node] = node;
+			continue;
+		}
+
+		/* We borrow the zonelist to find the nearest node to map to. */
+		zonelist = node_zonelist(node, GFP_KERNEL);
+		for_each_zone_zonelist_nodemask(zone, z, zonelist, ZONE_NORMAL, NULL) {
+			if (kfence_num_objects_stat[zone_to_nid(zone)].allocated) {
+				new_node_map[node] = zone_to_nid(zone);
+				break;
+			}
+		}
+	}
+
+	/* This is the first initialization. */
+	if (!kfence_node_map) {
+		kfence_node_map = new_node_map;
+		return;
+	}
+
+	if (!__check_map_change(new_node_map)) {
+		kfree(new_node_map);
+		return;
+	}
+
+	old_node_map = kfence_node_map;
+	kfence_node_map = NULL;
+	synchronize_rcu();
+
+	kfence_flush_all_and_wait();
+
+	kfence_node_map = new_node_map;
+	kfree(old_node_map);
+}
+
+/*
+ * Get the last kfence.booting_max= from the boot cmdline.
+ * Mainly copied from get_last_crashkernel().
+ */
+static __init char *get_last_kfence_booting_max(char *name)
+{
+	char *p = boot_command_line, *ck_cmdline = NULL;
+
+	/* find kfence.booting_max and use the last one if there are several */
+	p = strstr(p, name);
+	while (p) {
+		ck_cmdline = p;
+		p = strstr(p+1, name);
+	}
+
+	if (!ck_cmdline)
+		return NULL;
+
+	ck_cmdline += strlen(name);
+
+	return ck_cmdline;
+}
+
+/*
+ * This function parses command lines in the format
+ *
+ *	kfence.booting_max=ramsize-range:size[,...]
+ *
+ * The function returns 0 on success and -EINVAL on failure.
+ * Mainly copied from parse_crashkernel_mem().
+ */
+static int __init parse_kfence_booting_max(char *cmdline,
+					   unsigned long long system_ram,
+					   unsigned long long *reserve_max)
+{
+	char *cur = cmdline, *tmp;
+
+	/* for each entry of the comma-separated list */
+	do {
+		unsigned long long start, end = ULLONG_MAX, size;
+
+		/* get the start of the range */
+		start = memparse(cur, &tmp);
+		if (cur == tmp) {
+			pr_warn("kfence.booting_max: Memory value expected\n");
+			return -EINVAL;
+		}
+		cur = tmp;
+		if (*cur != '-') {
+			pr_warn("kfence.booting_max: '-' expected\n");
+			return -EINVAL;
+		}
+		cur++;
+
+		/* if no ':' is here, then we read the end of the range */
+		if (*cur != ':') {
+			end = memparse(cur, &tmp);
+			if (cur == tmp) {
+				pr_warn("kfence.booting_max: Memory value expected\n");
+				return -EINVAL;
+			}
+			cur = tmp;
+			if (end <= start) {
+				pr_warn("kfence.booting_max: end <= start\n");
+				return -EINVAL;
+			}
+		}
+
+		if (*cur != ':') {
+			pr_warn("kfence.booting_max: ':' expected\n");
+			return -EINVAL;
+		}
+		cur++;
+
+		size = memparse(cur, &tmp);
+		if (cur == tmp) {
+			pr_warn("kfence.booting_max: Memory value expected\n");
+			return -EINVAL;
+		}
+		cur = tmp;
+
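The grammar accepted above is the crashkernel-style list of ramsize-range:size entries. A standalone sketch of the parser with a memparse()-like suffix helper, showing how an open-ended range such as 64G-:1G resolves on a hypothetical 128 GiB machine, and how the resulting limit caps the object count the way update_kfence_booting_max() does:

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long memparse_(const char *s, char **end)
    {
        unsigned long long v = strtoull(s, end, 0);

        switch (**end) {
        case 'G': v <<= 10; /* fall through */
        case 'M': v <<= 10; /* fall through */
        case 'K': v <<= 10; (*end)++;
        }
        return v;
    }

    static int parse_booting_max(char *cur, unsigned long long ram,
                                 unsigned long long *out)
    {
        do {
            char *tmp;
            unsigned long long start, end = ~0ull, size;

            start = memparse_(cur, &tmp);
            if (tmp == cur || *tmp != '-') return -1;
            cur = tmp + 1;
            if (*cur != ':') {               /* end is optional: "64G-:1G" */
                end = memparse_(cur, &tmp);
                if (tmp == cur || end <= start) return -1;
                cur = tmp;
            }
            if (*cur != ':') return -1;
            cur++;
            size = memparse_(cur, &tmp);
            if (tmp == cur) return -1;
            cur = tmp;
            if (ram >= start && ram < end) { *out = size; return 0; }
        } while (*cur++ == ',');
        return 0;                  /* no range matched; *out unchanged */
    }

    int main(void)
    {
        unsigned long long max = 0;
        char arg[] = "0-64G:512M,64G-:1G";

        parse_booting_max(arg, 128ull << 30, &max);
        printf("booting_max = %lluM\n", max >> 20);        /* 1024M */

        /* update_kfence_booting_max()-style clamp: at most PUD_SIZE,
         * at least 4 pages, nr_objects = nr_pages / 2 - 1. */
        unsigned long long pud = 1ull << 30;
        unsigned long nr_pages = (max < pud ? max : pud) / 4096;
        if (nr_pages < 4)
            puts("too small: boot-time KFENCE disabled");
        else
            printf("max objects = %lu\n", nr_pages / 2 - 1);
        return 0;
    }

+		/* match ?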
*/ + if (system_ram >= start && system_ram < end) { + *reserve_max = size; + break; + } + } while (*cur++ == ','); + + if (!*reserve_max) + pr_info("kfence.booting_max size resulted in zero bytes, disabled\n"); + + return 0; +} + /* === DebugFS Interface ==================================================== */ +static void print_pool_size(struct seq_file *seq, unsigned long byte) +{ + if (byte < SZ_1K) + seq_printf(seq, "%lu B\n", byte); + else if (byte < SZ_1M) + seq_printf(seq, "%lu KB\n", byte / SZ_1K); + else if (byte < SZ_1G) + seq_printf(seq, "%lu MB\n", byte / SZ_1M); + else + seq_printf(seq, "%lu GB\n", byte / SZ_1G); +} + static int stats_show(struct seq_file *seq, void *v) { - int i; + int i, cpu; + struct kfence_pool_area *kpa; + struct rb_node *iter; + unsigned long *size_count; seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled)); - for (i = 0; i < KFENCE_COUNTER_COUNT; i++) - seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i])); + + if (!counters) + return 0; + + for (i = 0; i < KFENCE_COUNTER_COUNT; i++) { + s64 sum = 0; + /* + * This calculation may not accurate, but don't mind since we are + * mostly interested in bugs and zombies. They are rare and likely + * not changed during calculating. + */ + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(counters, cpu)->counter[i]; + seq_printf(seq, "%-35s:%20lld\n", counter_names[i], sum); + } + + size_count = kmalloc_array(nr_node_ids * 2, sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO); + if (!size_count) + return 0; + + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (!kpa->nr_objects) + continue; + size_count[kpa->node] += kpa->nr_objects; + size_count[kpa->node + nr_node_ids] += kpa->pool_size; + } + mutex_unlock(&kfence_mutex); + + seq_puts(seq, "\nnode\tobject_size\tpool_size\n"); + for_each_node(i) { + seq_printf(seq, "%-8d%-16lu", i, size_count[i]); + print_pool_size(seq, size_count[i + nr_node_ids]); + } + + kfree(size_count); return 0; } @@ -715,28 +1657,59 @@ DEFINE_SHOW_ATTRIBUTE(stats); */ static void *start_object(struct seq_file *seq, loff_t *pos) { - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); + loff_t index = *pos; + struct kfence_pool_area *kpa; + struct rb_node *iter; + + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (index >= kpa->nr_objects) { + index -= kpa->nr_objects; + continue; + } + return &kpa->meta[index]; + } return NULL; } static void stop_object(struct seq_file *seq, void *v) { + mutex_unlock(&kfence_mutex); } static void *next_object(struct seq_file *seq, void *v, loff_t *pos) { + struct kfence_metadata *meta = (struct kfence_metadata *)v; + struct kfence_pool_area *kpa = meta->kpa; + struct rb_node *cur = &kpa->rb_node; + ++*pos; - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); - return NULL; + ++meta; + if (meta - kpa->meta < kpa->nr_objects) + return meta; + seq_puts(seq, "---------------------------------\n"); +next_meta: + cur = rb_next(cur); + if (!cur) + return NULL; + kpa = kfence_rbentry(cur); + if (!kpa->nr_objects) + goto next_meta; + + return kpa->meta; } static int show_object(struct seq_file *seq, void *v) { - struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; + struct kfence_metadata *meta = (struct kfence_metadata *)v; unsigned long flags; + char buf[20]; + if (!meta) + return 0; + + sprintf(buf, "node %d:\n", meta->kpa->node); + seq_puts(seq, buf); raw_spin_lock_irqsave(&meta->lock, flags); kfence_print_object(seq, meta); 
raw_spin_unlock_irqrestore(&meta->lock, flags); @@ -753,14 +1726,10 @@ static const struct seq_operations objects_sops = { }; DEFINE_SEQ_ATTRIBUTE(objects); -static int kfence_debugfs_init(void) +static int __init kfence_debugfs_init(void) { - struct dentry *kfence_dir; - - if (!READ_ONCE(kfence_enabled)) - return 0; + struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL); - kfence_dir = debugfs_create_dir("kfence", NULL); debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); return 0; @@ -772,13 +1741,17 @@ late_initcall(kfence_debugfs_init); static void kfence_check_all_canary(void) { + struct kfence_pool_area *kpa; + struct rb_node *iter; int i; - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { - struct kfence_metadata *meta = &kfence_metadata[i]; + kfence_for_each_area(kpa, iter) { + for (i = 0; i < kpa->nr_objects; i++) { + struct kfence_metadata *meta = &kpa->meta[i]; - if (meta->state == KFENCE_OBJECT_ALLOCATED) - check_canary(meta); + if (meta->state == KFENCE_OBJECT_ALLOCATED) + check_canary(meta); + } } } @@ -840,36 +1813,79 @@ static void toggle_allocation_gate(struct work_struct *work) /* === Public interface ===================================================== */ -void __init kfence_alloc_pool_and_metadata(void) +int __init update_kfence_booting_max(void) { - if (!kfence_sample_interval) - return; + static bool done __initdata; + + unsigned long long parse_mem = PUD_SIZE; + unsigned long nr_pages, nr_obj_max; + char *cmdline; + int ret; /* - * If the pool has already been initialized by arch, there is no need to - * re-allocate the memory pool. + * We may reach here twice because some arch like aarch64 + * will call this function first. */ - if (!__kfence_pool) - __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + if (done) + return 0; + done = true; - if (!__kfence_pool) { - pr_err("failed to allocate pool\n"); - return; - } + /* Boot cmdline is not set. Just leave. */ + cmdline = get_last_kfence_booting_max("kfence.booting_max="); + if (!cmdline) + return 0; + + ret = parse_kfence_booting_max(cmdline, memblock_phys_mem_size(), &parse_mem); + /* disable booting kfence on parsing fail. */ + if (ret) + goto nokfence; + + nr_pages = min_t(unsigned long, parse_mem, PUD_SIZE) / PAGE_SIZE; + /* We need at least 4 pages to enable KFENCE. */ + if (nr_pages < 4) + goto nokfence; - /* The memory allocated by memblock has been zeroed out. */ - kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE); - if (!kfence_metadata_init) { - pr_err("failed to allocate metadata\n"); - memblock_free(__kfence_pool, KFENCE_POOL_SIZE); - __kfence_pool = NULL; + nr_obj_max = nr_pages / 2 - 1; + if (kfence_num_objects > nr_obj_max) { + kfence_num_objects = nr_obj_max; + return 1; } + + return 0; + +nokfence: + kfence_num_objects = 0; + return 1; } -static void kfence_init_enable(void) +/* Only run for the first time. */ +static bool kfence_setup_once(void) { - if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) - static_branch_enable(&kfence_allocation_key); + int i; + + /* + * freelist.node, freelist.cpu, counters are inited together, + * we only need to check one of them and know whether + * we are now in re-enabling. 
+	 */
+	if (counters)
+		return true;
+
+	freelist.node = kmalloc_array(nr_node_ids, sizeof(struct kfence_freelist_node),
+				      GFP_KERNEL);
+	freelist.cpu = alloc_percpu(struct kfence_freelist_cpu);
+	counters = alloc_percpu(struct kfence_counter);
+
+	if (!freelist.node || !freelist.cpu || !counters)
+		goto fail;
+
+	for_each_node(i) {
+		INIT_LIST_HEAD(&freelist.node[i].freelist);
+		raw_spin_lock_init(&freelist.node[i].lock);
+	}
+
+	for_each_possible_cpu(i)
+		INIT_LIST_HEAD(&per_cpu_ptr(freelist.cpu, i)->freelist);
 
 	if (kfence_deferrable)
 		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
@@ -879,119 +1895,328 @@
 	if (kfence_check_on_panic)
 		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
 
+	return true;
+
+fail:
+	kfree(freelist.node);
+	freelist.node = NULL;
+	free_percpu(freelist.cpu);
+	freelist.cpu = NULL;
+	free_percpu(counters);
+	counters = NULL;
+
+	return false;
+}
+
+static void start_kfence(void)
+{
+	unsigned long total_nr_objects = 0;
+	struct kfence_pool_area *kpa;
+	struct rb_node *iter;
+
+	kfence_for_each_area(kpa, iter) {
+		pr_info("initialized - using %lu bytes for %lu objects on node %d at 0x%p-0x%p\n",
+			kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr,
+			(void *)(kpa->addr + kpa->pool_size));
+		total_nr_objects += kpa->nr_objects;
+	}
+
+	/* Update kfence_num_objects to export to /sys/module/ */
+	if (total_nr_objects > KFENCE_MAX_OBJECTS_PER_AREA)
+		kfence_num_objects = rounddown(total_nr_objects, KFENCE_MAX_OBJECTS_PER_AREA);
+	else
+		kfence_num_objects = total_nr_objects;
+
+	/* Forget upstream mode. */
+	if (kfence_num_objects_snap && total_nr_objects > kfence_num_objects_snap) {
+		kfence_num_objects_snap = 0;
+		kvfree(alloc_covered);
+		alloc_covered = NULL;
+	}
+
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	static_branch_enable(&kfence_once_enabled);
+	static_branch_enable(&kfence_allocation_key);
+	if (kfence_sample_interval < 0) {
+		static_branch_enable(&kfence_short_canary);
+		static_branch_enable(&kfence_skip_interval);
+	} else {
+		static_branch_disable(&kfence_skip_interval);
+		queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	}
+}
+
+void __init kfence_alloc_pool_and_metadata(void)
+{
+	int node;
+
+	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
+	if (!READ_ONCE(kfence_sample_interval))
+		return;
+
+	if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) {
+		/*
+		 * A pool smaller than 1GiB is not allowed together with
+		 * node mode, nor with non-interval (always-on) allocation.
+		 */
+		if (kfence_pool_node_mode || kfence_sample_interval < 0)
+			goto fail;
+
+		/*
+		 * Only limit the upstream mode in production environments;
+		 * limiting a debug setup makes no sense.
+		 */
+		update_kfence_booting_max();
+		if (!kfence_num_objects)
+			goto fail;
+	}
+
+	kfence_num_objects_stat = memblock_alloc(sizeof(struct kfence_alloc_node_cond) *
+						 nr_node_ids, PAGE_SIZE);
+	if (!kfence_num_objects_stat)
+		goto fail;
+
+	/*
+	 * If the pool size is less than 1GiB, use the upstream mode;
+	 * else, align the pool size up to 1GiB, so the direct-map TLB
+	 * entries can be split now and recovered later.
+ */ + if (kfence_num_objects >= KFENCE_MAX_OBJECTS_PER_AREA) + kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + else + kfence_num_objects_snap = kfence_num_objects; - pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, - CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool, - (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + calculate_need_alloc(); + + for_each_node(node) { + if (kfence_nr_areas_per_node < kfence_num_objects_stat[node].need) + kfence_nr_areas_per_node = kfence_num_objects_stat[node].need; + } + kfence_nr_areas_per_node /= KFENCE_MAX_OBJECTS_PER_AREA; + if (!kfence_nr_areas_per_node) + kfence_nr_areas_per_node = 1; + + __kfence_pool_area = memblock_alloc(sizeof(char *) * nr_node_ids * + kfence_nr_areas_per_node, PAGE_SIZE); + if (!__kfence_pool_area) + goto fail; + + if (__kfence_pool_early_init) { + __kfence_pool_area[first_online_node] = __kfence_pool_early_init; + return; + } + + for_each_node(node) + kfence_alloc_pool_node(node); + + return; + +fail: + if (kfence_num_objects_stat) { + memblock_free(kfence_num_objects_stat, + sizeof(struct kfence_alloc_node_cond) * nr_node_ids); + kfence_num_objects_stat = NULL; + } + WRITE_ONCE(kfence_sample_interval, 0); } void __init kfence_init(void) { + unsigned long nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long kfence_pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; + int node, area, index; + int *new_node_map; + stack_hash_seed = get_random_u32(); /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ - if (!kfence_sample_interval) + if (!READ_ONCE(kfence_sample_interval)) return; - if (!kfence_init_pool_early()) { - pr_err("%s failed\n", __func__); - return; + if (!kfence_setup_once()) + goto fail_alloc; + + if (kfence_num_objects_snap) { + alloc_covered_order = ilog2(kfence_num_objects_snap) + 2; + alloc_covered = kvmalloc_array(ALLOC_COVERED_SIZE, sizeof(atomic_t), + GFP_KERNEL | __GFP_ZERO); + if (!alloc_covered) + goto fail_alloc; + } + + /* pre-alloc here for update_kfence_node_map() to avoid complex error handling later. 
*/ + new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO); + if (!new_node_map) + goto fail_coverd; + + if (!kfence_init_pool()) { + pr_err("%s failed on all nodes!\n", __func__); + goto fail_node_map; + } + + update_kfence_node_map(new_node_map); + + start_kfence(); + goto out; + +fail_node_map: + kfree(new_node_map); +fail_coverd: + kvfree(alloc_covered); + alloc_covered = NULL; +fail_alloc: + for_each_node(node) { + for (area = 0; area < kfence_nr_areas_per_node; area++) { + index = kfence_nr_areas_per_node * node + area; + if (__kfence_pool_area[index]) { + memblock_free_late(__pa(__kfence_pool_area[index]), + kfence_pool_size); + __kfence_pool_area[index] = NULL; + } + } } - kfence_init_enable(); +out: + memblock_free_late(__pa(__kfence_pool_area), sizeof(char *) * nr_node_ids * + kfence_nr_areas_per_node); + __kfence_pool_area = NULL; + memblock_free_late(__pa(kfence_num_objects_stat), + sizeof(struct kfence_alloc_node_cond) * nr_node_ids); + kfence_num_objects_stat = NULL; + +} + +static DECLARE_WAIT_QUEUE_HEAD(kfence_refkill_wait); +static void kfence_kill_confirm(struct percpu_ref *ref) +{ + if (!atomic_dec_return(&kfence_refkill_res)) + wake_up(&kfence_refkill_wait); } -static int kfence_init_late(void) +static void kfence_enable_late(void) { - const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE; - const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE; - unsigned long addr = (unsigned long)__kfence_pool; - unsigned long free_size = KFENCE_POOL_SIZE; - int err = -ENOMEM; + struct kfence_pool_area *kpa; + LIST_HEAD(ready_list); + struct rb_node *iter; + int *new_node_map; + int node; -#ifdef CONFIG_CONTIG_ALLOC - struct page *pages; + if (!READ_ONCE(kfence_sample_interval)) + return; - pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node, - NULL); - if (!pages) - return -ENOMEM; + /* + * If kfence pool is initialized later, the early init kfence pool has + * been released, reset the pointer here to avoid re-initialization if + * split_linear_mapping disabled. + */ + __kfence_pool_early_init = NULL; - __kfence_pool = page_to_virt(pages); - pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node, - NULL); - if (pages) - kfence_metadata_init = page_to_virt(pages); -#else - if (nr_pages_pool > MAX_ORDER_NR_PAGES || - nr_pages_meta > MAX_ORDER_NR_PAGES) { - pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n"); - return -EINVAL; - } + mutex_lock(&kfence_mutex); - __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); - if (!__kfence_pool) - return -ENOMEM; + if (READ_ONCE(kfence_enabled)) + goto out; - kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL); -#endif + /* + * Keep upstream mode remaining the same. + * Otherwise we "forget" the upstream version whose pool size < 1GiB. + */ + if (kfence_num_objects > kfence_num_objects_snap || kfence_pool_node_mode) + kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); - if (!kfence_metadata_init) - goto free_pool; + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA && kfence_sample_interval < 0) + goto fail; - memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE); - addr = kfence_init_pool(); - if (!addr) { - kfence_init_enable(); - kfence_debugfs_init(); - return 0; + if (!kfence_setup_once()) + goto fail; + + /* pre-alloc here for update_kfence_node_map() to avoid complex error handling later. 
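+	 * This allocation may sleep; kfence_mutex is held and KFENCE is not
+	 * yet enabled at this point.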
*/ + new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO); + if (!new_node_map) + goto fail; + + kfence_num_objects_stat = kmalloc_array(nr_node_ids, sizeof(struct kfence_alloc_node_cond), + GFP_KERNEL | __GFP_ZERO); + if (!kfence_num_objects_stat) + goto fail_node_map; + + calculate_need_alloc(); + + kfence_for_each_area(kpa, iter) { + if (kpa->nr_objects >= KFENCE_MAX_OBJECTS_PER_AREA || kfence_num_objects_snap) + kfence_num_objects_stat[kpa->node].allocated += kpa->nr_objects; } - pr_err("%s failed\n", __func__); - free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); - err = -EBUSY; + for_each_node(node) + kfence_alloc_pool_late_node(node, &ready_list, false); -#ifdef CONFIG_CONTIG_ALLOC - free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)), - nr_pages_meta); -free_pool: - free_contig_range(page_to_pfn(virt_to_page((void *)addr)), - free_size / PAGE_SIZE); -#else - free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE); -free_pool: - free_pages_exact((void *)addr, free_size); -#endif + /* + * Try to alloc again if there exists some nodes we fail to alloc on. + * These nodes may have no enough contig memory, so fallback to find on + * other nodes. + */ + for_each_node(node) + kfence_alloc_pool_late_node(node, &ready_list, true); + + update_kfence_node_map(new_node_map); + kfree(kfence_num_objects_stat); + kfence_num_objects_stat = NULL; - kfence_metadata_init = NULL; - __kfence_pool = NULL; - return err; + stop_machine(kfence_update_pool_root, &ready_list, NULL); + + if (RB_EMPTY_ROOT(&kfence_pool_root)) + goto fail; + + start_kfence(); + goto out; + +fail_node_map: + kfree(new_node_map); +fail: + WRITE_ONCE(kfence_sample_interval, 0); +out: + mutex_unlock(&kfence_mutex); } -static int kfence_enable_late(void) +void kfence_disable(void) { - if (!__kfence_pool) - return kfence_init_late(); + struct kfence_pool_area *kpa; + struct rb_node *iter; - WRITE_ONCE(kfence_enabled, true); - queue_delayed_work(system_unbound_wq, &kfence_timer, 0); - pr_info("re-enabled\n"); - return 0; + mutex_lock(&kfence_mutex); + + if (!xchg(&kfence_enabled, false)) + goto out_unlock; + + synchronize_rcu(); + + atomic_set(&kfence_allocation_gate, 1); +#ifdef CONFIG_KFENCE_STATIC_KEYS + wake_up(&allocation_wait); +#endif + static_branch_disable(&kfence_allocation_key); + + atomic_set(&kfence_refkill_res, 0); + kfence_for_each_area(kpa, iter) { + atomic_inc(&kfence_refkill_res); + percpu_ref_kill_and_confirm(&kpa->refcnt, kfence_kill_confirm); + } + + /* + * We must wait here until all percpu_ref being killed. + * After all tasks finished, then release the mutex lock. + */ + wait_event_idle(kfence_refkill_wait, !atomic_read(&kfence_refkill_res)); + +out_unlock: + mutex_unlock(&kfence_mutex); } -void kfence_shutdown_cache(struct kmem_cache *s) +static void kfence_shutdown_cache_area(struct kmem_cache *s, struct kfence_pool_area *kpa) { unsigned long flags; - struct kfence_metadata *meta; + struct kfence_metadata *meta, *kfence_metadata = kpa->meta; int i; - /* Pairs with release in kfence_init_pool(). */ - if (!smp_load_acquire(&kfence_metadata)) - return; - - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + for (i = 0; i < kpa->nr_objects; i++) { bool in_use; meta = &kfence_metadata[i]; @@ -1030,7 +2255,7 @@ void kfence_shutdown_cache(struct kmem_cache *s) } } - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + for (i = 0; i < kpa->nr_objects; i++) { meta = &kfence_metadata[i]; /* See above. 
*/ @@ -1044,7 +2269,19 @@ void kfence_shutdown_cache(struct kmem_cache *s) } } -void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) +void kfence_shutdown_cache(struct kmem_cache *s) +{ + struct kfence_pool_area *kpa; + struct rb_node *iter; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return; + + kfence_for_each_area(kpa, iter) + kfence_shutdown_cache_area(s, kpa); +} + +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node) { unsigned long stack_entries[KFENCE_STACK_DEPTH]; size_t num_stack_entries; @@ -1055,7 +2292,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) * we don't disable KFENCE without making an allocation. */ if (size > PAGE_SIZE) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1066,7 +2303,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) */ if ((flags & GFP_ZONEMASK) || (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1077,6 +2314,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) if (s->flags & SLAB_SKIP_KFENCE) return NULL; + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + if (atomic_inc_return(&kfence_allocation_gate) > 1) return NULL; #ifdef CONFIG_KFENCE_STATIC_KEYS @@ -1093,28 +2333,34 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) } #endif +alloc: if (!READ_ONCE(kfence_enabled)) return NULL; num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); - /* - * Do expensive check for coverage of allocation in slow-path after - * allocation_gate has already become non-zero, even though it might - * mean not making any allocation within a given sample interval. - * - * This ensures reasonable allocation coverage when the pool is almost - * full, including avoiding long-lived allocations of the same source - * filling up the pool (e.g. pagecache allocations). - */ - alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); - if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]); - return NULL; + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. + * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). 
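+		 * (In skip-interval mode this check is bypassed entirely,
+		 * since the pool is meant to be drawn from on every
+		 * allocation.)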
+ */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } } + if (node == NUMA_NO_NODE) + node = numa_node_id(); + return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries, - alloc_stack_hash); + alloc_stack_hash, node); } size_t kfence_ksize(const void *addr) @@ -1130,7 +2376,12 @@ size_t kfence_ksize(const void *addr) void *kfence_object_start(const void *addr) { - const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + struct kfence_metadata *meta; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return NULL; + + meta = addr_to_metadata((unsigned long)addr); /* * Read locklessly -- if there is a race with __kfence_alloc(), this is @@ -1160,18 +2411,25 @@ void __kfence_free(void *addr) bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { - const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; struct kfence_metadata *to_report = NULL; enum kfence_error_type error_type; + struct kfence_pool_area *kpa; unsigned long flags; + int page_index; - if (!is_kfence_address((void *)addr)) + if (!static_branch_unlikely(&kfence_once_enabled)) + return false; + + kpa = get_kfence_pool_area((void *)addr); + if (!kpa) return false; if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */ return kfence_unprotect(addr); /* ... unprotect and proceed. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++; + + page_index = (addr - (unsigned long)kpa->addr) / PAGE_SIZE; if (page_index % 2) { /* This is a redzone, report a buffer overflow. */ diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index f46fbb03062b..071aec5feb96 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -100,46 +100,83 @@ struct kfence_metadata { #ifdef CONFIG_MEMCG struct obj_cgroup *objcg; #endif + struct kfence_pool_area *kpa; }; -#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \ - CONFIG_KFENCE_NUM_OBJECTS) +extern bool kfence_panic_on_fault; +DECLARE_STATIC_KEY_FALSE(kfence_short_canary); -extern struct kfence_metadata *kfence_metadata; +/* KFENCE error types for report generation. */ +enum kfence_error_type { + KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ + KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ + KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ + KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ + KFENCE_ERROR_INVALID_FREE, /* Invalid free. 
*/ +}; + +void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, + const struct kfence_metadata *meta, enum kfence_error_type type); + +void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); +void kfence_disable(void); +extern void __free_pages_core(struct page *page, unsigned int order); + +extern struct rb_root kfence_pool_root; +#define kfence_rbentry(cur) rb_entry((cur), struct kfence_pool_area, rb_node) +#define kfence_for_each_area(kpa, iter) \ + for ((iter) = rb_first(&kfence_pool_root); \ + (iter) && ((kpa) = kfence_rbentry((iter)));\ + (iter) = rb_next((iter))) + +/** + * get_kfence_pool_area() - find the kfence pool area of the address + * @addr: address to check + * + * Return: the kfence pool area, NULL if not a kfence address + */ +static inline struct kfence_pool_area *get_kfence_pool_area(const void *addr) +{ + struct rb_node *cur; + struct kfence_pool_area *res = NULL; + + for (cur = kfence_pool_root.rb_node; cur;) { + struct kfence_pool_area *kpa = kfence_rbentry(cur); + + if ((unsigned long)addr < (unsigned long)kpa->addr) + cur = cur->rb_left; + else { + res = kpa; + cur = cur->rb_right; + } + } + + return is_kfence_address_area(addr, res) ? res : NULL; +} static inline struct kfence_metadata *addr_to_metadata(unsigned long addr) { long index; + struct kfence_metadata *kfence_metadata; + struct kfence_pool_area *kpa = get_kfence_pool_area((void *)addr); /* The checks do not affect performance; only called from slow-paths. */ - if (!is_kfence_address((void *)addr)) + if (!kpa) return NULL; + kfence_metadata = kpa->meta; + /* * May be an invalid index if called with an address at the edge of * __kfence_pool, in which case we would report an "invalid access" * error. */ - index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; - if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) + index = (addr - (unsigned long)kpa->addr) / (PAGE_SIZE * 2) - 1; + if (index < 0 || index >= kpa->nr_objects) return NULL; return &kfence_metadata[index]; } -/* KFENCE error types for report generation. */ -enum kfence_error_type { - KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ - KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ - KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ - KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ - KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ -}; - -void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, - const struct kfence_metadata *meta, enum kfence_error_type type); - -void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); - #endif /* MM_KFENCE_KFENCE_H */ diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 95b2b84c296d..27299531307b 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -243,6 +243,7 @@ enum allocation_policy { */ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) { + long _kfence_sample_interval = kfence_sample_interval; void *alloc; unsigned long timeout, resched_after; const char *policy_name; @@ -269,13 +270,15 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. 
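+	 * (A negative sample interval means KFENCE allocates unconditionally,
+	 * so fall back to a fixed 100ms budget below.)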
*/ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); /* * Especially for non-preemption kernels, ensure the allocation-gate * timer can catch up: after @resched_after, every failed allocation * attempt yields, to ensure the allocation-gate timer is scheduled. */ - resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval); + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); do { if (test_cache) alloc = kmem_cache_alloc(test_cache, gfp); @@ -305,6 +308,9 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat } else if (policy == ALLOCATE_NONE) return alloc; + if (kfence_sample_interval < 0 && policy == ALLOCATE_NONE) + return alloc; + test_free(alloc); if (time_after(jiffies, resched_after)) @@ -609,7 +615,7 @@ static void test_gfpzero(struct kunit *test) int i; /* Skip if we think it'd take too long. */ - KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100); + KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100 && kfence_num_objects <= 255); setup_test_cache(test, size, 0, NULL); buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); @@ -624,7 +630,7 @@ static void test_gfpzero(struct kunit *test) break; test_free(buf2); - if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) { + if (kthread_should_stop() || (i == kfence_num_objects)) { kunit_warn(test, "giving up ... cannot get same object back\n"); return; } @@ -641,12 +647,19 @@ static void test_gfpzero(struct kunit *test) static void test_invalid_access(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .type = KFENCE_ERROR_INVALID, .fn = test_invalid_access, - .addr = &__kfence_pool[10], .is_write = false, }; + struct rb_node *cur = kfence_pool_root.rb_node; + char *__kfence_pool; + + if (!cur) + return; + + __kfence_pool = kfence_rbentry(cur)->addr; + expect.addr = &__kfence_pool[10]; READ_ONCE(__kfence_pool[10]); KUNIT_EXPECT_TRUE(test, report_matches(&expect)); @@ -731,6 +744,7 @@ static void test_krealloc(struct kunit *test) /* Test that some objects from a bulk allocation belong to KFENCE pool. */ static void test_memcache_alloc_bulk(struct kunit *test) { + long _kfence_sample_interval = kfence_sample_interval; const size_t size = 32; bool pass = false; unsigned long timeout; @@ -741,7 +755,9 @@ static void test_memcache_alloc_bulk(struct kunit *test) * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. 
*/ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); do { void *objects[100]; int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects), @@ -804,7 +820,7 @@ static int test_init(struct kunit *test) unsigned long flags; int i; - if (!__kfence_pool) + if (!kfence_pool_root.rb_node) return -EINVAL; spin_lock_irqsave(&observed.lock, flags); diff --git a/mm/kfence/report.c b/mm/kfence/report.c index c509aed326ce..3d1c82b8d230 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -128,6 +128,7 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) { + struct kfence_metadata *kfence_metadata = meta->kpa->meta; const int size = abs(meta->size); const unsigned long start = meta->addr; const struct kmem_cache *const cache = meta->cache; @@ -163,7 +164,11 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show, /* Do not show contents of object nor read into following guard page. */ end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr) - : min(show_until_addr, PAGE_ALIGN(address))); + : static_branch_likely(&kfence_short_canary) ? + min(show_until_addr, + ALIGN(meta->addr + meta->size + 1, + L1_CACHE_BYTES)) : + min(show_until_addr, PAGE_ALIGN(address))); pr_cont("["); for (cur = (const u8 *)address; cur < end; cur++) { @@ -186,7 +191,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r const struct kfence_metadata *meta, enum kfence_error_type type) { unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 }; - const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1; + ptrdiff_t object_index = -1; int num_stack_entries; int skipnr = 0; @@ -201,8 +206,11 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta)) return; - if (meta) + if (meta) { lockdep_assert_held(&meta->lock); + object_index = meta - meta->kpa->meta; + } + /* * Because we may generate reports in printk-unfriendly parts of the * kernel, such as scheduler code, the use of printk() could deadlock. @@ -272,7 +280,8 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r lockdep_on(); - check_panic_on_warn("KFENCE"); + if (kfence_panic_on_fault) + panic("kfence.fault=panic set ...\n"); /* We encountered a memory safety error, taint the kernel! 
*/ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); diff --git a/mm/slab.c b/mm/slab.c index 9ad3d0f2d1a5..dba95fd61ffb 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3222,7 +3222,7 @@ slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, if (unlikely(!cachep)) return NULL; - objp = kfence_alloc(cachep, orig_size, flags); + objp = kfence_alloc_node(cachep, orig_size, flags, nodeid); if (unlikely(objp)) goto out; diff --git a/mm/slub.c b/mm/slub.c index f7940048138c..1922fe52198e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3461,7 +3461,7 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list if (!s) return NULL; - object = kfence_alloc(s, orig_size, gfpflags); + object = kfence_alloc_node(s, orig_size, gfpflags, node); if (unlikely(object)) goto out; -- Gitee From b26b84153cda291b3e3970c4492bb16b7e45add2 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 11 Mar 2024 16:48:32 +0800 Subject: [PATCH 267/953] anolis: kfence: support order-0 page check ANBZ: #8499 Single kernel page is supported by kfence now. The kernel order-0 pages (including order-0 pages from vmalloc) will be allocated from kfence pool. To exclude some specific page type(e.g., slab pages and pgtable pages), a new gfp flag is introduced. Out of bounds read/write and use after free can be detected. Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2874 --- include/asm-generic/pgalloc.h | 2 +- include/linux/gfp_types.h | 10 +- include/linux/kfence.h | 61 +++++++++++ include/trace/events/mmflags.h | 13 ++- mm/kfence/core.c | 195 ++++++++++++++++++++++++++++++++- mm/kfence/report.c | 4 +- mm/page_alloc.c | 26 ++++- mm/slub.c | 1 + 8 files changed, 300 insertions(+), 12 deletions(-) diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a753849..f7413f68f8d5 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -4,7 +4,7 @@ #ifdef CONFIG_MMU -#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) +#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO | __GFP_NOKFENCE) #define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) /** diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index 6583a58670c5..7dbe56452544 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -58,6 +58,11 @@ typedef unsigned int __bitwise gfp_t; #else #define ___GFP_NOLOCKDEP 0 #endif +#ifdef CONFIG_KFENCE +#define ___GFP_NOKFENCE 0x8000000u +#else +#define ___GFP_NOKFENCE 0 +#endif /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -99,12 +104,15 @@ typedef unsigned int __bitwise gfp_t; * node with no fallbacks or placement policy enforcements. * * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + * + * %__GFP_NOKFENCE informs DO NOT try to alloc page from kfence pool. 
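+ * Users whose pages must never come from the KFENCE pool (for example slab
+ * backing pages and kernel page tables in this series) pass this flag.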
*/ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) +#define __GFP_NOKFENCE ((__force gfp_t)___GFP_NOKFENCE) /** * DOC: Watermark modifiers @@ -249,7 +257,7 @@ typedef unsigned int __bitwise gfp_t; #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (28) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** diff --git a/include/linux/kfence.h b/include/linux/kfence.h index bb24956fefd8..6771c6eea720 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -140,6 +140,7 @@ void kfence_shutdown_cache(struct kmem_cache *s); * use kfence_alloc() instead. */ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node); +struct page *__kfence_alloc_page(int node, gfp_t flags); /** * kfence_alloc() - allocate a KFENCE object with a low probability @@ -205,6 +206,36 @@ static __always_inline void *kfence_alloc_node(struct kmem_cache *s, size_t size return __kfence_alloc(s, size, flags, node); } +/** + * kfence_alloc_page() - allocate a KFENCE page with a low probability + * @node: preferred nid + * @flags: GFP flags + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE page. + * + * the order-0 page version of kfence_alloc(). + */ +static __always_inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags) +{ +#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0 + if (!static_branch_unlikely(&kfence_allocation_key)) + return NULL; +#else + if (!static_branch_likely(&kfence_allocation_key)) + return NULL; +#endif + if (order) + return NULL; + + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) + return NULL; + + return __kfence_alloc_page(node, flags); +} + /** * kfence_ksize() - get actual amount of memory allocated for a KFENCE object * @addr: pointer to a heap object @@ -242,6 +273,7 @@ void *kfence_object_start(const void *addr); * Release a KFENCE object and mark it as freed. */ void __kfence_free(void *addr); +void __kfence_free_page(struct page *page, void *addr); /** * kfence_free() - try to release an arbitrary heap object to KFENCE pool @@ -264,6 +296,30 @@ static __always_inline __must_check bool kfence_free(void *addr) return true; } +/** + * kfence_free_page() - try to release a page to KFENCE pool + * @page: page to be freed + * + * Return: + * * false - page doesn't belong to KFENCE pool and was ignored, + * * true - page was released to KFENCE pool. + * + * Release a KFENCE page and mark it as freed. May be called on any page, + * even non-KFENCE page. The allocator must check the return value to + * determine if it was a KFENCE object or not. 
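+ * The PageKfence() test keeps the common non-KFENCE path down to a single
+ * page-flag check.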
+ */ +static __always_inline __must_check bool kfence_free_page(struct page *page) +{ + void *addr; + + if (!PageKfence(page)) + return false; + + addr = page_to_virt(page); + __kfence_free_page(page, addr); + return true; +} + /** * kfence_handle_page_fault() - perform page fault handling for KFENCE pages * @addr: faulting address @@ -308,10 +364,15 @@ static inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t f { return NULL; } +static inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags) +{ + return NULL; +} static inline size_t kfence_ksize(const void *addr) { return 0; } static inline void *kfence_object_start(const void *addr) { return NULL; } static inline void __kfence_free(void *addr) { } static inline bool __must_check kfence_free(void *addr) { return false; } +static inline bool __must_check kfence_free_page(struct page *page) { return false; } static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 08fd0425f478..a548a7c240f8 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -60,9 +60,16 @@ #define __def_gfpflag_names_kasan #endif -#define show_gfp_flags(flags) \ - (flags) ? __print_flags(flags, "|", \ - __def_gfpflag_names __def_gfpflag_names_kasan \ +#ifdef CONFIG_KFENCE +#define __def_gfpflag_names_kfence , \ + gfpflag_string(__GFP_NOKFENCE) +#else +#define __def_gfpflag_names_kfence +#endif + +#define show_gfp_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + __def_gfpflag_names __def_gfpflag_names_kasan __def_gfpflag_names_kfence \ ) : "none" #ifdef CONFIG_MMU diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 01d5945e1351..17f3dda4ebf7 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -84,6 +84,7 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ DEFINE_STATIC_KEY_FALSE(kfence_short_canary); DEFINE_STATIC_KEY_FALSE(kfence_skip_interval); static DEFINE_STATIC_KEY_FALSE(kfence_once_enabled); +DEFINE_STATIC_KEY_TRUE(kfence_order0_page); #define KFENCE_MAX_OBJECTS_PER_AREA (PUD_SIZE / PAGE_SIZE / 2 - 1) @@ -205,6 +206,33 @@ static const struct kernel_param_ops pool_mode_param_ops = { }; module_param_cb(pool_mode, &pool_mode_param_ops, &kfence_pool_node_mode, 0600); +static int param_set_order0_page(const char *val, const struct kernel_param *kp) +{ + bool res; + int ret = kstrtobool(val, &res); + + if (ret < 0) + return ret; + + if (res) + static_branch_enable(&kfence_order0_page); + else + static_branch_disable(&kfence_order0_page); + + return 0; +} + +static int param_get_order0_page(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%d\n", static_branch_likely(&kfence_order0_page) ? 
1 : 0); +} + +static const struct kernel_param_ops order0_page_param_ops = { + .set = param_set_order0_page, + .get = param_get_order0_page, +}; +module_param_cb(order0_page, &order0_page_param_ops, NULL, 0600); + static int param_set_fault(const char *val, const struct kernel_param *kp) { bool mode; @@ -320,6 +348,9 @@ enum kfence_counter_id { KFENCE_COUNTER_ALLOCS, KFENCE_COUNTER_FREES, KFENCE_COUNTER_ZOMBIES, + KFENCE_COUNTER_ALLOCATED_PAGE, + KFENCE_COUNTER_ALLOCS_PAGE, + KFENCE_COUNTER_FREES_PAGE, KFENCE_COUNTER_BUGS, KFENCE_COUNTER_SKIP_INCOMPAT, KFENCE_COUNTER_SKIP_CAPACITY, @@ -331,10 +362,13 @@ struct kfence_counter { }; static struct kfence_counter __percpu *counters; static const char *const counter_names[] = { - [KFENCE_COUNTER_ALLOCATED] = "currently allocated", - [KFENCE_COUNTER_ALLOCS] = "total allocations", - [KFENCE_COUNTER_FREES] = "total frees", - [KFENCE_COUNTER_ZOMBIES] = "zombie allocations", + [KFENCE_COUNTER_ALLOCATED] = "currently slab allocated", + [KFENCE_COUNTER_ALLOCS] = "total slab allocations", + [KFENCE_COUNTER_FREES] = "total slab frees", + [KFENCE_COUNTER_ZOMBIES] = "zombie slab allocations", + [KFENCE_COUNTER_ALLOCATED_PAGE] = "currently page allocated", + [KFENCE_COUNTER_ALLOCS_PAGE] = "total page allocations", + [KFENCE_COUNTER_FREES_PAGE] = "total page frees", [KFENCE_COUNTER_BUGS] = "total bugs", [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)", [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)", @@ -772,6 +806,78 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g return addr; } +static struct page *kfence_guarded_alloc_page(int node, unsigned long *stack_entries, + size_t num_stack_entries, u32 alloc_stack_hash) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; + unsigned long flags; + struct page *page; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); + + /* Try to obtain a free object. */ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; + } + + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + * Put the object back on the freelist. + */ + put_free_meta(meta); + + return NULL; + } + + __init_meta(meta, PAGE_SIZE, NULL, stack_entries, num_stack_entries, alloc_stack_hash); + + raw_spin_unlock_irqrestore(&meta->lock, flags); + + addr = (void *)meta->addr; + alloc_covered_add(alloc_stack_hash, 1); + + page = virt_to_page(addr); + if (PageSlab(page)) { + struct slab *slab = page_slab(page); + + /* + * For performance considerations, + * we clean slab info here (when allocating pages). + * So that slabs can reuse their flags and obj_cgroups + * without being cleared or freed if the previous user + * is slab too. 
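+		 * Page users, by contrast, expect a pristine struct page, so
+		 * the cleanup cannot be deferred on this path.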
+ */ + slab->slab_cache = NULL; +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } + page->mapping = NULL; +#ifdef CONFIG_DEBUG_VM + atomic_set(&page->_refcount, 0); +#endif + + if (random_fault) + kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ + + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS_PAGE]++; + + return page; +} + static inline void put_free_meta_to_node(struct kfence_metadata *object, struct kfence_freelist_node *kfence_freelist) { @@ -912,6 +1018,20 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z } } +static void kfence_guarded_free_page(struct page *page, void *addr, struct kfence_metadata *meta) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + + if (!__free_meta(addr, meta, false, true)) + return; + + put_free_meta(meta); + + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]--; + this_cpu_counter->counter[KFENCE_COUNTER_FREES_PAGE]++; + +} + static void rcu_guarded_free(struct rcu_head *h) { struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head); @@ -2363,6 +2483,66 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node) alloc_stack_hash, node); } +#define GFP_KFENCE_NOT_ALLOC ((GFP_ZONEMASK & ~__GFP_HIGHMEM) | __GFP_NOKFENCE | __GFP_THISNODE) +struct page *__kfence_alloc_page(int node, gfp_t flags) +{ + unsigned long stack_entries[KFENCE_STACK_DEPTH]; + size_t num_stack_entries; + u32 alloc_stack_hash; + + if (!static_branch_likely(&kfence_order0_page)) + return NULL; + + if ((flags & GFP_KFENCE_NOT_ALLOC) || (flags & GFP_USER) == GFP_USER) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; + return NULL; + } + + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + + if (atomic_inc_return(&kfence_allocation_gate) > 1) + return NULL; +#ifdef CONFIG_KFENCE_STATIC_KEYS + /* + * waitqueue_active() is fully ordered after the update of + * kfence_allocation_gate per atomic_inc_return(). + */ + if (waitqueue_active(&allocation_wait)) { + /* + * Calling wake_up() here may deadlock when allocations happen + * from within timer code. Use an irq_work to defer it. + */ + irq_work_queue(&wake_up_kfence_timer_work); + } +#endif + +alloc: + if (!READ_ONCE(kfence_enabled)) + return NULL; + + num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); + + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. + * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). 
+ */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } + } + + return kfence_guarded_alloc_page(node, stack_entries, num_stack_entries, alloc_stack_hash); +} + size_t kfence_ksize(const void *addr) { const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); @@ -2409,6 +2589,13 @@ void __kfence_free(void *addr) kfence_guarded_free(addr, meta, false); } +void __kfence_free_page(struct page *page, void *addr) +{ + struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + + kfence_guarded_free_page(page, addr, meta); +} + bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { struct kfence_metadata *to_report = NULL; diff --git a/mm/kfence/report.c b/mm/kfence/report.c index 3d1c82b8d230..e2f051e223ef 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -132,6 +132,7 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met const int size = abs(meta->size); const unsigned long start = meta->addr; const struct kmem_cache *const cache = meta->cache; + struct page *page = virt_to_page((void *)start); lockdep_assert_held(&meta->lock); @@ -142,7 +143,8 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n", meta - kfence_metadata, (void *)start, (void *)(start + size - 1), - size, (cache && cache->name) ? cache->name : ""); + size, (cache && cache->name) ? cache->name : PageSlab(page) ? + "" : "PAGE"); kfence_print_stack(seq, meta, true); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ab7141735012..b7e3a5c37928 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include "internal.h" #include "shuffle.h" @@ -955,6 +956,12 @@ static inline bool free_page_is_bad(struct page *page) if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) return false; +#ifdef CONFIG_KFENCE + /* It's not performance sensitive when reaching here */ + if (PageKfence(page)) + return false; +#endif + /* Something has gone sideways, find it */ free_page_is_bad_report(page); return true; @@ -1132,7 +1139,7 @@ static __always_inline bool free_pages_prepare(struct page *page, } page_cpupid_reset_last(page); - page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_KFENCE; reset_page_owner(page, order); page_table_check_free(page, order); @@ -1266,6 +1273,9 @@ static void __free_pages_ok(struct page *page, unsigned int order, if (!free_pages_prepare(page, order, fpi_flags)) return; + if (unlikely(!order && kfence_free_page(page))) + return; + /* * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here * is used to avoid calling get_pfnblock_migratetype() under the lock. 
@@ -2373,6 +2383,10 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, bool free_high; __count_vm_events(PGFREE, 1 << order); + + if (unlikely(!order && kfence_free_page(page))) + return; + pindex = order_to_pindex(migratetype, order); list_add(&page->pcp_list, &pcp->lists[pindex]); pcp->count += 1 << order; @@ -4340,7 +4354,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, continue; } - page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, + page = kfence_alloc_page(0, preferred_nid, gfp); + if (likely(!page)) + page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, pcp, pcp_list); if (unlikely(!page)) { /* Try and allocate at least one page */ @@ -4424,6 +4440,12 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, */ alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); + page = kfence_alloc_page(order, preferred_nid, alloc_gfp); + if (unlikely(page)) { + prep_new_page(page, 0, alloc_gfp, alloc_flags); + goto out; + } + /* First allocation attempt */ page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); if (likely(page)) diff --git a/mm/slub.c b/mm/slub.c index 1922fe52198e..e0a43a4ca532 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1866,6 +1866,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node, struct slab *slab; unsigned int order = oo_order(oo); + flags |= __GFP_NOKFENCE; if (node == NUMA_NO_NODE) folio = (struct folio *)alloc_pages(flags, order); else -- Gitee From ae0de3fe5d10698563b9385033b20e3ddef54f79 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 11 Mar 2024 17:52:16 +0800 Subject: [PATCH 268/953] anolis: kfence: add order-0 page test cases ANBZ: #8499 Test cases of out of bounds read/write and use after free read for order-0 page, stack protection, vmalloc, and GFP_NOKFENCE are added. Since double free has been checked by put_page_testzero(), there is no need and no use to add this case. Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2874 --- mm/kfence/kfence_test.c | 144 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 27299531307b..6e26b9b7ed4a 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -226,6 +226,8 @@ static __always_inline void test_free(void *ptr) kfree(ptr); } +#define test_free_page(addr) free_page((unsigned long)addr) + /* * If this should be a KFENCE allocation, and on which side the allocation and * the closest guard page should be. @@ -321,6 +323,50 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat return NULL; /* Unreachable. */ } +static struct page *test_alloc_page(struct kunit *test, bool is_vmalloc) +{ + long _kfence_sample_interval = kfence_sample_interval; + struct page *alloc; + void *addr; + unsigned long timeout, resched_after; + + kunit_info(test, "%s: size=%zu vmalloc=%d\n", __func__, PAGE_SIZE, is_vmalloc); + + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); + /* + * Especially for non-preemption kernels, ensure the allocation-gate + * timer can catch up: after @resched_after, every failed allocation + * attempt yields, to ensure the allocation-gate timer is scheduled. 
+ */ + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); + do { + if (is_vmalloc) { + addr = vmalloc(PAGE_SIZE); + alloc = vmalloc_to_page(addr); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + vfree(addr); + } else { + alloc = alloc_page(GFP_KERNEL); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + __free_page(alloc); + } + + if (time_after(jiffies, resched_after)) + cond_resched(); + } while (time_before(jiffies, timeout)); + + KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate page from KFENCE"); + return NULL; /* Unreachable. */ +} + static void test_out_of_bounds_read(struct kunit *test) { size_t size = 32; @@ -355,6 +401,33 @@ static void test_out_of_bounds_read(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_read_page, + .is_write = false, + }; + char *buf; + struct page *page; + + /* Test both sides. */ + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf + PAGE_SIZE; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_out_of_bounds_write(struct kunit *test) { size_t size = 32; @@ -373,6 +446,24 @@ static void test_out_of_bounds_write(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_write_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_write_page, + .is_write = true, + }; + char *buf; + struct page *page; + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + WRITE_ONCE(*expect.addr, 42); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_use_after_free_read(struct kunit *test) { const size_t size = 32; @@ -389,6 +480,22 @@ static void test_use_after_free_read(struct kunit *test) KUNIT_EXPECT_TRUE(test, report_matches(&expect)); } +static void test_use_after_free_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_use_after_free_read_page, + .is_write = false, + }; + struct page *page; + + page = test_alloc_page(test, false); + expect.addr = page_address(page); + test_free_page(expect.addr); + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + static void test_double_free(struct kunit *test) { const size_t size = 32; @@ -783,6 +890,37 @@ static void test_memcache_alloc_bulk(struct kunit *test) KUNIT_EXPECT_FALSE(test, report_available()); } +static void test_kernel_stack(struct kunit *test) +{ + unsigned long vaddr = (unsigned long)current->stack; + struct page *page; + int i; + + KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_VMAP_STACK) && kfence_sample_interval < 0); + + for (i = 0 ; i < 1< Date: Tue, 12 Mar 2024 11:26:30 +0800 Subject: [PATCH 269/953] anolis: docs/kfence: update document for KFENCE ANBZ: #8499 Update Documentation/dev-tools/kfence.rst. 
Signed-off-by: Tianchen Ding
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2874
---
 Documentation/dev-tools/kfence.rst | 131 ++++++++++++++++++++++++++++-
 1 file changed, 130 insertions(+), 1 deletion(-)

diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst
index 936f6aaa75c8..0cfa0f667f2a 100644
--- a/Documentation/dev-tools/kfence.rst
+++ b/Documentation/dev-tools/kfence.rst
@@ -67,13 +67,142 @@ The total memory dedicated to the KFENCE memory pool can be computed as::
 Using the default config, and assuming a page size of 4 KiB, results in
 dedicating 2 MiB to the KFENCE memory pool.
 
+You can change the KFENCE memory pool size by setting ``kfence.num_objects``
+on the boot command line, or by writing to
+``/sys/module/kfence/parameters/num_objects`` while KFENCE is not enabled;
+the pool size of each node is then computed and updated in the same way as
+above. This value can be set very large, so take care not to exhaust all of
+memory.
+When KFENCE is enabled, ``num_objects`` is adjusted so that the pool size is
+aligned up to 1GiB. That means ``num_objects`` itself is aligned up to 131071
+(unless ``num_objects`` is smaller than that, in which case KFENCE runs in
+the upstream mode).
+
+You can enable or disable KFENCE dynamically after startup by writing a
+suitable number to ``/sys/module/kfence/parameters/sample_interval``. Writing
+0 disables KFENCE, and unused KFENCE pool memory is then freed automatically.
+Any other value enables KFENCE, which then tries to allocate enough memory to
+hold the ``num_objects`` the user has set. If the value is negative, the
+sample interval is ignored, and KFENCE allocates slabs and pages from its
+pool at all times whenever possible.
+
+You can change the KFENCE pool mode by setting ``kfence.pool_mode`` on the
+boot command line, or by writing to
+``/sys/module/kfence/parameters/pool_mode`` while KFENCE is not enabled. If
+the value is ``global`` (the default), ``num_objects`` is a global total: all
+KFENCE pools together hold ``num_objects`` slabs/pages. If the value is
+``node``, ``num_objects`` is a per-node value, and the KFENCE pools on each
+node hold ``num_objects`` slabs/pages separately.
+
 Note: On architectures that support huge pages, KFENCE will ensure that the
 pool is using pages of size ``PAGE_SIZE``. This will result in additional
 page tables being allocated.
 
+TLB recovery issue
+~~~~~~~~~~~~~~~~~~
+
+On some architectures such as x86, the kernel's direct mapping of physical
+memory uses huge PUD entries, which improves performance because the kernel
+does not need to walk the PMD and PTE levels. Each PUD covers a 1GiB area.
+
+KFENCE, however, needs to set up guard pages and breaks this layout: the PUD
+is split down to PTEs, meaning that a 1GiB area is split into a large number
+of 4KiB (page size) mappings. This may hurt performance.
+
+To mitigate this, the size of each KFENCE pool area is forced to 1GiB, and
+one area can hold 131071 objects, computed as::
+
+	1GiB / 4KiB / 2 - 1 = 131071
+
+The user-supplied kfence.num_objects is therefore aligned up to a multiple of
+131071 so that it can conveniently be split into several 1GiB areas.
+
+Each KFENCE pool area is allocated at a 1GiB-aligned address, ensuring that
+only one PUD is split. When KFENCE is disabled and an area holds no active
+slabs/pages, the area is freed and the corresponding mapping is restored to
+the original PUD (only on x86_64 for now).
+
+The one exception is a user input of less than 131071 on the boot command
+line; see mode 1 in the examples below.
+
+Set a pool limit on various memory sizes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Like crashkernel, the user can limit the size of the KFENCE pool by setting
+``kfence.booting_max`` on the boot command line. A reasonable config can be::
+
+	kfence.booting_max=0-128M:0,128M-256M:1M,256M-:2M
+
+With this setting:
+  On machines with memory in [0, 128M), KFENCE will not be enabled.
+
+  On machines with memory in [128M, 256M), KFENCE will allocate at most 1MB
+  for its pool (which means num_objects = 127 with page_size = 4KB).
+
+  On machines with more than 256M of memory, KFENCE will allocate at most
+  2MB for its pool (which means num_objects = 255 with page_size = 4KB).
+
+Notes:
+  This config only sets an upper limit, so if the user sets num_objects = 127
+  and ``kfence.booting_max=0-:2M``, KFENCE will still allocate 1MB for the
+  pool.
+
+  This config only applies to the upstream mode (pool_size < 1GB and
+  sample_interval > 0), because a user who wants a debug setup is focused on
+  a specific machine and does not need this generic limit.
+
+Examples
+~~~~~~~~
+
+There are three main distribution modes.
+
+1. Upstream mode::
+
+	num_objects < 131071
+	pool_mode = global (cannot be node)
+	sample_interval cannot be negative
+
+   In this mode, everything behaves as upstream. However, if the user
+   enlarges ``num_objects`` after startup, it is aligned up to 131071 and
+   the setup becomes mode 2.
+
+2. Global mode::
+
+	num_objects >= 131071
+	pool_mode = global
+
+   For example, if num_objects = 131071 * 2 and there are 4 nodes in total,
+   node 0 and node 1 each allocate 1GiB of memory for their KFENCE pools,
+   and nothing is allocated on node 2 or node 3. Sampled slabs and pages on
+   the empty nodes (2 and 3) are mapped to the earlier nodes (0 and 1).
+
+   If num_objects = 131071 * 6, the memory usage is [2, 2, 1, 1]GiB across
+   the 4 nodes.
+
+3. Per-node mode::
+
+	num_objects >= 131071
+	pool_mode = node
+
+   This mode is straightforward: if num_objects = 131071 * n, the memory
+   usage is [n, n, n, n]GiB on 4 nodes.
+
+Monitoring specific slabs
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To enable or disable KFENCE for specific slabs, use the per-slab switch at
+``/sys/kernel/slab/<cache>/skip_kfence``. The default value is 0 for all
+slabs (meaning none are skipped).
+
+Users can also toggle monitoring of order-0 pages by setting
+``kfence.order0_page`` on the boot command line, or by writing to
+``/sys/module/kfence/parameters/order0_page``.
+
 Error reports
 ~~~~~~~~~~~~~
 
+By default, KFENCE only reports faults in dmesg. To panic the kernel on a
+KFENCE fault instead, set ``kfence.fault=panic`` on the boot command line,
+or write "panic" to ``/sys/module/kfence/parameters/fault``.
+
 A typical out-of-bounds access looks like this::
 
 ==================================================================
@@ -258,7 +387,7 @@ object page are "guard pages", whose attributes are changed to a protected
 state, and cause page faults on any attempted access. Such page faults are
 then intercepted by KFENCE, which handles the fault gracefully by reporting
 an out-of-bounds access, and marking the page as accessible so that the faulting
-code can (wrongly) continue executing (set ``panic_on_warn`` to panic instead).
+code can (wrongly) continue executing.
 
 To detect out-of-bounds writes to memory within the object's page itself,
 KFENCE also uses pattern-based redzones.
For each object page, a redzone is set
--
Gitee

From aed5128b86093da022bcff54b2f8c85f4932aee9 Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Tue, 12 Mar 2024 11:34:38 +0800
Subject: [PATCH 270/953] anolis: configs: update kfence configs

ANBZ: #8499

CONFIG_KFENCE_DEFERRABLE is useful to save power. Enable it.

Signed-off-by: Tianchen Ding
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2874
---
 arch/arm64/configs/anolis-debug_defconfig | 4 ++--
 arch/arm64/configs/anolis_defconfig       | 2 +-
 arch/x86/configs/anolis-debug_defconfig   | 2 +-
 arch/x86/configs/anolis_defconfig         | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig
index 419072dae8e2..eb020b30c47d 100644
--- a/arch/arm64/configs/anolis-debug_defconfig
+++ b/arch/arm64/configs/anolis-debug_defconfig
@@ -6884,9 +6884,9 @@ CONFIG_KASAN_VMALLOC=y
 # CONFIG_KASAN_MODULE_TEST is not set
 CONFIG_HAVE_ARCH_KFENCE=y
 CONFIG_KFENCE=y
-CONFIG_KFENCE_SAMPLE_INTERVAL=0
+CONFIG_KFENCE_SAMPLE_INTERVAL=100
 CONFIG_KFENCE_NUM_OBJECTS=255
-# CONFIG_KFENCE_DEFERRABLE is not set
+CONFIG_KFENCE_DEFERRABLE=y
 CONFIG_KFENCE_STRESS_TEST_FAULTS=0
 # end of Memory Debugging

diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig
index 27ba33178302..aba19cdc5358 100644
--- a/arch/arm64/configs/anolis_defconfig
+++ b/arch/arm64/configs/anolis_defconfig
@@ -6868,7 +6868,7 @@ CONFIG_HAVE_ARCH_KFENCE=y
 CONFIG_KFENCE=y
 CONFIG_KFENCE_SAMPLE_INTERVAL=0
 CONFIG_KFENCE_NUM_OBJECTS=255
-# CONFIG_KFENCE_DEFERRABLE is not set
+CONFIG_KFENCE_DEFERRABLE=y
 CONFIG_KFENCE_STRESS_TEST_FAULTS=0
 # end of Memory Debugging

diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig
index ecdf002bc8a9..0a31d38d115c 100644
--- a/arch/x86/configs/anolis-debug_defconfig
+++ b/arch/x86/configs/anolis-debug_defconfig
@@ -7477,7 +7477,7 @@ CONFIG_HAVE_ARCH_KFENCE=y
 CONFIG_KFENCE=y
 CONFIG_KFENCE_SAMPLE_INTERVAL=100
 CONFIG_KFENCE_NUM_OBJECTS=255
-# CONFIG_KFENCE_DEFERRABLE is not set
+CONFIG_KFENCE_DEFERRABLE=y
 CONFIG_KFENCE_STRESS_TEST_FAULTS=0
 CONFIG_HAVE_ARCH_KMSAN=y
 # end of Memory Debugging

diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index 4abe45995548..0b0922337278 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -7436,7 +7436,7 @@ CONFIG_HAVE_ARCH_KFENCE=y
 CONFIG_KFENCE=y
 CONFIG_KFENCE_SAMPLE_INTERVAL=0
 CONFIG_KFENCE_NUM_OBJECTS=255
-# CONFIG_KFENCE_DEFERRABLE is not set
+CONFIG_KFENCE_DEFERRABLE=y
 CONFIG_KFENCE_STRESS_TEST_FAULTS=0
 CONFIG_HAVE_ARCH_KMSAN=y
 # end of Memory Debugging
--
Gitee

From 39e18cb04c1f6a694e12aa83e90b3064f267ef30 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 8 Mar 2024 20:17:47 +0800
Subject: [PATCH 271/953] anolis: crypto: ccp: Fixup the capability of Hygon
 PSP during initialization

ANBZ: #8560

The meaning of the data read from the feature register of Hygon PSP is
not exactly the same as on AMD ASP. Bit 1 in the feature register
indicates TEE on AMD ASP, but not on Hygon PSP, which causes the host to
crash during module initialization, as shown below.
[ 27.898723] BUG: kernel NULL pointer dereference, address: 0000000000000014
[ 27.906503] #PF: supervisor read access in kernel mode
[ 27.912242] #PF: error_code(0x0000) - not-present page
[ 27.917981] PGD 0 P4D 0
[ 27.920810] Oops: 0000 [#1] PREEMPT SMP NOPTI
[ 27.925676] CPU: 67 PID: 1668 Comm: systemd-udevd Not tainted 6.6.7-for-gerrit #3
[ 27.934033] Hardware name: HYGON Hygon65N32/65N32, BIOS A0173036 02/01/2023
[ 27.941807] RIP: 0010:psp_firmware_is_visible+0x3c/0x70 [ccp]
[ 27.948240] Code: 00 00 48 85 c0 74 12 48 81 fe e0 54 53 c1 74 2f 48 81 fe c0 54 53 c1 74 03 31 c0 c3 f6 40 70 02 74 f7 48 8b 50 10 48 8b 52 08 <8b> 52 14 85 d2 74 e8 48 03 50 38 48 89 d7 e8 51 71 0a d7 eb 14 48
[ 27.969204] RSP: 0018:ffffc9000b80fa70 EFLAGS: 00010202
[ 27.975039] RAX: ffff888113c2d9a8 RBX: ffffffffc1535460 RCX: 0000000000000124
[ 27.983008] RDX: 0000000000000000 RSI: ffffffffc15354c0 RDI: ffff8888830dc0c0
[ 27.993320] RBP: ffff888883060980 R08: 0000000000000001 R09: 00000006c8df7639
[ 28.005756] R10: ffff888100258278 R11: 0000000000000100 R12: ffff8888830dc0c0
[ 28.019695] R13: 0000000000000001 R14: 0000000000000000 R15: ffffffffc1535490
[ 28.032285] FS:  00007f7c9ba2b880(0000) GS:ffff88885fcc0000(0000) knlGS:0000000000000000
[ 28.044626] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 28.054928] CR2: 0000000000000014 CR3: 0000800106e50000 CR4: 00000000003506e0
[ 28.065028] Call Trace:
[ 28.067751]  <TASK>
[ 28.070095]  ? __die_body+0x1f/0x60
[ 28.073995]  ? page_fault_oops+0x15d/0x460
[ 28.078573]  ? exc_page_fault+0x78/0x170
[ 28.082956]  ? asm_exc_page_fault+0x26/0x30
[ 28.087632]  ? psp_firmware_is_visible+0x3c/0x70 [ccp]
[ 28.093384]  internal_create_group+0xde/0x3a0
[ 28.093392]  internal_create_groups.part.0+0x3d/0xa0
[ 28.093396]  really_probe+0x197/0x3c0
[ 28.093402]  ? __device_attach_driver+0x100/0x100
[ 28.093405]  __driver_probe_device+0x78/0x160
[ 28.093409]  driver_probe_device+0x1e/0xa0
[ 28.126379]  __driver_attach+0xaa/0x160
[ 28.130667]  ? __device_attach_driver+0x100/0x100
[ 28.135921]  bus_for_each_dev+0x75/0xc0
[ 28.142419]  bus_add_driver+0x112/0x210
[ 28.149240]  driver_register+0x5c/0x110
[ 28.154875]  ? 0xffffffffc14a4000
[ 28.160197]  sp_mod_init+0x10/0x1000 [ccp]
[ 28.166164]  do_one_initcall+0x45/0x210
[ 28.170453]  ? kmalloc_trace+0x29/0x90
[ 28.174642]  do_init_module+0x64/0x240
[ 28.178831]  load_module+0x1d84/0x2010
[ 28.183024]  ?
[ 28.187986] init_module_from_file+0x8b/0xd0
[ 28.192763] do_syscall_64+0x39/0x80
[ 28.206672] entry_SYSCALL_64_after_hwframe+0x63/0xcd
[ 28.212318] RIP: 0033:0x7f7c9b91ea3d
[ 28.216312] Code: 5b 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d c3 a3 0f 00 f7 d8 64 89 01 48
[ 28.237272] RSP: 002b:00007ffe6cee5368 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
[ 28.245725] RAX: ffffffffffffffda RBX: 000055700e302260 RCX: 00007f7c9b91ea3d
[ 28.253691] RDX: 0000000000000000 RSI: 00007f7c9ba5cded RDI: 0000000000000006
[ 28.261658] RBP: 0000000000020000 R08: 0000000000000000 R09: 000055700e4d3188
[ 28.269624] R10: 0000000000000006 R11: 0000000000000246 R12: 00007f7c9ba5cded
[ 28.277590] R13: 0000000000000000 R14: 000055700e4cb7b0 R15: 000055700e302260
[ 28.285552] </TASK>
[ 28.287995] Modules linked in: k10temp ccp(+) drm_kms_helper ipmi_si(+) ipmi_devintf ipmi_msghandler mac_hid sch_fq_codel parport_pc ppdev lp parport ramoops drm reed_solomon efi_pstore ip_tables x_tables autofs4 btrfs blake2b_generic raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx xor raid6_pq libcrc32c raid1 raid0 multipath linear igb i2c_algo_bit dca ptp crc32_pclmul pps_core ahci libahci i2c_piix4 hid_generic usbhid hid
[ 28.288027] CR2: 0000000000000014
[ 28.288031] ---[ end trace 0000000000000000 ]---
[ 28.533899] ipmi_si IPI0001:00: IPMI message handler: Found new BMC (man_id: 0x00d455, prod_id: 0x0202, dev_id: 0x20)
[ 28.604507] RIP: 0010:psp_firmware_is_visible+0x3c/0x70 [ccp]
[ 28.604527] Code: 00 00 48 85 c0 74 12 48 81 fe e0 54 53 c1 74 2f 48 81 fe c0 54 53 c1 74 03 31 c0 c3 f6 40 70 02 74 f7 48 8b 50 10 48 8b 52 08 <8b> 52 14 85 d2 74 e8 48 03 50 38 48 89 d7 e8 51 71 0a d7 eb 14 48
[ 28.604530] RSP: 0018:ffffc9000b80fa70 EFLAGS: 00010202
[ 28.604533] RAX: ffff888113c2d9a8 RBX: ffffffffc1535460 RCX: 0000000000000124
[ 28.604535] RDX: 0000000000000000 RSI: ffffffffc15354c0 RDI: ffff8888830dc0c0
[ 28.604536] RBP: ffff888883060980 R08: 0000000000000001 R09: 00000006c8df7639
[ 28.604537] R10: ffff888100258278 R11: 0000000000000100 R12: ffff8888830dc0c0
[ 28.604539] R13: 0000000000000001 R14: 0000000000000000 R15: ffffffffc1535490
[ 28.604540] FS: 00007f7c9ba2b880(0000) GS:ffff88885fcc0000(0000) knlGS:0000000000000000
[ 28.604542] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 28.604543] CR2: 0000000000000014 CR3: 0000800106e50000 CR4: 00000000003506e0

Also, the meaning of bit 7 in the feature register of Hygon PSP is not
the same as AMD ASP.
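The oops above is the psp sysfs code trusting the capability word and
dereferencing TEE state that is never allocated on Hygon parts. A
minimal sketch of the fixup, which the patch below implements in
psp_get_capability(), is to mask off the two misinterpreted bits
before any generic ccp code consumes them:

    /* Sketch only; the patch adds this as hygon_fixup_psp_caps() */
    if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
        psp->capability &= ~(PSP_CAPABILITY_TEE |
                             PSP_CAPABILITY_PSP_SECURITY_REPORTING);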
Signed-off-by: hanliyang
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2897
---
 drivers/crypto/ccp/psp-dev.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index d42d7bc62352..223e198eddec 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -56,6 +56,13 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static void hygon_fixup_psp_caps(struct psp_device *psp)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		psp->capability &= ~(PSP_CAPABILITY_TEE |
+				     PSP_CAPABILITY_PSP_SECURITY_REPORTING);
+}
+
 static unsigned int psp_get_capability(struct psp_device *psp)
 {
 	unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg);
@@ -73,6 +80,12 @@ static unsigned int psp_get_capability(struct psp_device *psp)
 	}
 	psp->capability = val;
 
+	/*
+	 * Fix capability of Hygon psp, the meaning of Hygon psp feature
+	 * register is not exactly the same as AMD.
+	 */
+	hygon_fixup_psp_caps(psp);
+
 	/* Detect if TSME and SME are both enabled */
 	if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING &&
 	    psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) &&
--
Gitee

From 8dced27840d05bdc0b16f7c6a9b34e62d93992b3 Mon Sep 17 00:00:00 2001
From: Hao Feng
Date: Thu, 25 Mar 2021 13:36:31 +0800
Subject: [PATCH 272/953] anolis: crypto: ccp: Add support to detect Hygon PSP
 on Hygon 2nd/3rd CPUs

ANBZ: #8560

There are 2 types of CCP devices on Hygon 2nd/3rd CPUs; add them to
the device list.

Signed-off-by: Hao Feng
Signed-off-by: hanliyang
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2897
---
 drivers/crypto/ccp/sp-pci.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b6ab56abeb68..dd7db55f9587 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -576,6 +576,8 @@ static const struct pci_device_id sp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
 	{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
 	{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
+	{ PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
+	{ PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
 	/* Last entry must be zero */
 	{ 0, }
 };
--
Gitee

From 4a6c1ec0b0b728a34f27188351c76e24ded265ce Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Thu, 22 Sep 2022 10:59:03 +0800
Subject: [PATCH 273/953] anolis: crypto: ccp: Implement CSV_HGSC_CERT_IMPORT
 ioctl command

ANBZ: #8562

The CSV_HGSC_CERT_IMPORT command can be used to import the Hygon
general secure cert (HGSC) to the Secure Processor, to enable Hygon
Secure Functions, such as CSV, TPM, TPCM, and TDM.
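For illustration, a minimal userspace sketch of issuing this command
through the existing SEV_ISSUE_CMD ioctl on /dev/sev; the helper name
is hypothetical and the two certificates are assumed to be already
loaded into memory (error handling trimmed):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/psp-sev.h>

    /* Hypothetical helper: import an HGSCSK/HGSC certificate pair */
    static int import_hgsc(void *hgscsk, __u32 hgscsk_len,
                           void *hgsc, __u32 hgsc_len)
    {
        struct csv_user_data_hgsc_cert_import data = {
            .hgscsk_cert_address = (__u64)(uintptr_t)hgscsk,
            .hgscsk_cert_len     = hgscsk_len,
            .hgsc_cert_address   = (__u64)(uintptr_t)hgsc,
            .hgsc_cert_len       = hgsc_len,
        };
        struct sev_issue_cmd arg = {
            .cmd  = CSV_HGSC_CERT_IMPORT,
            .data = (__u64)(uintptr_t)&data,
        };
        int fd = open("/dev/sev", O_RDWR), ret;

        if (fd < 0)
            return -1;
        ret = ioctl(fd, SEV_ISSUE_CMD, &arg);
        if (ret)
            fprintf(stderr, "HGSC import failed, fw error 0x%x\n", arg.error);
        close(fd);
        return ret;
    }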
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2901 --- drivers/crypto/ccp/sev-dev.c | 73 +++++++++++++++++++++++++++++++++++- include/linux/psp-sev.h | 21 +++++++++++ include/uapi/linux/psp-sev.h | 24 ++++++++++++ 3 files changed, 116 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 17fb01853dbf..de7965eb12fc 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -127,6 +127,15 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, static int sev_cmd_buffer_len(int cmd) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + switch (cmd) { + case CSV_CMD_HGSC_CERT_IMPORT: + return sizeof(struct csv_data_hgsc_cert_import); + default: + break; + } + } + switch (cmd) { case SEV_CMD_INIT: return sizeof(struct sev_data_init); case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); @@ -1070,6 +1079,50 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) return ret; } +static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) +{ + struct csv_user_data_hgsc_cert_import input; + struct csv_data_hgsc_cert_import *data; + void *hgscsk_blob, *hgsc_blob; + int ret; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* copy HGSCSK certificate blobs from userspace */ + hgscsk_blob = psp_copy_user_blob(input.hgscsk_cert_address, input.hgscsk_cert_len); + if (IS_ERR(hgscsk_blob)) { + ret = PTR_ERR(hgscsk_blob); + goto e_free; + } + + data->hgscsk_cert_address = __psp_pa(hgscsk_blob); + data->hgscsk_cert_len = input.hgscsk_cert_len; + + /* copy HGSC certificate blobs from userspace */ + hgsc_blob = psp_copy_user_blob(input.hgsc_cert_address, input.hgsc_cert_len); + if (IS_ERR(hgsc_blob)) { + ret = PTR_ERR(hgsc_blob); + goto e_free_hgscsk; + } + + data->hgsc_cert_address = __psp_pa(hgsc_blob); + data->hgsc_cert_len = input.hgsc_cert_len; + + ret = __sev_do_cmd_locked(CSV_CMD_HGSC_CERT_IMPORT, data, &argp->error); + + kfree(hgsc_blob); +e_free_hgscsk: + kfree(hgscsk_blob); +e_free: + kfree(data); + return ret; +} + static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -1086,11 +1139,26 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) return -EFAULT; - if (input.cmd > SEV_MAX) - return -EINVAL; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (input.cmd > CSV_MAX) + return -EINVAL; + } else { + if (input.cmd > SEV_MAX) + return -EINVAL; + } mutex_lock(&sev_cmd_mutex); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + switch (input.cmd) { + case CSV_HGSC_CERT_IMPORT: + ret = csv_ioctl_do_hgsc_import(&input); + goto result_to_user; + default: + break; + } + } + switch (input.cmd) { case SEV_FACTORY_RESET: @@ -1126,6 +1194,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) goto out; } +result_to_user: if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 7fd17e82bab4..2b40efb57274 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -81,6 +81,11 @@ enum sev_cmd { SEV_CMD_MAX, }; +enum 
csv_cmd {
+	CSV_CMD_HGSC_CERT_IMPORT	= 0x300,
+	CSV_CMD_MAX,
+};
+
 /**
  * struct sev_data_init - INIT command parameters
  *
@@ -523,6 +528,22 @@ struct sev_data_attestation_report {
 	u32 len;				/* In/Out */
 } __packed;
 
+/**
+ * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters
+ *
+ * @hgscsk_cert_address: HGSCSK certificate chain
+ * @hgscsk_cert_len: len of HGSCSK certificate
+ * @hgsc_cert_address: HGSC certificate chain
+ * @hgsc_cert_len: len of HGSC certificate
+ */
+struct csv_data_hgsc_cert_import {
+	u64 hgscsk_cert_address;	/* In */
+	u32 hgscsk_cert_len;		/* In */
+	u32 reserved;			/* In */
+	u64 hgsc_cert_address;		/* In */
+	u32 hgsc_cert_len;		/* In */
+} __packed;
+
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
 /**

diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 1c9da485318f..ae76776c0b15 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -32,6 +32,15 @@ enum {
 	SEV_MAX,
 };
 
+/**
+ * CSV platform commands
+ */
+enum {
+	CSV_HGSC_CERT_IMPORT = 201,
+
+	CSV_MAX,
+};
+
 /**
  * SEV Firmware status code
  */
@@ -154,6 +163,21 @@ struct sev_user_data_get_id2 {
 	__u32 length;				/* In/Out */
 } __packed;
 
+/**
+ * struct csv_user_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters
+ *
+ * @hgscsk_cert_address: HGSCSK certificate chain
+ * @hgscsk_cert_len: length of HGSCSK certificate
+ * @hgsc_cert_address: HGSC certificate chain
+ * @hgsc_cert_len: length of HGSC certificate
+ */
+struct csv_user_data_hgsc_cert_import {
+	__u64 hgscsk_cert_address;	/* In */
+	__u32 hgscsk_cert_len;		/* In */
+	__u64 hgsc_cert_address;	/* In */
+	__u32 hgsc_cert_len;		/* In */
+} __packed;
+
 /**
  * struct sev_issue_cmd - SEV ioctl parameters
  *
--
Gitee

From 2f7458f5c047510c6cb3800c6015fcb1f6eb3e43 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Mon, 26 Apr 2021 10:47:46 +0800
Subject: [PATCH 274/953] anolis: KVM: x86: Support VM_ATTESTATION hypercall

ANBZ: #8564

When a SEV guest wants to collect an attestation report, it cannot
communicate with the PSP directly. Add a VM_ATTESTATION hypercall that
lets the guest ask the host to fetch the attestation report on its
behalf. Since SEV guest memory is encrypted, the host cannot tamper
with the report data.
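As a usage sketch, a guest kernel can request a report with the new
hypercall like this (illustrative only; the buffer must be physically
contiguous and page aligned, and the csv-guest driver added in a later
patch does exactly this):

    #include <linux/kvm_para.h>
    #include <asm/page.h>

    /* Illustrative: 'buf' is a guest kernel buffer for the report */
    static long request_attestation_report(void *buf, unsigned long len)
    {
        return kvm_hypercall2(KVM_HC_VM_ATTESTATION, __pa(buf), len);
    }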
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2902 --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/svm/sev.c | 67 ++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.c | 2 + arch/x86/kvm/svm/svm.h | 2 + arch/x86/kvm/x86.c | 8 +++- include/uapi/linux/kvm_para.h | 1 + 7 files changed, 82 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 9b419f0de713..3ab3e361ea81 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -135,6 +135,7 @@ KVM_X86_OP(msr_filter_changed) KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); +KVM_X86_OP_OPTIONAL(vm_attestation) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fb9f5fa96cc9..0dbbe96afb8a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1750,6 +1750,8 @@ struct kvm_x86_ops { * Returns vCPU specific APICv inhibit reasons */ unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); + + int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index e86231c3b8a5..6f52785aec60 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -75,6 +75,8 @@ static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; +static const char sev_vm_mnonce[] = "VM_ATTESTATION"; + struct enc_region { struct list_head list; unsigned long npages; @@ -3143,3 +3145,68 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); } + +int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data = NULL; + struct page **pages; + unsigned long guest_uaddr, n; + int ret = 0, offset, error; + + if (!sev_guest(kvm) || (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)) + return -ENOTTY; + + /* + * The physical address of guest must valid and page aligned, and + * the length of guest memory region must be page size aligned. + */ + if (!gpa || (gpa & ~PAGE_MASK) || (len & ~PAGE_MASK)) { + pr_err("invalid guest address or length\n"); + return -EFAULT; + } + + guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + pages = sev_pin_memory(kvm, guest_uaddr, len, &n, 1); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* + * The attestation report must be copied into contiguous memory region, + * lets verify that userspace memory pages are contiguous before we + * issue commmand. 
+	 */
+	if (get_num_contig_pages(0, pages, n) != n) {
+		ret = -EINVAL;
+		goto e_unpin_memory;
+	}
+
+	ret = -ENOMEM;
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto e_unpin_memory;
+
+	/* sev_vm_mnonce indicates attestation request from guest */
+	if (sizeof(sev_vm_mnonce) >= sizeof(data->mnonce)) {
+		ret = -EINVAL;
+		goto e_free;
+	}
+
+	memcpy(data->mnonce, sev_vm_mnonce, sizeof(sev_vm_mnonce));
+
+	offset = guest_uaddr & (PAGE_SIZE - 1);
+	data->address = __sme_page_pa(pages[0]) + offset;
+	data->len = len;
+
+	data->handle = sev->handle;
+	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &error);
+
+	if (ret)
+		pr_err("vm attestation ret %#x, error %#x\n", ret, error);
+
+e_free:
+	kfree(data);
+e_unpin_memory:
+	sev_unpin_memory(kvm, pages, n);
+	return ret;
+}

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 77f1eeefcd34..4d3aaa49ea01 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5039,6 +5039,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
+
+	.vm_attestation = sev_vm_attestation,
 };
 
 /*

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be67ab7fdd10..b2249897a7db 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -693,6 +693,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
 void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
+int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len);
+
 /* vmenter.S */
 
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4aafd007964f..5eeba2b810ca 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9858,7 +9858,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		a3 &= 0xFFFFFFFF;
 	}
 
-	if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
+	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 &&
+	    nr != KVM_HC_VM_ATTESTATION) {
 		ret = -KVM_EPERM;
 		goto out;
 	}
@@ -9921,6 +9922,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		vcpu->arch.complete_userspace_io = complete_hypercall_exit;
 		return 0;
 	}
+	case KVM_HC_VM_ATTESTATION:
+		ret = -KVM_ENOSYS;
+		if (kvm_x86_ops.vm_attestation)
+			ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1);
+		break;
 	default:
 		ret = -KVM_ENOSYS;
 		break;

diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index 960c7e93d1a9..67192835455e 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -30,6 +30,7 @@
 #define KVM_HC_SEND_IPI		10
 #define KVM_HC_SCHED_YIELD	11
 #define KVM_HC_MAP_GPA_RANGE	12
+#define KVM_HC_VM_ATTESTATION	100 /* Specific to Hygon CPU */
 
 /*
 * hypercalls use architecture specific
--
Gitee

From 19b05d6470eba2f30914b7d690b5d703d371c742 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Tue, 30 May 2023 17:34:30 +0800
Subject: [PATCH 275/953] anolis: driver/virt/coco: Add HYGON CSV Guest
 driver.

ANBZ: #8564

CSV firmware provides the guest a mechanism to communicate with the
PSP without risk from a malicious hypervisor who wishes to read,
alter, drop or replay the messages sent. The driver provides a
userspace interface to communicate with the PSP to request the
attestation report and more.
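For illustration, a minimal userspace sketch of the CSV_CMD_GET_REPORT
ioctl; REPORT_BUF_LEN is a caller-chosen size and is hypothetical, the
first CSV_REPORT_INPUT_DATA_LEN bytes of the buffer must already hold
the request input, and the sketch assumes the driver's csv-guest.h
types are visible to the userspace build:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "csv-guest.h"        /* header added by this patch */

    #define REPORT_BUF_LEN 4096   /* hypothetical buffer size */

    static int get_csv_report(unsigned char *buf)
    {
        struct csv_report_req req = {
            .report_data = buf,
            .len = REPORT_BUF_LEN,
        };
        int fd = open("/dev/csv-guest", O_RDWR), ret;

        if (fd < 0)
            return -1;
        ret = ioctl(fd, CSV_CMD_GET_REPORT, &req);
        close(fd);
        return ret;
    }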
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2902 --- Documentation/virt/coco/csv-guest.rst | 33 +++++++ drivers/virt/Kconfig | 2 + drivers/virt/Makefile | 1 + drivers/virt/coco/csv-guest/Kconfig | 12 +++ drivers/virt/coco/csv-guest/Makefile | 2 + drivers/virt/coco/csv-guest/csv-guest.c | 114 ++++++++++++++++++++++++ drivers/virt/coco/csv-guest/csv-guest.h | 42 +++++++++ 7 files changed, 206 insertions(+) create mode 100644 Documentation/virt/coco/csv-guest.rst create mode 100644 drivers/virt/coco/csv-guest/Kconfig create mode 100644 drivers/virt/coco/csv-guest/Makefile create mode 100644 drivers/virt/coco/csv-guest/csv-guest.c create mode 100644 drivers/virt/coco/csv-guest/csv-guest.h diff --git a/Documentation/virt/coco/csv-guest.rst b/Documentation/virt/coco/csv-guest.rst new file mode 100644 index 000000000000..23cba2a5fd7c --- /dev/null +++ b/Documentation/virt/coco/csv-guest.rst @@ -0,0 +1,33 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================================================== +CSV Guest API Documentation +=================================================================== + +1. General description +====================== + +The CSV guest driver exposes IOCTL interfaces via the /dev/csv-guest misc +device to allow userspace to get certain CSV guest-specific details. + +2. API description +================== + +In this section, for each supported IOCTL, the following information is +provided along with a generic description. + +:Input parameters: Parameters passed to the IOCTL and related details. +:Output: Details about output data and return value (with details about + the non common error values). + +2.1 CSV_CMD_GET_REPORT +----------------------- + +:Input parameters: struct csv_report_req +:Output: Upon successful execution, CSV_REPORT data is copied to + csv_report_req.report_data and return 0. Return -EINVAL for invalid + operands, -EIO on VMMCALL failure or standard error number on other + common failures. + +The CSV_CMD_GET_REPORT IOCTL can be used by the attestation software to get +the CSV_REPORT from the CSV module using VMMCALL[KVM_HC_VM_ATTESTATION]. diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig index f79ab13a5c28..b1c4efa00182 100644 --- a/drivers/virt/Kconfig +++ b/drivers/virt/Kconfig @@ -54,4 +54,6 @@ source "drivers/virt/coco/sev-guest/Kconfig" source "drivers/virt/coco/tdx-guest/Kconfig" +source "drivers/virt/coco/csv-guest/Kconfig" + endif diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile index e9aa6fc96fab..62681a260758 100644 --- a/drivers/virt/Makefile +++ b/drivers/virt/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_ACRN_HSM) += acrn/ obj-$(CONFIG_EFI_SECRET) += coco/efi_secret/ obj-$(CONFIG_SEV_GUEST) += coco/sev-guest/ obj-$(CONFIG_INTEL_TDX_GUEST) += coco/tdx-guest/ +obj-$(CONFIG_CSV_GUEST) += coco/csv-guest/ diff --git a/drivers/virt/coco/csv-guest/Kconfig b/drivers/virt/coco/csv-guest/Kconfig new file mode 100644 index 000000000000..4cbde598e665 --- /dev/null +++ b/drivers/virt/coco/csv-guest/Kconfig @@ -0,0 +1,12 @@ +config CSV_GUEST + tristate "HYGON CSV Guest driver" + default m + depends on AMD_MEM_ENCRYPT + help + CSV firmware provides the guest a mechanism to communicate with + the PSP without risk from a malicious hypervisor who wishes to read, + alter, drop or replay the messages sent. The driver provides + userspace interface to communicate with the PSP to request the + attestation report and more. 
+ + If you choose 'M' here, this module will be called csv-guest. diff --git a/drivers/virt/coco/csv-guest/Makefile b/drivers/virt/coco/csv-guest/Makefile new file mode 100644 index 000000000000..a1c3a1499fc6 --- /dev/null +++ b/drivers/virt/coco/csv-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CSV_GUEST) += csv-guest.o diff --git a/drivers/virt/coco/csv-guest/csv-guest.c b/drivers/virt/coco/csv-guest/csv-guest.c new file mode 100644 index 000000000000..7bd9abe7d8b6 --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Userspace interface for CSV guest driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: fangbaoshun + */ +#include +#include +#include +#include +#include +#include + +#include + +#include "csv-guest.h" + +static long csv_get_report(void __user *argp) +{ + u8 *csv_report; + long ret; + struct csv_report_req req; + + if (copy_from_user(&req, argp, sizeof(struct csv_report_req))) + return -EFAULT; + + if (req.len < CSV_REPORT_INPUT_DATA_LEN) + return -EINVAL; + + csv_report = kzalloc(req.len, GFP_KERNEL); + if (!csv_report) { + ret = -ENOMEM; + goto out; + } + + /* Save user input data */ + if (copy_from_user(csv_report, req.report_data, CSV_REPORT_INPUT_DATA_LEN)) { + ret = -EFAULT; + goto out; + } + + /* Generate CSV_REPORT using "KVM_HC_VM_ATTESTATION" VMMCALL */ + ret = kvm_hypercall2(KVM_HC_VM_ATTESTATION, __pa(csv_report), req.len); + if (ret) + goto out; + + if (copy_to_user(req.report_data, csv_report, req.len)) + ret = -EFAULT; + +out: + kfree(csv_report); + return ret; +} + +static long csv_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case CSV_CMD_GET_REPORT: + return csv_get_report((void __user *)arg); + default: + return -ENOTTY; + } +} + +static void mem_test_init(void) +{ + char head_str[] = "test mem encrypt"; + u64 *va_addr = __va(0x0); + + if (va_addr) { + memset(va_addr, 0x66, PAGE_SIZE); + memcpy(va_addr, head_str, sizeof(head_str)); + clflush_cache_range(va_addr, PAGE_SIZE); + } else + pr_err("Initialize 1 page for csv memory test failed!\n"); +} + +static const struct file_operations csv_guest_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = csv_guest_ioctl, + .compat_ioctl = csv_guest_ioctl, +}; + +static struct miscdevice csv_guest_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "csv-guest", + .fops = &csv_guest_fops, + .mode = 0777, +}; + +static int __init csv_guest_init(void) +{ + // This module only working on CSV guest vm. + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + return -ENODEV; + + // Initialize 1 page for csv memory test + mem_test_init(); + + return misc_register(&csv_guest_dev); +} + +static void __exit csv_guest_exit(void) +{ + misc_deregister(&csv_guest_dev); +} + +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0"); +MODULE_DESCRIPTION("HYGON CSV Guest Driver"); +module_init(csv_guest_init); +module_exit(csv_guest_exit); diff --git a/drivers/virt/coco/csv-guest/csv-guest.h b/drivers/virt/coco/csv-guest/csv-guest.h new file mode 100644 index 000000000000..0342d5f16cb3 --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Userspace interface for CSV guest driver + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#ifndef __VIRT_CSVGUEST_H__ +#define __VIRT_CSVGUEST_H__ + +#include +#include + +/* Length of the user input datas used in VMMCALL */ +#define CSV_REPORT_USER_DATA_LEN 64 +#define CSV_REPORT_MNONCE_LEN 16 +#define CSV_REPORT_HASH_LEN 32 +#define CSV_REPORT_INPUT_DATA_LEN (CSV_REPORT_USER_DATA_LEN + CSV_REPORT_MNONCE_LEN \ + + CSV_REPORT_HASH_LEN) + +/** + * struct csv_report_req - Request struct for CSV_CMD_GET_REPORT IOCTL. + * + * @report_data:User buffer with REPORT_DATA to be included into CSV_REPORT, and it's also + * user buffer to store CSV_REPORT output from VMMCALL[KVM_HC_VM_ATTESTATION]. + * @len: Length of the user buffer. + */ +struct csv_report_req { + u8 *report_data; + int len; +}; + +/* + * CSV_CMD_GET_REPORT - Get CSV_REPORT using VMMCALL[KVM_HC_VM_ATTESTATION] + * + * Return 0 on success, -EIO on VMMCALL execution failure, and + * standard errno on other general error cases. + */ +#define CSV_CMD_GET_REPORT _IOWR('D', 1, struct csv_report_req) + +#endif /* __VIRT_CSVGUEST_H__ */ -- Gitee From 39045ba696e1d4f98ac328e136126126945d3a1c Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 13:49:52 +0800 Subject: [PATCH 276/953] anolis: crypto: ccp: Add support to detect Hygon PSP on Hygon 4th CPUs ANBZ: #8568 Since Hygon 4th CPUs, there are Cryptographic Co-Processor devices with 3 different PCI device IDs, add them in the device list. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- drivers/crypto/ccp/sp-pci.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index dd7db55f9587..5185555a74a7 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -417,6 +417,12 @@ static const struct sev_vdata sevv2 = { .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */ }; +static const struct sev_vdata csvv1 = { + .cmdresp_reg = 0x10580, + .cmdbuff_addr_lo_reg = 0x105e0, + .cmdbuff_addr_hi_reg = 0x105e4, +}; + static const struct tee_vdata teev1 = { .cmdresp_reg = 0x10544, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */ @@ -498,6 +504,13 @@ static const struct psp_vdata pspv6 = { .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ }; +static const struct psp_vdata psp_csvv1 = { + .sev = &csvv1, + .feature_reg = 0x105fc, + .inten_reg = 0x10670, + .intsts_reg = 0x10674, +}; + #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -562,6 +575,15 @@ static const struct sp_dev_vdata dev_vdata[] = { .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv6, +#endif + }, + { /* 9 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &psp_csvv1, #endif }, }; @@ -578,6 +600,9 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[9] }, + { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[9] }, /* Last entry must be zero */ { 0, } }; -- Gitee From c6c86d589ce6b0009893bf4c1898fbb3f6e68282 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 14 Jul 2023 17:17:58 +0800 Subject: [PATCH 277/953] anolis: x86/mm: Print CSV info 
into the kernel log

ANBZ: #8568

Add CSV and CSV2 to the list of memory encryption features. Also print
the CPU vendor when printing CSV info.

Signed-off-by: hanliyang
Reviewed-by: Tianjia Zhang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2904
---
 arch/x86/mm/mem_encrypt.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9f27e14e185f..b97261dfd13d 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -39,6 +39,27 @@ bool force_dma_unencrypted(struct device *dev)
 	return false;
 }
 
+static void print_hygon_cc_feature_info(void)
+{
+	/* Secure Memory Encryption */
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
+		/*
+		 * HYGON SME is mutually exclusive with any of the
+		 * HYGON CSV features below.
+		 */
+		pr_info(" HYGON SME");
+		return;
+	}
+
+	/* Secure Encrypted Virtualization */
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		pr_info(" HYGON CSV");
+
+	/* Encrypted Register State */
+	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
+		pr_info(" HYGON CSV2");
+}
+
 static void print_mem_encrypt_feature_info(void)
 {
 	pr_info("Memory Encryption Features active:");
@@ -48,6 +69,11 @@ static void print_mem_encrypt_feature_info(void)
 		return;
 	}
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		print_hygon_cc_feature_info();
+		return;
+	}
+
 	pr_cont(" AMD");
 
 	/* Secure Memory Encryption */
--
Gitee

From 444497e508fd2cabd5ca804dff067c74191ca60a Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Mon, 17 Jul 2023 18:44:56 +0800
Subject: [PATCH 278/953] anolis: crypto: ccp: Print Hygon CSV API version
 when CSV support is detected

ANBZ: #8568

The Cryptographic Co-Processor module will print 'SEV API' instead of
'CSV API' on Hygon CPUs if CSV is supported. Fix this confusing
message here.

Signed-off-by: hanliyang
Reviewed-by: Tianjia Zhang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2904
---
 drivers/crypto/ccp/sev-dev.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index de7965eb12fc..909cf8c964f9 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -80,6 +80,13 @@ static void *sev_es_tmr;
 #define NV_LENGTH (32 * 1024)
 static void *sev_init_ex_buffer;
 
+/*
+ * Hygon CSV build info:
+ * Hygon CSV build info is 32-bit in length other than 8-bit as that
+ * in AMD SEV.
+ */
+static u32 hygon_csv_build;
+
 static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
 {
 	struct sev_device *sev = psp_master->sev_data;
@@ -509,8 +516,12 @@ static int __sev_platform_init_locked(int *error)
 
 	dev_dbg(sev->dev, "SEV firmware initialized\n");
 
-	dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
-		 sev->api_minor, sev->build);
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		dev_info(sev->dev, "CSV API:%d.%d build:%d\n", sev->api_major,
+			 sev->api_minor, hygon_csv_build);
+	else
+		dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+			 sev->api_minor, sev->build);
 
 	return 0;
 }
@@ -732,6 +743,10 @@ static int sev_get_api_version(void)
 	sev->build = status.build;
 	sev->state = status.state;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		hygon_csv_build = (status.flags >> 9) |
+				  ((u32)status.build << 23);
+
 	return 0;
 }
--
Gitee

From fff688316d646cdaf4e4de4cfe4d04170e6cc87d Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Mon, 17 Jul 2023 19:02:27 +0800
Subject: [PATCH 279/953] anolis: KVM: SVM: Print Hygon CSV support info if
 support is detected

ANBZ: #8568

KVM will print 'SEV supported' instead of 'CSV supported' on Hygon
CPUs if CSV is supported. Fix these confusing messages here.

Fix other 'SEV' messages in arch/x86/kvm/svm/svm.c.

Signed-off-by: hanliyang
Reviewed-by: Tianjia Zhang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2904
---
 arch/x86/kvm/svm/sev.c | 6 ++++--
 arch/x86/kvm/svm/svm.c | 5 ++++-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 6f52785aec60..756df9e46e67 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2267,11 +2267,13 @@ void __init sev_hardware_setup(void)
 out:
 	if (boot_cpu_has(X86_FEATURE_SEV))
-		pr_info("SEV %s (ASIDs %u - %u)\n",
+		pr_info("%s %s (ASIDs %u - %u)\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV" : "SEV",
 			sev_supported ? "enabled" : "disabled",
 			min_sev_asid, max_sev_asid);
 	if (boot_cpu_has(X86_FEATURE_SEV_ES))
-		pr_info("SEV-ES %s (ASIDs %u - %u)\n",
+		pr_info("%s %s (ASIDs %u - %u)\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV2" : "SEV-ES",
 			sev_es_supported ? "enabled" : "disabled",
 			min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4d3aaa49ea01..13cb7b2919c7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -545,7 +545,10 @@ static bool __kvm_is_svm_supported(void)
 	}
 
 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
-		pr_info("KVM is unsupported when running as an SEV guest\n");
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			pr_info("KVM is unsupported when running as an CSV guest\n");
+		else
+			pr_info("KVM is unsupported when running as an SEV guest\n");
 		return false;
 	}
--
Gitee

From 09fa56e0ab78156c8395220f70d463a3a1d996b4 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Mon, 31 Jul 2023 23:35:42 +0800
Subject: [PATCH 280/953] anolis: x86/cpu: Detect memory encryption features
 on Hygon CPUs

ANBZ: #8568

Hygon SME is identified by CPUID 0x8000001f, but requires BIOS support
to enable it (set bit 23 of MSR_AMD64_SYSCFG).

Hygon CSV and CSV2 are identified by CPUID 0x8000001f, but require
BIOS support to enable them (set bit 23 of MSR_AMD64_SYSCFG and set
bit 0 of MSR_K7_HWCR).

Only show the SME, CSV, CSV2 features as available if reported by
CPUID and enabled by BIOS.
Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- arch/x86/kernel/cpu/hygon.c | 46 +++++++++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/proc.c | 10 ++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index b6f932d2d6aa..07a3a2863ae1 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -246,6 +246,50 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) resctrl_cpu_detect(c); } +static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) +{ + u64 msr; + u32 eax; + + eax = cpuid_eax(0x8000001f); + + /* Check whether SME or CSV is supported */ + if (!(eax & (BIT(0) | BIT(1)))) + return; + + /* If BIOS has not enabled SME then don't advertise the SME feature. */ + rdmsrl(MSR_AMD64_SYSCFG, msr); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + goto clear_all; + + /* + * Always adjust physical address bits. Even though this will be a + * value above 32-bits this is still done for CONFIG_X86_32 so that + * accurate values are reported. + */ + c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; + + /* Don't advertise SME and CSV features under CONFIG_X86_32. */ + if (IS_ENABLED(CONFIG_X86_32)) + goto clear_all; + + /* + * If BIOS has not enabled CSV then don't advertise the CSV and CSV2 + * feature. + */ + rdmsrl(MSR_K7_HWCR, msr); + if (!(msr & MSR_K7_HWCR_SMMLOCK)) + goto clear_csv; + + return; + +clear_all: + setup_clear_cpu_cap(X86_FEATURE_SME); +clear_csv: + setup_clear_cpu_cap(X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV_ES); +} + static void early_init_hygon(struct cpuinfo_x86 *c) { u32 dummy; @@ -294,6 +338,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_VMMCALL); hygon_get_topology_early(c); + + early_detect_mem_encrypt(c); } static void init_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 31c0e68f6227..a0f81db51eac 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -100,8 +100,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_puts(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) - if (cpu_has(c, i) && x86_cap_flags[i] != NULL) - seq_printf(m, " %s", x86_cap_flags[i]); + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) { + if (c->x86_vendor == X86_VENDOR_HYGON) + seq_printf(m, " %s", i == X86_FEATURE_SEV ? "csv" : + (i == X86_FEATURE_SEV_ES ? "csv2" : + x86_cap_flags[i])); + else + seq_printf(m, " %s", x86_cap_flags[i]); + } #ifdef CONFIG_X86_VMX_FEATURE_NAMES if (cpu_has(c, X86_FEATURE_VMX) && c->vmx_capability[0]) { -- Gitee From d60328348cba435a62926c9f1d197cb5ed6a7a93 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 4 Aug 2023 03:20:47 +0800 Subject: [PATCH 281/953] anolis: x86/cpufeatures: Add CPUID_8C86_0000_EDX CPUID leaf ANBZ: #8568 This is a pure feature bits leaf. Add SM3 and SM4 feature bits from this leaf on Hygon CPUs. 
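As an illustration, once this leaf is populated into word 21, kernel
code could test the new bits like any other feature flag (the helper
below is hypothetical; the leaf index and bit assignments come from
this patch):

    /* Hypothetical helper: SM3/SM4 instruction support on Hygon CPUs */
    static bool hygon_has_sm3_sm4(void)
    {
        /* The leaf reports its own index in EAX when implemented */
        if (cpuid_eax(0x8C860000) < 0x8C860000)
            return false;
        return boot_cpu_has(X86_FEATURE_SM3) &&
               boot_cpu_has(X86_FEATURE_SM4);
    }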
Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- arch/x86/include/asm/cpufeature.h | 7 +++++-- arch/x86/include/asm/cpufeatures.h | 6 +++++- arch/x86/include/asm/disabled-features.h | 3 ++- arch/x86/include/asm/required-features.h | 3 ++- arch/x86/kernel/cpu/hygon.c | 13 +++++++++++++ 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index a1273698fc43..c97522b47019 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -33,6 +33,7 @@ enum cpuid_leafs CPUID_7_EDX, CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, + CPUID_8C86_0000_EDX, }; #define X86_CAP_FMT_NUM "%d:%d" @@ -91,8 +92,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 21)) + BUILD_BUG_ON_ZERO(NCAPINTS != 22)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -116,8 +118,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 21)) + BUILD_BUG_ON_ZERO(NCAPINTS != 22)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c8e03c79769c..9988bf1561bd 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS			21	   /* N 32-bit words worth of info */
+#define NCAPINTS			22	   /* N 32-bit words worth of info */
 #define NBUGINTS			2	   /* N 32-bit bug flags */
 
 /*
@@ -469,6 +469,10 @@
 #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
 #define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
 
+/* HYGON-defined CPU features, CPUID level 0x8c860000:0 (EDX), word 21 */
+#define X86_FEATURE_SM3			(21*32 + 1) /* SM3 instructions */
+#define X86_FEATURE_SM4			(21*32 + 2) /* SM4 instructions */
+
 /*
  * BUG word(s)
  */

diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 702d93fdd10e..88fcf08458d9 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -143,6 +143,7 @@
 #define DISABLED_MASK18	(DISABLE_IBT)
 #define DISABLED_MASK19	0
 #define DISABLED_MASK20	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */

diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 7ba1726b71c7..e9187ddd3d1f 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -99,6 +99,7 @@
 #define REQUIRED_MASK18	0
 #define REQUIRED_MASK19	0
 #define REQUIRED_MASK20	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define REQUIRED_MASK21	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */

diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 07a3a2863ae1..340710ed89d0 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -246,6 +246,18 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
 	resctrl_cpu_detect(c);
 }
 
+static void init_hygon_cap(struct cpuinfo_x86 *c)
+{
+	/* Test for Extended Feature Flags presence */
+	if (cpuid_eax(0x8C860000) >= 0x8C860000) {
+		/*
+		 * Store Extended Feature Flags of the CPU capability
+		 * bit array
+		 */
+		c->x86_capability[CPUID_8C86_0000_EDX] = cpuid_edx(0x8C860000);
+	}
+}
+
 static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 {
 	u64 msr;
@@ -401,6 +413,7 @@ static void init_hygon(struct cpuinfo_x86 *c)
 	/* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
 	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
 
+	init_hygon_cap(c);
 }
 
 static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
--
Gitee

From d941fef38f041ef8d12486b17137e6e5630d5025 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 4 Aug 2023 03:54:15 +0800
Subject: [PATCH 282/953] anolis: x86/cpufeatures: Add CSV3 CPU feature

ANBZ: #8568

Add CPU feature detection for Hygon 3rd CSV. This feature enhances
CSV2 by also isolating NPT and VMCB, making them inaccessible to the
hypervisor.
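Once set, the flag can gate CSV3-only paths like any other feature
bit; an illustrative one-liner:

    /* Sketch: gate CSV3-specific code on the new feature flag */
    if (boot_cpu_has(X86_FEATURE_CSV3))
        pr_info("CSV3: NPT and VMCB isolation available\n");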
Signed-off-by: hanliyang
Reviewed-by: Tianjia Zhang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2904
---
 arch/x86/include/asm/cpufeatures.h | 2 ++
 arch/x86/kernel/cpu/hygon.c        | 1 +
 2 files changed, 3 insertions(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 9988bf1561bd..8262eafc95a1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -457,6 +457,8 @@
 #define X86_FEATURE_V_TSC_AUX		(19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
 #define X86_FEATURE_DEBUG_SWAP		(19*32+14) /* AMD SEV-ES full debug state swap support */
+/* HYGON 3rd CSV */
+#define X86_FEATURE_CSV3		(19*32 + 30) /* HYGON 3rd CSV */
 
 /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
 #define X86_FEATURE_NO_NESTED_DATA_BP	(20*32+ 0) /* "" No Nested Data Breakpoints */

diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 340710ed89d0..7b81626648f7 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -300,6 +300,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 clear_csv:
 	setup_clear_cpu_cap(X86_FEATURE_SEV);
 	setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
+	setup_clear_cpu_cap(X86_FEATURE_CSV3);
 }
 
 static void early_init_hygon(struct cpuinfo_x86 *c)
--
Gitee

From 99b5ee83ff567b17c4a45f95b19f53958faaae8f Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Sun, 10 Mar 2024 14:58:10 +0800
Subject: [PATCH 283/953] anolis: x86/cpu/hygon: Clear SME feature flag when
 not in use

ANBZ: #8568

Commit 08f253ec3767 ("x86/cpu: Clear SME feature flag when not in
use") clears the SME feature flag on AMD CPUs when the kernel is not
using SME; this helps userspace determine from /proc/cpuinfo whether
SME is available and in use.

Apply this change to Hygon CPUs as well.

Signed-off-by: hanliyang
Reviewed-by: Tianjia Zhang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2904
---
 arch/x86/kernel/cpu/hygon.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 7b81626648f7..4b90eeb5110d 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -285,6 +285,10 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 	if (IS_ENABLED(CONFIG_X86_32))
 		goto clear_all;
 
+	/* Clear the SME feature flag if the kernel is not using it. */
+	if (!sme_me_mask)
+		setup_clear_cpu_cap(X86_FEATURE_SME);
+
 	/*
	 * If BIOS has not enabled CSV then don't advertise the CSV and CSV2
	 * feature.
	 */
--
Gitee

From 216b16e0e4fc6e9fbabf4a6c85dda8440583cc80 Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:04 +0800
Subject: [PATCH 284/953] anolis: sw64: add build infrastructure

ANBZ: #4688

Add Kbuild, Makefile, Kconfig and linker script for the SW64 build
infrastructure.
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/Kbuild | 7 + arch/sw_64/Kconfig | 645 +++++++++++++++++++++++++++++ arch/sw_64/Kconfig.debug | 53 +++ arch/sw_64/Makefile | 69 +++ arch/sw_64/Makefile.postlink | 36 ++ arch/sw_64/boot/.gitignore | 2 + arch/sw_64/boot/Makefile | 29 ++ arch/sw_64/boot/dts/Makefile | 27 ++ arch/sw_64/include/asm/Kbuild | 16 + arch/sw_64/include/uapi/asm/Kbuild | 5 + arch/sw_64/kernel/.gitignore | 2 + arch/sw_64/kernel/Makefile | 51 +++ arch/sw_64/kernel/vmlinux.lds.S | 113 +++++ 13 files changed, 1055 insertions(+) create mode 100644 arch/sw_64/Kbuild create mode 100644 arch/sw_64/Kconfig create mode 100644 arch/sw_64/Kconfig.debug create mode 100644 arch/sw_64/Makefile create mode 100644 arch/sw_64/Makefile.postlink create mode 100644 arch/sw_64/boot/.gitignore create mode 100644 arch/sw_64/boot/Makefile create mode 100644 arch/sw_64/boot/dts/Makefile create mode 100644 arch/sw_64/include/asm/Kbuild create mode 100644 arch/sw_64/include/uapi/asm/Kbuild create mode 100644 arch/sw_64/kernel/.gitignore create mode 100644 arch/sw_64/kernel/Makefile create mode 100644 arch/sw_64/kernel/vmlinux.lds.S diff --git a/arch/sw_64/Kbuild b/arch/sw_64/Kbuild new file mode 100644 index 000000000000..aa0bf0507406 --- /dev/null +++ b/arch/sw_64/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += kernel/ mm/ platform/ +obj-$(CONFIG_NET) += net/ +obj-$(CONFIG_KVM) += kvm/ +obj-$(CONFIG_MATHEMU) += math-emu/ + +obj-$(CONFIG_BUILTIN_DTB) += boot/dts/ diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig new file mode 100644 index 000000000000..0fd1be7195cc --- /dev/null +++ b/arch/sw_64/Kconfig @@ -0,0 +1,645 @@ +# SPDX-License-Identifier: GPL-2.0 +config SW64 + bool + default y + select ACPI + select ACPI_MCFG if (ACPI && PCI) + select ACPI_REDUCED_HARDWARE_ONLY + select ARCH_ATOMIC + select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI + select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_PHYS_TO_DMA + select ARCH_HAS_PMEM_API + select ARCH_HAS_PTE_DEVMAP + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SG_CHAIN + select ARCH_HAS_UACCESS_FLUSHCACHE + select ARCH_HAS_VM_GET_PAGE_PROT + select ARCH_HAS_ZONE_DEVICE + select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_INLINE_READ_LOCK + select ARCH_INLINE_READ_LOCK_BH + select ARCH_INLINE_READ_LOCK_IRQ + select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_UNLOCK + select ARCH_INLINE_READ_UNLOCK_BH + select ARCH_INLINE_READ_UNLOCK_IRQ + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE + select ARCH_INLINE_WRITE_LOCK + select ARCH_INLINE_WRITE_LOCK_BH + select ARCH_INLINE_WRITE_LOCK_IRQ + select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_UNLOCK + select ARCH_INLINE_WRITE_UNLOCK_BH + select ARCH_INLINE_WRITE_UNLOCK_IRQ + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select ARCH_NO_PREEMPT + select ARCH_SUPPORTS_ACPI + select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_UPROBES + select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS + select 
ARCH_WANT_DEFAULT_BPF_JIT + select ARCH_WANT_FRAME_POINTERS + select ARCH_WANT_IPC_PARSE_VERSION + select AUDIT_ARCH + select COMMON_CLK + select DMA_OPS if PCI + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_LEGACY + select GENERIC_IRQ_MIGRATION if SMP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP if PCI + select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER + select GENERIC_TIME_VSYSCALL + select HANDLE_DOMAIN_IRQ + select HARDIRQS_SW_RESEND + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KGDB + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ASM_MODVERSIONS + select HAVE_C_RECORDMCOUNT + select HAVE_DEBUG_BUGVERBOSE + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_EBPF_JIT + select HAVE_FAST_GUP + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select HAVE_IDE + select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE + select HAVE_KRETPROBES + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_PCI + select HAVE_PCSPKR_PLATFORM + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if STACKTRACE + select HAVE_RSEQ + select HAVE_SYSCALL_TRACEPOINTS + select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA + select MEMORY_HOTPLUG_SPARSE if MEMORY_HOTPLUG + select MODULES_USE_ELF_RELA + select NO_BOOTMEM + select OF_EARLY_FLATTREE if OF + select OLD_SIGSUSPEND + select PCI_DOMAINS_GENERIC if PCI + select PCI_ECAM if (ACPI && PCI) + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI + select PCI_SW64 if PCI + select SET_FS + select SPARSEMEM_EXTREME if SPARSEMEM + select SW64_IRQ_CPU + select SW64_IRQ_MSI if PCI_MSI + select SW64_IRQ_MSI_VT if PCI_MSI + select SW64_TIMER + select SWIOTLB + select THREAD_INFO_IN_TASK + +config LOCKDEP_SUPPORT + def_bool y + +config 64BIT + def_bool y + +config MMU + bool + default y + +config PGTABLE_LEVELS + int + default 4 + +config ARCH_SUPPORTS_HUGETLBFS + def_bool y + +config ARCH_ENABLE_MEMORY_HOTPLUG + bool + default y + +config ARCH_ENABLE_MEMORY_HOTREMOVE + bool + default y + +config ARCH_HAS_ILOG2_U32 + bool + default n + +config ARCH_HAS_ILOG2_U64 + bool + default n + +config GENERIC_GPIO + bool + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config ZONE_DMA32 + bool + default y + +config NEED_DMA_MAP_STATE + def_bool y + +config NEED_SG_DMA_LENGTH + def_bool y + +config ARCH_WANT_HUGE_PMD_SHARE + def_bool y + +config GENERIC_ISA_DMA + bool + default y + +config NONCACHE_PAGE + bool + depends on SW64 + default y + +config AUDIT_ARCH + bool + +config SYS_HAS_EARLY_PRINTK + bool + +config HAVE_CSRRW + bool + +menu "System setup" + +menu "Machine Configuration" + +choice + prompt "Subarchitecture Configuration" + +config SUBARCH_C3B + bool "C3B" + +config SUBARCH_C4 + bool "C4" + select HAVE_CSRRW + select GENERIC_SCHED_CLOCK +endchoice + +choice + prompt "Uncore Configuration" + +config UNCORE_XUELANG + bool "Uncore for C3B" + depends on SUBARCH_C3B + help + Sunway cpu uncore for C3B + +config UNCORE_JUNZHANG + bool "Uncore for C4" + depends on SUBARCH_C4 + help + Sunway cpu uncore for C4 +endchoice + +choice + prompt "Platform Type" + +config PLATFORM_XUELANG + bool "Xuelang" + depends on 
UNCORE_XUELANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + select SW64_INTC_V2 + select I2C_SUNWAY if I2C + help + Sunway board chipset for C3B + +config PLATFORM_JUNZHANG + bool "JunZhang" + depends on UNCORE_JUNZHANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + help + Sunway board chipset for C4 + +endchoice + +config MIGHT_HAVE_PC_SERIO + bool "Use PC serio device i8042" + select ARCH_MIGHT_HAVE_PC_SERIO + default n + +endmenu + +menu "CPU Power Management" +source "drivers/cpufreq/Kconfig" + +config SW64_CPUAUTOPLUG + bool "sw64 CPU Autoplug interface" + depends on SW64_CPUFREQ + default y + help + Turns on the interface for SW64_CPU CPUAUTOPLUG. + +endmenu +# clear all implied options (don't want default values for those): +# Most of these machines have ISA slots; not exactly sure which don't, +# and this doesn't activate hordes of code, so do it always. +config ISA + bool + default y + help + Find out whether you have ISA slots on your motherboard. ISA is the + name of a bus system, i.e. the way the CPU talks to the other stuff + inside your box. Other bus systems are PCI, EISA, MicroChannel + (MCA) or VESA. ISA is an older system, now being displaced by PCI; + newer boards don't support it. If you have ISA, say Y, otherwise N. + +config ISA_DMA_API + bool + default y + +config PCI_DOMAINS + def_bool PCI + +config PCI_DOMAINS_GENERIC + def_bool PCI + +config PCI_SYSCALL + def_bool PCI + +config IOMMU_HELPER + def_bool PCI + +config PHYSICAL_START + hex "Physical address where the kernel starts" + default "0x900000" + help + This gives the physical address where the kernel starts, and it + is 0x10000 before _text. If you plan to use kernel for capturing + the crash dump change this value to start of the reserved region + (the "X" value as specified in the "crashkernel=YM@XM" command + line boot parameter passed to the panic-ed kernel). + +config KEXEC + bool "Kexec system call (EXPERIMENTAL)" + select KEXEC_CORE + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + + It is an ongoing process to be certain the hardware in a machine + is properly shutdown, so do not be surprised if this code does not + initially work for you. As of this writing the exact hardware + interface is strongly in flux, so no good recommendation can be + made. + +config CRASH_DUMP + bool "Kernel crash dumps (EXPERIMENTAL)" + help + Generate crash dump after being started by kexec. + This should be normally only set in special crash dump kernels + which are loaded in the main kernel with kexec-tools into + a specially reserved region and then later executed after + a crash by kdump/kexec. The crash dump kernel must be compiled + to a memory address not used by the main kernel or firmware using + PHYSICAL_START. + +config SECCOMP + def_bool y + prompt "Enable seccomp to safely compute untrusted bytecode" + help + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. 
Once seccomp is + enabled via prctl(PR_SET_SECCOMP), it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. Only embedded should say N here. + +config GENERIC_HWEIGHT + bool + default y + +config SMP + bool "Symmetric multi-processing support" + depends on SW64 + select USE_GENERIC_SMP_HELPERS + help + This enables support for systems with more than one CPU. If you have + a system with only one CPU, like most personal computers, say N. If + you have a system with more than one CPU, say Y. + + If you say N here, the kernel will run on single and multiprocessor + machines, but will use only one CPU of a multiprocessor machine. If + you say Y here, the kernel will run on many, but not all, + singleprocessor machines. On a singleprocessor machine, the kernel + will run faster if you say N here. + + See also the SMP-HOWTO available at + . + + If you don't know what to do here, say N. + +config ARCH_PROC_KCORE_TEXT + def_bool y + +config HAVE_DEC_LOCK + bool "Use arch-specified dec_and_lock" + depends on SMP && !NUMA + default y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + +config ARCH_SUPPORTS_UPROBES + def_bool y + +config SCHED_SMT + bool "SMT scheduler support" + depends on SMP && SUBARCH_C4 + help + Improves the CPU scheduler's decision making when dealing with + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. + +config NR_CPUS + int "Maximum number of CPUs (2-256)" + range 2 256 + depends on SMP + default "64" if UNCORE_XUELANG + help + SW6 support can handle a maximum of 256 CPUs. + +config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + depends on SMP + help + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu. + ( Note: power management support will enable this option + automatically on SMP systems. ) + Say N if you want to disable CPU hotplug. + +config ARCH_SPARSEMEM_ENABLE + bool "Sparse Memory Support" + depends on SMP + select SPARSEMEM_VMEMMAP_ENABLE + +source "kernel/livepatch/Kconfig" + +config NUMA + bool "NUMA Support" + depends on SMP && !FLATMEM + select ACPI_NUMA if ACPI + help + Say Y to compile the kernel to support NUMA (Non-Uniform Memory + Access). This option is for configuring high-end multiprocessor + server machines. If in doubt, say N. + +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config NODES_SHIFT + int + default "7" + depends on NUMA + +config RELOCATABLE + bool "Relocatable kernel" + help + This builds a kernel image that retains relocation information + so it can be loaded someplace besides the default 1MB. + The relocations make the kernel binary about 15% larger, + but are discarded at runtime + +config RELOCATION_TABLE_SIZE + hex "Relocation table size" + depends on RELOCATABLE + range 0x0 0x01000000 + default "0x80000" + help + A table of relocation data will be appended to the kernel binary + and parsed at boot to fix up the relocated kernel. + + This option allows the amount of space reserved for the table to be + adjusted, although the default of 1Mb should be ok in most cases. + + The build will fail and a valid size suggested if this is too small. + + If unsure, leave at the default value. 
+
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	depends on RELOCATABLE
+	help
+	  Randomizes the physical and virtual address at which the
+	  kernel image is loaded, as a security feature that
+	  deters exploit attempts relying on knowledge of the location
+	  of kernel internals.
+
+	  Entropy is gathered from hardware registers available early
+	  in boot.
+
+	  The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+
+	  If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+	hex "Maximum kASLR offset" if EXPERT
+	depends on RANDOMIZE_BASE
+	range 0x0 0x20000000
+	default "0x10000000"
+	help
+	  When kASLR is active, this provides the maximum offset that will
+	  be applied to the kernel image. It should be set according to the
+	  amount of physical RAM available in the target system minus
+	  PHYSICAL_START and must be a power of 2.
+
+	  This is limited by the size of the KTEXT space, 512MB. The
+	  default is 256MB.
+
+config HZ
+	int "Kernel timer frequency (HZ)"
+	default 500
+
+source "drivers/eisa/Kconfig"
+
+source "drivers/pcmcia/Kconfig"
+
+source "fs/Kconfig.binfmt"
+
+source "arch/sw_64/lib/Kconfig"
+
+endmenu
+
+menu "Boot options"
+
+config USE_OF
+	bool "Flattened Device Tree support"
+	select OF
+	select IRQ_DOMAIN
+	help
+	  Include support for flattened device tree machine descriptions.
+
+config BUILTIN_DTB
+	bool "Embed DTB in kernel image"
+	depends on OF
+	default n
+	help
+	  Embeds a device tree binary in the kernel image.
+
+config BUILTIN_DTB_NAME
+	string "Built in DTB"
+	depends on BUILTIN_DTB
+	help
+	  Set the name of the DTB to embed; leave blank to pick one
+	  automatically based on kernel configuration.
+
+config EFI
+	bool "UEFI runtime support"
+	select UCS2_STRING
+	select EFI_RUNTIME_WRAPPERS
+	default y
+	help
+	  This option provides support for runtime services provided
+	  by UEFI firmware (such as non-volatile variables, realtime
+	  clock, and platform reset). A UEFI stub is also provided to
+	  allow the kernel to be booted as an EFI application. This
+	  is only useful on systems that have UEFI firmware.
+
+config DMI
+	bool "Enable support for SMBIOS (DMI) tables"
+	depends on EFI
+	default y
+	help
+	  This enables SMBIOS/DMI support for systems.
+
+	  This option is only useful on systems that have UEFI firmware.
+	  However, even with this option, the resultant kernel should
+	  continue to boot on existing non-UEFI platforms.
+
+	  NOTE: This does *NOT* enable or encourage the use of DMI quirks,
+	  i.e., the practice of identifying the platform via DMI to
+	  decide whether certain workarounds for buggy hardware and/or
+	  firmware need to be enabled. This would require the DMI subsystem
+	  to be enabled much earlier than it currently is, which is
+	  non-trivial.
+
+config CMDLINE_BOOL
+	bool "Built-in kernel command line"
+	help
+	  Allow for specifying boot arguments to the kernel at
+	  build time. On some systems (e.g. embedded ones), it is
+	  necessary or convenient to provide some or all of the
+	  kernel boot arguments with the kernel itself (that is,
+	  to not rely on the boot loader to provide them.)
+
+	  To compile command line arguments into the kernel,
+	  set this option to 'Y', then fill in the
+	  boot arguments in CONFIG_CMDLINE.
+
+	  Systems with fully functional boot loaders (i.e. non-embedded)
+	  should leave this option set to 'N'.
+
+config CMDLINE
+	string "Built-in kernel command string"
+	depends on CMDLINE_BOOL
+	default ""
+	help
+	  Enter arguments here that should be compiled into the kernel
+	  image and used at boot time. If the boot loader provides a
+	  command line at boot time, it is appended to this string to
+	  form the full kernel command line when the system boots.
+
+	  However, you can use the CONFIG_CMDLINE_OVERRIDE option to
+	  change this behavior.
+
+	  In most cases, the command line (whether built-in or provided
+	  by the boot loader) should specify the device for the root
+	  file system.
+
+config CMDLINE_OVERRIDE
+	bool "Built-in command line overrides boot loader arguments"
+	depends on CMDLINE_BOOL
+	help
+	  Set this option to 'Y' to have the kernel ignore the boot loader
+	  command line, and use ONLY the built-in command line.
+
+	  This is used to work around broken boot loaders. This should
+	  be set to 'N' under normal conditions.
+
+config FORCE_MAX_ZONEORDER
+	int
+	default "16" if (HUGETLB_PAGE)
+	default "11"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages. This option selects the largest power of two that the kernel
+	  keeps in the memory allocator. If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 11 means that the largest free memory block is 2^10 pages.
+
+	  We make sure that we can allocate up to a HugePage size for each
+	  configuration, hence:
+		MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2
+
+endmenu
+
+source "drivers/firmware/Kconfig"
+
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/acpi/Kconfig"
+
+config ARCH_SUSPEND_POSSIBLE
+	depends on SW64
+	def_bool y
+
+config ARCH_HIBERNATION_POSSIBLE
+	depends on SW64
+	def_bool y
+
+source "drivers/cpuidle/Kconfig"
+
+source "drivers/idle/Kconfig"
+
+endmenu
+
+source "arch/sw_64/kvm/Kconfig"
diff --git a/arch/sw_64/Kconfig.debug b/arch/sw_64/Kconfig.debug
new file mode 100644
index 000000000000..6cb3c2488b36
--- /dev/null
+++ b/arch/sw_64/Kconfig.debug
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+config EARLY_PRINTK
+	bool "Early printk" if EXPERT
+	depends on SYS_HAS_EARLY_PRINTK
+	default y
+	help
+	  This option enables special console drivers which allow the kernel
+	  to print messages very early in the bootup process.
+
+	  This is useful for kernel debugging when your machine crashes very
+	  early before the console code is initialized. For normal operation,
+	  it is not recommended because it looks ugly on some machines and
+	  doesn't cooperate with an X server. You should normally say N here,
+	  unless you want to debug such a crash.
+
+config UNA_PRINT
+	bool "Show debug info about unaligned user memory accesses"
+	default n
+
+config MATHEMU
+	tristate "Kernel FP software completion" if DEBUG_KERNEL && !SMP
+	default y if !DEBUG_KERNEL || SMP
+	help
+	  This option is required for IEEE compliant floating point arithmetic
+	  on the SW64. The only time you would ever not say Y is to say M in
+	  order to debug the code. Say Y unless you know what you are doing.
+
+config STACKTRACE_SUPPORT
+	bool
+	default y
+
+config SW64_RRU
+	bool "Enable RRU (Remote Read User)"
+	depends on SW64
+	default n
+	help
+	  Duplicate user stdout and stderr to a dedicated memory region.
+	  Do not enable it in a production kernel.
+
+config SW64_RRK
+	bool "Enable RRK (Remote Read Kernel)"
+	depends on SW64
+	default y
+	help
+	  Duplicate the kernel log to a dedicated memory region.
+	  Do not enable it in a production kernel.
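A quick numeric check of the FORCE_MAX_ZONEORDER formula earlier in this Kconfig: the sketch below plugs in a hypothetical 8KB base page with 8-byte page-table entries (PAGE_SHIFT = 13; these values are assumed for illustration, not taken from this series) and reproduces the non-hugetlb default of 11:

	#include <stdio.h>

	int main(void)
	{
		int page_shift = 13;				/* 8KB pages (assumed) */
		int pmd_shift  = page_shift + (page_shift - 3);	/* 1024 PTEs per page */
		int max_order  = (pmd_shift - page_shift) + 1;	/* = page_shift - 2 = 11 */

		/* Largest free block is 2^(max_order - 1) pages. */
		long bytes = (1L << (max_order - 1)) << page_shift;

		printf("MAX_ORDER=%d, largest block=%ld MB\n", max_order, bytes >> 20);
		return 0;
	}

With those inputs the largest free block is 2^10 pages, i.e. 8MB, matching the "2^10 pages" example in the help text above.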
+
+config DEBUG_MATCH
+	bool "Instruction-flow and data-flow match debugfs interface"
+	depends on DEBUG_FS
+	default n
+	help
+	  Turns on the DebugFS interface for instruction-flow and data-flow match.
diff --git a/arch/sw_64/Makefile b/arch/sw_64/Makefile
new file mode 100644
index 000000000000..84f0dca5e9f7
--- /dev/null
+++ b/arch/sw_64/Makefile
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# arch/sw_64/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+
+
+archscripts: scripts_basic
+	$(Q)$(MAKE) $(build)=arch/sw_64/tools relocs
+
+archheaders:
+	$(Q)$(MAKE) $(build)=arch/sw_64/kernel/syscalls all
+
+NM := $(NM) -B
+CCVERSION := $(shell $(CC) -dumpversion)
+LDFLAGS_vmlinux := -static -N #-relax
+CHECKFLAGS += -D__sw__
+
+ifeq ($(CONFIG_RELOCATABLE),y)
+LDFLAGS_vmlinux += --emit-relocs
+endif
+
+cflags-y := -pipe -ffixed-8 -mno-fp-regs #-msmall-data
+ifeq ($(CONFIG_SUBARCH_C4),y)
+	cflags-y += -fsw-rev
+endif
+cflags-y += $(call cc-option, -fno-jump-tables)
+
+cflags-y += $(cpuflags-y)
+
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_DEFCONFIG = xuelang_defconfig
+
+head-y := arch/sw_64/kernel/head.o
+
+core-y += arch/sw_64/
+drivers-$(CONFIG_PCI) += arch/sw_64/pci/
+libs-y += arch/sw_64/lib/
+
+# export what is needed by arch/sw_64/boot/Makefile
+LIBS_Y := $(patsubst %/, %/lib.a, $(libs-y))
+export LIBS_Y
+
+boot := arch/sw_64/boot
+
+# Default target when executing make with no arguments
+all: $(boot)/vmlinux.bin.gz
+
+$(boot)/vmlinux.bin.gz: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $@
+
+bootimage bootpfile bootpzfile: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+archclean:
+	$(Q)$(MAKE) $(clean)=$(boot)
+	$(Q)$(MAKE) $(clean)=arch/sw_64/tools
+
+KBUILD_IMAGE := $(boot)/vmlinux.bin
+
+define archhelp
+	echo '* boot		- Compressed kernel image (arch/sw_64/boot/vmlinux.bin.gz)'
+endef
diff --git a/arch/sw_64/Makefile.postlink b/arch/sw_64/Makefile.postlink
new file mode 100644
index 000000000000..248844d141dd
--- /dev/null
+++ b/arch/sw_64/Makefile.postlink
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+# ===========================================================================
+# Post-link SW64 pass
+# ===========================================================================
+#
+# 1. Insert relocations into vmlinux
+
+PHONY := __archpost
+__archpost:
+
+-include include/config/auto.conf
+include scripts/Kbuild.include
+
+CMD_RELOCS = arch/sw_64/tools/relocs
+quiet_cmd_relocs = RELOCS  $@
+      cmd_relocs = $(CMD_RELOCS) $@
+
+# `@true` prevents complaint when there is nothing to be done
+
+vmlinux: FORCE
+	@true
+ifeq ($(CONFIG_RELOCATABLE),y)
+	$(call if_changed,relocs)
+endif
+
+%.ko: FORCE
+	@true
+
+clean:
+	@true
+
+PHONY += FORCE clean
+
+FORCE:
+
+.PHONY: $(PHONY)
diff --git a/arch/sw_64/boot/.gitignore b/arch/sw_64/boot/.gitignore
new file mode 100644
index 000000000000..8a90e24c76ab
--- /dev/null
+++ b/arch/sw_64/boot/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+vmlinux
diff --git a/arch/sw_64/boot/Makefile b/arch/sw_64/boot/Makefile
new file mode 100644
index 000000000000..dd0976484649
--- /dev/null
+++ b/arch/sw_64/boot/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# arch/sw_64/boot/Makefile
+#
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. 
See the file "COPYING" in the main directory of this archive +# for more details. +# +# Based on arch/arm64/boot/Makefile. +# + +OBJCOPYFLAGS_vmlinux.bin := -O binary + +targets := vmlinux vmlinux.bin vmlinux.bin.gz + +quiet_cmd_strip = STRIP $@ + cmd_strip = $(STRIP) -o $@ $< + +# Compressed kernel image +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + @echo ' Kernel $@ is ready' + +$(obj)/vmlinux: vmlinux FORCE + $(call if_changed,strip) + +$(obj)/vmlinux.bin: $(obj)/vmlinux FORCE + $(call if_changed,objcopy) diff --git a/arch/sw_64/boot/dts/Makefile b/arch/sw_64/boot/dts/Makefile new file mode 100644 index 000000000000..e32c159cab64 --- /dev/null +++ b/arch/sw_64/boot/dts/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +# Built-in dtb + +ifeq ($(CONFIG_PLATFORM_XUELANG),y) +builtindtb-y := chip3 +endif + +ifeq ($(CONFIG_PLATFORM_JUNZHANG),y) +builtindtb-y := empty +endif + +ifeq ($(CONFIG_BUILTIN_DTB), y) +ifneq ($(CONFIG_BUILTIN_DTB_NAME),"") + builtindtb-y := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_NAME)) +endif + +obj-y += $(builtindtb-y).dtb.o +dtb-y := $(builtindtb-y).dtb + +# for CONFIG_OF_ALL_DTBS test +dtstree := $(srctree)/$(src) +dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +else +dtb-y := $(builtindtb-y).dtb +endif + +clean-files := *.dtb *.dtb.S diff --git a/arch/sw_64/include/asm/Kbuild b/arch/sw_64/include/asm/Kbuild new file mode 100644 index 000000000000..0dd0a704d8f1 --- /dev/null +++ b/arch/sw_64/include/asm/Kbuild @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 + +generic-y += clkdev.h +generic-y += export.h +generic-y += kvm_types.h +generic-y += mcs_spinlock.h +generic-y += param.h +generic-y += qrwlock.h +generic-y += qspinlock.h +generic-y += rwsem.h +generic-y += seccomp.h +generic-y += segment.h +generic-y += types.h +generic-y += user.h + +generated-y += syscall_table.h diff --git a/arch/sw_64/include/uapi/asm/Kbuild b/arch/sw_64/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..15700040f138 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# UAPI Header export list + +generic-y += kvm_para.h +generated-y += unistd_64.h diff --git a/arch/sw_64/kernel/.gitignore b/arch/sw_64/kernel/.gitignore new file mode 100644 index 000000000000..46c9537c5551 --- /dev/null +++ b/arch/sw_64/kernel/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +vmlinux.lds diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile new file mode 100644 index 000000000000..abf27ad19a94 --- /dev/null +++ b/arch/sw_64/kernel/Makefile @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux kernel. 
+#
+
+extra-y		:= vmlinux.lds
+asflags-y	:= $(KBUILD_CFLAGS)
+ccflags-y	:= -Wno-sign-compare
+
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_printk.o = -pg
+endif
+
+obj-y	:= entry.o fpu.o traps.o process.o sys_sw64.o irq.o \
+	   irq_sw64.o signal.o setup.o ptrace.o time.o \
+	   systbls.o dup_print.o chip_setup.o \
+	   insn.o early_init.o topology.o cacheinfo.o \
+	   vdso.o vdso/ hmcall.o stacktrace.o idle.o reset.o \
+	   head.o termios.o
+
+obj-$(CONFIG_SUBARCH_C3B)	+= tc.o
+obj-$(CONFIG_ACPI)		+= acpi.o
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_MODULES)		+= module.o
+obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_SUSPEND)		+= suspend_asm.o suspend.o
+obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
+obj-$(CONFIG_HIBERNATION)	+= hibernate_asm.o hibernate.o
+obj-$(CONFIG_AUDIT)		+= audit.o
+obj-$(CONFIG_RELOCATABLE)	+= relocate.o
+obj-$(CONFIG_DEBUG_FS)		+= segvdbg.o unaligned.o
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
+obj-$(CONFIG_DEBUG_MATCH)	+= match.o
+
+ifndef CONFIG_PCI
+obj-y	+= pci-noop.o
+endif
+
+# Core logic support
+obj-$(CONFIG_SW64_CPUAUTOPLUG)	+= cpuautoplug.o
+
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
+obj-$(CONFIG_KPROBES)		+= kprobes/
+obj-$(CONFIG_UPROBES)		+= uprobes.o
+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
+obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
diff --git a/arch/sw_64/kernel/vmlinux.lds.S b/arch/sw_64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..9b81b2c7afb8
--- /dev/null
+++ b/arch/sw_64/kernel/vmlinux.lds.S
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define RUNTIME_DISCARD_EXIT
+#define EMITS_PT_NOTE
+#define RO_EXCEPTION_TABLE_ALIGN	16
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+OUTPUT_FORMAT("elf64-sw_64")
+OUTPUT_ARCH(sw_64)
+ENTRY(__start)
+PHDRS { text PT_LOAD; note PT_NOTE; }
+jiffies = jiffies_64;
+SECTIONS
+{
+	. = _TEXT_START;
+
+	__start = .;
+	_text = .;	/* Text and read-only data */
+	_stext = .;
+	.text : {
+		HEAD_TEXT
+		TEXT_TEXT
+		SCHED_TEXT
+		LOCK_TEXT
+		IRQENTRY_TEXT
+		SOFTIRQENTRY_TEXT
+		KPROBES_TEXT
+		*(.fixup)
+		*(.gnu.warning)
+	} :text
+	_etext = .;	/* End of text section */
+
+	RO_DATA(PAGE_SIZE)
+
+	/* Will be freed after init */
+	__init_begin = ALIGN(PAGE_SIZE);
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
+	/* we have to discard exit text and such at runtime, not link time */
+	.exit.text :
+	{
+		EXIT_TEXT
+	}
+	.exit.data :
+	{
+		EXIT_DATA
+	}
+	PERCPU_SECTION(L1_CACHE_BYTES)
+
+	/*
+	 * Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
+	 * needed for the THREAD_SIZE aligned init_task gets freed after init
+	 */
+	. = ALIGN(THREAD_SIZE);
+	__init_end = .;
+	/* Freed after init ends here */
+
+	_sdata = .;	/* Start of rw data section */
+	_data = .;
+	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+	.got : {
+#ifdef CONFIG_RELOCATABLE
+		_got_start = .;
+#endif
+		*(.got)
+#ifdef CONFIG_RELOCATABLE
+		_got_end = .;
+#endif
+	}
+	.sdata : {
+		*(.sdata)
+	}
+	_edata = .;	/* End of data section */
+
+#ifdef CONFIG_RELOCATABLE
+	. = ALIGN(4);
+	.data.reloc : {
+		_relocation_start = .;
+		/*
+		 * Space for relocation table
+		 * This needs to be filled so that the
+		 * relocs tool can overwrite the content.
+		 * An invalid value is left at the start of the
+		 * section to abort relocation if the table
+		 * has not been filled in.
+		 */
+		LONG(0xFFFFFFFF);
+		FILL(0);
+		. 
+= CONFIG_RELOCATION_TABLE_SIZE - 4; + _relocation_end = .; + } +#endif + BSS_SECTION(0, 0, 0) + _end = .; + + .mdebug 0 : { + *(.mdebug) + } + .note 0 : { + *(.note) + } + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} -- Gitee From e9d1f969d59cb957808aa5c28076be257616cee8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:05 +0800 Subject: [PATCH 285/953] anolis: sw64: add CPU definition headers ANBZ: #4688 Add common headers (CPU definition) for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/core.h | 86 ++++++++++++++++++++++++ arch/sw_64/include/asm/cpu.h | 5 ++ arch/sw_64/include/asm/csr.h | 97 ++++++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/regdef.h | 45 +++++++++++++ 4 files changed, 233 insertions(+) create mode 100644 arch/sw_64/include/asm/core.h create mode 100644 arch/sw_64/include/asm/cpu.h create mode 100644 arch/sw_64/include/asm/csr.h create mode 100644 arch/sw_64/include/uapi/asm/regdef.h diff --git a/arch/sw_64/include/asm/core.h b/arch/sw_64/include/asm/core.h new file mode 100644 index 000000000000..2b6748cec93d --- /dev/null +++ b/arch/sw_64/include/asm/core.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CORE_H +#define _ASM_SW64_CORE_H + +#include + +#define II_II0 0 +#define II_II1 1 +#define II_SLEEP 2 +#define II_WAKE 3 +#define II_NMII 6 + +#define II_RESET II_NMII + +#if defined(CONFIG_SUBARCH_C3B) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 5 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 31 + +#define CORE_ID_BITS 5 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return 0; +} + +#elif defined(CONFIG_SUBARCH_C4) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 12 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 8 + +#define CORE_ID_BITS 6 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return rdhtctl() == 0x3; +} + +#endif + +#define DOMAIN_ID_MASK (GENMASK(DOMAIN_ID_BITS - 1, 0) << DOMAIN_ID_SHIFT) +#define THREAD_ID_MASK (GENMASK(THREAD_ID_BITS - 1, 0) << THREAD_ID_SHIFT) +#define CORE_ID_MASK (GENMASK(CORE_ID_BITS - 1, 0) << CORE_ID_SHIFT) +#define MAX_CORES_PER_CPU (1 << CORE_ID_BITS) + +/* + * 0x00 ~ 0xff for hardware mm fault + */ + +#define MMCSR__TNV 0x0 +#define MMCSR__IACV 0x1 +#define MMCSR__FOR 0x2 +#define MMCSR__FOE 0x3 +#define MMCSR__FOW 0x4 + +#define MMCSR__BAD_DVA 0x6 +#define MMCSR__ACV1 0x7 +#define MMCSR__ACV0 0xc +#define MMCSR__BAD_IVA 0xf + +/* 0x100 ~ 0x1ff for match debug */ +#define MMCSR__DA_MATCH 0x100 +#define MMCSR__DV_MATCH 0x101 +#define MMCSR__DAV_MATCH 0x102 +#define MMCSR__IA_MATCH 0x103 +#define MMCSR__IDA_MATCH 0x104 +#define MMCSR__IV_MATCH 0x105 + + /* entry.S */ +extern void entArith(void); +extern void entIF(void); +extern void entInt(void); +extern void entMM(void); +extern void entSys(void); +extern void entUna(void); +/* head.S */ +extern void __smp_callin(unsigned long args); +#endif /* _ASM_SW64_CORE_H */ diff --git a/arch/sw_64/include/asm/cpu.h b/arch/sw_64/include/asm/cpu.h new file mode 100644 index 000000000000..4da30bb91d89 --- /dev/null +++ b/arch/sw_64/include/asm/cpu.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPU_H +#define _ASM_SW64_CPU_H + +#endif /* _ASM_SW64_CPU_H */ diff --git a/arch/sw_64/include/asm/csr.h b/arch/sw_64/include/asm/csr.h new file mode 100644 index 
000000000000..0610384208a4 --- /dev/null +++ b/arch/sw_64/include/asm/csr.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CSR_H +#define _ASM_SW64_CSR_H + +#include + +#define CSR_EXC_SUM 0xd +#define CSR_INT_EN 0x1a +#define CSR_INT_STAT 0x1b +#define CSR_PCIE_MSI0_INT 0x1d +#define CSR_PCIE_MSI1_INT 0x1e +#define CSR_PCIE_MSI2_INT 0x1f +#define CSR_PCIE_MSI3_INT 0x20 +#define CSR_INT_VEC 0x2d +#define CSR_PCIE_MSI0_INTEN 0x35 +#define CSR_PCIE_MSI1_INTEN 0x36 +#define CSR_PCIE_MSI2_INTEN 0x37 +#define CSR_PCIE_MSI3_INTEN 0x38 +#define CSR_EXC_GPA 0x3b +#define CSR_EXC_PC 0xe +#define CSR_AS_INFO 0x3c +#define CSR_DS_STAT 0x48 +#define CSR_SOFTCID 0xc9 +#define CSR_DVA 0x54 +#define CSR_PTBR_SYS 0x68 +#define CSR_PTBR_USR 0x69 +#define CSR_APTP 0x6a +#define CSR_CID 0xc4 +#define CSR_WR_FREGS 0xc8 +#define CSR_SHTCLOCK 0xca +#define CSR_SHTCLOCK_OFFSET 0xcb + +#ifdef CONFIG_SUBARCH_C4 +#define CSR_IA_VPNMATCH 0xa +#define CSR_UPCR 0x15 +#define CSR_VPCR 0x16 +#define CSR_IA_MATCH 0x17 +#define CSR_IA_MASK 0x18 +#define CSR_IV_MATCH 0x19 +#define CSR_IA_UPNMATCH 0x3a +#define CSR_DC_CTLP 0x4e +#define CSR_DA_MATCH 0x51 +#define CSR_DA_MASK 0x52 +#define CSR_DA_MATCH_MODE 0x53 +#define CSR_DV_MATCH 0x56 +#define CSR_DV_MASK 0x57 +#define CSR_IDA_MATCH 0xc5 +#define CSR_IDA_MASK 0xc6 + +#define DA_MATCH_EN_S 4 +#define DV_MATCH_EN_S 6 +#define DAV_MATCH_EN_S 7 +#define DPM_MATCH 8 +#define DPM_MATCH_EN_S 10 +#define IDA_MATCH_EN_S 53 +#define IV_PM_EN_S 61 +#define IV_MATCH_EN_S 62 +#define IA_MATCH_EN_S 63 + +#endif + + +#ifdef CONFIG_HAVE_CSRRW +#define read_csr(x) \ + ({ unsigned long __val; \ + __asm__ __volatile__("csrr %0,%1" : "=r"(__val) : "i"(x)); \ + __val; }) + +#define write_csr(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1" ::"r"(x), "i"(y)); }) + +#define write_csr_imb(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1; imemb" ::"r"(x), "i"(y)); }) + + +#ifndef __ASSEMBLY__ +#include +static inline void update_ptbr_sys(unsigned long ptbr) +{ + imemb(); + write_csr_imb(ptbr, CSR_PTBR_SYS); +} +#endif +#else +#define read_csr(x) (0) +#define write_csr(x, y) do { } while (0) +#define write_csr_imb(x, y) do { } while (0) + +#ifndef __ASSEMBLY__ +static inline void update_ptbr_sys(unsigned long ptbr) +{ + wrptbr(ptbr); +} +#endif + +#endif +#endif /* _ASM_SW64_CSR_H */ diff --git a/arch/sw_64/include/uapi/asm/regdef.h b/arch/sw_64/include/uapi/asm/regdef.h new file mode 100644 index 000000000000..7460a987c726 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/regdef.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_REGDEF_H +#define _UAPI_ASM_SW64_REGDEF_H + +#define v0 $0 /* function return value */ + +#define t0 $1 /* temporary registers (caller-saved) */ +#define t1 $2 +#define t2 $3 +#define t3 $4 +#define t4 $5 +#define t5 $6 +#define t6 $7 +#define t7 $8 + +#define s0 $9 /* saved-registers (callee-saved registers) */ +#define s1 $10 +#define s2 $11 +#define s3 $12 +#define s4 $13 +#define s5 $14 +#define s6 $15 +#define fp s6 /* frame-pointer (s6 in frame-less procedures) */ + +#define a0 $16 /* argument registers (caller-saved) */ +#define a1 $17 +#define a2 $18 +#define a3 $19 +#define a4 $20 +#define a5 $21 + +#define t8 $22 /* more temps (caller-saved) */ +#define t9 $23 +#define t10 $24 +#define t11 $25 +#define ra $26 /* return address register */ +#define t12 $27 + +#define pv t12 /* procedure-variable register */ +#define AT $at /* assembler temporary */ +#define gp $29 /* global pointer */ 
+#define sp $30 /* stack pointer */ +#define zero $31 /* reads as zero, writes are noops */ + +#endif /* _UAPI_ASM_SW64_REGDEF_H */ -- Gitee From 94d71d88755699f79dd945e977a968988ecbcdc8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:01 +0800 Subject: [PATCH 286/953] anolis: sw64: add atomic/locking headers ANBZ: #4688 Add common headers (atomic, bitops, barrier and locking) for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/atomic.h | 547 +++++++++++++++++++++++++++++ arch/sw_64/include/asm/barrier.h | 30 ++ arch/sw_64/include/asm/bitops.h | 566 +++++++++++++++++++++++++++++++ arch/sw_64/include/asm/cmpxchg.h | 73 ++++ arch/sw_64/include/asm/percpu.h | 19 ++ arch/sw_64/include/asm/xchg.h | 485 ++++++++++++++++++++++++++ 6 files changed, 1720 insertions(+) create mode 100644 arch/sw_64/include/asm/atomic.h create mode 100644 arch/sw_64/include/asm/barrier.h create mode 100644 arch/sw_64/include/asm/bitops.h create mode 100644 arch/sw_64/include/asm/cmpxchg.h create mode 100644 arch/sw_64/include/asm/percpu.h create mode 100644 arch/sw_64/include/asm/xchg.h diff --git a/arch/sw_64/include/asm/atomic.h b/arch/sw_64/include/asm/atomic.h new file mode 100644 index 000000000000..4a68da09722c --- /dev/null +++ b/arch/sw_64/include/asm/atomic.h @@ -0,0 +1,547 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ATOMIC_H +#define _ASM_SW64_ATOMIC_H + +#include +#include +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc... + * + * But use these as seldom as possible since they are much slower + * than regular operations. + */ + +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } + +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic64_read(v) READ_ONCE((v)->counter) + +#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) +#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + */ +#define arch_atomic64_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) + +#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) + + +#ifdef CONFIG_SUBARCH_C3B +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. 
+ */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " seleq %0, 1, $31, %0\n" + " wr_f %0\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %0, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + + + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " 
rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#else /* !CONFIG_SUBARCH_C3B */ + +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. 
+ */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " bne %0, 2f\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldw %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstw %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldl %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstl %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" 
(addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#endif /* CONFIG_SUBARCH_C3B */ + +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive + +#define ATOMIC_OPS(op) \ + ATOMIC_OP(op, op##w) \ + ATOMIC_OP_RETURN(op, op##w) \ + ATOMIC_FETCH_OP(op, op##w) \ + ATOMIC64_OP(op, op##l) \ + ATOMIC64_OP_RETURN(op, op##l) \ + ATOMIC64_FETCH_OP(op, op##l) \ + +ATOMIC_OPS(add) +ATOMIC_OPS(sub) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + + + + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, asm) \ + ATOMIC_OP(op, asm) \ + ATOMIC_FETCH_OP(op, asm) \ + ATOMIC64_OP(op, asm) \ + ATOMIC64_FETCH_OP(op, asm) \ + + +ATOMIC_OPS(and, and) +ATOMIC_OPS(andnot, bic) +ATOMIC_OPS(or, bis) +ATOMIC_OPS(xor, xor) + + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + + +#undef ATOMIC_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#define arch_atomic_andnot arch_atomic_andnot +#define arch_atomic64_andnot arch_atomic64_andnot + +#endif /* _ASM_SW64_ATOMIC_H */ diff --git a/arch/sw_64/include/asm/barrier.h b/arch/sw_64/include/asm/barrier.h new file mode 100644 index 000000000000..bff199126c9f --- /dev/null +++ b/arch/sw_64/include/asm/barrier.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BARRIER_H +#define _ASM_SW64_BARRIER_H + +#include + +#define mb() __asm__ __volatile__("memb" : : : "memory") + +#define rmb() __asm__ __volatile__("memb" : : : "memory") + +#if defined(CONFIG_SUBARCH_C3B) +#define wmb() __asm__ __volatile__("memb" : : : "memory") +#elif defined(CONFIG_SUBARCH_C4) +#define wmb() __asm__ __volatile__("wmemb" : : : "memory") +#endif + +#define imemb() __asm__ __volatile__("imemb" : : : "memory") + +#ifdef CONFIG_SMP +#define __ASM_SMP_MB "\tmemb\n" +#else +#define 
__ASM_SMP_MB +#endif + +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +#include + +#endif /* _ASM_SW64_BARRIER_H */ diff --git a/arch/sw_64/include/asm/bitops.h b/arch/sw_64/include/asm/bitops.h new file mode 100644 index 000000000000..b3cdabd95abf --- /dev/null +++ b/arch/sw_64/include/asm/bitops.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BITOPS_H +#define _ASM_SW64_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include + +#ifdef CONFIG_SUBARCH_C3B +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + * + * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). + */ + +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" + " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. 
+ "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" + " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " selne %3, 1, $31, %1\n" //Note: here is SELNE!!! + " wr_f %1\n" + " bic %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " beq %3, 2f\n" // %3 is zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %2, 1\n" + " wr_f %2\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +#else /* !CONFIG_SUBARCH_C3B */ +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bis %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bic %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " xor %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long 
temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " beq %2, 2f\n" // %2 is zero, no need to set, return + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + + +#endif /* CONFIG_SUBARCH_C3B */ + +/* + * WARNING: non atomic version. 
+ */ +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m |= 1 << (nr & 31); +} + +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +static inline void +clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + clear_bit(nr, addr); +} + +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +static inline void +__clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + arch___clear_bit(nr, addr); +} + +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m ^= 1 << (nr & 31); +} + +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old | mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old & ~mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old ^ mask; + return (old & mask) != 0; +} + +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + * + * Do a binary search on the bits. Due to the nature of large + * constants on the sw64, it is worthwhile to split the search. + */ +static inline unsigned long ffz_b(unsigned long x) +{ + unsigned long sum, x1, x2, x4; + + x = ~x & -~x; /* set first 0 bit, clear others */ + x1 = x & 0xAA; + x2 = x & 0xCC; + x4 = x & 0xF0; + sum = x2 ? 2 : 0; + sum += (x4 != 0) * 4; + sum += (x1 != 0); + + return sum; +} + +static inline unsigned long ffz(unsigned long word) +{ + return __kernel_cttz(~word); +} + +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ + return __kernel_cttz(word); +} + +#ifdef __KERNEL__ + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above __ffs. + */ + +static inline int ffs(int word) +{ + int result = __ffs(word) + 1; + + return word ? result : 0; +} + +/* + * fls: find last bit set. + */ +static inline int fls64(unsigned long word) +{ + return 64 - __kernel_ctlz(word); +} + +static inline unsigned long __fls(unsigned long x) +{ + return fls64(x) - 1; +} + +static inline int fls(int x) +{ + return fls64((unsigned int) x); +} + +/* + * hweightN: returns the hamming weight (i.e. 
the number + * of bits set) of a N-bit word + */ + +static inline unsigned long __arch_hweight64(unsigned long w) +{ + return __kernel_ctpop(w); +} + +static inline unsigned int __arch_hweight32(unsigned int w) +{ + return __arch_hweight64(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight64(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight64(w & 0xff); +} + +#include + +#endif /* __KERNEL__ */ + +#ifdef __KERNEL__ + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 100-bit bitmap. It's guaranteed that at least + * one of the 100 bits is cleared. + */ +static inline unsigned long +sched_find_first_bit(const unsigned long b[2]) +{ + unsigned long b0, b1, ofs, tmp; + + b0 = b[0]; + b1 = b[1]; + ofs = (b0 ? 0 : 64); + tmp = (b0 ? b0 : b1); + + return __ffs(tmp) + ofs; +} + +#include + +#include + +#include + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_BITOPS_H */ diff --git a/arch/sw_64/include/asm/cmpxchg.h b/arch/sw_64/include/asm/cmpxchg.h new file mode 100644 index 000000000000..9f51d035313d --- /dev/null +++ b/arch/sw_64/include/asm/cmpxchg.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CMPXCHG_H +#define _ASM_SW64_CMPXCHG_H + +/* + * Atomic exchange routines. + */ + +#define __ASM__MB +#define ____xchg(type, args...) __arch_xchg ## type ## _local(args) +#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) +#include + +#define arch_xchg_local(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg_local(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg64_local(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_local((ptr), (o), (n)); \ +}) + +#ifdef CONFIG_SMP +#undef __ASM__MB +#define __ASM__MB "\tmemb\n" +#endif +#undef ____xchg +#undef ____cmpxchg +#undef _ASM_SW64_XCHG_H +#define ____xchg(type, args...) __arch_xchg ##type(args) +#define ____cmpxchg(type, args...) __cmpxchg ##type(args) +#include + +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr)));\ +}) + +#define arch_cmpxchg64(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg((ptr), (o), (n)); \ +}) + +#undef __ASM__MB +#undef ____cmpxchg + +#define __HAVE_ARCH_CMPXCHG 1 + +#endif /* _ASM_SW64_CMPXCHG_H */ diff --git a/arch/sw_64/include/asm/percpu.h b/arch/sw_64/include/asm/percpu.h new file mode 100644 index 000000000000..3acdf36bcf55 --- /dev/null +++ b/arch/sw_64/include/asm/percpu.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PERCPU_H +#define _ASM_SW64_PERCPU_H + +/* + * To calculate addresses of locally defined variables, GCC uses + * 32-bit displacement from the GP. Which doesn't work for per cpu + * variables in modules, as an offset to the kernel per cpu area is + * way above 4G. + * + * Always use weak definitions for percpu variables in modules. 
+ */ +#if defined(MODULE) && defined(CONFIG_SMP) +#define ARCH_NEEDS_WEAK_PER_CPU +#endif + +#include + +#endif /* _ASM_SW64_PERCPU_H */ diff --git a/arch/sw_64/include/asm/xchg.h b/arch/sw_64/include/asm/xchg.h new file mode 100644 index 000000000000..38f067d5ed04 --- /dev/null +++ b/arch/sw_64/include/asm/xchg.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_XCHG_H +#define _ASM_SW64_XCHG_H + +#ifndef _ASM_SW64_CMPXCHG_H +#error Do not include xchg.h directly. Use cmpxchg.h +#endif +/* + * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code + * except that local version do not have the expensive memory barrier. + * So this file is included twice from asm/cmpxchg.h. + */ + +#if defined(CONFIG_SUBARCH_C3B) +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). + */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + + " andnot %4, 7, %3\n" + " inslb %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlb %2, %4, %0\n" + " masklb %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslh %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlh %2, %4, %0\n" + " masklh %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldl %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
+ */ + +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#elif defined(CONFIG_SUBARCH_C4) +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). 
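+ *
+ * Unlike the C3B variants above, these sequences do not use the
+ * wr_f/rd_f lock-flag handshake: success is judged directly from the
+ * register written back by lstw/lstl, and the retry path branches
+ * with lbr rather than br.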
+ */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslb %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " extlb %2, %4, %0\n" + " masklb %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslh %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " extlh %2, %4, %0\n" + " masklh %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " bis $31, %4, %1\n" + " lstw %1, 0(%3)\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldl %0, 0(%3)\n" + " bis $31, %4, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
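+ *
+ * Also note that these C4 sequences test the cmpeq result with a
+ * branch before the masked store, so a failed comparison skips the
+ * lstw/lstl entirely.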
+ */ +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#endif + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____xchg(, volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + return ____xchg(_u8, ptr, x); + case 2: + return ____xchg(_u16, ptr, x); + case 4: + return ____xchg(_u32, ptr, x); + case 8: + return ____xchg(_u64, ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). 
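+ * For instance, a cmpxchg() on a 3-byte object matches no case in the
+ * size switch of ____cmpxchg() below, so the call survives to link
+ * time and fails there instead of misbehaving silently at run time.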
+ */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long ____cmpxchg(, volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: + return ____cmpxchg(_u8, ptr, old, new); + case 2: + return ____cmpxchg(_u16, ptr, old, new); + case 4: + return ____cmpxchg(_u32, ptr, old, new); + case 8: + return ____cmpxchg(_u64, ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#endif /* _ASM_SW64_XCHG_H */ -- Gitee From d4e09476739bbd20bb0b946e7e7062f31b65afbe Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:11 +0800 Subject: [PATCH 287/953] anolis: sw64: add common headers ANBZ: #4688 Add some other common headers for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/asm-offsets.h | 1 + arch/sw_64/include/asm/asm-prototypes.h | 22 ++ arch/sw_64/include/asm/bug.h | 8 + arch/sw_64/include/asm/device.h | 13 ++ arch/sw_64/include/asm/hmcall.h | 236 ++++++++++++++++++++++ arch/sw_64/include/asm/hw_init.h | 167 +++++++++++++++ arch/sw_64/include/asm/idle.h | 7 + arch/sw_64/include/asm/insn.h | 97 +++++++++ arch/sw_64/include/asm/linkage.h | 9 + arch/sw_64/include/asm/word-at-a-time.h | 43 ++++ arch/sw_64/include/uapi/asm/bitsperlong.h | 9 + arch/sw_64/include/uapi/asm/byteorder.h | 7 + arch/sw_64/include/uapi/asm/compiler.h | 83 ++++++++ arch/sw_64/include/uapi/asm/errno.h | 128 ++++++++++++ arch/sw_64/include/uapi/asm/hmcall.h | 17 ++ arch/sw_64/include/uapi/asm/mman.h | 88 ++++++++ arch/sw_64/include/uapi/asm/param.h | 9 + arch/sw_64/include/uapi/asm/setup.h | 7 + 18 files changed, 951 insertions(+) create mode 100644 arch/sw_64/include/asm/asm-offsets.h create mode 100644 arch/sw_64/include/asm/asm-prototypes.h create mode 100644 arch/sw_64/include/asm/bug.h create mode 100644 arch/sw_64/include/asm/device.h create mode 100644 arch/sw_64/include/asm/hmcall.h create mode 100644 arch/sw_64/include/asm/hw_init.h create mode 100644 arch/sw_64/include/asm/idle.h create mode 100644 arch/sw_64/include/asm/insn.h create mode 100644 arch/sw_64/include/asm/linkage.h create mode 100644 arch/sw_64/include/asm/word-at-a-time.h create mode 100644 arch/sw_64/include/uapi/asm/bitsperlong.h create mode 100644 arch/sw_64/include/uapi/asm/byteorder.h create mode 100644 arch/sw_64/include/uapi/asm/compiler.h create mode 100644 arch/sw_64/include/uapi/asm/errno.h create mode 100644 arch/sw_64/include/uapi/asm/hmcall.h create mode 100644 arch/sw_64/include/uapi/asm/mman.h create mode 100644 arch/sw_64/include/uapi/asm/param.h create mode 100644 arch/sw_64/include/uapi/asm/setup.h diff --git a/arch/sw_64/include/asm/asm-offsets.h b/arch/sw_64/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/sw_64/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/sw_64/include/asm/asm-prototypes.h b/arch/sw_64/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..67746d6bffb7 --- /dev/null +++ b/arch/sw_64/include/asm/asm-prototypes.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ASM_PROTOTYPES_H +#define _ASM_SW64_ASM_PROTOTYPES_H + +#include +#include +#include +#include +#include + +#include + +extern void __divl(void); +extern void __reml(void); +extern void __divw(void); +extern void __remw(void); +extern void __divlu(void); 
+extern void __remlu(void); +extern void __divwu(void); +extern void __remwu(void); + +#endif /* _ASM_SW64_ASM_PROTOTYPES_H */ diff --git a/arch/sw_64/include/asm/bug.h b/arch/sw_64/include/asm/bug.h new file mode 100644 index 000000000000..4a179f236ccf --- /dev/null +++ b/arch/sw_64/include/asm/bug.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BUG_H +#define _ASM_SW64_BUG_H + +#include +#include + +#endif /* _ASM_SW64_BUG_H */ diff --git a/arch/sw_64/include/asm/device.h b/arch/sw_64/include/asm/device.h new file mode 100644 index 000000000000..d999207e07d1 --- /dev/null +++ b/arch/sw_64/include/asm/device.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DEVICE_H +#define _ASM_SW64_DEVICE_H + +struct dev_archdata { +#if defined(CONFIG_SUNWAY_IOMMU) || defined(CONFIG_SUNWAY_IOMMU_V2) + void *iommu; +#endif +}; + +struct pdev_archdata { +}; +#endif /* _ASM_SW64_DEVICE_H */ diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h new file mode 100644 index 000000000000..e3bac3016740 --- /dev/null +++ b/arch/sw_64/include/asm/hmcall.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HMCALL_H +#define _ASM_SW64_HMCALL_H + +/* + * Common HMC-code + */ +/* 0x0 - 0x3F : Kernel Level HMC routine */ +#define HMC_halt 0x00 +#define HMC_rdio64 0x01 +#define HMC_rdio32 0x02 +#define HMC_cpuid 0x03 +#define HMC_sleepen 0x05 +#define HMC_rdksp 0x06 +#define HMC_wrasid 0x08 +#define HMC_rdktp 0x09 +#define HMC_wrktp 0x0A +#define HMC_rdptbr 0x0B +#define HMC_wrptbr 0x0C +#define HMC_rdhtctl 0x0D +#define HMC_wrksp 0x0E +#define HMC_mtinten 0x0F +#define HMC_load_mm 0x11 +#define HMC_tbisasid 0x14 +#define HMC_tbivpn 0x19 +#define HMC_ret 0x1A +#define HMC_wrvpcr 0x29 +#define HMC_wrfen 0x2B +#define HMC_sflush 0x2F +#define HMC_entervm 0x31 +#define HMC_hcall 0x32 +#define HMC_tbi 0x33 +#define HMC_wrent 0x34 +#define HMC_swpipl 0x35 +#define HMC_rdps 0x36 +#define HMC_wrkgp 0x37 +#define HMC_wrusp 0x38 +#define HMC_rvpcr 0x39 +#define HMC_rdusp 0x3A +#define HMC_wrtimer 0x3B +#define HMC_whami 0x3C +#define HMC_retsys 0x3D +#define HMC_sendii 0x3E +#define HMC_rti 0x3F + + +/* 0x80 - 0xBF : User Level HMC routine */ +#include + +/* Following will be deprecated from user level invocation */ +#define HMC_rwreg 0x87 +#define HMC_sz_uflush 0xA8 +#define HMC_longtime 0xB1 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#include +extern void __init fixup_hmcall(void); + +extern void halt(void) __noreturn; + +#define __CALL_HMC_VOID(NAME) \ +static inline void NAME(void) \ +{ \ + __asm__ __volatile__( \ + "sys_call %0 " : : "i" (HMC_ ## NAME)); \ +} + +#define __CALL_HMC_R0(NAME, TYPE) \ +static inline TYPE NAME(void) \ +{ \ + register TYPE __r0 __asm__("$0"); \ + __asm__ __volatile__( \ + "sys_call %1 # " #NAME \ + : "=r" (__r0) \ + : "i" (HMC_ ## NAME) \ + : "$1", "$16", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_W1(NAME, TYPE0) \ +static inline void NAME(TYPE0 arg0) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %1 # "#NAME \ + : "=r"(__r16) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_W2(NAME, TYPE0, TYPE1) \ +static inline void NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r17) \ + : "i"(HMC_ ## NAME), 
"0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_RW1(NAME, RTYPE, TYPE0) \ +static inline RTYPE NAME(TYPE0 arg0) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_RW2(NAME, RTYPE, TYPE0, TYPE1) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %3 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_RW3(NAME, RTYPE, TYPE0, TYPE1, TYPE2) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1, TYPE2 arg2) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + register TYPE2 __r18 __asm__("$18") = arg2; \ + __asm__ __volatile__( \ + "sys_call %4 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17), "2"(__r18) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + + +__CALL_HMC_VOID(imb); +__CALL_HMC_VOID(sflush); +__CALL_HMC_VOID(wrfen); +#define fpu_enable() wrfen() + +__CALL_HMC_VOID(sleepen); +__CALL_HMC_VOID(mtinten); + +__CALL_HMC_VOID(rdktp); +#define restore_ktp() rdktp() +__CALL_HMC_VOID(wrktp); +#define save_ktp() wrktp() + +__CALL_HMC_R0(rdps, unsigned long); + +__CALL_HMC_R0(rdusp, unsigned long); +__CALL_HMC_W1(wrusp, unsigned long); + +__CALL_HMC_R0(rdksp, unsigned long); +__CALL_HMC_W1(wrksp, unsigned long); +__CALL_HMC_R0(rdhtctl, unsigned long); + +/* + * Load a mm context. This is needed when we change the page + * table pointer(CSR:PTBR) or when we update the ASID. + * load_mm(asid, ptbr) + * + */ +__CALL_HMC_W2(load_mm, unsigned long, unsigned long); + +__CALL_HMC_W1(wrasid, unsigned long); +__CALL_HMC_R0(rdptbr, unsigned long); +__CALL_HMC_W1(wrptbr, unsigned long); + +__CALL_HMC_RW1(swpipl, unsigned long, unsigned long); +__CALL_HMC_R0(whami, unsigned long); +__CALL_HMC_RW1(rdio64, unsigned long, unsigned long); +__CALL_HMC_RW1(rdio32, unsigned int, unsigned long); +__CALL_HMC_W2(wrent, void*, unsigned long); +__CALL_HMC_W2(tbisasid, unsigned long, unsigned long); +__CALL_HMC_W1(wrkgp, unsigned long); +__CALL_HMC_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW3(sendii, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_W1(wrtimer, unsigned long); +__CALL_HMC_RW3(tbivpn, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW2(cpuid, unsigned long, unsigned long, unsigned long); + +__CALL_HMC_W1(wrtp, unsigned long); +/* + * TB routines.. + */ +#define __tbi(nr, arg, arg1...) 
\ +({ \ + register unsigned long __r16 __asm__("$16") = (nr); \ + register unsigned long __r17 __asm__("$17"); arg; \ + __asm__ __volatile__( \ + "sys_call %3 #__tbi" \ + : "=r" (__r16), "=r" (__r17) \ + : "0" (__r16), "i" (HMC_tbi), ##arg1 \ + : "$0", "$1", "$22", "$23", "$24", "$25"); \ +}) + +#define tbi(x, y) __tbi(x, __r17 = (y), "1" (__r17)) + +/* Invalidate all TLB, only used by hypervisor */ +#define tbia() __tbi(-2, /* no second argument */) + +/* Invalidate TLB for all processes with currnet VPN */ +#define tbivp() __tbi(-1, /* no second argument */) + +/* Invalidate all TLB with current VPN */ +#define tbiv() __tbi(0, /* no second argument */) + +/* Invalidate ITLB of addr with current UPN and VPN */ +#define tbisi(addr) __tbi(1, __r17 = (addr), "1" (__r17)) + +/* Invalidate DTLB of addr with current UPN and VPN */ +#define tbisd(addr) __tbi(2, __r17 = (addr), "1" (__r17)) + +/* Invalidate TLB of addr with current UPN and VPN */ +#define tbis(addr) __tbi(3, __r17 = (addr), "1" (__r17)) + +/* Invalidate all user TLB with current UPN and VPN */ +#define tbiu() __tbi(4, /* no second argument */) + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_HMCALL_H */ diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h new file mode 100644 index 000000000000..2078c66d1c4f --- /dev/null +++ b/arch/sw_64/include/asm/hw_init.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_INIT_H +#define _ASM_SW64_HW_INIT_H +#include +#include + +#include + +#define MMSIZE __va(0x2040) + +/* + * Descriptor for a cache + */ +struct cache_desc { + unsigned int size; /* Bytes per way */ + unsigned int sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char flags; /* Flags describing cache properties */ +}; + +struct cpuinfo_sw64 { + unsigned long last_asid; + unsigned long last_vpn; + unsigned long ipi_count; + struct cache_desc icache; /* Primary I-cache */ + struct cache_desc dcache; /* Primary D or combined I/D cache */ + struct cache_desc scache; /* Secondary cache */ + struct cache_desc tcache; /* Tertiary/split secondary cache */ +} __aligned(SMP_CACHE_BYTES); + +struct cpu_desc_t { + __u8 model; + __u8 family; + __u8 chip_var; + __u8 arch_var; + __u8 arch_rev; + __u8 pa_bits; + __u8 va_bits; + char vendor_id[16]; + char model_id[64]; + unsigned long frequency; +} __randomize_layout; + +#define MAX_NUMSOCKETS 8 +struct socket_desc_t { + bool is_online; /* 1 for online, 0 for offline */ + int numcores; + unsigned long socket_mem; +}; + +enum memmap_types { + memmap_reserved, + memmap_pci, + memmap_initrd, + memmap_kvm, + memmap_crashkernel, + memmap_acpi, + memmap_use, + memmap_protected, +}; + +#define MAX_NUMMEMMAPS 64 +struct memmap_entry { + u64 addr; /* start of memory segment */ + u64 size; /* size of memory segment */ + enum memmap_types type; +}; + +extern struct cpuinfo_sw64 cpu_data[NR_CPUS]; +extern void store_cpu_data(int cpu); + +extern struct cpu_desc_t cpu_desc; +extern struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; +extern int memmap_nr; +extern struct memmap_entry memmap_map[MAX_NUMMEMMAPS]; +extern cpumask_t cpu_offline; +extern bool memblock_initialized; + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type); +void __init process_memmap(void); + +static inline unsigned long get_cpu_freq(void) +{ + return cpu_desc.frequency; +} + +static inline void update_cpu_freq(unsigned long khz) +{ + 
cpu_desc.frequency = khz * 1000; +} + +#define EMUL_FLAG (0x1UL << 63) +#define MMSIZE_MASK (EMUL_FLAG - 1) + +DECLARE_STATIC_KEY_TRUE(run_mode_host_key); +DECLARE_STATIC_KEY_FALSE(run_mode_guest_key); +DECLARE_STATIC_KEY_FALSE(run_mode_emul_key); + +#define is_in_host() static_branch_likely(&run_mode_host_key) +#define is_in_guest() static_branch_unlikely(&run_mode_guest_key) +#define is_in_emul() static_branch_unlikely(&run_mode_emul_key) +#define is_guest_or_emul() !static_branch_likely(&run_mode_host_key) + +#define CPU_SW3231 0x31 +#define CPU_SW831 0x32 +#define CPU_SW8A 0x41 + +#define GET_TABLE_ENTRY 1 +#define GET_VENDOR_ID 2 +#define GET_MODEL 3 +#define GET_CPU_FREQ 4 +#define GET_CACHE_INFO 5 + +#define TABLE_ENTRY_MAX 32 +#define VENDOR_ID_MAX 2 +#define MODEL_MAX 8 +#define CACHE_INFO_MAX 4 + +#define L1_ICACHE 0 +#define L1_DCACHE 1 +#define L2_CACHE 2 +#define L3_CACHE 3 + +#define CPUID_ARCH_REV_MASK 0xf +#define CPUID_ARCH_REV(val) ((val) & CPUID_ARCH_REV_MASK) +#define CPUID_ARCH_VAR_SHIFT 4 +#define CPUID_ARCH_VAR_MASK (0xf << CPUID_ARCH_VAR_SHIFT) +#define CPUID_ARCH_VAR(val) \ + (((val) & CPUID_ARCH_VAR_MASK) >> CPUID_ARCH_VAR_SHIFT) +#define CPUID_CHIP_VAR_SHIFT 8 +#define CPUID_CHIP_VAR_MASK (0xf << CPUID_CHIP_VAR_SHIFT) +#define CPUID_CHIP_VAR(val) \ + (((val) & CPUID_CHIP_VAR_MASK) >> CPUID_CHIP_VAR_SHIFT) +#define CPUID_FAMILY_SHIFT 12 +#define CPUID_FAMILY_MASK (0xf << CPUID_FAMILY_SHIFT) +#define CPUID_FAMILY(val) \ + (((val) & CPUID_FAMILY_MASK) >> CPUID_FAMILY_SHIFT) +#define CPUID_MODEL_SHIFT 24 +#define CPUID_MODEL_MASK (0xff << CPUID_MODEL_SHIFT) +#define CPUID_MODEL(val) \ + (((val) & CPUID_MODEL_MASK) >> CPUID_MODEL_SHIFT) +#define CPUID_PA_BITS_SHIFT 32 +#define CPUID_PA_BITS_MASK (0x7fUL << CPUID_PA_BITS_SHIFT) +#define CPUID_PA_BITS(val) \ + (((val) & CPUID_PA_BITS_MASK) >> CPUID_PA_BITS_SHIFT) +#define CPUID_VA_BITS_SHIFT 39 +#define CPUID_VA_BITS_MASK (0x7fUL << CPUID_VA_BITS_SHIFT) +#define CPUID_VA_BITS(val) \ + (((val) & CPUID_VA_BITS_MASK) >> CPUID_VA_BITS_SHIFT) + + +#define CACHE_SIZE_SHIFT 0 +#define CACHE_SIZE_MASK (0xffffffffUL << CACHE_SIZE_SHIFT) +#define CACHE_SIZE(val) \ + (((val) & CACHE_SIZE_MASK) >> CACHE_SIZE_SHIFT) +#define CACHE_LINE_BITS_SHIFT 32 +#define CACHE_LINE_BITS_MASK (0xfUL << CACHE_LINE_BITS_SHIFT) +#define CACHE_LINE_BITS(val) \ + (((val) & CACHE_LINE_BITS_MASK) >> CACHE_LINE_BITS_SHIFT) +#define CACHE_INDEX_BITS_SHIFT 36 +#define CACHE_INDEX_BITS_MASK (0x3fUL << CACHE_INDEX_BITS_SHIFT) +#define CACHE_INDEX_BITS(val) \ + (((val) & CACHE_INDEX_BITS_MASK) >> CACHE_INDEX_BITS_SHIFT) +#define current_cpu_data cpu_data[smp_processor_id()] + +#endif /* _ASM_SW64_HW_INIT_H */ diff --git a/arch/sw_64/include/asm/idle.h b/arch/sw_64/include/asm/idle.h new file mode 100644 index 000000000000..95e145f25306 --- /dev/null +++ b/arch/sw_64/include/asm/idle.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IDLE_H +#define _ASM_SW64_IDLE_H + +extern void arch_cpu_idle(void); + +#endif /* _ASM_SW64_IDLE_H */ diff --git a/arch/sw_64/include/asm/insn.h b/arch/sw_64/include/asm/insn.h new file mode 100644 index 000000000000..437cb48d1e93 --- /dev/null +++ b/arch/sw_64/include/asm/insn.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_INSN_H +#define _ASM_SW64_INSN_H +#include + +/* Register numbers */ +enum { + R26 = 26, + R27, + R28, + R31 = 31, +}; + +#define BR_MAX_DISP 0xfffff +/* SW64 instructions are always 32 bits. */ +#define SW64_INSN_SIZE 4 + +#define ___SW64_RA(a) (((a) & 0x1f) << 21) +#define ___SW64_RB(b) (((b) & 0x1f) << 16) +#define ___SW64_SIMP_RC(c) (((c) & 0x1f)) +#define ___SW64_ST_DISP(disp) (((disp) & 0xffff)) +#define ___SW64_SYSCALL_FUNC(func) ((func) & 0xff) +#define ___SW64_BR_DISP(disp) (((disp) & 0x1fffff)) + + +#define SW64_INSN_BIS 0x40000740 +#define SW64_INSN_CALL 0x04000000 +#define SW64_INSN_SYS_CALL 0x02000000 +#define SW64_INSN_BR 0x10000000 + +#define SW64_NOP (0x43ff075f) +#define SW64_BIS(a, b, c) (SW64_INSN_BIS | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_SIMP_RC(c)) +#define SW64_CALL(a, b, disp) (SW64_INSN_CALL | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_ST_DISP(disp)) +#define SW64_SYS_CALL(func) (SW64_INSN_SYS_CALL | ___SW64_SYSCALL_FUNC(func)) +#define SW64_BR(a, disp) (SW64_INSN_BR | ___SW64_RA(a) | ___SW64_BR_DISP(disp)) + +extern int sw64_insn_read(void *addr, u32 *insnp); +extern int sw64_insn_write(void *addr, u32 insn); +extern int sw64_insn_double_write(void *addr, u64 insn); +extern unsigned int sw64_insn_nop(void); +extern unsigned int sw64_insn_call(unsigned int ra, unsigned int rb); +extern unsigned int sw64_insn_sys_call(unsigned int num); +extern unsigned int sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc); + +#define SW64_OPCODE_RA(opcode) ((opcode >> 21) & 0x1f) + +#define SW64_INSN(name, opcode, mask) \ +static inline bool sw64_insn_is_##name(u32 insn) \ +{ \ + return (insn & mask) == opcode; \ +} + +SW64_INSN(sys_call_b, 0x00000000, 0xfc000000); +SW64_INSN(sys_call, 0x00000001, 0xfc000000); +SW64_INSN(call, 0x04000000, 0xfc000000); +SW64_INSN(ret, 0x08000000, 0xfc000000); +SW64_INSN(jmp, 0x0c000000, 0xfc000000); +SW64_INSN(br, 0x10000000, 0xfc000000); +SW64_INSN(bsr, 0x14000000, 0xfc000000); +SW64_INSN(memb, 0x18000000, 0xfc00ffff); +SW64_INSN(imemb, 0x18000001, 0xfc00ffff); +SW64_INSN(rtc, 0x18000020, 0xfc00ffff); +SW64_INSN(halt, 0x18000080, 0xfc00ffff); +SW64_INSN(rd_f, 0x18001000, 0xfc00ffff); +SW64_INSN(beq, 0xc0000000, 0xfc000000); +SW64_INSN(bne, 0xc4000000, 0xfc000000); +SW64_INSN(blt, 0xc8000000, 0xfc000000); +SW64_INSN(ble, 0xcc000000, 0xfc000000); +SW64_INSN(bgt, 0xd0000000, 0xfc000000); +SW64_INSN(bge, 0xd4000000, 0xfc000000); +SW64_INSN(blbc, 0xd8000000, 0xfc000000); +SW64_INSN(blbs, 0xdc000000, 0xfc000000); +SW64_INSN(fbeq, 0xe0000000, 0xfc000000); +SW64_INSN(fbne, 0xe4000000, 0xfc000000); +SW64_INSN(fblt, 0xe8000000, 0xfc000000); +SW64_INSN(fble, 0xec000000, 0xfc000000); +SW64_INSN(fbgt, 0xf0000000, 0xfc000000); +SW64_INSN(fbge, 0xf4000000, 0xfc000000); +SW64_INSN(lldw, 0x20000000, 0xfc00f000); +SW64_INSN(lldl, 0x20001000, 0xfc00f000); + +#endif /* _ASM_SW64_INSN_H */ diff --git a/arch/sw_64/include/asm/linkage.h b/arch/sw_64/include/asm/linkage.h new file mode 100644 index 000000000000..85b279f6211e --- /dev/null +++ b/arch/sw_64/include/asm/linkage.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_LINKAGE_H 
+#define _ASM_SW64_LINKAGE_H + +#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall") +#define SYSCALL_ALIAS(alias, name) \ + asm (#alias " = " #name "\n\t.globl " #alias) + +#endif /* _ASM_SW64_LINKAGE_H */ diff --git a/arch/sw_64/include/asm/word-at-a-time.h b/arch/sw_64/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..623efbec4429 --- /dev/null +++ b/arch/sw_64/include/asm/word-at-a-time.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_WORD_AT_A_TIME_H +#define _ASM_SW64_WORD_AT_A_TIME_H + +#include + +/* + * word-at-a-time interface for SW64. + */ + +/* + * We do not use the word_at_a_time struct on SW64, but it needs to be + * implemented to humour the generic code. + */ +struct word_at_a_time { + const unsigned long unused; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 0 } + +/* Return nonzero if val has a zero */ +static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long zero_locations = __kernel_cmpgeb(0, val); + *bits = zero_locations; + return zero_locations; +} + +static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +#define create_zero_mask(bits) (bits) + +static inline unsigned long find_zero(unsigned long bits) +{ + return __kernel_cttz(bits); +} + +#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) + +#endif /* _ASM_SW64_WORD_AT_A_TIME_H */ diff --git a/arch/sw_64/include/uapi/asm/bitsperlong.h b/arch/sw_64/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000000..712c823e23d8 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BITSPERLONG_H +#define _UAPI_ASM_SW64_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* _UAPI_ASM_SW64_BITSPERLONG_H */ diff --git a/arch/sw_64/include/uapi/asm/byteorder.h b/arch/sw_64/include/uapi/asm/byteorder.h new file mode 100644 index 000000000000..ededdd045e96 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/byteorder.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BYTEORDER_H +#define _UAPI_ASM_SW64_BYTEORDER_H + +#include + +#endif /* _UAPI_ASM_SW64_BYTEORDER_H */ diff --git a/arch/sw_64/include/uapi/asm/compiler.h b/arch/sw_64/include/uapi/asm/compiler.h new file mode 100644 index 000000000000..64786df0f266 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/compiler.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_COMPILER_H +#define _UAPI_ASM_SW64_COMPILER_H + +/* + * Herein are macros we use when describing various patterns we want to GCC. + * In all cases we can get better schedules out of the compiler if we hide + * as little as possible inside inline assembly. However, we want to be + * able to know what we'll get out before giving up inline assembly. Thus + * these tests and macros. 
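+ *
+ * For example, __kernel_ctpop() below wraps a single "ctpop"
+ * (population count) instruction, so
+ *
+ *	unsigned long bits = __kernel_ctpop(0xf0UL);
+ *
+ * yields bits == 4 while leaving the surrounding C code visible to
+ * the compiler's scheduler.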
+ */ + +#define __kernel_inslb(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inslb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_inslh(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inslh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_insll(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("insll %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_inshw(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inshw %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_extlb(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("extlb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_extlh(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("extlh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_cmpgeb(a, b) \ +({ \ + unsigned long __kir; \ + __asm__("cmpgeb %r2, %1, %0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ + __kir; \ +}) + +#define __kernel_cttz(x) \ +({ \ + unsigned long __kir; \ + __asm__("cttz %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#define __kernel_ctlz(x) \ +({ \ + unsigned long __kir; \ + __asm__("ctlz %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#define __kernel_ctpop(x) \ +({ \ + unsigned long __kir; \ + __asm__("ctpop %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#endif /* _UAPI_ASM_SW64_COMPILER_H */ diff --git a/arch/sw_64/include/uapi/asm/errno.h b/arch/sw_64/include/uapi/asm/errno.h new file mode 100644 index 000000000000..969ee99ee86c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/errno.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_ERRNO_H +#define _UAPI_ASM_SW64_ERRNO_H + +#include + +#undef EAGAIN /* 11 in errno-base.h */ + +#define EDEADLK 11 /* Resource deadlock would occur */ + +#define EAGAIN 35 /* Try again */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ 
+#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale NFS file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. */ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define 
ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _UAPI_ASM_SW64_ERRNO_H */ diff --git a/arch/sw_64/include/uapi/asm/hmcall.h b/arch/sw_64/include/uapi/asm/hmcall.h new file mode 100644 index 000000000000..6867fb7b4d24 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/hmcall.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_HMCALL_H +#define _UAPI_ASM_SW64_HMCALL_H + +/* hmcall may be used in user mode */ + +#define HMC_bpt 0x80 +#define HMC_callsys 0x83 +#define HMC_imb 0x86 +#define HMC_rdtp 0x9E +#define HMC_wrtp 0x9F +#define HMC_rdunique HMC_rdtp +#define HMC_wrunique HMC_wrtp +#define HMC_gentrap 0xAA +#define HMC_wrperfmon 0xB0 + +#endif /* _UAPI_ASM_SW64_HMCALL_H */ diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 000000000000..15cb7bfee3b1 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_MMAN_H +#define _UAPI_ASM_SW64_MMAN_H + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x100 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x10 /* don't use a file */ + +/* not used by linux, may be deprecated */ +#define _MAP_HASSEMAPHORE 0x0200 +#define _MAP_INHERIT 0x0400 +#define _MAP_UNALIGNED 0x0800 + +/* These are linux-specific */ +#define MAP_GROWSDOWN 0x01000 /* stack-like segment */ +#define MAP_DENYWRITE 0x02000 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x04000 /* mark it as an executable */ +#define MAP_LOCKED 0x08000 /* lock the mapping */ +#define MAP_NORESERVE 0x10000 /* don't check for reservations */ +#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x40000 /* do not block on IO */ +#define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x100000 /* create a huge page mapping */ +#define MAP_FIXED_NOREPLACE 0x200000 /* MAP_FIXED which doesn't unmap underlying mapping */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_SYNC 2 /* synchronous memory sync */ +#define MS_INVALIDATE 4 /* invalidate the caches */ + +#define MCL_CURRENT 8192 /* lock all currently mapped pages */ +#define MCL_FUTURE 16384 /* lock all additions to address space */ +#define MCL_ONFAULT 32768 /* lock all pages that are faulted in */ + +#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */ + +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_SPACEAVAIL 5 /* ensure resources are available */ +#define MADV_DONTNEED 6 /* don't need these pages */ + +/* common/generic parameters */ +#define MADV_FREE 8 /* free pages only if memory pressure */ +#define MADV_REMOVE 9 /* remove these pages 
& resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ + +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + +#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, + overrides the coredump filter bits */ +#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ + +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + +#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ +#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ + +#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ + +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + +/* compatibility flags */ +#define MAP_FILE 0 + + +#define PKEY_DISABLE_ACCESS 0x1 +#define PKEY_DISABLE_WRITE 0x2 +#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE) + +#endif /* _UAPI_ASM_SW64_MMAN_H */ diff --git a/arch/sw_64/include/uapi/asm/param.h b/arch/sw_64/include/uapi/asm/param.h new file mode 100644 index 000000000000..d38e8202dd97 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/param.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_PARAM_H +#define _UAPI_ASM_SW64_PARAM_H + +#define EXEC_PAGESIZE 8192 + +#include + +#endif /* _UAPI_ASM_SW64_PARAM_H */ diff --git a/arch/sw_64/include/uapi/asm/setup.h b/arch/sw_64/include/uapi/asm/setup.h new file mode 100644 index 000000000000..e6cca4525049 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/setup.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SETUP_H +#define _UAPI_ASM_SW64_SETUP_H + +#define COMMAND_LINE_SIZE 2048 + +#endif /* _UAPI_ASM_SW64_SETUP_H */ -- Gitee From 89334d3254da7cbdc8d3a89bf147b446c5387d43 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 17:14:53 +0800 Subject: [PATCH 288/953] anolis: sw64: add ELF support ANBZ: #4688 Add ELF-related definition for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/elf.h | 152 +++++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/auxvec.h | 11 ++ 2 files changed, 163 insertions(+) create mode 100644 arch/sw_64/include/asm/elf.h create mode 100644 arch/sw_64/include/uapi/asm/auxvec.h diff --git a/arch/sw_64/include/asm/elf.h b/arch/sw_64/include/asm/elf.h new file mode 100644 index 000000000000..95ba89a1aa9d --- /dev/null +++ b/arch/sw_64/include/asm/elf.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ELF_H +#define _ASM_SW64_ELF_H +#ifdef __KERNEL__ +#include +#endif +/* Special values for the st_other field in the symbol table. 
*/ + + +#define STO_SW64_NOPV 0x80 +#define STO_SW64_STD_GPLOAD 0x88 + +/* + * SW-64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +#define SHF_SW64_GPREL 0x10000000 + +/* Legal values for e_flags field of Elf64_Ehdr. */ + +#define EF_SW64_32BIT 1 /* All addresses are below 2GB */ + +/* + * ELF register definitions. + * + * For now, we just leave it at 33 (32 general regs + processor status word). + */ +#define ELF_NGREG 33 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* Same with user_fpsimd_state */ +#include +typedef struct user_fpsimd_state elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_SW64) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_SW64 + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ + +#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) + +/* + * $0 is set by ld.so to a pointer to a function which might be + * registered using atexit. This provides a mean for the dynamic + * linker to call DT_FINI functions for shared libraries that have + * been loaded before the code runs. + + * So that we can use the same startup file with static executables, + * we start programs with a value of 0 to indicate that there is no + * such function. + */ + +#define ELF_PLAT_INIT(_r, load_addr) (_r->regs[0] = 0) + +/* + * The registers are laid out in pt_regs for HMCODE and syscall + * convenience. Re-order them for the linear elf_gregset_t. 
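+ *
+ * The reordering itself is done by sw64_elf_core_copy_regs(), which
+ * ELF_CORE_COPY_REGS() below expands to when a core dump is written.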
+ */ + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +#ifdef __KERNEL__ +struct pt_regs; +struct task_struct; +extern void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *pt); +#define ELF_CORE_COPY_REGS(DEST, REGS) sw64_elf_core_copy_regs(DEST, REGS); + +/* + * This yields a mask that user programs can use to figure out what + * instruction set this CPU supports. + */ + +#define ELF_HWCAP 0 + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + */ + +#define ELF_PLATFORM ("sw_64") + + +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (elf_addr_t)current->mm->context.vdso); \ +} while (0) + +struct mm_struct; +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk +#endif + +#endif /* _ASM_SW64_ELF_H */ diff --git a/arch/sw_64/include/uapi/asm/auxvec.h b/arch/sw_64/include/uapi/asm/auxvec.h new file mode 100644 index 000000000000..309a8294be7a --- /dev/null +++ b/arch/sw_64/include/uapi/asm/auxvec.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_AUXVEC_H +#define _UAPI_ASM_SW64_AUXVEC_H + +/* VDSO location. */ +#define AT_SYSINFO_EHDR 33 + +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 1 + +#endif /* _UAPI_ASM_SW64_AUXVEC_H */ -- Gitee From d9267186ca4ed55fdb45c8fa50845fadd5025b77 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:13 +0800 Subject: [PATCH 289/953] anolis: sw64: add some other headers ANBZ: #4688 Add some other uncommon headers for basic SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/ast2400.h | 168 +++++++++++++++++++++++++ arch/sw_64/include/asm/socket.h | 11 ++ arch/sw_64/include/uapi/asm/fcntl.h | 58 +++++++++ arch/sw_64/include/uapi/asm/ioctl.h | 19 +++ arch/sw_64/include/uapi/asm/ioctls.h | 128 +++++++++++++++++++ arch/sw_64/include/uapi/asm/resource.h | 16 +++ arch/sw_64/include/uapi/asm/socket.h | 161 ++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/sockios.h | 17 +++ arch/sw_64/include/uapi/asm/stat.h | 50 ++++++++ arch/sw_64/include/uapi/asm/termbits.h | 167 ++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/termios.h | 70 +++++++++++ 11 files changed, 865 insertions(+) create mode 100644 arch/sw_64/include/asm/ast2400.h create mode 100644 arch/sw_64/include/asm/socket.h create mode 100644 arch/sw_64/include/uapi/asm/fcntl.h create mode 100644 arch/sw_64/include/uapi/asm/ioctl.h create mode 100644 arch/sw_64/include/uapi/asm/ioctls.h create mode 100644 arch/sw_64/include/uapi/asm/resource.h create mode 100644 arch/sw_64/include/uapi/asm/socket.h create mode 100644 arch/sw_64/include/uapi/asm/sockios.h create mode 100644 arch/sw_64/include/uapi/asm/stat.h create mode 100644 arch/sw_64/include/uapi/asm/termbits.h create mode 100644 arch/sw_64/include/uapi/asm/termios.h diff --git a/arch/sw_64/include/asm/ast2400.h b/arch/sw_64/include/asm/ast2400.h new file mode 100644 index 000000000000..5f4cc84ff3a8 --- /dev/null +++ b/arch/sw_64/include/asm/ast2400.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015 Weiqiang Su + * + * Both AST2400D and AST2400F package variants are supported. + */ + +#ifndef _ASM_SW64_AST2400_H +#define _ASM_SW64_AST2400_H + +#include + +/* Logical Device Numbers (LDN). */ +#define AST2400_FDC 0x00 /* Floppy */ +#define AST2400_PP 0x01 /* Parallel port */ +#define AST2400_SP1 0x02 /* Com1 */ +#define AST2400_SP2 0x03 /* Com2 & IR */ +#define AST2400_KBC 0x05 /* PS/2 keyboard and mouse */ +#define AST2400_CIR 0x06 +#define AST2400_GPIO6789_V 0x07 +#define AST2400_WDT1_GPIO01A_V 0x08 +#define AST2400_GPIO1234567_V 0x09 +#define AST2400_ACPI 0x0A +#define AST2400_HWM_FPLED 0x0B /* Hardware monitor & front LED */ +#define AST2400_VID 0x0D +#define AST2400_CIRWKUP 0x0E /* CIR wakeup */ +#define AST2400_GPIO_PP_OD 0x0F /* GPIO Push-Pull/Open drain select */ +#define AST2400_SVID 0x14 +#define AST2400_DSLP 0x16 /* Deep sleep */ +#define AST2400_GPIOA_LDN 0x17 + +/* virtual LDN for GPIO and WDT */ +#define AST2400_WDT1 ((0 << 8) | AST2400_WDT1_GPIO01A_V) + +#define AST2400_GPIOBASE ((0 << 8) | AST2400_WDT1_GPIO01A_V) //? 
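+
+/*
+ * Each virtual LDN above and below packs (bank index << 8) | real LDN,
+ * so several GPIO banks can share one hardware logical device.
+ */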
+ +#define AST2400_GPIO0 ((1 << 8) | AST2400_WDT1_GPIO01A_V) +#define AST2400_GPIO1 ((1 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO2 ((2 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO3 ((3 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO4 ((4 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO5 ((5 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO6 ((6 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO7 ((7 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO8 ((0 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIO9 ((1 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIOA ((2 << 8) | AST2400_WDT1_GPIO01A_V) + +#define SUPERIO_PNP_PORT 0x2E +#define SUPERIO_CHIPID 0xC333 + +struct device_operations; +typedef struct pnp_device { + unsigned int port; + unsigned int device; + + struct device_operations *ops; +} *device_t; + +struct pnp_mode_ops { + void (*enter_conf_mode)(device_t dev); + void (*exit_conf_mode)(device_t dev); +}; + + +struct device_operations { + void (*read_resources)(device_t dev); + void (*set_resources)(device_t dev); + void (*enable_resources)(device_t dev); + void (*init)(device_t dev); + void (*final)(device_t dev); + void (*enable)(device_t dev); + void (*disable)(device_t dev); + + const struct pnp_mode_ops *ops_pnp_mode; +}; + +/* PNP helper operations */ +struct io_info { + unsigned int mask, set; +}; + +struct pnp_info { + bool enabled; /* set if we should enable the device */ + struct pnp_device pnp_device; + unsigned int function; /* Must be at least 16 bits (virtual LDNs)! */ +}; + +/* Chip operations */ +struct chip_operations { + void (*enable_dev)(struct device *dev); + void (*init)(void *chip_info); + void (*final)(void *chip_info); + unsigned int initialized : 1; + unsigned int finalized : 1; + const char *name; +}; + +typedef struct superio_ast2400_device { + struct device *dev; + const char *name; + unsigned int enabled : 1; /* set if we should enable the device */ + unsigned int superio_ast2400_efir; /* extended function index register */ + unsigned int superio_ast2400_efdr; /* extended function data register */ + struct chip_operations *chip_ops; + const void *chip_info; +} *superio_device_t; + + +static inline void pnp_enter_conf_mode_a5a5(device_t dev) +{ + outb(0xa5, dev->port); + outb(0xa5, dev->port); +} + +static inline void pnp_exit_conf_mode_aa(device_t dev) +{ + outb(0xaa, dev->port); +} + +/* PNP config mode wrappers */ + +static inline void pnp_enter_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->enter_conf_mode(dev); +} + +static inline void pnp_exit_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->exit_conf_mode(dev); +} + +/* PNP device operations */ +static inline u8 pnp_read_config(device_t dev, u8 reg) +{ + outb(reg, dev->port); + return inb(dev->port + 1); +} + +static inline void pnp_write_config(device_t dev, u8 reg, u8 value) +{ + outb(reg, dev->port); + outb(value, dev->port + 1); +} + +static inline void pnp_set_logical_device(device_t dev) +{ + pnp_write_config(dev, 0x07, dev->device & 0xff); +// pnp_write_config(dev, 0x07, 0x3); +} + +static inline void pnp_set_enable(device_t dev, int enable) +{ + u8 tmp; + + tmp = pnp_read_config(dev, 0x30); + + if (enable) + tmp |= 1; + else + tmp &= ~1; + + pnp_write_config(dev, 0x30, tmp); +} + +#endif /* _ASM_SW64_AST2400_H */ diff --git a/arch/sw_64/include/asm/socket.h b/arch/sw_64/include/asm/socket.h new file mode 100644 index 000000000000..e87043467775 --- /dev/null +++ 
b/arch/sw_64/include/asm/socket.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SOCKET_H
+#define _ASM_SW64_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK	0x40000000
+#endif /* _ASM_SW64_SOCKET_H */
diff --git a/arch/sw_64/include/uapi/asm/fcntl.h b/arch/sw_64/include/uapi/asm/fcntl.h
new file mode 100644
index 000000000000..be2daae2cc4d
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/fcntl.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_FCNTL_H
+#define _UAPI_ASM_SW64_FCNTL_H
+
+#define O_CREAT		01000	/* not fcntl */
+#define O_TRUNC		02000	/* not fcntl */
+#define O_EXCL		04000	/* not fcntl */
+#define O_NOCTTY	010000	/* not fcntl */
+
+#define O_NONBLOCK	00004
+#define O_APPEND	00010
+#define O_DSYNC		040000	/* used to be O_SYNC, see below */
+#define O_DIRECTORY	0100000	/* must be a directory */
+#define O_NOFOLLOW	0200000	/* don't follow links */
+#define O_LARGEFILE	0400000	/* will be set by the kernel on every open */
+#define O_DIRECT	02000000	/* direct disk access */
+#define O_NOATIME	04000000
+#define O_CLOEXEC	010000000	/* set close_on_exec */
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag.  We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true POSIX O_SYNC semantics.  It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC	020000000
+#define O_SYNC		(__O_SYNC|O_DSYNC)
+
+#define O_PATH		040000000
+#define __O_TMPFILE	0100000000
+
+#define F_GETLK		7
+#define F_SETLK		8
+#define F_SETLKW	9
+
+#define F_SETOWN	5	/* for sockets. */
+#define F_GETOWN	6	/* for sockets. */
+#define F_SETSIG	10	/* for sockets. */
+#define F_GETSIG	11	/* for sockets. */
+
+/* for POSIX fcntl() and lockf() */
+#define F_RDLCK		1
+#define F_WRLCK		2
+#define F_UNLCK		8
+
+/* for old implementation of BSD flock() */
+#define F_EXLCK		16	/* or 3 */
+#define F_SHLCK		32	/* or 4 */
+
+#include <asm-generic/fcntl.h>
+
+#endif /* _UAPI_ASM_SW64_FCNTL_H */
diff --git a/arch/sw_64/include/uapi/asm/ioctl.h b/arch/sw_64/include/uapi/asm/ioctl.h
new file mode 100644
index 000000000000..fb5267b034fc
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/ioctl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_IOCTL_H
+#define _UAPI_ASM_SW64_IOCTL_H
+
+#define _IOC_SIZEBITS	13
+#define _IOC_DIRBITS	3
+
+/*
+ * Direction bits: _IOC_NONE could be 0, but the legacy ABI gives it a
+ * bit, and this turns out to be useful for catching old ioctl numbers
+ * in header files.
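+ *
+ * Concretely, with the usual _IOC layout on top of the widths above,
+ * every properly encoded number has at least one of the three direction
+ * bits set, so a bare legacy constant such as 0x5422 (TIOCNOTTY) yields
+ * _IOC_DIR() == 0 and can be spotted.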
+ */ +#define _IOC_NONE 1U +#define _IOC_READ 2U +#define _IOC_WRITE 4U + +#include + +#endif /* _UAPI_ASM_SW64_IOCTL_H */ diff --git a/arch/sw_64/include/uapi/asm/ioctls.h b/arch/sw_64/include/uapi/asm/ioctls.h new file mode 100644 index 000000000000..36a7fc205aa7 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ioctls.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_IOCTLS_H +#define _UAPI_ASM_SW64_IOCTLS_H + +#include + +#define FIOCLEX _IO('f', 1) +#define FIONCLEX _IO('f', 2) +#define FIOASYNC _IOW('f', 125, int) +#define FIONBIO _IOW('f', 126, int) +#define FIONREAD _IOR('f', 127, int) +#define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) + +#define TIOCGETP _IOR('t', 8, struct sgttyb) +#define TIOCSETP _IOW('t', 9, struct sgttyb) +#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */ + +#define TIOCSETC _IOW('t', 17, struct tchars) +#define TIOCGETC _IOR('t', 18, struct tchars) +#define TCGETS _IOR('t', 19, struct termios) +#define TCSETS _IOW('t', 20, struct termios) +#define TCSETSW _IOW('t', 21, struct termios) +#define TCSETSF _IOW('t', 22, struct termios) + +#define TCGETA _IOR('t', 23, struct termio) +#define TCSETA _IOW('t', 24, struct termio) +#define TCSETAW _IOW('t', 25, struct termio) +#define TCSETAF _IOW('t', 28, struct termio) + +#define TCSBRK _IO('t', 29) +#define TCXONC _IO('t', 30) +#define TCFLSH _IO('t', 31) + +#define TCGETS2 _IOR('T', 42, struct termios2) +#define TCSETS2 _IOW('T', 43, struct termios2) +#define TCSETSW2 _IOW('T', 44, struct termios2) +#define TCSETSF2 _IOW('T', 45, struct termios2) + +#define TIOCSWINSZ _IOW('t', 103, struct winsize) +#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ + +#define TIOCGLTC _IOR('t', 116, struct ltchars) +#define TIOCSLTC _IOW('t', 117, struct ltchars) +#define TIOCSPGRP _IOW('t', 118, int) +#define TIOCGPGRP _IOR('t', 119, int) + +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E + +#define TIOCSTI 0x5412 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 +#define TIOCPKT_IOCTL 64 + + +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGRS485 _IOR('T', 0x2E, struct serial_rs485) +#define TIOCSRS485 _IOWR('T', 0x2F, struct serial_rs485) +#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of 
pty-mux device) */ +#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ +#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 +#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */ +#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ +#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ +#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ +#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816) +#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816) + +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ + +#endif /* _UAPI_ASM_SW64_IOCTLS_H */ diff --git a/arch/sw_64/include/uapi/asm/resource.h b/arch/sw_64/include/uapi/asm/resource.h new file mode 100644 index 000000000000..2e1ce8f6ee64 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/resource.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_RESOURCE_H +#define _UAPI_ASM_SW64_RESOURCE_H + +/* + * SW-64/Linux-specific ordering of these four resource limit IDs, + * the rest comes from the generic header: + */ +#define RLIMIT_NOFILE 6 /* max number of open files */ +#define RLIMIT_AS 7 /* address space limit */ +#define RLIMIT_NPROC 8 /* max number of processes */ +#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */ + +#include + +#endif /* _UAPI_ASM_SW64_RESOURCE_H */ diff --git a/arch/sw_64/include/uapi/asm/socket.h b/arch/sw_64/include/uapi/asm/socket.h new file mode 100644 index 000000000000..1094d11fff5b --- /dev/null +++ b/arch/sw_64/include/uapi/asm/socket.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SOCKET_H +#define _UAPI_ASM_SW64_SOCKET_H + +#include +#include + +/* For setsockopt(2) */ +/* + * Note: we only bother about making the SOL_SOCKET options + * same as legacy, as that's all that "normal" programs are + * likely to set. We don't necessarily want to be binary + * compatible with _everything_. 
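+ *
+ * The option values below accordingly follow the historical Alpha/OSF
+ * numbering (SOL_SOCKET 0xffff, SO_REUSEADDR 0x0004, SO_TYPE 0x1008)
+ * rather than the asm-generic one.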
+ */ +#define SOL_SOCKET 0xffff + +#define SO_DEBUG 0x0001 +#define SO_REUSEADDR 0x0004 +#define SO_KEEPALIVE 0x0008 +#define SO_DONTROUTE 0x0010 +#define SO_BROADCAST 0x0020 +#define SO_LINGER 0x0080 +#define SO_OOBINLINE 0x0100 +#define SO_REUSEPORT 0x0200 + +#define SO_TYPE 0x1008 +#define SO_ERROR 0x1007 +#define SO_SNDBUF 0x1001 +#define SO_RCVBUF 0x1002 +#define SO_SNDBUFFORCE 0x100a +#define SO_RCVBUFFORCE 0x100b +#define SO_RCVLOWAT 0x1010 +#define SO_SNDLOWAT 0x1011 +#define SO_RCVTIMEO_OLD 0x1012 +#define SO_SNDTIMEO_OLD 0x1013 +#define SO_ACCEPTCONN 0x1014 +#define SO_PROTOCOL 0x1028 +#define SO_DOMAIN 0x1029 + +/* linux-specific, might as well be the same as on i386 */ +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_BSDCOMPAT 14 + +#define SO_PASSCRED 17 +#define SO_PEERCRED 18 +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 +#define SO_GET_FILTER SO_ATTACH_FILTER + +#define SO_PEERNAME 28 + +#define SO_PEERSEC 30 +#define SO_PASSSEC 34 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 19 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 20 +#define SO_SECURITY_ENCRYPTION_NETWORK 21 + +#define SO_MARK 36 + +#define SO_RXQ_OVFL 40 + +#define SO_WIFI_STATUS 41 +#define SCM_WIFI_STATUS SO_WIFI_STATUS +#define SO_PEEK_OFF 42 + +/* Instruct lower device to use last 4-bytes of skb data as FCS */ +#define SO_NOFCS 43 + +#define SO_LOCK_FILTER 44 +#define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 +#define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + +#define SO_ATTACH_REUSEPORT_CBPF 51 +#define SO_ATTACH_REUSEPORT_EBPF 52 + +#define SO_CNX_ADVICE 53 + +#define SCM_TIMESTAMPING_OPT_STATS 54 + +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + +#define SCM_TIMESTAMPING_PKTINFO 58 + +#define SO_PEERGROUPS 59 + +#define SO_ZEROCOPY 60 + +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#define SO_DETACH_REUSEPORT_BPF 68 + +#define SO_PREFER_BUSY_POLL 69 +#define SO_BUSY_POLL_BUDGET 70 + +#define SO_NETNS_COOKIE 71 + +#define SO_BUF_LOCK 72 + +#define SO_RESERVE_MEM 73 + +#define SO_TXREHASH 74 + +#define SO_RCVMARK 75 + +#define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? 
SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + +#endif /* _UAPI_ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/uapi/asm/sockios.h b/arch/sw_64/include/uapi/asm/sockios.h new file mode 100644 index 000000000000..88e89dcf8300 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sockios.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SOCKIOS_H +#define _UAPI_ASM_SW64_SOCKIOS_H + +/* Socket-level I/O control calls. */ + +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */ + +#endif /* _UAPI_ASM_SW64_SOCKIOS_H */ diff --git a/arch/sw_64/include/uapi/asm/stat.h b/arch/sw_64/include/uapi/asm/stat.h new file mode 100644 index 000000000000..677a75f1cf5b --- /dev/null +++ b/arch/sw_64/include/uapi/asm/stat.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_STAT_H +#define _UAPI_ASM_SW64_STAT_H + +struct stat { + unsigned int st_dev; + unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_rdev; + long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; + unsigned int st_blksize; + unsigned int st_blocks; + unsigned int st_flags; + unsigned int st_gen; +}; + +/* The stat64 structure increases the size of dev_t, blkcnt_t, adds + * nanosecond resolution times, and padding for expansion. + */ + +struct stat64 { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_rdev; + long st_size; + unsigned long st_blocks; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_blksize; + unsigned int st_nlink; + unsigned int __pad0; + + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + long __unused[3]; +}; + +#endif /* _UAPI_ASM_SW64_STAT_H */ diff --git a/arch/sw_64/include/uapi/asm/termbits.h b/arch/sw_64/include/uapi/asm/termbits.h new file mode 100644 index 000000000000..a71aaf33c26c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termbits.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMBITS_H +#define _UAPI_ASM_SW64_TERMBITS_H + +#include + +typedef unsigned int tcflag_t; + +/* + * termios type and macro definitions. Be careful about adding stuff + * to this file since it's used in GNU libc and there are strict rules + * concerning namespace pollution. 
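+ *
+ * The layout also differs from asm-generic: here VEOF is 0 and VINTR
+ * is 8, where the generic termbits define VINTR 0 and VEOF 4.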
+ */ + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has identical termios and termios2 */ + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has matching termios and ktermios */ + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VEOF 0 +#define VEOL 1 +#define VEOL2 2 +#define VERASE 3 +#define VWERASE 4 +#define VKILL 5 +#define VREPRINT 6 +#define VSWTC 7 +#define VINTR 8 +#define VQUIT 9 +#define VSUSP 10 +#define VSTART 12 +#define VSTOP 13 +#define VLNEXT 14 +#define VDISCARD 15 +#define VMIN 16 +#define VTIME 17 + +/* c_iflag bits */ +#define IXON 0x0200 +#define IXOFF 0x0400 +#define IUCLC 0x1000 +#define IMAXBEL 0x2000 +#define IUTF8 0x4000 + +/* c_oflag bits */ +#define ONLCR 0x00002 +#define OLCUC 0x00004 +#define NLDLY 0x00300 +#define NL0 0x00000 +#define NL1 0x00100 +#define NL2 0x00200 +#define NL3 0x00300 +#define TABDLY 0x00c00 +#define TAB0 0x00000 +#define TAB1 0x00400 +#define TAB2 0x00800 +#define TAB3 0x00c00 +#define CRDLY 0x03000 +#define CR0 0x00000 +#define CR1 0x01000 +#define CR2 0x02000 +#define CR3 0x03000 +#define FFDLY 0x04000 +#define FF0 0x00000 +#define FF1 0x04000 +#define BSDLY 0x08000 +#define BS0 0x00000 +#define BS1 0x08000 +#define VTDLY 0x10000 +#define VT0 0x00000 +#define VT1 0x10000 +/* + * Should be equivalent to TAB3, see description of TAB3 in + * POSIX.1-2008, Ch. 
11.2.3 "Output Modes" + */ +#define XTABS TAB3 + +/* c_cflag bit meaning */ +#define CBAUD 0x0000001f +#define CBAUDEX 0x00000000 +#define BOTHER 0x0000001f +#define B57600 0x00000010 +#define B115200 0x00000011 +#define B230400 0x00000012 +#define B460800 0x00000013 +#define B500000 0x00000014 +#define B576000 0x00000015 +#define B921600 0x00000016 +#define B1000000 0x00000017 +#define B1152000 0x00000018 +#define B1500000 0x00000019 +#define B2000000 0x0000001a +#define B2500000 0x0000001b +#define B3000000 0x0000001c +#define B3500000 0x0000001d +#define B4000000 0x0000001e +#define CSIZE 0x00000300 +#define CS5 0x00000000 +#define CS6 0x00000100 +#define CS7 0x00000200 +#define CS8 0x00000300 +#define CSTOPB 0x00000400 +#define CREAD 0x00000800 +#define PARENB 0x00001000 +#define PARODD 0x00002000 +#define HUPCL 0x00004000 +#define CLOCAL 0x00008000 +#define CIBAUD 0x001f0000 + +/* c_lflag bits */ +#define ISIG 0x00000080 +#define ICANON 0x00000100 +#define XCASE 0x00004000 +#define ECHO 0x00000008 +#define ECHOE 0x00000002 +#define ECHOK 0x00000004 +#define ECHONL 0x00000010 +#define NOFLSH 0x80000000 +#define TOSTOP 0x00400000 +#define ECHOCTL 0x00000040 +#define ECHOPRT 0x00000020 +#define ECHOKE 0x00000001 +#define FLUSHO 0x00800000 +#define PENDIN 0x20000000 +#define IEXTEN 0x00000400 +#define EXTPROC 0x10000000 + +/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _UAPI_ASM_SW64_TERMBITS_H */ diff --git a/arch/sw_64/include/uapi/asm/termios.h b/arch/sw_64/include/uapi/asm/termios.h new file mode 100644 index 000000000000..62f4b40551b2 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termios.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMIOS_H +#define _UAPI_ASM_SW64_TERMIOS_H + +#include +#include + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + short sg_flags; +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* + * c_cc characters in the termio structure. Oh, how I love being + * backwardly compatible. Notice that character 4 and 5 are + * interpreted differently depending on whether ICANON is set in + * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise + * as _VMIN and V_TIME. This is for compatibility with sysV)... + */ +#define _VINTR 0 +#define _VQUIT 1 +#define _VERASE 2 +#define _VKILL 3 +#define _VEOF 4 +#define _VMIN 4 +#define _VEOL 5 +#define _VTIME 5 +#define _VEOL2 6 +#define _VSWTC 7 + + +#endif /* _UAPI_ASM_SW64_TERMIOS_H */ -- Gitee From 8bdd54546be2ac6ee2f7037d664b53d9714e7ef1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:02 +0800 Subject: [PATCH 290/953] anolis: sw64: add boot and setup routines ANBZ: #4688 Add basic boot, setup and reset routines for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/platform.h | 32 + arch/sw_64/include/asm/setup.h | 51 ++ arch/sw_64/include/asm/sw64_init.h | 50 ++ arch/sw_64/include/uapi/asm/bootparam.h | 22 + arch/sw_64/kernel/cacheinfo.c | 99 +++ arch/sw_64/kernel/chip_setup.c | 245 ++++++ arch/sw_64/kernel/early_init.c | 11 + arch/sw_64/kernel/head.S | 112 +++ arch/sw_64/kernel/hmcall.c | 131 +++ arch/sw_64/kernel/reset.c | 120 +++ arch/sw_64/kernel/setup.c | 1061 +++++++++++++++++++++++ 11 files changed, 1934 insertions(+) create mode 100644 arch/sw_64/include/asm/platform.h create mode 100644 arch/sw_64/include/asm/setup.h create mode 100644 arch/sw_64/include/asm/sw64_init.h create mode 100644 arch/sw_64/include/uapi/asm/bootparam.h create mode 100644 arch/sw_64/kernel/cacheinfo.c create mode 100644 arch/sw_64/kernel/chip_setup.c create mode 100644 arch/sw_64/kernel/early_init.c create mode 100644 arch/sw_64/kernel/head.S create mode 100644 arch/sw_64/kernel/hmcall.c create mode 100644 arch/sw_64/kernel/reset.c create mode 100644 arch/sw_64/kernel/setup.c diff --git a/arch/sw_64/include/asm/platform.h b/arch/sw_64/include/asm/platform.h new file mode 100644 index 000000000000..ad54cdc772e1 --- /dev/null +++ b/arch/sw_64/include/asm/platform.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PLATFORM_H +#define _ASM_SW64_PLATFORM_H + +#include +#if defined(CONFIG_UNCORE_XUELANG) +#include +#elif defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#ifdef CONFIG_EFI +#define BIOS_VERSION_GUID EFI_GUID(0xc47a23c3, 0xcebb, 0x4cc9, 0xa5, 0xe2, 0xde, 0xd0, 0x8f, 0xe4, 0x20, 0xb5) + +#define BIOS_SUPPORT_RESET_CLALLBACK(bios_version) ((bios_version) != NULL) + +extern unsigned long bios_version; + +#endif + +extern struct boot_params *sunway_boot_params; + +extern void sw64_halt(void); +extern void sw64_poweroff(void); +extern void sw64_restart(void); +extern void (*pm_restart)(void); +extern void (*pm_halt)(void); +extern int i2c_set_adapter(void); +extern void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data); +extern void fix_jm585_reset(void); + +#endif /* _ASM_SW64_PLATFORM_H */ diff --git a/arch/sw_64/include/asm/setup.h b/arch/sw_64/include/asm/setup.h new file mode 100644 index 000000000000..2d557b349555 --- /dev/null +++ b/arch/sw_64/include/asm/setup.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SETUP_H +#define _ASM_SW64_SETUP_H + +#include + +/* + * We leave one page for the initial stack page, and one page for + * the initial process structure. Also, the console eats 3 MB for + * the initial bootloader (one of which we can reclaim later). + */ +#define BOOT_PCB 0x20000000 +#define BOOT_ADDR 0x20000000 +/* Remove when official MILO sources have ELF support: */ +#define BOOT_SIZE (16 * 1024) + +#define KERNEL_START_PHYS CONFIG_PHYSICAL_START +#define KERNEL_START (__START_KERNEL_map + CONFIG_PHYSICAL_START) + +/* INIT_STACK may be used for merging lwk to kernel*/ +#define INIT_STACK (KERNEL_START + 0x02000) + +/* + * This is setup by the secondary bootstrap loader. Because + * the zero page is zeroed out as soon as the vm system is + * initialized, we need to copy things out into a more permanent + * place. 
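+ *
+ * The layout relative to KERNEL_START then is (see the offsets below):
+ *
+ *   +0x02000  INIT_STACK
+ *   +0x0A000  PARAM block (initrd start/size at +0x100/+0x108, dtb at +0x118)
+ *   +0x0B000  COMMAND_LINE
+ *   +0x10000  _TEXT_START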
+ */ +#define PARAM (KERNEL_START + 0x0A000) +#define COMMAND_LINE ((char *)(KERNEL_START + 0x0B000)) +#define INITRD_START (*(unsigned long *)(PARAM + 0x100)) +#define INITRD_SIZE (*(unsigned long *)(PARAM + 0x108)) +#define DTB_START (*(unsigned long *)(PARAM + 0x118)) + +#define _TEXT_START (KERNEL_START + 0x10000) + +#define COMMAND_LINE_OFF (0x10000UL - 0xB000UL) +#define INITRD_START_OFF (0x10000UL - 0xA100UL) +#define INITRD_SIZE_OFF (0x10000UL - 0xA108UL) + +/* Motherboard Configuration Tables */ +#define MB_CONFIG_START 0x908000 +#define MB_MCLK (MB_CONFIG_START + 0x1) +#define MB_EXTCLK (MB_CONFIG_START + 0x11) + +#ifndef __ASSEMBLY__ +#include +extern struct boot_params *sunway_boot_params; +#endif + +#endif /* _ASM_SW64_SETUP_H */ diff --git a/arch/sw_64/include/asm/sw64_init.h b/arch/sw_64/include/asm/sw64_init.h new file mode 100644 index 000000000000..86ddd2cb65f8 --- /dev/null +++ b/arch/sw_64/include/asm/sw64_init.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64_INIT_H +#define _ASM_SW64_SW64_INIT_H + +#include +#include + +#include + +struct sw64_early_init_ops { + void (*setup_core_map)(struct cpumask *cpumask); + unsigned long (*get_node_mem)(int nodeid); + void (*get_smp_info)(void); +}; + +struct sw64_pci_init_ops { + int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); + unsigned long (*get_rc_enable)(unsigned long node); + void (*hose_init)(struct pci_controller *hose); + void (*set_rc_piu)(unsigned long node, unsigned long index); + int (*check_pci_linkup)(unsigned long node, unsigned long index); + void (*set_intx)(unsigned long node, unsigned long index, + unsigned long int_conf); +}; + + +struct sw64_chip_init_ops { + struct sw64_early_init_ops early_init; + struct sw64_pci_init_ops pci_init; + void (*fixup)(void); +}; + +struct sw64_chip_ops { + int (*get_cpu_num)(void); + void (*device_interrupt)(unsigned long irq_info); + void (*suspend)(bool wake); + void (*fixup)(void); +}; + +extern void sw64_init_noop(void); +extern void setup_chip_ops(void); +extern struct sw64_chip_ops *sw64_chip; +extern struct sw64_chip_init_ops *sw64_chip_init; +#ifdef CONFIG_PM +extern struct syscore_ops io_syscore_ops; +#endif + +DECLARE_PER_CPU(unsigned long, hard_node_id); + +#endif /* _ASM_SW64_SW64_INIT_H */ diff --git a/arch/sw_64/include/uapi/asm/bootparam.h b/arch/sw_64/include/uapi/asm/bootparam.h new file mode 100644 index 000000000000..6ce75d65e86e --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bootparam.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BOOTPARAM_H +#define _UAPI_ASM_SW64_BOOTPARAM_H + +#ifndef __ASSEMBLY__ + +#include + +struct boot_params { + __u64 initrd_start; /* logical address of initrd */ + __u64 initrd_size; /* size of initrd */ + __u64 dtb_start; /* logical address of dtb */ + __u64 efi_systab; /* logical address of EFI system table */ + __u64 efi_memmap; /* logical address of EFI memory map */ + __u64 efi_memmap_size; /* size of EFI memory map */ + __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ + __u64 efi_memdesc_version; /* memory descriptor version */ + __u64 cmdline; /* logical address of cmdline */ +}; +#endif + +#endif /* _UAPI_ASM_SW64_BOOTPARAM_H */ diff --git a/arch/sw_64/kernel/cacheinfo.c b/arch/sw_64/kernel/cacheinfo.c new file mode 100644 index 000000000000..e340c53690a9 --- /dev/null +++ b/arch/sw_64/kernel/cacheinfo.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 cacheinfo support + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +#include + +/* Populates leaf and increments to next leaf */ +#define populate_cache(cache, leaf, c_level, c_type, c_id) \ +do { \ + leaf->id = c_id; \ + leaf->attributes = CACHE_ID; \ + leaf->type = c_type; \ + leaf->level = c_level; \ + leaf->coherency_line_size = c->cache.linesz; \ + leaf->number_of_sets = c->cache.sets; \ + leaf->ways_of_associativity = c->cache.ways; \ + leaf->size = c->cache.size; \ + leaf++; \ +} while (0) + +int init_cache_level(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + int levels = 0, leaves = 0; + + /* + * If Dcache is not set, we assume the cache structures + * are not properly initialized. + */ + if (c->dcache.size) + levels += 1; + else + return -ENOENT; + + + leaves += (c->icache.size) ? 2 : 1; + + if (c->scache.size) { + levels++; + leaves++; + } + + if (c->tcache.size) { + levels++; + leaves++; + } + + this_cpu_ci->num_levels = levels; + this_cpu_ci->num_leaves = leaves; + return 0; +} + +int populate_cache_leaves(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + struct cpu_topology *topo = &cpu_topology[cpu]; + + if (c->icache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA, cpu); + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST, cpu); + + } else { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->scache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->tcache.size) { + cpumask_copy(&this_leaf->shared_cpu_map, topology_llc_cpumask(cpu)); + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED, topo->package_id); + } + + this_cpu_ci->cpu_map_populated = true; + + return 0; +} diff --git a/arch/sw_64/kernel/chip_setup.c b/arch/sw_64/kernel/chip_setup.c new file mode 100644 index 000000000000..b8c359db2ef6 --- /dev/null +++ b/arch/sw_64/kernel/chip_setup.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include + +struct sw64_chip_ops *sw64_chip; +struct sw64_chip_init_ops *sw64_chip_init; + +static int get_cpu_nums(void) +{ + if (is_guest_or_emul()) + return 1; + + return __get_cpu_nums(); +} + +static unsigned long __init get_node_mem(int nodeid) +{ + + if (is_guest_or_emul()) + return *(unsigned long *)MMSIZE & MMSIZE_MASK; + + return __get_node_mem(nodeid); +} + +static void __init setup_core_map(struct cpumask *cpumask) +{ + int i, j, cpu_num, cpuid, max_cores_per_cpu; + unsigned long coreonline; + + cpu_num = get_cpu_nums(); + cpuid = 0; + for (i = 0; i < cpu_num; i++) { + coreonline = sw64_io_read(i, CORE_ONLINE); + max_cores_per_cpu = 
MAX_CORES_PER_CPU; + + if (is_guest_or_emul()) + max_cores_per_cpu = 64; + + for (j = 0; j < max_cores_per_cpu; j++) { + if (coreonline & (1UL << j)) { + __cpu_to_rcid[cpuid] = (i << DOMAIN_ID_SHIFT) | (j << CORE_ID_SHIFT); + cpuid++; + } + } + } + + if (is_in_host() && core_is_ht()) { + for (i = 0; i < cpuid; i++) + __cpu_to_rcid[cpuid + i] = __cpu_to_rcid[i] | (1 << THREAD_ID_SHIFT); + + cpuid = cpuid + i; + } + + while (cpuid < NR_CPUS) { + __cpu_to_rcid[cpuid] = -1; + cpuid++; + } +} + +#ifdef CONFIG_PM +static void i2c_srst(void) +{ + sw64_io_write(0, I2C0_SRST_L, 0x0); + sw64_io_write(0, I2C0_SRST_L, 0x1); + + sw64_io_write(0, I2C1_SRST_L, 0x0); + sw64_io_write(0, I2C1_SRST_L, 0x1); + + sw64_io_write(0, I2C2_SRST_L, 0x0); + sw64_io_write(0, I2C2_SRST_L, 0x1); +} + +static void pcie_save(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + piu_save = kzalloc(sizeof(*piu_save), GFP_KERNEL); + + node = hose->node; + index = hose->index; + hose->sysdata = piu_save; + + piu_save->piuconfig0 = read_piu_ior0(node, index, PIUCONFIG0); + piu_save->piuconfig1 = read_piu_ior1(node, index, PIUCONFIG1); + piu_save->epdmabar = read_piu_ior0(node, index, EPDMABAR); + piu_save->msiaddr = read_piu_ior0(node, index, MSIADDR); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + piu_save->msiconfig[i] = read_piu_ior0(node, index, + MSICONFIG0 + (i << 7)); + } + } + + piu_save->iommuexcpt_ctrl = read_piu_ior0(node, index, IOMMUEXCPT_CTRL); + piu_save->dtbaseaddr = read_piu_ior0(node, index, DTBASEADDR); + + piu_save->intaconfig = read_piu_ior0(node, index, INTACONFIG); + piu_save->intbconfig = read_piu_ior0(node, index, INTBCONFIG); + piu_save->intcconfig = read_piu_ior0(node, index, INTCCONFIG); + piu_save->intdconfig = read_piu_ior0(node, index, INTDCONFIG); + piu_save->pmeintconfig = read_piu_ior0(node, index, PMEINTCONFIG); + piu_save->aererrintconfig = read_piu_ior0(node, index, AERERRINTCONFIG); + piu_save->hpintconfig = read_piu_ior0(node, index, HPINTCONFIG); + + } +} + +static void pcie_restore(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + u32 rc_misc_ctrl; + unsigned int value; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + node = hose->node; + index = hose->index; + piu_save = hose->sysdata; + + write_piu_ior0(node, index, PIUCONFIG0, piu_save->piuconfig0); + write_piu_ior1(node, index, PIUCONFIG1, piu_save->piuconfig1); + write_piu_ior0(node, index, EPDMABAR, piu_save->epdmabar); + write_piu_ior0(node, index, MSIADDR, piu_save->msiaddr); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), + piu_save->msiconfig[i]); + } + } + + write_piu_ior0(node, index, IOMMUEXCPT_CTRL, piu_save->iommuexcpt_ctrl); + write_piu_ior0(node, index, DTBASEADDR, piu_save->dtbaseaddr); + + write_piu_ior0(node, index, INTACONFIG, piu_save->intaconfig); + write_piu_ior0(node, index, INTBCONFIG, piu_save->intbconfig); + write_piu_ior0(node, index, INTCCONFIG, piu_save->intcconfig); + write_piu_ior0(node, index, INTDCONFIG, piu_save->intdconfig); + write_piu_ior0(node, index, PMEINTCONFIG, piu_save->pmeintconfig); + write_piu_ior0(node, index, AERERRINTCONFIG, piu_save->aererrintconfig); + write_piu_ior0(node, index, HPINTCONFIG, piu_save->hpintconfig); + + /* Enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, 
index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* Fix up DEVICE_ID_VENDOR_ID register */ + value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN; + write_rc_conf(node, index, RC_VENDOR_ID, value); + + /* Set PCI-E root class code */ + value = read_rc_conf(node, index, RC_REVISION_ID); + write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value); + + /* Disable DBI_RO_WR_EN */ + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl); + } + +} + +static unsigned long saved_dvc_int, saved_long_time; + +static inline void intpu_save(void) +{ + switch (cpu_desc.model) { + case CPU_SW831: + saved_long_time = __io_read_longtime(0); + default: + break; + } +} + +static inline void intpu_restore(void) +{ + switch (cpu_desc.model) { + case CPU_SW831: + __io_write_longtime(0, saved_long_time); + __io_write_longtime_start_en(0, 0x1); + break; + default: + pr_info("long time start is disable!"); + break; + } +} + +static inline void spbu_save(void) +{ + saved_dvc_int = sw64_io_read(0, MCU_DVC_INT_EN); +} + +static inline void spbu_restore(void) +{ + i2c_srst(); + sw64_io_write(0, MCU_DVC_INT_EN, saved_dvc_int); +} + +static int io_suspend(void) +{ + spbu_save(); + intpu_save(); + pcie_save(); + + return 0; +} + +static void io_resume(void) +{ + pcie_restore(); + intpu_restore(); + spbu_restore(); +} +#endif /* CONFIG_PM */ + +static struct sw64_chip_init_ops chip_init_ops = { + .early_init = { + .setup_core_map = setup_core_map, + .get_node_mem = get_node_mem, + }, +}; + +static struct sw64_chip_ops chip_ops = { + .get_cpu_num = get_cpu_nums, +}; + +void __init setup_chip_ops(void) +{ + sw64_chip_init = &chip_init_ops; + sw64_chip = &chip_ops; + setup_chip_pci_ops(); +#ifdef CONFIG_PM + io_syscore_ops.suspend = io_suspend; + io_syscore_ops.resume = io_resume; +#endif +} diff --git a/arch/sw_64/kernel/early_init.c b/arch/sw_64/kernel/early_init.c new file mode 100644 index 000000000000..2ec7a3e99443 --- /dev/null +++ b/arch/sw_64/kernel/early_init.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include + +asmlinkage __visible void __init sw64_start_kernel(void) +{ + fixup_hmcall(); + save_ktp(); + start_kernel(); +} diff --git a/arch/sw_64/kernel/head.S b/arch/sw_64/kernel/head.S new file mode 100644 index 000000000000..fd0fbfbcf5b6 --- /dev/null +++ b/arch/sw_64/kernel/head.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * initial boot stuff.. At this point, the bootloader has already + * switched into HMcode, and loaded us at the correct address + * (START_ADDR). So there isn't much left for us to do: just set up + * the kernel global pointer and jump to the kernel entry-point. + */ + +#include +#include +#include +#include + +__HEAD + .globl _stext + .set noreorder + .globl __start + .ent __start +_stext: +__start: + .prologue 0 + br $27, 1f +1: ldgp $29, 0($27) + /* We need to get current_task_info loaded up... */ + ldi $8, init_task + ldl $30, TASK_STACK($8) + /* ... and find our stack ... */ + ldi $30, ASM_THREAD_SIZE($30) + + /* ... and then we can clear bss data. 
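+	 * __constant_c_memset is called with $16 = destination, $17 =
+	 * fill value (zero here, copied from $31) and $18 = length in
+	 * bytes, hence the subl/mov pair below.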
*/ + ldi $16, __bss_start + ldi $18, __bss_stop + subl $18, $16, $18 + mov $31, $17 + call $26, __constant_c_memset +#ifdef CONFIG_RELOCATABLE + ldi $30, -8($30) + stl $29, 0($30) + /* Copy kernel and apply the relocations */ + call $26, relocate_kernel + ldl $29, 0($30) + addl $29, $0, $29 + addl $8, $0, $8 + ldi $30, 8($30) + /* Repoint the sp into the new kernel image */ + addl $30, $0, $30 +#endif + /* ... and then we can start the kernel. */ + call $26, sw64_start_kernel + sys_call HMC_halt + .end __start + +#ifdef CONFIG_SMP + .align 3 + .globl __smp_callin + .ent __smp_callin + /* On entry here the PCB of the idle task for this processor + * has been loaded. We've arranged for the tilde_pcb[x] for + * this process to contain the PCBB of the target idle task. + */ +__smp_callin: + .prologue 1 + br $27, 2f # we copy this from above "br $27 1f" +2: ldgp $29, 0($27) # First order of business, load the GP. + + bis $31, $31, $16 # invalidate all TLB with current VPN + sys_call HMC_tbi + +#if defined(CONFIG_SUBARCH_C3B) + sys_call HMC_whami # Get hard cid + ldi $1, __cpu_to_rcid + ldi $2, 0($31) + ldi $4, CONFIG_NR_CPUS +3: ldw $3, 0($1) + cmpeq $3, $0, $3 + bne $3, 4f + addl $1, 4, $1 + addl $2, 1, $2 + cmpeq $2, $4, $5 + bne $5, 5f + br $31, 3b +4: ldi $0, 0($2) +#else + rcid $0 +#endif + + ldi $2, idle_task_pointer + s8addl $0, $2, $2 + ldl $8, 0($2) # Get ksp of idle thread + sys_call HMC_wrktp + + ldl $30, TASK_STACK($8) + ldi $30, ASM_THREAD_SIZE($30) + + call $26, smp_callin +5: + sys_call HMC_halt + .end __smp_callin +#endif /* CONFIG_SMP */ + # + # It is handy, on occasion, to make halt actually just loop. + # Putting it here means we dont have to recompile the whole + # kernel. + # + + .align 3 + .globl halt + .ent halt +halt: + .prologue 0 + sys_call HMC_halt + .end halt diff --git a/arch/sw_64/kernel/hmcall.c b/arch/sw_64/kernel/hmcall.c new file mode 100644 index 000000000000..d2054a930bd7 --- /dev/null +++ b/arch/sw_64/kernel/hmcall.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/hmcall.c + * + * Copyright (C) 2022 WXIAT + * Author: He Sheng + */ + +#include +#include + +#define A0(func) (((HMC_##func & 0xFF) >> 6) & 0x1) +#define A1(func) ((((HMC_##func & 0xFF)>>6) & 0x2) >> 1) +#define A2(func) ((HMC_##func & 0x3F) << 7) + +#define T(func) ((A0(func) ^ A1(func)) & 0x1) +#define B0(func) ((T(func) | A0(func)) << 13) +#define B1(func) (((~T(func) & 1) | A1(func)) << 14) + +#define PRI_BASE 0x10000UL + +#define HMCALL_ENTRY(func) (PRI_BASE | B1(func) | B0(func) | A2(func)) + + +static inline void fixup_rdtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdtp)); + + entry[0] = 0x181ffec7; /* pri_rcsr $0, CSR__TID */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrtp)); + + entry[0] = 0x1a1fffc7; /* pri_wcsr $16, CSR__TID */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_tbiasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(tbisasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x1a3fff46; /* pri_wcsr r17, CSR__DTB_IS */ + entry[8] = 0x18ffff47; /* pri_wcsr p7, 
CSR__DTB_PCR */ + entry[9] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[10] = 0x189ffe22; /* pri_rcsr p4, CSR__UPCR_UPN */ + entry[11] = 0x18dfff22; /* pri_wcsr p6, CSR__UPCR_UPN */ + entry[12] = 0x1a3fff06; /* pri_wcsr r17, CSR__ITB_IS */ + entry[13] = 0x1bffff15; /* pri_wcsr r31, CSR__IC_FLUSH */ + entry[14] = 0x189fff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[15] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_wrasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[8] = 0x18dfff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[9] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_rdktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdktp)); + + entry[0] = 0x95161000; /* pri_ldl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrktp)); + + entry[0] = 0xb5161000; /* pri_stl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_rdusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdusp)); + + entry[0] = 0x94161018; /* pri_ldl/p $0, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrusp)); + + entry[0] = 0xb6161018; /* pri_stl/p $16, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +void __init fixup_hmcall(void) +{ +#if defined(CONFIG_SUBARCH_C3B) + fixup_rdtp(); + fixup_wrtp(); + fixup_tbiasid(); + fixup_wrasid(); + fixup_rdktp(); + fixup_wrktp(); + fixup_rdusp(); + fixup_wrusp(); + imemb(); +#endif +} + +#undef A0 +#undef A1 +#undef A2 +#undef T +#undef B0 +#undef B1 diff --git a/arch/sw_64/kernel/reset.c b/arch/sw_64/kernel/reset.c new file mode 100644 index 000000000000..955339557a7a --- /dev/null +++ b/arch/sw_64/kernel/reset.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Sunway Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +void fix_jm585_reset(void) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + int val; + + pdev = pci_get_device(PCI_VENDOR_ID_JMICRON, + 0x0585, NULL); + if (pdev) { + hose = pci_bus_to_pci_controller(pdev->bus); + val = read_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val | 0x8); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val); + } + +} +static void default_halt(void) +{ + local_irq_disable(); + + pr_notice("\n\n** You can safely turn off the power now **\n\n"); + + while (true) + arch_cpu_idle(); +} + +static void default_poweroff(void) +{ + /* No point in taking interrupts anymore. 
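+	 * Control is handed to the firmware via efi.reset_system() when
+	 * EFI is available; otherwise we spin in arch_cpu_idle().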
*/ + local_irq_disable(); +#ifdef CONFIG_EFI + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); +#endif + while (true) + arch_cpu_idle(); +} + +static void default_restart(void) +{ + /* No point in taking interrupts anymore. */ + local_irq_disable(); + + fix_jm585_reset(); +#ifdef CONFIG_EFI + if (efi_capsule_pending(NULL)) + efi_reboot(REBOOT_WARM, NULL); + else + efi_reboot(REBOOT_COLD, NULL); +#endif + + while (true) + arch_cpu_idle(); +} + +void (*pm_restart)(void); + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void (*pm_halt)(void); + +void machine_halt(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_halt(); +} + +void machine_power_off(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_power_off(); +} + +void machine_restart(char *command) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_restart(command); + pm_restart(); +} + +static int __init sw64_reboot_setup(void) +{ + pm_restart = default_restart; + pm_power_off = default_poweroff; + pm_halt = default_halt; + + return 0; +} +arch_initcall(sw64_reboot_setup); diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c new file mode 100644 index 000000000000..0c1ddb9b46d7 --- /dev/null +++ b/arch/sw_64/kernel/setup.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Bootup setup stuff. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MAGIC_SYSRQ +#include +#include +#endif +#ifdef CONFIG_DEBUG_FS +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +#undef DEBUG_DISCONTIG +#ifdef DEBUG_DISCONTIG +#define DBGDCONT(args...) pr_debug(args) +#else +#define DBGDCONT(args...) +#endif + +int __cpu_to_rcid[NR_CPUS]; /* Map logical to physical */ +EXPORT_SYMBOL(__cpu_to_rcid); + +DEFINE_PER_CPU(unsigned long, hard_node_id) = { 0 }; +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +struct cma *sw64_kvm_cma; +EXPORT_SYMBOL(sw64_kvm_cma); + +static phys_addr_t kvm_mem_size; +static phys_addr_t kvm_mem_base; + +struct gen_pool *sw64_kvm_pool; +EXPORT_SYMBOL(sw64_kvm_pool); +#endif +#endif + +static inline int phys_addr_valid(unsigned long addr) +{ + /* + * At this point memory probe has not been done such that max_pfn + * and other physical address variables cannot be used, so let's + * roughly judge physical address based on arch specific bit. + */ + return !(addr >> (cpu_desc.pa_bits - 1)); +} + +extern struct atomic_notifier_head panic_notifier_list; +static int sw64_panic_event(struct notifier_block *, unsigned long, void *); +static struct notifier_block sw64_panic_block = { + sw64_panic_event, + NULL, + INT_MAX /* try to do it first */ +}; + +/* the value is IOR: CORE_ONLIE*/ +cpumask_t core_start = CPU_MASK_NONE; + +static struct resource data_resource = { + .name = "Kernel data", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +static struct resource code_resource = { + .name = "Kernel code", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +static struct resource bss_resource = { + .name = "Kernel bss", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +/* A collection of per-processor data. 
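+ * cpu_data[] is indexed by logical CPU id; store_cpu_data() below seeds
+ * each entry's last_asid with ASID_FIRST_VERSION.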
*/ +struct cpuinfo_sw64 cpu_data[NR_CPUS]; +EXPORT_SYMBOL(cpu_data); + +DEFINE_STATIC_KEY_TRUE(run_mode_host_key); +DEFINE_STATIC_KEY_FALSE(run_mode_guest_key); +DEFINE_STATIC_KEY_FALSE(run_mode_emul_key); +struct cpu_desc_t cpu_desc; +struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; +int memmap_nr; +struct memmap_entry memmap_map[MAX_NUMMEMMAPS]; +bool memblock_initialized; + +cpumask_t cpu_offline = CPU_MASK_NONE; + +static char command_line[COMMAND_LINE_SIZE] __initdata; +#ifdef CONFIG_CMDLINE_BOOL +static char builtin_cmdline[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; +#endif + +/* boot_params */ +struct boot_params *sunway_boot_params = (struct boot_params *) (PARAM + 0x100); + +/* + * The format of "screen_info" is strange, and due to early + * i386-setup code. This is just enough to make the console + * code think we're on a VGA color display. + */ + +struct screen_info screen_info = { + .orig_x = 0, + .orig_y = 25, + .orig_video_cols = 80, + .orig_video_lines = 25, + .orig_video_isVGA = 1, + .orig_video_points = 16 +}; +EXPORT_SYMBOL(screen_info); + +/* + * Move global data into per-processor storage. + */ +void store_cpu_data(int cpu) +{ + cpu_data[cpu].last_asid = ASID_FIRST_VERSION; +} + +#ifdef CONFIG_KEXEC + +void *kexec_control_page; + +#define KTEXT_MAX KERNEL_IMAGE_SIZE + +static void __init kexec_control_page_init(void) +{ + phys_addr_t addr; + + addr = memblock_phys_alloc_range(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE, + 0, KTEXT_MAX); + kexec_control_page = (void *)(__START_KERNEL_map + addr); +} + +/* + * reserve_crashkernel() - reserves memory are for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by a dump capture kernel when + * primary kernel is crashing. + */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base; + int ret; + + ret = parse_crashkernel(boot_command_line, mem_desc.size, + &crash_size, &crash_base); + if (ret || !crash_size) + return; + + if (!crash_size) { + pr_warn("size of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + if (!crash_base) { + pr_warn("base of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + + if (!memblock_is_region_memory(crash_base, crash_size)) + memblock_add(crash_base, crash_size); + + ret = memblock_reserve(crash_base, crash_size); + if (ret < 0) { + pr_warn("crashkernel reservation failed - memory is in use [mem %#018llx-%#018llx]\n", + crash_base, crash_base + crash_size - 1); + return; + } + + pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(mem_desc.size >> 20)); + + ret = add_memmap_region(crash_base, crash_size, memmap_crashkernel); + if (ret) + pr_warn("Add crash kernel area [mem %#018llx-%#018llx] to memmap region failed.\n", + crash_base, crash_base + crash_size - 1); + + if (crash_base >= KERNEL_IMAGE_SIZE) + pr_warn("Crash base should be less than %#x\n", KERNEL_IMAGE_SIZE); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); +} +#else /* !defined(CONFIG_KEXEC) */ +static void __init reserve_crashkernel(void) {} +static void __init kexec_control_page_init(void) {} +#endif /* !defined(CONFIG_KEXEC) */ + +/* + * I/O resources inherited from PeeCees. 
Except for perhaps the + * turbochannel SWs, everyone has these on some sort of SuperIO chip. + * + * ??? If this becomes less standard, move the struct out into the + * machine vector. + */ + +static void __init +reserve_std_resources(void) +{ + static struct resource standard_io_resources[] = { + { .name = "rtc", .start = -1, .end = -1 }, + { .name = "dma1", .start = 0x00, .end = 0x1f }, + { .name = "pic1", .start = 0x20, .end = 0x3f }, + { .name = "timer", .start = 0x40, .end = 0x5f }, + { .name = "keyboard", .start = 0x60, .end = 0x6f }, + { .name = "dma page reg", .start = 0x80, .end = 0x8f }, + { .name = "pic2", .start = 0xa0, .end = 0xbf }, + { .name = "dma2", .start = 0xc0, .end = 0xdf }, + }; + + struct resource *io = &ioport_resource; + size_t i; + + if (hose_head) { + struct pci_controller *hose; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->index == 0) { + io = hose->io_space; + break; + } + } + + /* Fix up for the Jensen's queer RTC placement. */ + standard_io_resources[0].start = RTC_PORT(0); + standard_io_resources[0].end = RTC_PORT(0) + 0x10; + + for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i) + request_resource(io, standard_io_resources+i); +} + +static int __init parse_memmap_one(char *p) +{ + char *oldp; + u64 start_at, mem_size; + int ret; + + if (!p) + return -EINVAL; + + if (!strncmp(p, "exactmap", 8)) { + pr_err("\"memmap=exactmap\" not valid on sw64\n"); + return 0; + } + + oldp = p; + mem_size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') { + pr_err("\"memmap=nn@ss\" invalid on sw64\n"); + } else if (*p == '#') { + pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on sw64\n"); + } else if (*p == '$') { + start_at = memparse(p + 1, &p); + ret = add_memmap_region(start_at, mem_size, memmap_reserved); + if (ret) + return ret; + } else { + return -EINVAL; + } + return *p == '\0' ? 0 : -EINVAL; +} + +static int __init setup_memmap(char *str) +{ + while (str) { + char *k = strchr(str, ','); + + if (k) + *k++ = 0; + + parse_memmap_one(str); + str = k; + } + + return 0; +} +early_param("memmap", setup_memmap); + +static int __init setup_cpuoffline(char *p) +{ + cpulist_parse(p, &cpu_offline); + cpumask_clear_cpu(0, &cpu_offline); + return 0; +} +early_param("cpuoffline", setup_cpuoffline); + +#ifdef CONFIG_BLK_DEV_INITRD +static void * __init move_initrd(unsigned long mem_limit) +{ + void *start; + unsigned long size; + + size = initrd_end - initrd_start; + start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0); + if (!start || __pa(start) + size > mem_limit) { + initrd_start = initrd_end = 0; + return NULL; + } + memmove(start, (void *)initrd_start, size); + initrd_start = (unsigned long)start; + initrd_end = initrd_start + size; + pr_info("initrd moved to 0x%px\n", start); + return start; +} +#else +static void * __init move_initrd(unsigned long mem_limit) +{ + return NULL; +} +#endif + +static bool __init memmap_range_valid(phys_addr_t base, phys_addr_t *size) +{ + if (base > memblock_end_of_DRAM()) + return false; + + if ((base + *size) > memblock_end_of_DRAM()) + *size = memblock_end_of_DRAM() - base; + + return true; +} + +void __init process_memmap(void) +{ + static int i; // Make it static so we won't start over again every time. 
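+	/*
+	 * add_memmap_region() re-invokes process_memmap() after appending
+	 * each entry, so the static index lets every pass resume at the
+	 * first entry the previous pass has not handled.
+	 */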
+ int ret; + phys_addr_t base, size; + unsigned long dma_end __maybe_unused = (MAX_DMA32_PFN << PAGE_SHIFT); + + if (!memblock_initialized) + return; + + for (; i < memmap_nr; i++) { + base = memmap_map[i].addr; + size = memmap_map[i].size; + switch (memmap_map[i].type) { + case memmap_reserved: + if (!memmap_range_valid(base, &size)) { + pr_err("reserved memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("reserved memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (base < dma_end)) + pr_warn("memmap region [mem %#018llx-%#018llx] overlapped with DMA32 region\n", + base, base + size - 1); + } + break; + case memmap_pci: + if (!memmap_range_valid(base, &size)) { + pr_err("pci memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("pci memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + } + break; + case memmap_initrd: + if ((base + size) > memblock_end_of_DRAM()) { + phys_addr_t old_base = base; + + base = (unsigned long) move_initrd(memblock_end_of_DRAM()); + if (!base) { + pr_err("initrd memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", + old_base, old_base + size - 1, memblock_end_of_DRAM()); + break; + } + memmap_map[i].addr = base; + } + pr_info("initrd memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); + ret = memblock_reserve(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + break; + case memmap_kvm: + case memmap_crashkernel: + /* kvm and crashkernel are handled elsewhere, skip */ + break; + case memmap_acpi: + pr_err("ACPI memmap region is not supported.\n"); + break; + case memmap_use: + pr_err("Force usage memmap region is not supported.\n"); + break; + case memmap_protected: + pr_err("Protected memmap region is not supported.\n"); + break; + default: + pr_err("Unknown type of memmap region.\n"); + } + } +} + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type) +{ + if (memmap_nr >= ARRAY_SIZE(memmap_map)) { + pr_err("Ooops! 
Too many entries in the memory map!\n"); + return -EPERM; + } + + if (addr + size <= addr) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return -EINVAL; + } + + memmap_map[memmap_nr].addr = addr; + memmap_map[memmap_nr].size = size; + memmap_map[memmap_nr].type = type; + memmap_nr++; + + process_memmap(); + + return 0; +} + +static struct resource* __init +insert_ram_resource(u64 start, u64 end, bool reserved) +{ + struct resource *res = + kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return NULL; + if (reserved) { + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + } else { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + } + res->start = start; + res->end = end; + if (insert_resource(&iomem_resource, res)) { + kfree(res); + return NULL; + } + return res; +} + +static int __init request_standard_resources(void) +{ + struct memblock_region *mblk; + + extern char _text[], _etext[]; + extern char _sdata[], _edata[]; + extern char __bss_start[], __bss_stop[]; + + for_each_mem_region(mblk) { + if (!memblock_is_nomap(mblk)) + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 0); + else + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 1); + } + + code_resource.start = __pa_symbol(_text); + code_resource.end = __pa_symbol(_etext)-1; + data_resource.start = __pa_symbol(_sdata); + data_resource.end = __pa_symbol(_edata)-1; + bss_resource.start = __pa_symbol(__bss_start); + bss_resource.end = __pa_symbol(__bss_stop)-1; + + insert_resource(&iomem_resource, &code_resource); + insert_resource(&iomem_resource, &data_resource); + insert_resource(&iomem_resource, &bss_resource); + + return 0; +} +subsys_initcall(request_standard_resources); + +#ifdef CONFIG_NUMA +extern void cpu_set_node(void); +#endif + +static void __init show_socket_mem_layout(void) +{ + int i; + phys_addr_t base, size, end; + + base = 0; + + pr_info("Socket memory layout:\n"); + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + size = socket_desc[i].socket_mem; + end = base + size - 1; + pr_info("Socket %d: [mem %#018llx-%#018llx], size %llu\n", + i, base, end, size); + base = end + 1; + } + } + pr_info("Reserved memory size for Socket 0: %#lx\n", NODE0_START); +} + +int page_is_ram(unsigned long pfn) +{ + pfn <<= PAGE_SHIFT; + + return pfn >= mem_desc.base && pfn < (mem_desc.base + mem_desc.size); +} + +static int __init topology_init(void) +{ + int i, ret; + + for_each_possible_cpu(i) { + struct cpu *cpu = &per_cpu(cpu_devices, i); + +#ifdef CONFIG_HOTPLUG_CPU + if (i != 0) + cpu->hotpluggable = 1; +#endif + ret = register_cpu(cpu, i); + if (unlikely(ret)) + pr_warn("Warning: %s: register_cpu %d failed (%d)\n", + __func__, i, ret); + } + + return 0; +} +subsys_initcall(topology_init); + +static void __init setup_machine_fdt(void) +{ +#ifdef CONFIG_USE_OF + void *dt_virt; + const char *name; + + /* Give a chance to select kernel builtin DTB firstly */ + if (IS_ENABLED(CONFIG_BUILTIN_DTB)) + dt_virt = (void *)__dtb_start; + else { + dt_virt = (void *)sunway_boot_params->dtb_start; + if (virt_to_phys(dt_virt) < virt_to_phys(__bss_stop)) { + pr_emerg("BUG: DTB has been corrupted by kernel image!\n"); + while (true) + cpu_relax(); + } + } + + if (!phys_addr_valid(__boot_pa(dt_virt)) || + !early_init_dt_scan(dt_virt)) { + pr_crit("\n" + "Error: invalid device tree blob at virtual address %px\n" + "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n" + "\nPlease check your bootloader.", + 
dt_virt);
+
+ while (true)
+ cpu_relax();
+ }
+
+ name = of_flat_dt_get_machine_name();
+ if (!name)
+ return;
+
+ pr_info("Machine model: %s\n", name);
+#else
+ pr_info("Device tree support is disabled in this kernel.\n");
+ return;
+#endif
+}
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+ sunway_boot_params->dtb_start = (__u64)initial_boot_params;
+}
+
+static void __init setup_cpu_info(void)
+{
+ int i;
+ struct cache_desc *c;
+ unsigned long val;
+
+ val = cpuid(GET_TABLE_ENTRY, 0);
+ cpu_desc.model = CPUID_MODEL(val);
+ cpu_desc.family = CPUID_FAMILY(val);
+ cpu_desc.chip_var = CPUID_CHIP_VAR(val);
+ cpu_desc.arch_var = CPUID_ARCH_VAR(val);
+ cpu_desc.arch_rev = CPUID_ARCH_REV(val);
+ cpu_desc.pa_bits = CPUID_PA_BITS(val);
+ cpu_desc.va_bits = CPUID_VA_BITS(val);
+
+ for (i = 0; i < VENDOR_ID_MAX; i++) {
+ val = cpuid(GET_VENDOR_ID, i);
+ memcpy(cpu_desc.vendor_id + (i * 8), &val, 8);
+ }
+
+ for (i = 0; i < MODEL_MAX; i++) {
+ val = cpuid(GET_MODEL, i);
+ memcpy(cpu_desc.model_id + (i * 8), &val, 8);
+ }
+
+ cpu_desc.frequency = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ c = &(cpu_data[i].icache);
+ val = cpuid(GET_CACHE_INFO, L1_ICACHE);
+ c->size = CACHE_SIZE(val);
+ c->linesz = 1 << (CACHE_LINE_BITS(val));
+ c->sets = 1 << (CACHE_INDEX_BITS(val));
+ c->ways = c->size / c->sets / c->linesz;
+
+ c = &(cpu_data[i].dcache);
+ val = cpuid(GET_CACHE_INFO, L1_DCACHE);
+ c->size = CACHE_SIZE(val);
+ c->linesz = 1 << (CACHE_LINE_BITS(val));
+ c->sets = 1 << (CACHE_INDEX_BITS(val));
+ c->ways = c->size / c->sets / c->linesz;
+
+ c = &(cpu_data[i].scache);
+ val = cpuid(GET_CACHE_INFO, L2_CACHE);
+ c->size = CACHE_SIZE(val);
+ c->linesz = 1 << (CACHE_LINE_BITS(val));
+ c->sets = 1 << (CACHE_INDEX_BITS(val));
+ c->ways = c->size / c->sets / c->linesz;
+
+ c = &(cpu_data[i].tcache);
+ val = cpuid(GET_CACHE_INFO, L3_CACHE);
+ c->size = CACHE_SIZE(val);
+ c->linesz = 1 << (CACHE_LINE_BITS(val));
+ c->sets = 1 << (CACHE_INDEX_BITS(val));
+ c->ways = c->size / c->sets / c->linesz;
+ }
+}
+
+static void __init setup_run_mode(void)
+{
+ if (*(unsigned long *)MMSIZE) {
+ static_branch_disable(&run_mode_host_key);
+ if (*(unsigned long *)MMSIZE & EMUL_FLAG) {
+ pr_info("run mode: emul\n");
+ static_branch_disable(&run_mode_guest_key);
+ static_branch_enable(&run_mode_emul_key);
+
+ } else {
+ pr_info("run mode: guest\n");
+ static_branch_enable(&run_mode_guest_key);
+ static_branch_disable(&run_mode_emul_key);
+ }
+ } else {
+ pr_info("run mode: host\n");
+ static_branch_enable(&run_mode_host_key);
+ static_branch_disable(&run_mode_guest_key);
+ static_branch_disable(&run_mode_emul_key);
+ }
+}
+
+static void __init setup_socket_info(void)
+{
+ int i;
+ int numsockets = sw64_chip->get_cpu_num();
+
+ memset(socket_desc, 0, MAX_NUMSOCKETS * sizeof(struct socket_desc_t));
+
+ for (i = 0; i < numsockets; i++) {
+ socket_desc[i].is_online = 1;
+ if (sw64_chip_init->early_init.get_node_mem)
+ socket_desc[i].socket_mem = sw64_chip_init->early_init.get_node_mem(i);
+ }
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init reserve_mem_for_initrd(void)
+{
+ int ret;
+
+ initrd_start = sunway_boot_params->initrd_start;
+ if (initrd_start) {
+ initrd_start = __pa(initrd_start) + PAGE_OFFSET;
+ initrd_end = initrd_start + sunway_boot_params->initrd_size;
+ pr_info("Initial ramdisk at: 0x%px (%llu bytes)\n",
+ (void *)initrd_start, sunway_boot_params->initrd_size);
+
+ ret = add_memmap_region(__pa(initrd_start), initrd_end - initrd_start, memmap_initrd);
+
if (ret) + pr_err("Add initrd area [mem %#018lx-%#018lx] to memmap region failed.\n", + __pa(initrd_start), __pa(initrd_end - 1)); + } +} +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init early_kvm_reserved_mem(char *p) +{ + if (!p) { + pr_err("Config string not provided\n"); + return -EINVAL; + } + + kvm_mem_size = memparse(p, &p); + if (*p != '@') + return -EINVAL; + kvm_mem_base = memparse(p + 1, &p); + return 0; +} +early_param("kvm_mem", early_kvm_reserved_mem); + +void __init sw64_kvm_reserve(void) +{ + kvm_cma_declare_contiguous(kvm_mem_base, kvm_mem_size, 0, + PAGE_SIZE, 0, "sw64_kvm_cma", &sw64_kvm_cma); +} +#endif +#endif + +void __init +setup_arch(char **cmdline_p) +{ + /** + * Work around the unaligned access exception to parse ACPI + * tables in the following function acpi_boot_table_init(). + */ + trap_init(); + + jump_label_init(); + setup_cpu_info(); + setup_run_mode(); + setup_chip_ops(); + setup_socket_info(); + show_socket_mem_layout(); + sw64_chip_init->early_init.setup_core_map(&core_start); + if (is_guest_or_emul()) + get_vt_smp_info(); + + setup_sched_clock(); + + setup_machine_fdt(); + + /* Register a call for panic conditions. */ + atomic_notifier_chain_register(&panic_notifier_list, + &sw64_panic_block); + + callback_init(); + + /* command line */ + if (!sunway_boot_params->cmdline) + sunway_boot_params->cmdline = (unsigned long)COMMAND_LINE; + + strscpy(boot_command_line, (char *)sunway_boot_params->cmdline, COMMAND_LINE_SIZE); + +#if IS_ENABLED(CONFIG_CMDLINE_BOOL) +#if IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) + strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + strscpy((char *)sunway_boot_params->cmdline, boot_command_line, COMMAND_LINE_SIZE); +#else + if (builtin_cmdline[0]) { + /* append builtin to boot loader cmdline */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + } +#endif /* CMDLINE_EXTEND */ +#endif + + strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = command_line; + + /* + * Process command-line arguments. + */ + parse_early_param(); + + /* Find our memory. */ + mem_detect(); + +#ifdef CONFIG_PCI + reserve_mem_for_pci(); +#endif + +#ifdef CONFIG_BLK_DEV_INITRD + reserve_mem_for_initrd(); +#endif + + sw64_memblock_init(); + + reserve_crashkernel(); + + /* Reserve large chunks of memory for use by CMA for KVM. */ +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + sw64_kvm_reserve(); +#endif +#endif + + efi_init(); + + /* Try to upgrade ACPI tables via initrd */ + acpi_table_upgrade(); + + /* Parse the ACPI tables for possible boot-time configuration */ + acpi_boot_table_init(); + +#ifdef CONFIG_SMP + setup_smp(); +#else + store_cpu_data(0); +#endif + + sw64_numa_init(); + + memblock_dump_all(); + + sparse_init(); + + zone_sizes_init(); + + paging_init(); + + kexec_control_page_init(); + + /* + * Initialize the machine. Usually has to do with setting up + * DMA windows and the like. + */ + sw64_init_arch(); + + /* Reserve standard resources. */ + reserve_std_resources(); + + /* + * Give us a default console. TGA users will see nothing until + * chr_dev_init is called, rather late in the boot sequence. + */ + +#ifdef CONFIG_VT +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif + + /* Default root filesystem to sda2. 
*/ + ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2); + + if (acpi_disabled) { +#ifdef CONFIG_NUMA + cpu_set_node(); +#endif + device_tree_init(); + } +} + + +static int +show_cpuinfo(struct seq_file *f, void *slot) +{ + int i; + unsigned long cpu_freq; + + cpu_freq = cpuid(GET_CPU_FREQ, 0); + + for_each_online_cpu(i) { + /* + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with + * "processor". Give glibc what it expects. + */ + seq_printf(f, "processor\t: %u\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %u\n" + "model name\t: %s CPU @ %lu.%lu%luGHz\n" + "cpu variation\t: %u\n" + "cpu revision\t: %u\n", + i, cpu_desc.vendor_id, cpu_desc.family, + cpu_desc.model, cpu_desc.model_id, + cpu_freq / 1000, (cpu_freq % 1000) / 100, + (cpu_freq % 100) / 10, + cpu_desc.arch_var, cpu_desc.arch_rev); + seq_printf(f, "cpu MHz\t\t: %lu.00\n" + "cache size\t: %u KB\n" + "physical id\t: %d\n" + "bogomips\t: %lu.%02lu\n", + get_cpu_freq() / 1000 / 1000, cpu_data[i].tcache.size >> 10, + cpu_topology[i].package_id, + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); + + seq_printf(f, "flags\t\t: fpu simd vpn upn cpuid\n"); + seq_printf(f, "page size\t: %d\n", 8192); + seq_printf(f, "cache_alignment\t: %d\n", cpu_data[i].tcache.linesz); + seq_printf(f, "address sizes\t: %u bits physical, %u bits virtual\n\n", + cpu_desc.pa_bits, cpu_desc.va_bits); + } + return 0; +} + +/* + * We show only CPU #0 info. + */ +static void * +c_start(struct seq_file *f, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void * +c_next(struct seq_file *f, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static void +c_stop(struct seq_file *f, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + + +static int +sw64_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + return NOTIFY_DONE; +} + +static __init int add_pcspkr(void) +{ + struct platform_device *pd; + int ret; + + pd = platform_device_alloc("pcspkr", -1); + if (!pd) + return -ENOMEM; + + ret = platform_device_add(pd); + if (ret) + platform_device_put(pd); + + return ret; +} +device_initcall(add_pcspkr); + +#ifdef CONFIG_DEBUG_FS +struct dentry *sw64_debugfs_dir; +EXPORT_SYMBOL(sw64_debugfs_dir); + +static int __init debugfs_sw64(void) +{ + struct dentry *d; + + d = debugfs_create_dir("sw64", NULL); + if (!d) + return -ENOMEM; + sw64_debugfs_dir = d; + return 0; +} +arch_initcall(debugfs_sw64); +#endif + +#ifdef CONFIG_OF +static int __init sw64_of_init(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + return 0; +} +core_initcall(sw64_of_init); +#endif + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init sw64_kvm_pool_init(void) +{ + int status = 0; + unsigned long kvm_pool_virt; + struct page *base_page, *end_page, *p; + + if (!sw64_kvm_cma) + goto out; + + kvm_pool_virt = (unsigned long)kvm_mem_base; + + sw64_kvm_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!sw64_kvm_pool) + goto out; + + status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, kvm_mem_base, + kvm_mem_size, -1); + if (status < 0) { + pr_err("failed to add memory chunks to sw64 kvm pool\n"); + gen_pool_destroy(sw64_kvm_pool); + sw64_kvm_pool = NULL; + goto out; + } + gen_pool_set_algo(sw64_kvm_pool, gen_pool_best_fit, NULL); + + base_page = pfn_to_page(kvm_mem_base >> PAGE_SHIFT); + end_page = 
pfn_to_page((kvm_mem_base + kvm_mem_size - 1) >> PAGE_SHIFT); + + p = base_page; + while (p <= end_page && page_ref_count(p) == 0) { + set_page_count(p, 1); + page_mapcount_reset(p); + SetPageReserved(p); + p++; + } + + return status; + +out: + return -ENOMEM; +} +core_initcall_sync(sw64_kvm_pool_init); +#endif +#endif -- Gitee From b847c77ed40a2a1410f67b7271a2c4addd1578d1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:38 +0800 Subject: [PATCH 291/953] anolis: sw64: add topology setup routine ANBZ: #4688 Add topology setup for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/topology.h | 71 ++++++++++ arch/sw_64/kernel/topology.c | 212 ++++++++++++++++++++++++++++++ 2 files changed, 283 insertions(+) create mode 100644 arch/sw_64/include/asm/topology.h create mode 100644 arch/sw_64/kernel/topology.c diff --git a/arch/sw_64/include/asm/topology.h b/arch/sw_64/include/asm/topology.h new file mode 100644 index 000000000000..25ec7b9e9431 --- /dev/null +++ b/arch/sw_64/include/asm/topology.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TOPOLOGY_H +#define _ASM_SW64_TOPOLOGY_H + +#include +#include +#include +#include +#include +#include + +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) + +void init_cpu_topology(void); +void store_cpu_topology(int cpuid); +void remove_cpu_topology(int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); + +static inline int rcid_to_thread_id(int rcid) +{ + return (rcid & THREAD_ID_MASK) >> THREAD_ID_SHIFT; +} + +static inline int rcid_to_core_id(int rcid) +{ + return (rcid & CORE_ID_MASK) >> CORE_ID_SHIFT; +} + +static inline int rcid_to_domain_id(int rcid) +{ + return (rcid & DOMAIN_ID_MASK) >> DOMAIN_ID_SHIFT; +} + +#ifdef CONFIG_NUMA + +#ifndef CONFIG_DEBUG_PER_CPU_MAPS +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ +#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? 
\ + cpu_all_mask : \ + node_to_cpumask_map[node]) +#else +extern const struct cpumask *cpumask_of_node(int node); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + +extern void numa_add_cpu(unsigned int cpu); +extern void numa_remove_cpu(unsigned int cpu); +extern void numa_store_cpu_info(unsigned int cpu); +extern int __node_distance(int from, int to); +#define node_distance(a, b) __node_distance(a, b) +#define parent_node(node) (node) +#define cpumask_of_pcibus(bus) (cpu_online_mask) +#else /* !CONFIG_NUMA */ +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } +static inline void numa_store_cpu_info(unsigned int cpu) { } +#endif /* CONFIG_NUMA */ + +extern void get_vt_smp_info(void); + +#include + +static inline void arch_fix_phys_package_id(int num, u32 slot) { } + +#endif /* _ASM_SW64_TOPOLOGY_H */ diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c new file mode 100644 index 000000000000..8371c013446f --- /dev/null +++ b/arch/sw_64/kernel/topology.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include + +static int __init parse_dt_topology(void) +{ + return 0; +} + +/* + * cpu topology table + */ +struct cpu_topology cpu_topology[NR_CPUS]; +EXPORT_SYMBOL_GPL(cpu_topology); + +int topo_nr_threads, topo_nr_cores, topo_nr_maxcpus; + +static int topo_nr_cpus; +static int topo_threads[NR_CPUS]; +static int topo_cores[NR_CPUS]; +static int topo_packages[NR_CPUS]; + +void __init get_vt_smp_info(void) +{ + unsigned long smp_info; + + smp_info = sw64_io_read(0, SMP_INFO); + if (smp_info == -1UL) + smp_info = 0; + topo_nr_threads = (smp_info >> VT_THREADS_SHIFT) & VT_THREADS_MASK; + topo_nr_cores = (smp_info >> VT_CORES_SHIFT) & VT_CORES_MASK; + topo_nr_maxcpus = (smp_info >> VT_MAX_CPUS_SHIFT) & VT_MAX_CPUS_MASK; +} + +static void __init init_topo_threads(void) +{ + int i, j; + + if (topo_nr_threads == 0) + topo_nr_threads = 1; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_threads) { + for (j = 0; j < topo_nr_threads; j++) + topo_threads[i+j] = j; + } +} + +static void __init init_topo_cores(void) +{ + int i, j; + + if (topo_nr_cores == 0) + topo_nr_cores = topo_nr_cpus; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_cores) { + for (j = 0; j < topo_nr_cores; j++) + topo_cores[i+j] = j; + } +} + +static void __init init_topo_packages(void) +{ + int i, j, packet_index = 0; + int topo_nr_packages = topo_nr_cpus / (topo_nr_cores * topo_nr_threads); + int div_package = topo_nr_cpus / topo_nr_packages; + + for (i = 0; i < topo_nr_cpus; i += div_package) { + for (j = 0 ; j < div_package; j++) + topo_packages[i+j] = packet_index; + packet_index++; + } + if (packet_index > topo_nr_packages) + pr_err("topo_cores init failed.\n"); +} + +static void __init init_topology_array(void) +{ + topo_nr_cpus = num_present_cpus(); + if (topo_nr_maxcpus > topo_nr_cpus) + topo_nr_cpus = topo_nr_maxcpus; + init_topo_threads(); + init_topo_cores(); + init_topo_packages(); +} + +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return topology_llc_cpumask(cpu); +} + +static void update_siblings_masks(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + int sib; + + /* update core and thread sibling masks */ + for_each_online_cpu(sib) { + struct cpu_topology *sib_topo = &cpu_topology[sib]; + + if (cpu_topo->package_id == sib_topo->package_id) { + cpumask_set_cpu(cpu, &sib_topo->core_sibling); + cpumask_set_cpu(sib, &cpu_topo->core_sibling); + cpumask_set_cpu(cpu, 
&sib_topo->llc_sibling); + cpumask_set_cpu(sib, &cpu_topo->llc_sibling); + + if (cpu_topo->core_id == sib_topo->core_id) { + cpumask_set_cpu(cpu, &sib_topo->thread_sibling); + cpumask_set_cpu(sib, &cpu_topo->thread_sibling); + } + } + } +} + +void store_cpu_topology(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + if (cpu_topo->package_id != -1) + goto topology_populated; + + if (is_guest_or_emul()) { + cpu_topo->package_id = topo_packages[cpu]; + cpu_topo->core_id = topo_cores[cpu]; + cpu_topo->thread_id = topo_threads[cpu]; + goto topology_populated; + } + + cpu_topo->package_id = rcid_to_domain_id(cpu_to_rcid(cpu)); + cpu_topo->core_id = rcid_to_core_id(cpu_to_rcid(cpu)); + cpu_topo->thread_id = rcid_to_thread_id(cpu_to_rcid(cpu)); + + pr_debug("CPU%u: socket %d core %d thread %d\n", + cpu, cpu_topo->package_id, cpu_topo->core_id, + cpu_topo->thread_id); + +topology_populated: + update_siblings_masks(cpu); +} + +static void clear_cpu_topology(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpumask_clear(&cpu_topo->llc_sibling); + cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); + + cpumask_clear(&cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpu_topo->core_sibling); + cpumask_clear(&cpu_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); +} + +static void __init reset_cpu_topology(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpu_topo->thread_id = -1; + cpu_topo->core_id = 0; + cpu_topo->package_id = -1; + + clear_cpu_topology(cpu); + } +} + +void remove_cpu_topology(int cpu) +{ + int sibling; + + for_each_cpu(sibling, topology_core_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); + for_each_cpu(sibling, topology_sibling_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); + for_each_cpu(sibling, topology_llc_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); + + clear_cpu_topology(cpu); +} + +#ifdef CONFIG_ACPI +static int __init parse_acpi_topology(void) +{ + return 0; +} +#else +static inline int __init parse_acpi_topology(void) +{ + return -EINVAL; +} +#endif + +void __init init_cpu_topology(void) +{ + reset_cpu_topology(); + + if (is_guest_or_emul()) + init_topology_array(); + /* + * Discard anything that was parsed if we hit an error so we + * don't use partial information. + */ + if (!acpi_disabled && parse_acpi_topology()) + reset_cpu_topology(); + else if (of_have_populated_dt() && parse_dt_topology()) + reset_cpu_topology(); +} -- Gitee From 1471e3c8d24c43bbc09981ef49385462794b11e0 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:38 +0800 Subject: [PATCH 292/953] anolis: sw64: add timer support ANBZ: #4688 Add timer for basic SW64 support. 
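
The new headers hook the SW64 cycle counter into the generic time
code: asm/tc.h reads the counter with the "rtc" instruction via
rdtc(), asm/timex.h routes get_cycles() to rdtc(), and asm/timer.h
declares the clocksource and timer setup entry points used from
time_init(). A minimal sketch of reader-side use (sample_delta() and
its pr_info() are illustrative only, not part of this patch):

	#include <linux/printk.h>
	#include <asm/timex.h>

	static void sample_delta(void)
	{
		cycles_t start, end;

		start = get_cycles();	/* rdtc() underneath */
		/* ... code under measurement ... */
		end = get_cycles();
		pr_info("elapsed: %lu cycles\n", end - start);
	}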
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/tc.h | 16 ++++++++++++++++ arch/sw_64/include/asm/timer.h | 11 +++++++++++ arch/sw_64/include/asm/timex.h | 23 +++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 arch/sw_64/include/asm/tc.h create mode 100644 arch/sw_64/include/asm/timer.h create mode 100644 arch/sw_64/include/asm/timex.h diff --git a/arch/sw_64/include/asm/tc.h b/arch/sw_64/include/asm/tc.h new file mode 100644 index 000000000000..aa39c3528e3f --- /dev/null +++ b/arch/sw_64/include/asm/tc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TC_H +#define _ASM_SW64_TC_H + +static inline unsigned long rdtc(void) +{ + unsigned long ret; + + __asm__ __volatile__ ("rtc %0" : "=r"(ret)); + return ret; +} + +extern void tc_sync_clear(void); +extern void tc_sync_ready(void *ignored); +extern void tc_sync_set(void); +#endif /* _ASM_SW64_TC_H */ diff --git a/arch/sw_64/include/asm/timer.h b/arch/sw_64/include/asm/timer.h new file mode 100644 index 000000000000..9ea9e0a538d0 --- /dev/null +++ b/arch/sw_64/include/asm/timer.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMER_H +#define _ASM_SW64_TIMER_H + +extern void sw64_setup_clocksource(void); + +extern void sw64_setup_timer(void); + +extern void __init setup_sched_clock(void); + +#endif /* _ASM_SW64_TIMER_H */ diff --git a/arch/sw_64/include/asm/timex.h b/arch/sw_64/include/asm/timex.h new file mode 100644 index 000000000000..a5760bf8abd4 --- /dev/null +++ b/arch/sw_64/include/asm/timex.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMEX_H +#define _ASM_SW64_TIMEX_H + +#include + +/* With only one or two oddballs, we use the RTC as the ticker, selecting + * the 32.768kHz reference clock, which nicely divides down to our HZ. + */ +#define CLOCK_TICK_RATE 32768 + +/* + * Standard way to access the cycle counter. + */ + +typedef unsigned long cycles_t; + +static inline cycles_t get_cycles(void) +{ + return rdtc(); +} + +#endif /* _ASM_SW64_TIMEX_H */ -- Gitee From 899621927ac5a2b095d9e3b5dd83bd8eeabf8d28 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:16 +0800 Subject: [PATCH 293/953] anolis: sw64: add irq handling support ANBZ: #4688 Add interrupt handling mechanism for basic SW64 support. 
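
At the lowest level, interrupt masking is expressed through the
processor status IPL field (see asm/irqflags.h in this patch):
arch_local_irq_save() swaps the IPL to IPL_MAX with swpipl() and
returns the previous value, restore writes it back with setipl(),
and arch_irqs_disabled_flags() reduces to "flags > IPL_MIN". A
minimal sketch of the resulting semantics (illustrative only):

	unsigned long flags;

	flags = arch_local_irq_save();	/* swpipl(IPL_MAX), old IPL returned */
	/* ... critical section runs with interrupts masked ... */
	arch_local_irq_restore(flags);	/* setipl() back to the saved level */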
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/hardirq.h | 24 +++++++ arch/sw_64/include/asm/hw_irq.h | 16 +++++ arch/sw_64/include/asm/irq.h | 31 +++++++++ arch/sw_64/include/asm/irq_impl.h | 48 +++++++++++++ arch/sw_64/include/asm/irqflags.h | 55 +++++++++++++++ arch/sw_64/kernel/irq.c | 108 ++++++++++++++++++++++++++++++ arch/sw_64/kernel/irq_sw64.c | 84 +++++++++++++++++++++++ arch/sw_64/kernel/time.c | 63 +++++++++++++++++ 8 files changed, 429 insertions(+) create mode 100644 arch/sw_64/include/asm/hardirq.h create mode 100644 arch/sw_64/include/asm/hw_irq.h create mode 100644 arch/sw_64/include/asm/irq.h create mode 100644 arch/sw_64/include/asm/irq_impl.h create mode 100644 arch/sw_64/include/asm/irqflags.h create mode 100644 arch/sw_64/kernel/irq.c create mode 100644 arch/sw_64/kernel/irq_sw64.c create mode 100644 arch/sw_64/kernel/time.c diff --git a/arch/sw_64/include/asm/hardirq.h b/arch/sw_64/include/asm/hardirq.h new file mode 100644 index 000000000000..03368c3659dd --- /dev/null +++ b/arch/sw_64/include/asm/hardirq.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HARDIRQ_H +#define _ASM_SW64_HARDIRQ_H + +void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#include + +#define __ARCH_IRQ_STAT +typedef struct { + u16 __softirq_pending; + unsigned int timer_irqs_event; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +#define inc_irq_stat(member) this_cpu_inc(irq_stat.member) +#define arch_irq_stat_cpu arch_irq_stat_cpu +#define arch_irq_stat arch_irq_stat +extern u64 arch_irq_stat_cpu(unsigned int cpu); +extern u64 arch_irq_stat(void); + +#endif /* _ASM_SW64_HARDIRQ_H */ diff --git a/arch/sw_64/include/asm/hw_irq.h b/arch/sw_64/include/asm/hw_irq.h new file mode 100644 index 000000000000..3cfc725f7517 --- /dev/null +++ b/arch/sw_64/include/asm/hw_irq.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_IRQ_H +#define _ASM_SW64_HW_IRQ_H + +#include + +extern volatile unsigned long irq_err_count; +DECLARE_PER_CPU(unsigned long, irq_pmi_count); + +#define ACTUAL_NR_IRQS NR_IRQS + +#ifdef CONFIG_PCI_MSI +typedef unsigned int vector_irq_t[PERCPU_MSI_IRQS]; +DECLARE_PER_CPU(vector_irq_t, vector_irq); +#endif +#endif /* _ASM_SW64_HW_IRQ_H */ diff --git a/arch/sw_64/include/asm/irq.h b/arch/sw_64/include/asm/irq.h new file mode 100644 index 000000000000..b3ac4105c29e --- /dev/null +++ b/arch/sw_64/include/asm/irq.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQ_H +#define _ASM_SW64_IRQ_H + +/* + * arch/sw/include/asm/irq.h + * + * (C) 2012 OSKernel JN + */ + +#include + +#define NR_VECTORS_PERCPU 256 +#define NR_IRQS_LEGACY 16 +#define NR_IRQS ((NR_VECTORS_PERCPU + NR_IRQS_LEGACY) * NR_CPUS) + +static inline int irq_canonicalize(int irq) +{ + /* + * XXX is this true for all Sw? The old serial driver + * did it this way for years without any complaints, so.... + */ + return ((irq == 2) ? 
9 : irq); +} + +struct pt_regs; +extern void (*perf_irq)(unsigned long vector, struct pt_regs *regs); +extern void fixup_irqs(void); +extern void sw64_timer_interrupt(void); + +#endif /* _ASM_SW64_IRQ_H */ diff --git a/arch/sw_64/include/asm/irq_impl.h b/arch/sw_64/include/asm/irq_impl.h new file mode 100644 index 000000000000..797af433a126 --- /dev/null +++ b/arch/sw_64/include/asm/irq_impl.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the IRQ handling routines in irq.c. + */ + +#ifndef _ASM_SW64_IRQ_IMPL_H +#define _ASM_SW64_IRQ_IMPL_H + +#include +#include +#include + +#include + +#define SW64_PCIE0_INT_BASE 17 +#define SW64_PCIE0_MSI_BASE 21 + +#define SW64_PCIE1_INT_BASE 277 +#define SW64_PCIE1_MSI_BASE 281 + +#define RTC_IRQ 8 +#define SWI2C_IRQ 14 + +enum sw64_irq_type { + INT_IPI = 1, + INT_PC0 = 2, + INT_PC1 = 3, + INT_INTx = 5, + INT_MSI = 6, + INT_MT = 7, + INT_RTC = 9, + INT_FAULT = 10, + INT_VT_SERIAL = 12, + INT_VT_HOTPLUG = 13, + INT_DEV = 17, + INT_NMI = 18, + INT_LEGACY = 31, +}; + +extern struct irqaction timer_irqaction; +extern void init_rtc_irq(irq_handler_t handler); +extern void handle_irq(int irq); +extern void handle_ipi(struct pt_regs *regs); +extern void __init sw64_init_irq(void); +extern irqreturn_t timer_interrupt(int irq, void *dev); + +#endif /* _ASM_SW64_IRQ_IMPL_H */ diff --git a/arch/sw_64/include/asm/irqflags.h b/arch/sw_64/include/asm/irqflags.h new file mode 100644 index 000000000000..b4440f25a51d --- /dev/null +++ b/arch/sw_64/include/asm/irqflags.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQFLAGS_H +#define _ASM_SW64_IRQFLAGS_H + +#include + +#define IPL_MIN 0 +#define IPL_MAX 7 + +#define getipl() (rdps() & 7) +#define setipl(ipl) ((void) swpipl(ipl)) + +static inline unsigned long arch_local_save_flags(void) +{ + return rdps(); +} + +static inline void arch_local_irq_disable(void) +{ + setipl(IPL_MAX); + barrier(); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = swpipl(IPL_MAX); + + barrier(); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + barrier(); + setipl(IPL_MIN); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + barrier(); + setipl(flags); + barrier(); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags > IPL_MIN; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(getipl()); +} + +#endif /* _ASM_SW64_IRQFLAGS_H */ diff --git a/arch/sw_64/kernel/irq.c b/arch/sw_64/kernel/irq.c new file mode 100644 index 000000000000..126fe2f70495 --- /dev/null +++ b/arch/sw_64/kernel/irq.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/irq.c + * + * Copyright (C) 1995 Linus Torvalds + * + * This file contains the code used by various IRQ handling routines: + * asking for different IRQ's should be done through these routines + * instead of just grabbing them. Thus setups with different IRQ numbers + * shouldn't result in any weird surprises, and installing new handlers + * should be easier. 
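+ *
+ * handle_irq() below also sanity-checks incoming vectors: interrupts
+ * without a valid irq_desc, as well as the first MAX_ILLEGAL_IRQS
+ * out-of-range vectors, are reported with pr_crit() and accounted in
+ * irq_err_count instead of being dispatched.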
+ */ + +#include +#include +#include +#include + +volatile unsigned long irq_err_count; +DEFINE_PER_CPU(unsigned long, irq_pmi_count); +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); +EXPORT_PER_CPU_SYMBOL(irq_stat); + +void ack_bad_irq(unsigned int irq) +{ + irq_err_count++; + pr_crit("Unexpected IRQ trap at vector %u\n", irq); +} + +u64 arch_irq_stat_cpu(unsigned int cpu) +{ + u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; + + return sum; +} + +u64 arch_irq_stat(void) +{ + return 0; +} + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + + seq_printf(p, "%*s: ", prec, "TIMER"); + for_each_online_cpu(j) + seq_printf(p, "%10u", per_cpu(irq_stat, j).timer_irqs_event); + seq_puts(p, "\n"); + +#ifdef CONFIG_SMP + seq_printf(p, "%*s: ", prec, "IPI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", cpu_data[j].ipi_count); + seq_puts(p, "\n"); +#endif + seq_printf(p, "%*s: ", prec, "PMI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); + seq_puts(p, "\n"); + + seq_printf(p, "ERR: %10lu\n", irq_err_count); + return 0; +} + +/* + * handle_irq handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + */ + +#define MAX_ILLEGAL_IRQS 16 + +void +handle_irq(int irq) +{ + /* + * We ack quickly, we don't want the irq controller + * thinking we're snobs just because some other CPU has + * disabled global interrupts (we have already done the + * INT_ACK cycles, it's too late to try to pretend to the + * controller that we aren't taking the interrupt). + * + * 0 return value means that this irq is already being + * handled by some other CPU. (or is disabled) + */ + static unsigned int illegal_count; + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc || ((unsigned int) irq > ACTUAL_NR_IRQS && + illegal_count < MAX_ILLEGAL_IRQS)) { + irq_err_count++; + illegal_count++; + pr_crit("device_interrupt: invalid interrupt %d\n", irq); + return; + } + + irq_enter(); + generic_handle_irq_desc(desc); + irq_exit(); +} + +#ifdef CONFIG_HOTPLUG_CPU +void fixup_irqs(void) +{ + irq_migrate_all_off_this_cpu(); +} +#endif diff --git a/arch/sw_64/kernel/irq_sw64.c b/arch/sw_64/kernel/irq_sw64.c new file mode 100644 index 000000000000..989d55ee1b1b --- /dev/null +++ b/arch/sw_64/kernel/irq_sw64.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 specific irq code. + */ + +#include +#include + +#include +#include + +void __init +init_IRQ(void) +{ + /* + * Just in case the platform init_irq() causes interrupts/mchecks + * (as is the case with RAWHIDE, at least). 
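+ *
+ * When running on bare metal (is_in_host()), the four PCIe MSI
+ * interrupt-enable CSRs are unmasked here, before the interrupt
+ * entry point is registered and the irqchips are initialized.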
+ */ + if (is_in_host()) { + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN); + } + + wrent(entInt, 0); + + sw64_init_irq(); + irqchip_init(); +} + +DEFINE_SPINLOCK(irq_lock); + +static void +__enable_irq(struct irq_data *d) +{ +} + +static void +__disable_irq(struct irq_data *d) +{ +} + +static unsigned int +__startup_irq(struct irq_data *d) +{ + __enable_irq(d); + return 0; +} + +static void +__mask_and_ack_irq(struct irq_data *d) +{ + spin_lock(&irq_lock); + __disable_irq(d); + spin_unlock(&irq_lock); +} + +struct irq_chip sw64_irq_chip = { + .name = "SW64_NODE", + .irq_startup = __startup_irq, + .irq_unmask = __enable_irq, + .irq_mask = __disable_irq, + .irq_mask_ack = __mask_and_ack_irq, +}; + +void __weak arch_init_msi_domain(struct irq_domain *parent) {} + +int __init arch_early_irq_init(void) +{ + int i; + + for (i = 0; i < NR_IRQS; ++i) { + irq_set_chip_and_handler(i, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } + arch_init_msi_domain(NULL); + return 0; +} + +int __init arch_probe_nr_irqs(void) +{ + return NR_IRQS_LEGACY; +} diff --git a/arch/sw_64/kernel/time.c b/arch/sw_64/kernel/time.c new file mode 100644 index 000000000000..533a6a14c200 --- /dev/null +++ b/arch/sw_64/kernel/time.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include + +#include "proto.h" + +DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); + +#define TICK_SIZE (tick_nsec / 1000) + +/* + * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting + * by 48 gives us 16 bits for HZ while keeping the accuracy good even + * for large CPU clock rates. + */ +#define FIX_SHIFT 48 + +unsigned long est_cycle_freq; + +#ifdef CONFIG_IRQ_WORK + +DEFINE_PER_CPU(u8, irq_work_pending); + +#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) +#define test_irq_work_pending() __this_cpu_read(irq_work_pending) +#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) + +void arch_irq_work_raise(void) +{ + set_irq_work_pending_flag(); +} + +#else /* CONFIG_IRQ_WORK */ + +#define test_irq_work_pending() 0 +#define clear_irq_work_pending() + +#endif /* CONFIG_IRQ_WORK */ + +void __init +time_init(void) +{ + unsigned long cycle_freq; + + cycle_freq = get_cpu_freq(); + + pr_info("CPU Cycle frequency = %ld Hz\n", cycle_freq); + + /* Register clocksource */ + sw64_setup_clocksource(); + of_clk_init(NULL); + /* Startup the timer source. */ + sw64_setup_timer(); + /* Calibrate the delay loop directly */ + lpj_fine = cycle_freq / HZ; +} -- Gitee From 1a1be65e2989cc1384c3b3a29ea2ba8fa0409d09 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:07 +0800 Subject: [PATCH 294/953] anolis: sw64: add exception handling support ANBZ: #4688 Add exception handling mechanism for basic SW64 support. 
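
Faults raised with the user-level "gentrap" mechanism are mapped to
POSIX signals in do_entIF(); the cause codes live in the new
uapi/asm/gentrap.h. For example (taken from the switch statement in
traps.c below):

	GEN_INTDIV  -> SIGFPE  / FPE_INTDIV
	GEN_FLTOVF  -> SIGFPE  / FPE_FLTOVF
	GEN_ROPRAND -> SIGFPE  / FPE_FLTUNK
	other codes -> SIGTRAP / TRAP_UNK

Unaligned kernel accesses are fixed up in do_entUna(), while
user-mode unaligned traps go through do_entUnaUser(), which honors
the per-task UAC (unaligned access control) status bits.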
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/kdebug.h | 15 + arch/sw_64/include/uapi/asm/gentrap.h | 38 + arch/sw_64/include/uapi/asm/sysinfo.h | 20 + arch/sw_64/kernel/traps.c | 1542 +++++++++++++++++++++++++ arch/sw_64/kernel/unaligned.c | 80 ++ 5 files changed, 1695 insertions(+) create mode 100644 arch/sw_64/include/asm/kdebug.h create mode 100644 arch/sw_64/include/uapi/asm/gentrap.h create mode 100644 arch/sw_64/include/uapi/asm/sysinfo.h create mode 100644 arch/sw_64/kernel/traps.c create mode 100644 arch/sw_64/kernel/unaligned.c diff --git a/arch/sw_64/include/asm/kdebug.h b/arch/sw_64/include/asm/kdebug.h new file mode 100644 index 000000000000..73793057c3e8 --- /dev/null +++ b/arch/sw_64/include/asm/kdebug.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KDEBUG_H +#define _ASM_SW64_KDEBUG_H + +#include + +enum die_val { + DIE_OOPS = 1, + DIE_BREAK, + DIE_SSTEPBP, + DIE_UPROBE, + DIE_UPROBE_XOL, +}; + +#endif /* _ASM_SW64_KDEBUG_H */ diff --git a/arch/sw_64/include/uapi/asm/gentrap.h b/arch/sw_64/include/uapi/asm/gentrap.h new file mode 100644 index 000000000000..3786b8b52add --- /dev/null +++ b/arch/sw_64/include/uapi/asm/gentrap.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_GENTRAP_H +#define _UAPI_ASM_SW64_GENTRAP_H + +/* + * Definitions for gentrap causes. They are generated by user-level + * programs and therefore should be compatible with the corresponding + * legacy definitions. + */ +#define GEN_INTOVF -1 /* integer overflow */ +#define GEN_INTDIV -2 /* integer division by zero */ +#define GEN_FLTOVF -3 /* fp overflow */ +#define GEN_FLTDIV -4 /* fp division by zero */ +#define GEN_FLTUND -5 /* fp underflow */ +#define GEN_FLTINV -6 /* invalid fp operand */ +#define GEN_FLTINE -7 /* inexact fp operand */ +#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */ +#define GEN_DECDIV -9 /* decimal division by zero */ +#define GEN_DECINV -10 /* invalid decimal operand */ +#define GEN_ROPRAND -11 /* reserved operand */ +#define GEN_ASSERTERR -12 /* assertion error */ +#define GEN_NULPTRERR -13 /* null pointer error */ +#define GEN_STKOVF -14 /* stack overflow */ +#define GEN_STRLENERR -15 /* string length error */ +#define GEN_SUBSTRERR -16 /* substring error */ +#define GEN_RANGERR -17 /* range error */ +#define GEN_SUBRNG -18 +#define GEN_SUBRNG1 -19 +#define GEN_SUBRNG2 -20 +#define GEN_SUBRNG3 -21 /* these report range errors for */ +#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */ +#define GEN_SUBRNG5 -23 +#define GEN_SUBRNG6 -24 +#define GEN_SUBRNG7 -25 + +/* the remaining codes (-26..-1023) are reserved. 
*/ + +#endif /* _UAPI_ASM_SW64_GENTRAP_H */ diff --git a/arch/sw_64/include/uapi/asm/sysinfo.h b/arch/sw_64/include/uapi/asm/sysinfo.h new file mode 100644 index 000000000000..667405c3447c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sysinfo.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * include/asm/sysinfo.h + */ + +#ifndef _UAPI_ASM_SW64_SYSINFO_H +#define _UAPI_ASM_SW64_SYSINFO_H + +#define GSI_IEEE_FP_CONTROL 45 + +#define SSI_IEEE_FP_CONTROL 14 +#define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */ + +#define UAC_BITMASK 7 +#define UAC_NOPRINT 1 +#define UAC_NOFIX 2 +#define UAC_SIGBUS 4 +#define PR_NOFIX 4 /* do not fix up unaligned accesses */ + +#endif /* _UAPI_ASM_SW64_SYSINFO_H */ diff --git a/arch/sw_64/kernel/traps.c b/arch/sw_64/kernel/traps.c new file mode 100644 index 000000000000..a30e18ad1f00 --- /dev/null +++ b/arch/sw_64/kernel/traps.c @@ -0,0 +1,1542 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/traps.c + * + * (C) Copyright 1994 Linus Torvalds + */ + +/* + * This file initializes the trap entry points + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +enum SW64_IF_TYPES { + IF_BREAKPOINT = 0, + IF_RESERVED, + IF_GENTRAP, + IF_FEN, + IF_OPDEC, + IF_SIMDEMU, +}; + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_DEFAULT); + + printk(KERN_DEFAULT "pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", + regs->pc, regs->regs[26], regs->ps, print_tainted()); + printk(KERN_DEFAULT "pc is at %pSR\n", (void *)regs->pc); + printk(KERN_DEFAULT "ra is at %pSR\n", (void *)regs->regs[26]); + printk(KERN_DEFAULT "v0 = %016lx t0 = %016lx t1 = %016lx\n", + regs->regs[0], regs->regs[1], regs->regs[2]); + printk(KERN_DEFAULT "t2 = %016lx t3 = %016lx t4 = %016lx\n", + regs->regs[3], regs->regs[4], regs->regs[5]); + printk(KERN_DEFAULT "t5 = %016lx t6 = %016lx t7 = %016lx\n", + regs->regs[6], regs->regs[7], regs->regs[8]); + + printk(KERN_DEFAULT "s0 = %016lx s1 = %016lx s2 = %016lx\n", + regs->regs[9], regs->regs[10], regs->regs[11]); + printk(KERN_DEFAULT "s3 = %016lx s4 = %016lx s5 = %016lx\n", + regs->regs[12], regs->regs[13], regs->regs[14]); + printk(KERN_DEFAULT "s6 = %016lx\n", + regs->regs[15]); + + printk(KERN_DEFAULT "a0 = %016lx a1 = %016lx a2 = %016lx\n", + regs->regs[16], regs->regs[17], regs->regs[18]); + printk(KERN_DEFAULT "a3 = %016lx a4 = %016lx a5 = %016lx\n", + regs->regs[19], regs->regs[20], regs->regs[21]); + printk(KERN_DEFAULT "t8 = %016lx t9 = %016lx t10 = %016lx\n", + regs->regs[22], regs->regs[23], regs->regs[24]); + printk(KERN_DEFAULT "t11= %016lx pv = %016lx at = %016lx\n", + regs->regs[25], regs->regs[27], regs->regs[28]); + printk(KERN_DEFAULT "gp = %016lx sp = %016lx\n", regs->regs[29], regs->regs[30]); +} + +static void show_code(unsigned int *pc) +{ + long i; + unsigned int insn; + + printk(KERN_DEFAULT "Code:"); + for (i = -6; i < 2; i++) { + if (__get_user(insn, (unsigned int __user *)pc + i)) + break; + printk(KERN_DEFAULT "%c%08x%c", i ? ' ' : '<', insn, i ? 
' ' : '>'); + } + printk(KERN_DEFAULT "\n"); +} + +static DEFINE_SPINLOCK(die_lock); + +void die(char *str, struct pt_regs *regs, long err) +{ + static int die_counter; + unsigned long flags; + int ret; + + oops_enter(); + + spin_lock_irqsave(&die_lock, flags); + console_verbose(); + bust_spinlocks(1); + + pr_emerg("%s [#%d]\n", str, ++die_counter); + + ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV); + + print_modules(); + show_regs(regs); + show_code((unsigned int *)regs->pc); + show_stack(current, NULL, KERN_EMERG); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irqrestore(&die_lock, flags); + oops_exit(); + + if (kexec_should_crash(current)) + crash_kexec(regs); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + + if (ret != NOTIFY_STOP) + make_task_dead(SIGSEGV); +} + +#ifndef CONFIG_MATHEMU +static long dummy_emul(void) +{ + return 0; +} + +long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask) = (void *)dummy_emul; +EXPORT_SYMBOL_GPL(sw64_fp_emul_imprecise); + +long (*sw64_fp_emul)(unsigned long pc) = (void *)dummy_emul; +EXPORT_SYMBOL_GPL(sw64_fp_emul); +#else +long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask); +long sw64_fp_emul(unsigned long pc); +#endif + +asmlinkage void +do_entArith(unsigned long summary, unsigned long write_mask, + struct pt_regs *regs) +{ + long si_code = FPE_FLTINV; + + if (summary & 1) { + /* Software-completion summary bit is set, so try to + * emulate the instruction. If the processor supports + * precise exceptions, we don't have to search. + */ + si_code = sw64_fp_emul(regs->pc - 4); + if (si_code == 0) + return; + } + + if (!user_mode(regs)) + die("Arithmetic fault", regs, 0); + + /*summary<39> means integer divide by zero in C4.*/ + if ((summary >> 39) & 1) + si_code = FPE_INTDIV; + + force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc); +} + +void simd_emulate(unsigned int inst, unsigned long va) +{ + unsigned long *fp; + int instr_opc, reg; + + instr_opc = (inst >> 26) & 0x3f; + reg = (inst >> 21) & 0x1f; + fp = (unsigned long *) va; + + switch (instr_opc) { + case 0x0d: /* vldd */ + sw64_write_simd_fp_reg_d(reg, fp[0], fp[1], fp[2], fp[3]); + return; + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + return; + } +} + +/* + * BPT/GENTRAP/OPDEC make regs->pc = exc_pc + 4. debugger should + * do something necessary to handle it correctly. + */ +asmlinkage void +do_entIF(unsigned long inst_type, unsigned long va, struct pt_regs *regs) +{ + int signo, code; + unsigned int inst, type; + + type = inst_type & 0xffffffff; + inst = inst_type >> 32; + + if (type == IF_SIMDEMU) { + simd_emulate(inst, va); + return; + } + + if (!user_mode(regs) && type != IF_OPDEC) { + if (type == IF_BREAKPOINT) { + /* support kgdb */ + notify_die(0, "kgdb trap", regs, 0, 0, SIGTRAP); + return; + } + die((type == IF_RESERVED ? 
"Kernel Bug" : "Instruction fault"), + regs, type); + } + + switch (type) { + case IF_BREAKPOINT: /* gdb do pc-4 for sigtrap */ + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + return; + + case IF_GENTRAP: + regs->pc -= 4; + switch ((long)regs->regs[16]) { + case GEN_INTOVF: + signo = SIGFPE; + code = FPE_INTOVF; + break; + case GEN_INTDIV: + signo = SIGFPE; + code = FPE_INTDIV; + break; + case GEN_FLTOVF: + signo = SIGFPE; + code = FPE_FLTOVF; + break; + case GEN_FLTDIV: + signo = SIGFPE; + code = FPE_FLTDIV; + break; + case GEN_FLTUND: + signo = SIGFPE; + code = FPE_FLTUND; + break; + case GEN_FLTINV: + signo = SIGFPE; + code = FPE_FLTINV; + break; + case GEN_FLTINE: + signo = SIGFPE; + code = FPE_FLTRES; + break; + case GEN_ROPRAND: + signo = SIGFPE; + code = FPE_FLTUNK; + break; + + case GEN_DECOVF: + case GEN_DECDIV: + case GEN_DECINV: + case GEN_ASSERTERR: + case GEN_NULPTRERR: + case GEN_STKOVF: + case GEN_STRLENERR: + case GEN_SUBSTRERR: + case GEN_RANGERR: + case GEN_SUBRNG: + case GEN_SUBRNG1: + case GEN_SUBRNG2: + case GEN_SUBRNG3: + case GEN_SUBRNG4: + case GEN_SUBRNG5: + case GEN_SUBRNG6: + case GEN_SUBRNG7: + default: + regs->pc += 4; + signo = SIGTRAP; + code = TRAP_UNK; + break; + } + + force_sig_fault(signo, code, (void __user *)regs->pc); + return; + + case IF_FEN: + fpu_enable(); + return; + + case IF_OPDEC: + switch (inst) { +#ifdef CONFIG_KPROBES + case BREAK_KPROBE: + if (notify_die(DIE_BREAK, "kprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case BREAK_KPROBE_SS: + if (notify_die(DIE_SSTEPBP, "single_step", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; +#endif +#ifdef CONFIG_UPROBES + case UPROBE_BRK_UPROBE: + if (notify_die(DIE_UPROBE, "uprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case UPROBE_BRK_UPROBE_XOL: + if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; +#endif + } + + if (user_mode(regs)) + regs->pc -= 4; + else + die("Instruction fault", regs, type); + break; + + default: /* unexpected instruction-fault type */ + regs->pc -= 4; + break; + } + + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc); +} + +asmlinkage void +do_entUna(void *va, unsigned long opcode, unsigned long reg, + struct pt_regs *regs) +{ + long error; + unsigned long tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; + unsigned long pc = regs->pc - 4; + + /* + * We don't want to use the generic get/put unaligned macros as + * we want to trap exceptions. Only if we actually get an + * exception will we decide whether we should have caught it. 
+ */ + + switch (opcode) { + case 0x21: + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table,\"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x22: + __asm__ __volatile__( + "1: ldl_u %1,0(%3)\n" + "2: ldl_u %2,3(%3)\n" + " extlw %1,%3,%1\n" + " exthw %2,%3,%2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = (int)(tmp1 | tmp2); + return; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x29: /* sth */ + __asm__ __volatile__( + " zap %6, 2, %1\n" + " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2a: /* stw */ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1,%2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 
"=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + } + + pr_warn("Bad unaligned kernel access at %016lx: %p %lx %lu\n", + pc, va, opcode, reg); + make_task_dead(SIGSEGV); + +got_exception: + /* Ok, we caught the exception, but we don't want it. Is there + * someone to pass it along to? + */ + if (fixup_exception(regs, pc)) { + pr_info("Forwarding unaligned exception at %lx (%lx)\n", + pc, regs->pc); + return; + } + + /* + * Yikes! No one to forward the exception to. + * Since the registers are in a weird format, dump them ourselves. + */ + + die("Unhandled unaligned exception", regs, error); +} + +/* + * Handle user-level unaligned fault. Handling user-level unaligned + * faults is *extremely* slow and produces nasty messages. A user + * program *should* fix unaligned faults ASAP. + * + * Notice that we have (almost) the regular kernel stack layout here, + * so finding the appropriate registers is a little more difficult + * than in the kernel case. + * + * Finally, we handle regular integer load/stores only. In + * particular, load-linked/store-conditionally and floating point + * load/stores are not supported. The former make no sense with + * unaligned faults (they are guaranteed to fail) and I don't think + * the latter will occur in any decent program. + * + * Sigh. We *do* have to handle some FP operations, because GCC will + * uses them as temporary storage for integer memory to memory copies. + * However, we need to deal with stt/ldt and sts/lds only. + */ +#define OP_INT_MASK (1L << 0x22 | 1L << 0x2a | /* ldw stw */ \ + 1L << 0x23 | 1L << 0x2b | /* ldl stl */ \ + 1L << 0x21 | 1L << 0x29 | /* ldhu sth */ \ + 1L << 0x20 | 1L << 0x28) /* ldbu stb */ + +asmlinkage void +do_entUnaUser(void __user *va, unsigned long opcode, + unsigned long reg, struct pt_regs *regs) +{ +#ifdef CONFIG_UNA_PRINT + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); +#endif + + unsigned long tmp1, tmp2, tmp3, tmp4; + unsigned long fake_reg, *reg_addr = &fake_reg; + int si_code; + long error; + unsigned long tmp, tmp5, tmp6, tmp7, tmp8, vb; + unsigned long fp[4]; + unsigned long instr, instr_op, value; + +#ifdef CONFIG_DEBUG_FS + /* + * If command name is specified, record some information + * to debugfs. + */ + if (unaligned_task[0] && !strcmp(unaligned_task, current->comm)) { + int idx; + + idx = unaligned_count % UNA_MAX_ENTRIES; + unaligned[idx].va = (unsigned long)va; + unaligned[idx].pc = regs->pc; + unaligned_count++; + } +#endif + + /* Check the UAC bits to decide what the user wants us to do + * with the unaliged access. + */ + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, + 1, regs, regs->pc - 4); + +#ifdef CONFIG_UNA_PRINT + if (!(current_thread_info()->status & TS_UAC_NOPRINT)) { + if (__ratelimit(&ratelimit)) { + pr_info("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", + current->comm, task_pid_nr(current), + regs->pc - 4, va, opcode, reg); + } + } +#endif + if ((current_thread_info()->status & TS_UAC_SIGBUS)) + goto give_sigbus; + /* Not sure why you'd want to use this, but... */ + if ((current_thread_info()->status & TS_UAC_NOFIX)) + return; + + /* Don't bother reading ds in the access check since we already + * know that this came from the user. Also rely on the fact that + * the page at TASK_SIZE is unmapped and so can't be touched anyway. 
+ */ + if ((unsigned long)va >= TASK_SIZE) + goto give_sigsegv; + + if ((1L << opcode) & OP_INT_MASK) { + /* it's an integer load/store */ + if (reg < 31) { + reg_addr = ®s->regs[reg]; + } else { + /* zero "register" */ + fake_reg = 0; + } + } + + get_user(instr, (__u32 *)(regs->pc - 4)); + instr_op = (instr >> 26) & 0x3f; + + get_user(value, (__u64 *)va); + + switch (instr_op) { + + case 0x0c: /* vlds */ + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + "1: ldl %1, 0(%5)\n" + "2: ldl %2, 8(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_s(reg, tmp1, tmp2); + + return; + } else { + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp1 = tmp1 | tmp4; + tmp2 = tmp5 | tmp3; + + sw64_write_simd_fp_reg_s(reg, tmp1, tmp2); + + return; + } + case 0x0a: /* ldse */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp2; + tmp = tmp | (tmp << 32); + + sw64_write_simd_fp_reg_s(reg, tmp, tmp); + + return; + + case 0x0d: /* vldd */ + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + "1: ldl %1, 0(%5)\n" + "2: ldl %2, 8(%5)\n" + "3: ldl %3, 16(%5)\n" + "4: ldl %4, 24(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi %4, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_d(reg, tmp1, tmp2, tmp3, tmp4); + + return; + } else { + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp7 = tmp1 | tmp4; //f0 + tmp8 = tmp5 | tmp3; //f1 + + vb = ((unsigned long)(va))+16; + + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi 
%2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(vb), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp4; // f2 + tmp2 = tmp5 | tmp3; // f3 + + sw64_write_simd_fp_reg_d(reg, tmp7, tmp8, tmp, tmp2); + return; + } + + case 0x0b: /* ldde */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp2; + + sw64_write_simd_fp_reg_d(reg, tmp, tmp, tmp, tmp); + return; + + case 0x09: /* ldwe */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_ldwe(reg, (int)(tmp1 | tmp2)); + + return; + + case 0x0e: /* vsts */ + sw64_read_simd_fp_m_s(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb 
%6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va)+16; + + + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(vb), "r"(fp[2]), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi 
$31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[2]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + } + switch (opcode) { + case 0x21: /* ldhu */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + case 0x26: /* flds */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + 
".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg_s(reg, tmp1 | tmp2); + return; + + case 0x27: /* fldd */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg(reg, tmp1 | tmp2); + return; + + case 0x22: /* ldw */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = (int)(tmp1 | tmp2); + break; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + /* Note that the store sequences do not indicate that they change + * memory because it _should_ be affecting nothing in this context. + * (Otherwise we have other, much larger, problems.) + */ + case 0x29: /* sth with stb */ + __asm__ __volatile__( + " zap %6, 2, %1\n" + " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2e: /* fsts*/ + fake_reg = sw64_read_fp_reg_s(reg); + fallthrough; + + case 0x2a: /* stw with stb*/ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2f: /* fstd */ + fake_reg = sw64_read_fp_reg(reg); + fallthrough; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: 
stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + default: + /* What instruction were you trying to use, exactly? */ + goto give_sigbus; + } + + return; + +give_sigsegv: + regs->pc -= 4; /* make pc point to faulting insn */ + + /* We need to replicate some of the logic in mm/fault.c, + * since we don't have access to the fault code in the + * exception handling return path. + */ + if ((unsigned long)va >= TASK_SIZE) + si_code = SEGV_ACCERR; + else { + struct mm_struct *mm = current->mm; + + down_read(&mm->mmap_lock); + if (find_vma(mm, (unsigned long)va)) + si_code = SEGV_ACCERR; + else + si_code = SEGV_MAPERR; + up_read(&mm->mmap_lock); + } + force_sig_fault(SIGSEGV, si_code, va); + return; + +give_sigbus: + regs->pc -= 4; + force_sig_fault(SIGBUS, BUS_ADRALN, va); +} + +asmlinkage void do_entSys(struct pt_regs *regs) +{ + long ret = -ENOSYS; + unsigned long nr; + unsigned long ti_flags = current_thread_info()->flags; + + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + nr = regs->regs[0]; + + if (ti_flags & _TIF_SYSCALL_WORK) { + nr = syscall_trace_enter(); + if (nr == NO_SYSCALL) + goto syscall_out; + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + } + + if (nr < __NR_syscalls) { + syscall_fn_t syscall_fn = sys_call_table[nr]; + + ret = syscall_fn(regs->regs[16], regs->regs[17], regs->regs[18], + regs->regs[19], regs->regs[20], regs->regs[21]); + } + + if ((nr != __NR_sigreturn) && (nr != __NR_rt_sigreturn)) { + if (likely((ret >= 0) || regs->orig_r0 == NO_SYSCALL)) + syscall_set_return_value(current, regs, 0, ret); + else + syscall_set_return_value(current, regs, ret, 0); + } + +syscall_out: + rseq_syscall(regs); + + if (ti_flags & _TIF_SYSCALL_WORK) + syscall_trace_leave(); +} + +void +trap_init(void) +{ + /* Tell HMcode what global pointer we want in the kernel. 
*/ + register unsigned long gptr __asm__("$29"); + wrkgp(gptr); + + wrent(entArith, 1); + wrent(entMM, 2); + wrent(entIF, 3); + wrent(entUna, 4); + wrent(entSys, 5); +#ifdef CONFIG_EFI + if (smp_processor_id() == 0) + wrent((void *)entSuspend, 6); +#endif +} diff --git a/arch/sw_64/kernel/unaligned.c b/arch/sw_64/kernel/unaligned.c new file mode 100644 index 000000000000..40a17fb9cbd2 --- /dev/null +++ b/arch/sw_64/kernel/unaligned.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include +#include + +unsigned long unaligned_count; +char unaligned_task[TASK_COMM_LEN]; +struct unaligned_stat unaligned[UNA_MAX_ENTRIES]; + +static ssize_t unaligned_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + + unaligned_count = 0; + size = min(sizeof(unaligned_task), len); + if (copy_from_user(unaligned_task, user_buf, size)) + return -EFAULT; + unaligned_task[size - 1] = '\0'; + + return len; +} + +static int unaligned_show(struct seq_file *m, void *v) +{ + int i, idx, nr; + + if (!unaligned_task[0]) { + seq_puts(m, "No task traced\n"); + return 0; + } + seq_printf(m, "Task command:\t\t%s\n", unaligned_task); + seq_printf(m, "Unaligned count:\t%ld\n", unaligned_count); + if (!unaligned_count) + return 0; + nr = 0; + idx = unaligned_count % UNA_MAX_ENTRIES; + seq_printf(m, "Latest %d unaligned stat:\nNo.\tVA\t\tPC\n", UNA_MAX_ENTRIES); + if (unaligned_count >= UNA_MAX_ENTRIES) { + for (i = idx; i < UNA_MAX_ENTRIES; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + } + for (i = 0; i < idx; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + return 0; +} + +static int unaligned_open(struct inode *inode, struct file *file) +{ + return single_open(file, unaligned_show, NULL); +} + +static const struct file_operations unaligned_fops = { + .read = seq_read, + .write = unaligned_set, + .open = unaligned_open, + .llseek = default_llseek, +}; + +static int __init unaligned_init(void) +{ + struct dentry *unaligned; + + if (!sw64_debugfs_dir) + return -ENODEV; + + unaligned = debugfs_create_file("unaligned", 0644, + sw64_debugfs_dir, NULL, + &unaligned_fops); + if (!unaligned) + return -ENOMEM; + + return 0; +} + +late_initcall(unaligned_init); -- Gitee From 770eed53d30ac4b08c3cf617937a7447e5d882a1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:31 +0800 Subject: [PATCH 295/953] anolis: sw64: add process management ANBZ: #4688 Add process management support for SW64. 
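The TS_UAC_* bits added in thread_info.h are driven through the generic
prctl() interface (SET_UNALIGN_CTL/GET_UNALIGN_CTL). A minimal user-space
sketch, using only the standard prctl flags (PR_NOFIX is the sw64-specific
extension and needs the sw64 uapi headers):

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		unsigned int ctl = 0;

		/* Ask for SIGBUS instead of kernel fixups on unaligned access. */
		if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0)
			perror("prctl(PR_SET_UNALIGN)");

		prctl(PR_GET_UNALIGN, &ctl);	/* read the mode back */
		printf("unalign ctl: %#x\n", ctl);
		return 0;
	}
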
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/current.h | 19 + arch/sw_64/include/asm/processor.h | 100 ++++ arch/sw_64/include/asm/ptrace.h | 92 +++ arch/sw_64/include/asm/switch_to.h | 60 ++ arch/sw_64/include/asm/thread_info.h | 148 +++++ arch/sw_64/include/uapi/asm/ptrace.h | 56 ++ arch/sw_64/kernel/idle.c | 35 ++ arch/sw_64/kernel/process.c | 109 ++++ arch/sw_64/kernel/ptrace.c | 858 +++++++++++++++++++++++++++ 9 files changed, 1477 insertions(+) create mode 100644 arch/sw_64/include/asm/current.h create mode 100644 arch/sw_64/include/asm/processor.h create mode 100644 arch/sw_64/include/asm/ptrace.h create mode 100644 arch/sw_64/include/asm/switch_to.h create mode 100644 arch/sw_64/include/asm/thread_info.h create mode 100644 arch/sw_64/include/uapi/asm/ptrace.h create mode 100644 arch/sw_64/kernel/idle.c create mode 100644 arch/sw_64/kernel/process.c create mode 100644 arch/sw_64/kernel/ptrace.c diff --git a/arch/sw_64/include/asm/current.h b/arch/sw_64/include/asm/current.h new file mode 100644 index 000000000000..862caabb9c70 --- /dev/null +++ b/arch/sw_64/include/asm/current.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CURRENT_H +#define _ASM_SW64_CURRENT_H + +#ifndef __ASSEMBLY__ + +struct task_struct; +static __always_inline struct task_struct *get_current(void) +{ + register struct task_struct *tp __asm__("$8"); + + return tp; +} + +#define current get_current() + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SW64_CURRENT_H */ diff --git a/arch/sw_64/include/asm/processor.h b/arch/sw_64/include/asm/processor.h new file mode 100644 index 000000000000..ec68fe6cc6f2 --- /dev/null +++ b/arch/sw_64/include/asm/processor.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm-sw64/processor.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +#ifndef _ASM_SW64_PROCESSOR_H +#define _ASM_SW64_PROCESSOR_H + +#include /* for ADDR_LIMIT_32BIT */ +#include + +#define task_pt_regs(task) \ + ((struct pt_regs *) (task->stack + THREAD_SIZE) - 1) + +/* + * Returns current instruction pointer ("program counter"). + */ +#define current_text_addr() \ + ({ void *__pc; __asm__ ("br %0, .+4" : "=r"(__pc)); __pc; }) + +/* + * SW64 does have an arch_pick_mmap_layout() + */ +#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 + +/* + * We have a 52-bit user address space: 4PB user VM... + */ +#define TASK_SIZE (0x10000000000000UL) +#define UNMAPPED_BASE (TASK_SIZE >> 6) +#define STACK_TOP \ + (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) + +#define STACK_TOP_MAX 0x00120000000UL + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE \ + ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : UNMAPPED_BASE) + +struct thread_struct { + struct user_fpsimd_state fpstate; + /* Callee-saved registers */ + unsigned long ra; + unsigned long sp; + unsigned long s[7]; /* s0 ~ s6 */ +}; +#define INIT_THREAD { } + +struct task_struct; +struct pt_regs; + +/* Do necessary setup to start up a newly executed thread. */ +extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); + +/* Free all resources held by a thread. 
*/ +extern void release_thread(struct task_struct *dead_task); + +unsigned long __get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) + +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[30]) + +#define cpu_relax() barrier() + +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH + +#ifndef CONFIG_SMP +/* Nothing to prefetch. */ +#define spin_lock_prefetch(lock) do { } while (0) +#endif + +static inline void prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 0, 3); +} + +static inline void prefetchw(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} + +#ifdef CONFIG_SMP +static inline void spin_lock_prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} +#endif + +static inline void wait_for_interrupt(void) +{ + __asm__ __volatile__ ("halt"); +} +#endif /* _ASM_SW64_PROCESSOR_H */ diff --git a/arch/sw_64/include/asm/ptrace.h b/arch/sw_64/include/asm/ptrace.h new file mode 100644 index 000000000000..964f4fc730f2 --- /dev/null +++ b/arch/sw_64/include/asm/ptrace.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PTRACE_H +#define _ASM_SW64_PTRACE_H + +#include +#include +#include + +#define NO_SYSCALL _AC(-1, UL) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +/* + * This struct defines the way the registers are stored on the + * kernel stack during a system call or other kernel entry + */ + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + unsigned long regs[31]; + unsigned long pc; + unsigned long ps; + }; + }; + unsigned long orig_r0; + unsigned long orig_r19; + /* These are saved by HMcode: */ + unsigned long hm_ps; + unsigned long hm_pc; + unsigned long hm_gp; + unsigned long hm_r16; + unsigned long hm_r17; + unsigned long hm_r18; +}; + +#define arch_has_single_step() (1) +#define user_mode(regs) (((regs)->ps & 8) != 0) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +#define user_stack_pointer(pt_regs) ((pt_regs)->regs[30]) +#define kernel_stack_pointer(regs) ((unsigned long)((regs) + 1)) +#define instruction_pointer_set(regs, val) ((regs)->pc = val) + +#define force_successful_syscall_return() (current_pt_regs()->orig_r0 = NO_SYSCALL) + +#define MAX_REG_OFFSET (offsetof(struct pt_regs, orig_r0)) + +extern short regoffsets[]; + +extern unsigned long syscall_trace_enter(void); +extern void syscall_trace_leave(void); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. 
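+ *
+ * A typical (illustrative) pairing with regs_query_register_offset(),
+ * declared just below:
+ *
+ *	int off = regs_query_register_offset("r16");
+ *
+ *	if (off >= 0)
+ *		arg0 = regs_get_register(regs, off);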
+ */ +static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +static inline int is_syscall_success(struct pt_regs *regs) +{ + return !regs->regs[19]; +} + +static inline long regs_return_value(struct pt_regs *regs) +{ + if ((regs->orig_r0 == NO_SYSCALL) || is_syscall_success(regs)) + return regs->regs[0]; + else + return -regs->regs[0]; +} + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_PTRACE_H */ diff --git a/arch/sw_64/include/asm/switch_to.h b/arch/sw_64/include/asm/switch_to.h new file mode 100644 index 000000000000..5e2db4b9e266 --- /dev/null +++ b/arch/sw_64/include/asm/switch_to.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SWITCH_TO_H +#define _ASM_SW64_SWITCH_TO_H + +#include + +extern void __fpstate_save(struct task_struct *save_to); +extern void __fpstate_restore(struct task_struct *restore_from); +extern struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); +extern void restore_da_match_after_sched(void); + +static inline void aux_save(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + pcb->tp = rtid(); + __fpstate_save(task); + } +} + +static inline void aux_restore(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + wrtp(pcb->tp); + __fpstate_restore(task); + } +} + +static inline void __switch_to_aux(struct task_struct *prev, + struct task_struct *next) +{ + aux_save(prev); + aux_restore(next); +} + + +#define switch_to(prev, next, last) \ +do { \ + struct task_struct *__prev = (prev); \ + struct task_struct *__next = (next); \ + __switch_to_aux(__prev, __next); \ + (last) = __switch_to(__prev, __next); \ +} while (0) + + +/* TODO: finish_arch_switch has been removed from arch-independent code. 
*/ + +/* + * finish_arch_post_lock_switch will be called after switch_to + */ +#define finish_arch_post_lock_switch restore_da_match_after_sched + + +#endif /* _ASM_SW64_SWITCH_TO_H */ diff --git a/arch/sw_64/include/asm/thread_info.h b/arch/sw_64/include/asm/thread_info.h new file mode 100644 index 000000000000..4f3b837e2e90 --- /dev/null +++ b/arch/sw_64/include/asm/thread_info.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_THREAD_INFO_H +#define _ASM_SW64_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include + +typedef struct { + unsigned long seg; +} mm_segment_t; + + +struct pcb_struct { + unsigned long tp; + unsigned long da_match, da_mask; + unsigned long dv_match, dv_mask; + union { + unsigned long dc_ctl; + unsigned long match_ctl; + }; + unsigned long ia_match, ia_mask; + unsigned long iv_match; + unsigned long ida_match, ida_mask; +}; + +struct thread_info { + struct pcb_struct pcb; /* hmcode state */ + + unsigned int flags; /* low level flags */ + unsigned int ieee_state; /* see fpu.h */ + + mm_segment_t addr_limit; /* thread address space */ + unsigned int cpu; /* current CPU */ + int preempt_count; /* 0 => preemptible, <0 => BUG */ + unsigned int status; /* thread-synchronous flags */ + + int bpt_nsaved; + unsigned long bpt_addr[2]; /* breakpoint handling */ + unsigned int bpt_insn[2]; +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long dyn_ftrace_addr; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + unsigned long dyn_ftrace_regs_addr; +#endif +#endif +}; + +static __always_inline u64 rtid(void) +{ + u64 val; + + asm volatile("rtid %0" : "=r" (val) : :); + return val; +} + +/* + * Macros/functions for gaining access to the thread information structure. + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .addr_limit = KERNEL_DS, \ + .preempt_count = INIT_PREEMPT_COUNT, \ +} + + +#endif /* __ASSEMBLY__ */ + +/* Thread information allocation. */ +#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE (2 * PAGE_SIZE) + +/* + * Thread information flags: + * - these are process state flags and are used from assembly + * - pending work-to-be-done flags come first and must be assigned to be + * within bits 0 to 7 to fit in an immediate operand. + * + * TIF_SYSCALL_TRACE is known to be 0 via blbs.
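+ *
+ * For example (illustrative, mirroring the inline asm in idle.c), a
+ * low flag can be tested straight off the flag word:
+ *
+ *	ldw	$1, TI_FLAGS($8)	# $8 holds the current task
+ *	blbs	$1, trace_entry		# bit 0 == TIF_SYSCALL_TRACE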
+ */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_SYSCALL_AUDIT 4 /* syscall audit active */ +#define TIF_UPROBE 5 /* uprobe breakpoint or singlestep */ +#define TIF_PATCH_PENDING 6 /* pending live patching update */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ +#define TIF_SYSCALL_TRACEPOINT 10 +#define TIF_SECCOMP 11 /* secure computing */ +#define TIF_MEMDIE 13 /* is terminating due to OOM killer */ +#define TIF_POLLING_NRFLAG 14 /* idle is polling for TIF_NEED_RESCHED */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) + +/* Work to do on interrupt/exception return. */ +#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL) + +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) + +/* Work to do on any return to userspace. */ +#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE) + +#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */ +#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */ +#define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'prctl' */ + +#define SET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ + if (value & PR_UNALIGN_NOPRINT) \ + status |= TS_UAC_NOPRINT; \ + if (value & PR_UNALIGN_SIGBUS) \ + status |= TS_UAC_SIGBUS; \ + if (value & PR_NOFIX) /* sw-specific */ \ + status |= TS_UAC_NOFIX; \ + task_thread_info(task)->status = status; \ + 0; }) + +#define GET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ + __u32 res = 0; \ + if (status & TS_UAC_NOPRINT) \ + res |= PR_UNALIGN_NOPRINT; \ + if (status & TS_UAC_SIGBUS) \ + res |= PR_UNALIGN_SIGBUS; \ + if (status & TS_UAC_NOFIX) \ + res |= PR_NOFIX; \ + put_user(res, (int __user *)(value)); \ + }) + +#endif /* __KERNEL__ */ +#endif /* _ASM_SW64_THREAD_INFO_H */ diff --git a/arch/sw_64/include/uapi/asm/ptrace.h b/arch/sw_64/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..3fd53450e418 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ptrace.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_PTRACE_H +#define _UAPI_ASM_SW64_PTRACE_H + +#include + +#ifndef __ASSEMBLY__ +/* + * User structures for general purpose, floating point and debug registers. 
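+ *
+ * A debugger would normally fetch these through the regset interface,
+ * e.g. (illustrative):
+ *
+ *	struct user_pt_regs gr;
+ *	struct iovec iov = { .iov_base = &gr, .iov_len = sizeof(gr) };
+ *
+ *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);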
+ */ +struct user_pt_regs { + __u64 regs[31]; + __u64 pc; + __u64 pstate; +}; + +/* 256 bits aligned for simd */ +struct fpreg { + __u64 v[4] __attribute__((aligned(32))); +}; + +struct user_fpsimd_state { + struct fpreg fp[31]; + __u64 fpcr; + __u64 __reserved[3]; +}; +#endif + +/* PTRACE_ATTACH is 16 */ +/* PTRACE_DETACH is 17 */ + +#define PT_REG_BASE 0 +#define PT_REG_END 30 +#define PT_FPREG_BASE 32 +#define PT_FPREG_END 62 +#define PT_FPCR 63 +#define PT_PC 64 +#define PT_TP 65 +#define PT_UNIQUE PT_TP +#define PT_VECREG_BASE 67 +#define PT_VECREG_END 161 +#define PT_F31_V1 98 +#define PT_F31_V2 130 +#define PT_DA_MATCH 163 +#define PT_DA_MASK 164 +#define PT_DV_MATCH 165 +#define PT_DV_MASK 166 +#define PT_DC_CTL 167 +#define PT_MATCH_CTL 167 +#define PT_IA_MATCH 168 +#define PT_IA_MASK 169 +#define PT_IV_MATCH 170 +#define PT_IDA_MATCH 171 +#define PT_IDA_MASK 172 + +#endif /* _UAPI_ASM_SW64_PTRACE_H */ diff --git a/arch/sw_64/kernel/idle.c b/arch/sw_64/kernel/idle.c new file mode 100644 index 000000000000..d26bdc405b53 --- /dev/null +++ b/arch/sw_64/kernel/idle.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 idle loop support. + * + */ +#include +#include +#include +#include +#include + +void arch_cpu_idle(void) +{ + local_irq_enable(); + cpu_relax(); + + if (is_in_guest()) { + if (!need_resched()) + hcall(HCALL_HALT, 0, 0, 0); + } else { + asm( + ".globl __idle_start\n" + "__idle_start = .\n" + "ldw $1, %0($8)\n" + "srl $1, %1, $1\n" + "blbs $1, $need_resched\n" + "halt\n" + ".globl __idle_end\n" + "__idle_end = .\n" + "$need_resched:" + :: "i"(TI_FLAGS), "i"(TIF_NEED_RESCHED) + : "$1"); + } + local_irq_disable(); +} diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c new file mode 100644 index 000000000000..fa58a0de4368 --- /dev/null +++ b/arch/sw_64/kernel/process.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file handles the architecture-dependent parts of process handling. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "proto.h" + +/* + * Re-start a thread when doing execve() + */ +void +start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) +{ + regs->pc = pc; + regs->ps = 8; + regs->regs[30] = sp; +} +EXPORT_SYMBOL(start_thread); + + +void +flush_thread(void) +{ + /* Arrange for each exec'ed process to start off with a clean slate + * with respect to the FPU. This is all exceptions disabled. + */ + current_thread_info()->ieee_state = 0; + wrfpcr(FPCR_INIT | ieee_swcr_to_fpcr(0)); + + /* Clean slate for TLS. */ + current_thread_info()->pcb.tp = 0; +} + +void +release_thread(struct task_struct *dead_task) +{ +} + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + /* + * aux_save() has to read the current TLS pointer from CSR:TID as it + * may be out-of-sync with the saved value. 
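+ * Saving here means a child created by fork() inherits the TLS pointer
+ * the parent is using at fork time rather than a stale snapshot from
+ * the parent's last context switch.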
+ */ + aux_save(src); + *dst = *src; + return 0; +} + +/* + * Copy architecture-specific thread state + */ + +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; + struct thread_info *childti = task_thread_info(p); + struct pt_regs *childregs = task_pt_regs(p); + struct pt_regs *regs = current_pt_regs(); + + extern void ret_from_fork(void); + extern void ret_from_kernel_thread(void); + + p->thread.sp = (unsigned long) childregs; + + if (unlikely(args->fn)) { + /* kernel thread */ + memset(childregs, 0, sizeof(struct pt_regs)); + p->thread.ra = (unsigned long) ret_from_kernel_thread; + p->thread.s[0] = (unsigned long) args->fn; /* function */ + p->thread.s[1] = (unsigned long) args->fn_arg; + return 0; + } + + /* + * Note: if CLONE_SETTLS is not set, then we must inherit the + * value from the parent, which will have been set by the block + * copy in dup_task_struct. This is non-intuitive, but is + * required for proper operation in the case of a threaded + * application calling fork. + */ + if (clone_flags & CLONE_SETTLS) + childti->pcb.tp = tls; + else + regs->regs[20] = 0; + *childregs = *regs; + if (usp) + childregs->regs[30] = usp; + syscall_set_return_value(NULL, childregs, 0, 0); + p->thread.ra = (unsigned long) ret_from_fork; + return 0; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + return randomize_page(mm->brk, 0x02000000); +} diff --git a/arch/sw_64/kernel/ptrace.c b/arch/sw_64/kernel/ptrace.c new file mode 100644 index 000000000000..070e27ee2567 --- /dev/null +++ b/arch/sw_64/kernel/ptrace.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ptrace.c */ +/* By Ross Biro 1/23/92 */ +/* edited by Linus Torvalds */ +/* mangled further by Bob Manson (manson@santafe.edu) */ +/* more mutilation by David Mosberger (davidm@azstarnet.com) */ + +#include +#include +#include +#include + +#include + +#include "proto.h" +#include + +#define CREATE_TRACE_POINTS +#include + +#define BREAKINST 0x00000080 /* sys_call bpt */ + +/* + * does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. + */ + +/* + * Processes always block with the following stack-layout: + * + * +================================+ <---- task + 2*PAGE_SIZE + * | HMcode saved frame (ps, pc, | ^ + * | gp, a0, a1, a2) | | + * +================================+ | struct pt_regs + * | | | + * | frame generated by SAVE_ALL | | + * | | v + * +================================+ + */ + +/* + * The following table maps a register index into the stack offset at + * which the register is saved. Register indices are 0-31 for integer + * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and + * zero have no stack-slot and need to be treated specially (see + * get_reg/put_reg below). + */ +#define PCB_OFF(var) offsetof(struct pcb_struct, var) + +static int pcboff[] = { + [PT_TP] = PCB_OFF(tp), + [PT_DA_MATCH] = PCB_OFF(da_match), + [PT_DA_MASK] = PCB_OFF(da_mask), + [PT_DV_MATCH] = PCB_OFF(dv_match), + [PT_DV_MASK] = PCB_OFF(dv_mask), + [PT_DC_CTL] = PCB_OFF(dc_ctl), + [PT_MATCH_CTL] = PCB_OFF(match_ctl), + [PT_IA_MATCH] = PCB_OFF(ia_match), + [PT_IA_MASK] = PCB_OFF(ia_mask), + [PT_IV_MATCH] = PCB_OFF(iv_match), + [PT_IDA_MATCH] = PCB_OFF(ida_match), + [PT_IDA_MASK] = PCB_OFF(ida_mask) +}; + +static unsigned long zero; + +/* + * Get address of register REGNO in task TASK. 
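+ * For example, PT_FPREG_BASE + 2 resolves to &task->thread.fpstate.fp[2].v[0],
+ * while the pcb-backed indices (PT_TP and the match registers) are looked
+ * up through the pcboff[] table above.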
+ */ + +static unsigned long * +get_reg_addr(struct task_struct *task, unsigned long regno) +{ + void *addr; + int fno, vno; + + switch (regno) { + case PT_UNIQUE: + case PT_DA_MATCH: + case PT_DA_MASK: + case PT_DV_MATCH: + case PT_DV_MASK: + case PT_MATCH_CTL: + case PT_IA_MATCH: + case PT_IA_MASK: + case PT_IV_MATCH: + case PT_IDA_MATCH: + case PT_IDA_MASK: + addr = (void *)task_thread_info(task) + pcboff[regno]; + break; + case PT_REG_BASE ... PT_REG_END: + addr = &task_pt_regs(task)->regs[regno]; + break; + case PT_FPREG_BASE ... PT_FPREG_END: + fno = regno - PT_FPREG_BASE; + addr = &task->thread.fpstate.fp[fno].v[0]; + break; + case PT_VECREG_BASE ... PT_VECREG_END: + /* + * return addr for zero value if we catch vectors of f31 + * v0 and v3 of f31 are not in this range so ignore them + */ + if (regno == PT_F31_V1 || regno == PT_F31_V2) { + addr = &zero; + break; + } + fno = (regno - PT_VECREG_BASE) & 0x1f; + vno = 1 + ((regno - PT_VECREG_BASE) >> 5); + addr = &task->thread.fpstate.fp[fno].v[vno]; + break; + case PT_FPCR: + addr = &task->thread.fpstate.fpcr; + break; + case PT_PC: + addr = (void *)task_pt_regs(task) + PT_REGS_PC; + break; + default: + addr = &zero; + } + + return addr; +} + +/* + * Get contents of register REGNO in task TASK. + */ +unsigned long +get_reg(struct task_struct *task, unsigned long regno) +{ + return *get_reg_addr(task, regno); +} + +/* + * Write contents of register REGNO in task TASK. + */ +static int +put_reg(struct task_struct *task, unsigned long regno, unsigned long data) +{ + *get_reg_addr(task, regno) = data; + return 0; +} + +static inline int +read_int(struct task_struct *task, unsigned long addr, int *data) +{ + int copied = access_process_vm(task, addr, data, sizeof(int), FOLL_FORCE); + + return (copied == sizeof(int)) ? 0 : -EIO; +} + +static inline int +write_int(struct task_struct *task, unsigned long addr, int data) +{ + int copied = access_process_vm(task, addr, &data, sizeof(int), + FOLL_FORCE | FOLL_WRITE); + return (copied == sizeof(int)) ? 0 : -EIO; +} + +/* + * Set breakpoint. + */ +int +ptrace_set_bpt(struct task_struct *child) +{ + int displ, i, res, reg_b, nsaved = 0; + unsigned int insn, op_code; + unsigned long pc; + + pc = get_reg(child, PT_PC); + res = read_int(child, pc, (int *)&insn); + if (res < 0) + return res; + + op_code = insn >> 26; + /* br bsr beq bne blt ble bgt bge blbc blbs fbeq fbne fblt fble fbgt fbge */ + if ((1UL << op_code) & 0x3fff000000000030UL) { + /* + * It's a branch: instead of trying to figure out + * whether the branch will be taken or not, we'll put + * a breakpoint at either location. This is simpler, + * more reliable, and probably not a whole lot slower + * than the alternative approach of emulating the + * branch (emulation can be tricky for fp branches). 
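+ *
+ * Concretely (illustrative), for a branch at pc the two breakpoints
+ * land at pc + 4 (fall-through) and pc + 4 + 4*disp (taken): the
+ * "(insn << 11) >> 9" below extracts the 21-bit displacement and
+ * sign-extends it, pre-scaled by 4.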
+ */ + displ = ((s32)(insn << 11)) >> 9; + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + if (displ) /* guard against unoptimized code */ + task_thread_info(child)->bpt_addr[nsaved++] + = pc + 4 + displ; + /*call ret jmp*/ + } else if (op_code >= 0x1 && op_code <= 0x3) { + reg_b = (insn >> 16) & 0x1f; + task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); + } else { + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + } + + /* install breakpoints: */ + for (i = 0; i < nsaved; ++i) { + res = read_int(child, task_thread_info(child)->bpt_addr[i], + (int *)&insn); + if (res < 0) + return res; + task_thread_info(child)->bpt_insn[i] = insn; + res = write_int(child, task_thread_info(child)->bpt_addr[i], + BREAKINST); + if (res < 0) + return res; + } + task_thread_info(child)->bpt_nsaved = nsaved; + return 0; +} + +/* + * Ensure no single-step breakpoint is pending. Returns non-zero + * value if child was being single-stepped. + */ +int +ptrace_cancel_bpt(struct task_struct *child) +{ + int i, nsaved = task_thread_info(child)->bpt_nsaved; + + task_thread_info(child)->bpt_nsaved = 0; + + if (nsaved > 2) { + pr_info("%s: bogus nsaved: %d!\n", __func__, nsaved); + nsaved = 2; + } + + for (i = 0; i < nsaved; ++i) { + write_int(child, task_thread_info(child)->bpt_addr[i], + task_thread_info(child)->bpt_insn[i]); + } + return (nsaved != 0); +} + +void user_enable_single_step(struct task_struct *child) +{ + /* Mark single stepping. */ + task_thread_info(child)->bpt_nsaved = -1; +} + +void user_disable_single_step(struct task_struct *child) +{ + ptrace_cancel_bpt(child); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_write(&to, task_pt_regs(target), sizeof(struct user_pt_regs)); +} + +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + task_pt_regs(target), 0, sizeof(struct user_pt_regs)); +} + +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + + return membuf_write(&to, &target->thread.fpstate, + sizeof(struct user_fpsimd_state)); +} + +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpstate, 0, + sizeof(struct user_fpsimd_state)); +} + +enum sw64_regset { + REGSET_GPR, + REGSET_FPR, +}; + +static const struct user_regset sw64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(elf_greg_t), + .align = sizeof(elf_greg_t), + .regset_get = gpr_get, + .set = gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpsimd_state) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = fpr_get, + .set = fpr_set + }, +}; + +static const struct user_regset_view user_sw64_view = { + .name = "sw64", .e_machine = EM_SW64, + .regsets = sw64_regsets, .n = ARRAY_SIZE(sw64_regsets) +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return 
&user_sw64_view; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + unsigned long tmp; + size_t copied; + long ret; + + switch (request) { + /* When I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + + force_successful_syscall_return(); + ret = tmp; + break; + + /* Read register number ADDR. */ + case PTRACE_PEEKUSR: + force_successful_syscall_return(); + ret = get_reg(child, addr); + break; + + /* When I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = generic_ptrace_pokedata(child, addr, data); + break; + + case PTRACE_POKEUSR: /* write the specified register */ + ret = put_reg(child, addr, data); + break; + default: + ret = ptrace_request(child, request, addr, data); + break; + } + return ret; +} + +asmlinkage unsigned long syscall_trace_enter(void) +{ + unsigned long ret = 0; + struct pt_regs *regs = current_pt_regs(); + + if (test_thread_flag(TIF_SYSCALL_TRACE) && + ptrace_report_syscall_entry(regs)) + return NO_SYSCALL; + +#ifdef CONFIG_SECCOMP + /* Do seccomp after ptrace, to catch any tracer changes. */ + if (secure_computing() == -1) + return NO_SYSCALL; +#endif + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[0]); + audit_syscall_entry(regs->regs[0], regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]); + return ret ?: regs->regs[0]; +} + +asmlinkage void +syscall_trace_leave(void) +{ + struct pt_regs *regs = current_pt_regs(); + + audit_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ptrace_report_syscall_exit(regs, 0); + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs_return_value(regs)); +} + +#ifdef CONFIG_SUBARCH_C3B +static long rwcsr(int rw, unsigned long csr, unsigned long value) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = rw; + register unsigned long __r17 __asm__("$17") = csr; + register unsigned long __r18 __asm__("$18") = value; + + __asm__ __volatile__( + "sys_call %4" + : "=r"(__r0), "=r"(__r16), "=r"(__r17), "=r"(__r18) + : "i"(HMC_rwreg), "1"(__r16), "2"(__r17), "3"(__r18) + : "$1", "$22", "$23", "$24", "$25"); + + return __r0; +} + +#define RCSR 0 +#define WCSR 1 + +#define CSR_DA_MATCH 0 +#define CSR_DA_MASK 1 +#define CSR_IA_MATCH 2 +#define CSR_IA_MASK 3 +#define CSR_IDA_MATCH 6 +#define CSR_IDA_MASK 7 +#define CSR_DC_CTL 11 +#define CSR_DV_MATCH 15 +#define CSR_DV_MASK 16 + +#define DV_MATCH_EN_S 19 +#define DAV_MATCH_EN_S 20 + +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + unsigned long dc_ctl; + unsigned long value; + + pr_info("%s: pid %d, name = %s,cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == MMCSR__DA_MATCH) + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + if (mmcsr == MMCSR__DV_MATCH) { + value = rwcsr(RCSR, CSR_DV_MATCH, 0); + 
pr_notice("value is %#lx\n", value); + value = rwcsr(RCSR, CSR_DV_MASK, 0); + pr_notice("value is %#lx\n", value); + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + return 1; + } + + if (mmcsr == MMCSR__DA_MATCH) { + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void *) address); + return 1; + + case MMCSR__IA_MATCH: + rwcsr(WCSR, CSR_IA_MATCH, 0); //clear ia_match + return 1; + case MMCSR__IDA_MATCH: + rwcsr(WCSR, CSR_IDA_MATCH, 0); //clear ida_match + return 1; + } + + return 0; +} + +void restore_da_match_after_sched(void) +{ + unsigned long dc_ctl_mode; + unsigned long dc_ctl; + struct pcb_struct *pcb = &task_thread_info(current)->pcb; + + rwcsr(WCSR, CSR_DA_MATCH, 0); + rwcsr(WCSR, CSR_DA_MASK, pcb->da_mask); + rwcsr(WCSR, CSR_DA_MATCH, pcb->da_match); + dc_ctl_mode = pcb->dc_ctl; + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + dc_ctl |= ((dc_ctl_mode << DV_MATCH_EN_S) & ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S))); + if (dc_ctl_mode & 0x1) { + rwcsr(WCSR, CSR_DV_MATCH, pcb->dv_match); + rwcsr(WCSR, CSR_DV_MASK, pcb->dv_mask); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } +} + +#elif defined(CONFIG_SUBARCH_C4) +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + kernel_siginfo_t info; + unsigned long match_ctl, ia_match; + sigval_t sw64_value; + + pr_info("%s: pid %d, name = %s, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + case MMCSR__IA_MATCH: + case MMCSR__IDA_MATCH: + case MMCSR__IV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == MMCSR__DA_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if 
(mmcsr == MMCSR__DAV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + task_thread_info(current)->pcb.iv_match = 0; + } + if (mmcsr == MMCSR__IDA_MATCH) { + write_csr(0, CSR_IDA_MATCH); // clear ida_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5); + task_thread_info(current)->pcb.ida_match = 0; + } + return 1; + } + + info.si_signo = SIGTRAP; + info.si_addr = (void *) address; + sw64_value.sival_ptr = (void *)(regs->pc); + info.si_value = sw64_value; + info.si_code = TRAP_HWBKPT; + + if (mmcsr == MMCSR__DA_MATCH) { + info.si_errno = 1; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + info.si_errno = 2; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__DAV_MATCH) { + info.si_errno = 3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + info.si_errno = 4; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + info.si_errno = 5; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + 
task_thread_info(current)->pcb.iv_match = 0; + } + if (mmcsr == MMCSR__IDA_MATCH) { + info.si_errno = 6; + write_csr(0, CSR_IDA_MATCH); // clear ida_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5); + task_thread_info(current)->pcb.ida_match = 0; + } + pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid); + force_sig_info(&info); + return 1; + } + + return 0; +} + +/* + *pcb->match_ctl: + * [0] DA_MATCH + * [1] DV_MATCH + * [2] DAV_MATCH + * [3] IA_MATCH + * [4] IV_MATCH + * [5] IDA_MATCH + * [8:9] match_ctl_mode + * + */ +#define DA_MATCH 0x1 +#define DV_MATCH 0x2 +#define DAV_MATCH 0x4 +#define IA_MATCH 0x8 +#define IV_MATCH 0x10 +#define IDA_MATCH 0x20 + +void restore_da_match_after_sched(void) +{ + unsigned long match_ctl_mode; + unsigned long match_ctl; + struct pcb_struct *pcb = &task_thread_info(current)->pcb; + unsigned long vpn, upn; + + if (!pcb->match_ctl) + return; + pr_info("Restroe MATCH status, pid: %d\n", current->pid); + + if (pcb->match_ctl & DA_MATCH) { + write_csr(pcb->da_match, CSR_DA_MATCH); + write_csr(pcb->da_mask, CSR_DA_MASK); + match_ctl_mode = (pcb->match_ctl >> 8) & 0x3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH); + write_csr(match_ctl, CSR_DC_CTLP); + pr_info("da_match:%#lx da_mask:%#lx match_ctl:%#lx\n", pcb->da_match, pcb->da_mask, match_ctl); + } + + if (pcb->match_ctl & DV_MATCH) { + write_csr(pcb->dv_match, CSR_DV_MATCH); + write_csr(pcb->dv_mask, CSR_DV_MASK); + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + match_ctl |= (0x1UL << DV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH); + write_csr(match_ctl, CSR_DC_CTLP); + pr_info("dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n", pcb->dv_match, pcb->dv_mask, match_ctl); + } + + if (pcb->match_ctl & DAV_MATCH) { + write_csr(pcb->da_match, CSR_DA_MATCH); + write_csr(pcb->da_mask, CSR_DA_MASK); + write_csr(pcb->dv_match, CSR_DV_MATCH); + write_csr(pcb->dv_mask, CSR_DV_MASK); + write_csr(0xfffffffff, CSR_DA_MATCH_MODE); + match_ctl_mode = (pcb->match_ctl >> 8) & 0x3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) + | (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) + | (0x3UL << DPM_MATCH); + write_csr(match_ctl, CSR_DC_CTLP); + pr_info("da_match:%#lx da_mask:%#lx dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n", + pcb->da_match, pcb->da_mask, pcb->dv_match, pcb->dv_mask, match_ctl); + } + + if (pcb->match_ctl & IA_MATCH) { + pcb->ia_match |= (0x1UL << IA_MATCH_EN_S) | 0x3; + pcb->ia_mask |= 0x3; + write_csr(pcb->ia_match, CSR_IA_MATCH); + write_csr(pcb->ia_mask, CSR_IA_MASK); + vpn = read_csr(CSR_VPCR) >> 44; + vpn &= 0x3ff; + upn = read_csr(CSR_UPCR); + upn &= 0x3ff; + write_csr(((0x3ff << 18) | vpn), CSR_IA_VPNMATCH); + write_csr(((0x3ff << 18) | upn), CSR_IA_UPNMATCH); + pr_info("ia_match:%#lx ia_mask:%#lx\n", pcb->ia_match, pcb->ia_mask); + } + if (pcb->match_ctl & IV_MATCH) { + pcb->ia_match |= (0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S) | 0x3; + write_csr(pcb->ia_match, CSR_IA_MATCH); + write_csr(pcb->iv_match, CSR_IV_MATCH); + pr_info("ia_match:%#lx iv_match:%#lx\n", pcb->ia_match, 
pcb->iv_match);
+	}
+	if (pcb->match_ctl & IDA_MATCH) {
+		pcb->ida_match |= (0x1UL << IDA_MATCH_EN_S) | 0x3;
+		pcb->ida_mask |= 0x3;
+		write_csr(pcb->ida_match, CSR_IDA_MATCH);
+		write_csr(pcb->ida_mask, CSR_IDA_MASK);
+		pr_info("ida_match:%#lx ida_mask:%#lx\n", pcb->ida_match, pcb->ida_mask);
+	}
+}
+#endif
+
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define GPR_OFFSET_NAME(r) {					\
+	.name = "r" #r,						\
+	.offset = offsetof(struct pt_regs, regs[r])		\
+}
+
+#define REG_OFFSET_NAME(r) {					\
+	.name = #r,						\
+	.offset = offsetof(struct pt_regs, r)			\
+}
+
+#define REG_OFFSET_END {					\
+	.name = NULL,						\
+	.offset = 0						\
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	GPR_OFFSET_NAME(0),
+	GPR_OFFSET_NAME(1),
+	GPR_OFFSET_NAME(2),
+	GPR_OFFSET_NAME(3),
+	GPR_OFFSET_NAME(4),
+	GPR_OFFSET_NAME(5),
+	GPR_OFFSET_NAME(6),
+	GPR_OFFSET_NAME(7),
+	GPR_OFFSET_NAME(8),
+	GPR_OFFSET_NAME(9),
+	GPR_OFFSET_NAME(10),
+	GPR_OFFSET_NAME(11),
+	GPR_OFFSET_NAME(12),
+	GPR_OFFSET_NAME(13),
+	GPR_OFFSET_NAME(14),
+	GPR_OFFSET_NAME(15),
+	GPR_OFFSET_NAME(16),
+	GPR_OFFSET_NAME(17),
+	GPR_OFFSET_NAME(18),
+	GPR_OFFSET_NAME(19),
+	GPR_OFFSET_NAME(20),
+	GPR_OFFSET_NAME(21),
+	GPR_OFFSET_NAME(22),
+	GPR_OFFSET_NAME(23),
+	GPR_OFFSET_NAME(24),
+	GPR_OFFSET_NAME(25),
+	GPR_OFFSET_NAME(26),
+	GPR_OFFSET_NAME(27),
+	GPR_OFFSET_NAME(28),
+	GPR_OFFSET_NAME(29),
+	GPR_OFFSET_NAME(30),
+	REG_OFFSET_NAME(pc),
+	REG_OFFSET_NAME(ps),
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
+static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+	unsigned long ksp = kernel_stack_pointer(regs);
+
+	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+	unsigned long addr;
+
+	addr = kernel_stack_pointer(regs) + n * sizeof(long);
+	if (!regs_within_kernel_stack(regs, addr))
+		return 0;
+	return *(unsigned long *)addr;
+}
-- 
Gitee

From d2b8014f90df3bc8110fbf0730ccece696abaf37 Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Tue, 9 Jan 2024 12:27:52 +0800
Subject: [PATCH 296/953] anolis: sw64: add hardware match support

ANBZ: #4688

Add hardware match mechanism for SW64.
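
Each debugfs file takes space-separated register values parsed with
kstrtoul(..., 0, ...), so plain decimal and 0x-prefixed hex both work,
and a write pushes the configuration to every online CPU via
on_each_cpu(). A minimal userspace sketch for the DA match, which takes
three fields (match value, mask, and the DC_CTLP enable bits); the
debugfs mount point, the "sw_64" directory name behind sw64_debugfs_dir,
and the register values below are illustrative assumptions, not part of
this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "<da_match> <da_mask> <dc_ctlp bits>" */
		const char *cfg = "0x120000000 0xffffffffffffe000 0x0";
		int fd = open("/sys/kernel/debug/sw_64/da_match", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* arm the match on all CPUs; read back with cat */
		if (write(fd, cfg, strlen(cfg)) < 0)
			perror("write");
		close(fd);
		return 0;
	}
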
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/match.c | 551 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 551 insertions(+) create mode 100644 arch/sw_64/kernel/match.c diff --git a/arch/sw_64/kernel/match.c b/arch/sw_64/kernel/match.c new file mode 100644 index 000000000000..3926391270da --- /dev/null +++ b/arch/sw_64/kernel/match.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + + +char da_match_buf[1024], dv_match_buf[1024], dav_match_buf[1024]; +char ia_match_buf[1024], iv_match_buf[1024], ida_match_buf[1024]; + +unsigned long da_match_cf1, da_match_cf2, da_match_cf3; +unsigned long dv_match_cf1, dv_match_cf2, dv_match_cf3; +unsigned long dav_match_cf1, dav_match_cf2, dav_match_cf3, + dav_match_cf4, dav_match_cf5; +unsigned long ia_match_cf1, ia_match_cf2, ia_match_cf3, ia_match_cf4; +unsigned long iv_match_cf1, iv_match_cf2; +unsigned long ida_match_cf1, ida_match_cf2; + +static int da_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", da_match_buf); + return 0; +} + +static int dv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dv_match_buf); + return 0; +} + +static int dav_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dav_match_buf); + return 0; +} + +static int ia_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ia_match_buf); + return 0; +} + +static int iv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", iv_match_buf); + return 0; +} + +static int ida_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ida_match_buf); + return 0; +} + +static int da_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, da_match_show, NULL); +} + +static int dv_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, dv_match_show, NULL); +} + +static int dav_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, dav_match_show, NULL); +} + +static int ia_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, ia_match_show, NULL); +} + +static int iv_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, iv_match_show, NULL); +} + +static int ida_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, ida_match_show, NULL); +} + +static void +write_da_match(void *i) +{ + unsigned long dc_ctl; + + write_csr(da_match_cf1, CSR_DA_MATCH); + write_csr(da_match_cf2, CSR_DA_MASK); + dc_ctl = read_csr(CSR_DC_CTLP); + dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) + | (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) + | (0x3UL << DPM_MATCH)); + dc_ctl |= da_match_cf3; + write_csr(dc_ctl, CSR_DC_CTLP); +} + +static void +write_dv_match(void *i) +{ + unsigned long dc_ctl; + + write_csr(dv_match_cf1, CSR_DV_MATCH); + write_csr(dv_match_cf2, CSR_DV_MASK); + dc_ctl = read_csr(CSR_DC_CTLP); + dc_ctl &= ~((0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) + | (0x3UL << DPM_MATCH)); + dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | dv_match_cf3); + write_csr(dc_ctl, CSR_DC_CTLP); +} + +static void +write_dav_match(void *i) +{ + unsigned long dc_ctl; + + write_csr(dav_match_cf1, CSR_DA_MATCH); + write_csr(dav_match_cf2, CSR_DA_MASK); + write_csr(dav_match_cf3, CSR_DV_MATCH); + write_csr(dav_match_cf4, CSR_DV_MASK); + dc_ctl = 
read_csr(CSR_DC_CTLP);
+	dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S)
+			| (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH));
+	dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)
+			| dav_match_cf5);
+	write_csr(dc_ctl, CSR_DC_CTLP);
+}
+
+static void
+write_ia_match(void *i)
+{
+	ia_match_cf1 |= (0x1UL << IA_MATCH_EN_S);
+	write_csr_imb(ia_match_cf1, CSR_IA_MATCH);
+	write_csr_imb(ia_match_cf2, CSR_IA_MASK);
+	write_csr(((0x3ffUL << 18) | ia_match_cf3), CSR_IA_VPNMATCH);
+	write_csr(((0x3ffUL << 18) | ia_match_cf4), CSR_IA_UPNMATCH);
+}
+
+static void
+write_iv_match(void *i)
+{
+	unsigned long ia_match_tmp;
+
+	ia_match_tmp = read_csr(CSR_IA_MATCH);
+	ia_match_tmp &= ~(0x1UL << IV_PM_EN_S);
+	ia_match_tmp |= ((((iv_match_cf2 >> IV_PM_EN_S) & 0x1) << IV_PM_EN_S)
+			| (iv_match_cf2 & 0x3) | (0x1UL << IV_MATCH_EN_S));
+	write_csr_imb(iv_match_cf1, CSR_IV_MATCH);
+	write_csr_imb(ia_match_tmp, CSR_IA_MATCH);
+}
+
+static void
+write_ida_match(void *i)
+{
+	ida_match_cf1 |= (0x1UL << IDA_MATCH_EN_S);
+	write_csr(ida_match_cf1, CSR_IDA_MATCH);
+	write_csr(ida_match_cf2, CSR_IDA_MASK);
+}
+
+static ssize_t da_match_set(struct file *file, const char __user *user_buf,
+		size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	/* bound the copy by the parse buffer so the strcpy() below is safe */
+	size = min(sizeof(tmp) - 1, len);
+	if (copy_from_user(da_match_buf, user_buf, size))
+		return -EFAULT;
+
+	da_match_buf[size] = '\0';
+	strcpy(tmp, da_match_buf);
+	p = tmp;
+
+	/* one 100-byte slot per field; missing fields parse as -EINVAL */
+	memset(tmp1, 0, sizeof(tmp1));
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strscpy(&tmp1[m], ret, 100);
+	}
+
+	err = kstrtoul(&tmp1[0], 0, &da_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &da_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &da_match_cf3);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_da_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t dv_match_set(struct file *file, const char __user *user_buf,
+		size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min(sizeof(tmp) - 1, len);
+	if (copy_from_user(dv_match_buf, user_buf, size))
+		return -EFAULT;
+
+	dv_match_buf[size] = '\0';
+	strcpy(tmp, dv_match_buf);
+	p = tmp;
+
+	memset(tmp1, 0, sizeof(tmp1));
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strscpy(&tmp1[m], ret, 100);
+	}
+
+	err = kstrtoul(&tmp1[0], 0, &dv_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &dv_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &dv_match_cf3);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_dv_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t dav_match_set(struct file *file, const char __user *user_buf,
+		size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[500];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[500];
+	int err;
+	char *ret = NULL;
+
+	size = min(sizeof(tmp) - 1, len);
+	if (copy_from_user(dav_match_buf, user_buf, size))
+		return -EFAULT;
+
+	dav_match_buf[size] = '\0';
+	strcpy(tmp, dav_match_buf);
+	p = tmp;
+
+	memset(tmp1, 0, sizeof(tmp1));
+	for (i = 0; i < 5; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strscpy(&tmp1[m], ret, 100);
+	}
+
+	err = kstrtoul(&tmp1[0], 0, &dav_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &dav_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &dav_match_cf3);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[300], 0, &dav_match_cf4);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[400], 0, &dav_match_cf5);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_dav_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+	return len;
+}
+
+static ssize_t ia_match_set(struct file *file, const char __user *user_buf,
+		size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min(sizeof(tmp) - 1, len);
+	if (copy_from_user(ia_match_buf, user_buf, size))
+		return -EFAULT;
+
+	ia_match_buf[size] = '\0';
+	strcpy(tmp, ia_match_buf);
+	p = tmp;
+
+	memset(tmp1, 0, sizeof(tmp1));
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strscpy(&tmp1[m], ret, 100);
+	}
+
+	err = kstrtoul(&tmp1[0], 0, &ia_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &ia_match_cf2);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[200], 0, &ia_match_cf3);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[300], 0, &ia_match_cf4);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_ia_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+	return len;
+}
+
+static ssize_t iv_match_set(struct file *file, const char __user *user_buf,
+		size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min(sizeof(tmp) - 1, len);
+	/* store into iv_match_buf (not ia_match_buf) so a later read of
+	 * iv_match shows the configuration that was last written here
+	 */
+	if (copy_from_user(iv_match_buf, user_buf, size))
+		return -EFAULT;
+
+	iv_match_buf[size] = '\0';
+	strcpy(tmp, iv_match_buf);
+	p = tmp;
+
+	memset(tmp1, 0, sizeof(tmp1));
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strscpy(&tmp1[m], ret, 100);
+	}
+
+	err = kstrtoul(&tmp1[0], 0, &iv_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &iv_match_cf2);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_iv_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+	return len;
+}
+
+static ssize_t ida_match_set(struct file *file, const char __user *user_buf,
+		size_t len, loff_t *ppos)
+{
+	size_t size;
+	char tmp[400];
+	char *p;
+	int i, m;
+	const char *sep = " ";
+	char tmp1[400];
+	int err;
+	char *ret = NULL;
+
+	size = min(sizeof(tmp) - 1, len);
+	if (copy_from_user(ida_match_buf, user_buf, size))
+		return -EFAULT;
+
+	ida_match_buf[size] = '\0';
+	strcpy(tmp, ida_match_buf);
+	p = tmp;
+
+	memset(tmp1, 0, sizeof(tmp1));
+	for (i = 0; i < 4; i++) {
+		m = i * 100;
+		ret = strsep(&p, sep);
+		if (ret != NULL)
+			strscpy(&tmp1[m], ret, 100);
+	}
+
+	err = kstrtoul(&tmp1[0], 0, &ida_match_cf1);
+	if (err)
+		return err;
+
+	err = kstrtoul(&tmp1[100], 0, &ida_match_cf2);
+	if (err)
+		return err;
+
+	if (on_each_cpu(write_ida_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static const struct file_operations set_da_match_fops = {
+	.open = da_match_open,
+	.read = seq_read,
+	.write = da_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations set_dv_match_fops = {
+	.open = dv_match_open,
+	.read = seq_read,
+	.write = dv_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations set_dav_match_fops = {
+	.open = dav_match_open,
+	.read = seq_read,
+	.write = dav_match_set,
+	.llseek = seq_lseek,
+	
.release = single_release, +}; + +static const struct file_operations set_ia_match_fops = { + .open = ia_match_open, + .read = seq_read, + .write = ia_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations set_iv_match_fops = { + .open = iv_match_open, + .read = seq_read, + .write = iv_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + + +static const struct file_operations set_ida_match_fops = { + .open = ida_match_open, + .read = seq_read, + .write = ida_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init match_debugfs_init(void) +{ + struct dentry *match_entry; + + if (!sw64_debugfs_dir) + return -ENODEV; + + match_entry = debugfs_create_file("da_match", 0600, + sw64_debugfs_dir, NULL, + &set_da_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("dv_match", 0600, + sw64_debugfs_dir, NULL, + &set_dv_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("dav_match", 0600, + sw64_debugfs_dir, NULL, + &set_dav_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("ia_match", 0600, + sw64_debugfs_dir, NULL, + &set_ia_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("iv_match", 0600, + sw64_debugfs_dir, NULL, + &set_iv_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("ida_match", 0600, + sw64_debugfs_dir, NULL, + &set_ida_match_fops); + if (!match_entry) + return -ENOMEM; + + return 0; +} +late_initcall(match_debugfs_init); -- Gitee From 028875e676baf241f9d34db8ad54a3efad096510 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:27 +0800 Subject: [PATCH 297/953] anolis: sw64: add memory management ANBZ: #4688 Add memory management support for SW64. 
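
For reference, the 4-level geometry set up in pgtable.h works out as
follows (derived from the macros added below; with the 8 KiB base page,
PAGE_SHIFT = 13, each table level holds 2^(13 - 3) = 1024 entries):

	level	shift	one entry maps
	PTE	13	8 KiB page
	PMD	23	8 MiB (the _PAGE_LEAF huge page size)
	PUD	33	8 GiB
	PGDIR	43	8 TiB

so a fully populated page table spans 2^(43 + 10) = 2^53 bytes of
virtual address space, the top two PGD slots being reserved for
vmalloc/vmemmap (VMALLOC_START = -2 * PGDIR_SIZE), and a contiguous
PMD run (CONT_PMDS = 64) covers 512 MiB per the CONT_PMD_* macros.
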
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/cache.h | 13 + arch/sw_64/include/asm/cacheflush.h | 13 + arch/sw_64/include/asm/memory.h | 35 ++ arch/sw_64/include/asm/mmu.h | 10 + arch/sw_64/include/asm/mmu_context.h | 136 ++++ arch/sw_64/include/asm/mmzone.h | 17 + arch/sw_64/include/asm/page.h | 71 +++ arch/sw_64/include/asm/pgalloc.h | 51 ++ arch/sw_64/include/asm/pgtable-4level.h | 32 + arch/sw_64/include/asm/pgtable.h | 789 ++++++++++++++++++++++++ arch/sw_64/include/asm/sparsemem.h | 9 + arch/sw_64/include/asm/tlb.h | 13 + arch/sw_64/include/asm/tlbflush.h | 94 +++ arch/sw_64/include/asm/vmalloc.h | 5 + arch/sw_64/mm/Makefile | 16 + arch/sw_64/mm/extable.c | 25 + arch/sw_64/mm/fault.c | 305 +++++++++ arch/sw_64/mm/init.c | 339 ++++++++++ arch/sw_64/mm/mmap.c | 102 +++ arch/sw_64/mm/physaddr.c | 39 ++ 20 files changed, 2114 insertions(+) create mode 100644 arch/sw_64/include/asm/cache.h create mode 100644 arch/sw_64/include/asm/cacheflush.h create mode 100644 arch/sw_64/include/asm/memory.h create mode 100644 arch/sw_64/include/asm/mmu.h create mode 100644 arch/sw_64/include/asm/mmu_context.h create mode 100644 arch/sw_64/include/asm/mmzone.h create mode 100644 arch/sw_64/include/asm/page.h create mode 100644 arch/sw_64/include/asm/pgalloc.h create mode 100644 arch/sw_64/include/asm/pgtable-4level.h create mode 100644 arch/sw_64/include/asm/pgtable.h create mode 100644 arch/sw_64/include/asm/sparsemem.h create mode 100644 arch/sw_64/include/asm/tlb.h create mode 100644 arch/sw_64/include/asm/tlbflush.h create mode 100644 arch/sw_64/include/asm/vmalloc.h create mode 100644 arch/sw_64/mm/Makefile create mode 100644 arch/sw_64/mm/extable.c create mode 100644 arch/sw_64/mm/fault.c create mode 100644 arch/sw_64/mm/init.c create mode 100644 arch/sw_64/mm/mmap.c create mode 100644 arch/sw_64/mm/physaddr.c diff --git a/arch/sw_64/include/asm/cache.h b/arch/sw_64/include/asm/cache.h new file mode 100644 index 000000000000..6a6ce4e99265 --- /dev/null +++ b/arch/sw_64/include/asm/cache.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm/cache.h + */ +#ifndef _ASM_SW64_CACHE_H +#define _ASM_SW64_CACHE_H + +#define L1_CACHE_SHIFT 7 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#endif /* _ASM_SW64_CACHE_H */ diff --git a/arch/sw_64/include/asm/cacheflush.h b/arch/sw_64/include/asm/cacheflush.h new file mode 100644 index 000000000000..0d49830b8493 --- /dev/null +++ b/arch/sw_64/include/asm/cacheflush.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CACHEFLUSH_H +#define _ASM_SW64_CACHEFLUSH_H + +/* + * DCache: PIPT + * ICache: + * - C3B is VIVT with ICTAG, support coherence. 
+ * - C4 is VIPT + */ +#include + +#endif /* _ASM_SW64_CACHEFLUSH_H */ diff --git a/arch/sw_64/include/asm/memory.h b/arch/sw_64/include/asm/memory.h new file mode 100644 index 000000000000..b2b7492ae477 --- /dev/null +++ b/arch/sw_64/include/asm/memory.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MEMORY_H +#define _ASM_SW64_MEMORY_H + +#ifdef CONFIG_NUMA +#include +#endif + +#define MIN_MEMORY_BLOCK_SIZE_VM_MEMHP (1UL << 30) +#define NODE0_START (_TEXT_START - __START_KERNEL_map) + +#define MAX_PHYSMEM_BITS 48 + +struct mem_desc_t { + unsigned long phys_base; /* start address of physical memory */ + unsigned long phys_size; /* size of physical memory */ + phys_addr_t base; /* start address of memory managed by kernel */ + phys_addr_t size; /* size of memory managed by kernel */ +}; +extern struct mem_desc_t mem_desc; + +struct numa_node_desc_t { + phys_addr_t base; + phys_addr_t size; +}; +extern struct numa_node_desc_t numa_nodes_desc[]; + +void __init callback_init(void); +void __init mem_detect(void); +void __init sw64_memblock_init(void); +void __init zone_sizes_init(void); +void __init sw64_numa_init(void); +void __init sw64_memory_present(void); + +#endif /* _ASM_SW64_MEMORY_H */ diff --git a/arch/sw_64/include/asm/mmu.h b/arch/sw_64/include/asm/mmu.h new file mode 100644 index 000000000000..f24219fac654 --- /dev/null +++ b/arch/sw_64/include/asm/mmu.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_H +#define _ASM_SW64_MMU_H + +/* The sw64 MMU context is one "unsigned long" bitmap per CPU*/ +typedef struct { + unsigned long asid[NR_CPUS]; + void *vdso; +} mm_context_t; +#endif /* _ASM_SW64_MMU_H */ diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h new file mode 100644 index 000000000000..420ad5f745be --- /dev/null +++ b/arch/sw_64/include/asm/mmu_context.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_CONTEXT_H +#define _ASM_SW64_MMU_CONTEXT_H + +#include + +#include +#include + +/* + * The maximum ASID's the processor supports. + */ + +#if defined(CONFIG_SUBARCH_C3B) || defined(CONFIG_SUBARCH_C4) +#define ASID_BITS 10 +#endif + +#include +#define last_asid(cpu) (cpu_data[cpu].last_asid) + +#define ASID_FIRST_VERSION (1UL << ASID_BITS) +#define ASID_MASK ((1UL << ASID_BITS) - 1) + +#define cpu_asid(cpu, mm) ((mm)->context.asid[cpu] & ASID_MASK) + +static inline bool asid_valid(struct mm_struct *mm, unsigned int cpu) +{ + return !((mm->context.asid[cpu] ^ last_asid(cpu)) & ~ASID_MASK); +} + +/* + * NOTE! The way this is set up, the high bits of the "last_asid" (and + * the "mm->context.asid[cpu]") are the ASID _version_ code. A version + * of 0 is always considered invalid, so to invalidate another process + * you only need to do "p->mm->context.asid[cpu] = 0". + * + * If we need more ASID's than the processor has, we invalidate the old + * user TLB's (tbivp()) and start a new ASID version. That will force a + * new asid for any other processes the next time they want to run. + */ + +static inline void __get_new_mm_context(struct mm_struct *mm, long cpu) +{ + unsigned long asid = last_asid(cpu); + + if (!(++asid & ASID_MASK)) + tbivp(); + mm->context.asid[cpu] = last_asid(cpu) = asid; + +} + +static inline void +switch_mm_irqs_off(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + /* Check if our ASID is of an older version, and thus invalid. 
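
+	 * The bits above ASID_MASK are a generation number: if they do
+	 * not match last_asid(cpu), this mm's ASID is stale and
+	 * __get_new_mm_context() below hands out a fresh one (tbivp()
+	 * flushes the old user translations when the 10-bit ASID space
+	 * wraps and the generation advances).
+	 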
*/ + unsigned long asid, ptbr; + long cpu = smp_processor_id(); + + if (!asid_valid(next_mm, cpu)) + __get_new_mm_context(next_mm, cpu); + + /* Update CSR:UPN and CSR:PTBR. Another thread may have allocated + * a new mm->context[asid] (via flush_tlb_mm) without the ASID serial + * number wrapping. We have no way to detect when this is needed. + */ + asid = cpu_asid(cpu, next_mm); + ptbr = virt_to_pfn(next_mm->pgd); + load_mm(asid, ptbr); + cpumask_set_cpu(cpu, mm_cpumask(next_mm)); +} + +#define switch_mm_irqs_off switch_mm_irqs_off + +static inline void +switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev_mm, next_mm, tsk); + local_irq_restore(flags); +} + +#define activate_mm(prev, next) switch_mm(prev, next, current) +#define deactivate_mm(tsk, mm) do { } while (0) + +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + int i; + + for_each_possible_cpu(i) + mm->context.asid[i] = 0; + return 0; +} + +static inline void destroy_context(struct mm_struct *mm) +{ + /* Nothing to do. */ +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + return 0; +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, + bool foreign) +{ + /* by default, allow everything */ + return true; +} +#endif /* _ASM_SW64_MMU_CONTEXT_H */ diff --git a/arch/sw_64/include/asm/mmzone.h b/arch/sw_64/include/asm/mmzone.h new file mode 100644 index 000000000000..363e2bc98a95 --- /dev/null +++ b/arch/sw_64/include/asm/mmzone.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMZONE_H +#define _ASM_SW64_MMZONE_H + +#include + +/* + * Following are macros that are specific to this numa platform. 
+ */ + +extern pg_data_t *node_data[]; + +#ifdef CONFIG_NUMA +#define NODE_DATA(nid) (node_data[(nid)]) +#endif + +#endif /* _ASM_SW64_MMZONE_H */ diff --git a/arch/sw_64/include/asm/page.h b/arch/sw_64/include/asm/page.h new file mode 100644 index 000000000000..68b4f2fc1b48 --- /dev/null +++ b/arch/sw_64/include/asm/page.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PAGE_H +#define _ASM_SW64_PAGE_H + +#include +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 13 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#define HUGE_MAX_HSTATE 2 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +extern void clear_page(void *page); +#define clear_user_page(page, vaddr, pg) clear_page(page) + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +extern void copy_page(void *_to, void *_from); +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +typedef struct page *pgtable_t; + +extern unsigned long __phys_addr(unsigned long addr); +#ifdef CONFIG_SUBARCH_C3B +extern unsigned long __boot_phys_addr(unsigned long addr); +#else +#define __boot_phys_addr(x) __phys_addr(x) +#endif + +#endif /* !__ASSEMBLY__ */ + +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) + +#include + +#define __START_KERNEL_map PAGE_OFFSET + +#define __pa(x) __phys_addr((unsigned long)(x)) +#define __va(x) ((void *)((unsigned long) (x) | PAGE_OFFSET)) + +#define __boot_pa(x) __boot_phys_addr((unsigned long)(x)) +#define __boot_va(x) __va(x) + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#define virt_to_pfn(vaddr) (PHYS_PFN(__pa(vaddr))) +#define pfn_to_virt(pfn) (__va(PFN_PHYS(pfn))) + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#endif /* CONFIG_FLATMEM */ + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC +#include +#include +#endif + +#endif /* _ASM_SW64_PAGE_H */ diff --git a/arch/sw_64/include/asm/pgalloc.h b/arch/sw_64/include/asm/pgalloc.h new file mode 100644 index 000000000000..1cc03e3be5b6 --- /dev/null +++ b/arch/sw_64/include/asm/pgalloc.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGALLOC_H +#define _ASM_SW64_PGALLOC_H + +#include +#include +#include /* for pte_{alloc,free}_one */ + +/* + * Allocate and free page tables. The xxx_kernel() versions are + * used to allocate a kernel page table - this turns on ASN bits + * if any. 
+ */ + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + unsigned long pfn = page_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) +{ + unsigned long pfn = virt_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + unsigned long pfn = virt_to_pfn(pmd); + + set_pud(pud, __pud((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + unsigned long pfn = virt_to_pfn(pud); + + set_p4d(p4d, __p4d((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +extern pgd_t *pgd_alloc(struct mm_struct *mm); + +#define check_pgt_cache() do { } while (0) + +#endif /* _ASM_SW64_PGALLOC_H */ diff --git a/arch/sw_64/include/asm/pgtable-4level.h b/arch/sw_64/include/asm/pgtable-4level.h new file mode 100644 index 000000000000..719e2c5377e3 --- /dev/null +++ b/arch/sw_64/include/asm/pgtable-4level.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGTABLE_4LEVEL_H +#define _ASM_SW64_PGTABLE_4LEVEL_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pud; } pud_t; +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pud_val(x) ((x).pud) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pmd(x) ((pmd_t) { (x) }) +#define __pud(x) ((pud_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) +#endif /* !__ASSEMBLY__ */ + +#define PAGE_OFFSET 0xfff0000000000000 + +#endif +#endif /* _ASM_SW64_PGTABLE_4LEVEL_H */ diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h new file mode 100644 index 000000000000..0b1f825eb74c --- /dev/null +++ b/arch/sw_64/include/asm/pgtable.h @@ -0,0 +1,789 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGTABLE_H +#define _ASM_SW64_PGTABLE_H + + +#include + +/* + * This file contains the functions and defines necessary to modify and use + * the sw64 page table tree. + * + * This hopefully works with any standard sw64 page-size, as defined + * in (currently 8192). 
+ */ +#include +#include + +#include +#include +#include /* For TASK_SIZE */ +#include + +struct mm_struct; +struct vm_area_struct; + +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + set_pmd(pmdp, pmdval); +} + +static inline void set_pud(pud_t *pudp, pud_t pud) +{ + *pudp = pud; +} + +static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + *p4dp = p4d; +} +/* PGDIR_SHIFT determines what a forth-level page table entry can map */ +#define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +/* PUD_SHIFT determines the size of the area a third-level page table can map */ +#define PUD_SHIFT (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3)) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* PMD_SHIFT determines the size of the area a second-level page table can map */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE - 1)) + +#define CONT_PMD_SHIFT 6 +#define CONT_PMDS (1 << CONT_PMD_SHIFT) +#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE) +#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1)) + +/* + * Entries per page directory level: the sw64 is three-level, with + * all levels having a one-page page table. + */ +#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3)) +#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - 3)) +#define PTRS_PER_PGD (1UL << (PAGE_SHIFT - 3)) +#define PTRS_PER_PUD (1UL << (PAGE_SHIFT - 3)) + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0UL + +/* Number of pointers that fit on a page: this will go away. */ +#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT - 3)) + +#define VMALLOC_START (-2 * PGDIR_SIZE) +#ifndef CONFIG_SPARSEMEM_VMEMMAP +#define VMALLOC_END (-PGDIR_SIZE) +#else +#define VMEMMAP_END (-PGDIR_SIZE) +#define vmemmap ((struct page *)VMEMMAP_END - (1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT))) +#define VMALLOC_END ((unsigned long)vmemmap) +#endif + +/* + * HMcode-imposed page table bits + */ +#if defined(CONFIG_SUBARCH_C3B) + +#define _PAGE_VALID 0x0001 +#define _PAGE_PRESENT _PAGE_VALID +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_ASM 0x0010 +#define _PAGE_CONT 0x0020 /* used for 256M page size bit */ +#define _PAGE_LEAF 0x0040 /* used for 8M page size bit */ +#define _PAGE_PROTNONE 0x0080 /* used for numa page balancing */ +#define _PAGE_SPECIAL 0x0100 +#define _PAGE_KRE 0x0400 /* xxx - see below on the "accessed" bit */ +#define _PAGE_URE 0x0800 /* xxx */ +#define _PAGE_KWE 0x4000 /* used to do the dirty bit in software */ +#define _PAGE_UWE 0x8000 /* used to do the dirty bit in software */ + +/* .. and these are ours ... */ +#define _PAGE_DIRTY 0x20000 +#define _PAGE_ACCESSED 0x40000 + +#define _PAGE_SPLITTING 0x200000 /* For Transparent Huge Page */ +#define _PAGE_DEVMAP 0x400000 /* For ZONE DEVICE page */ + +#define _PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */ +#define _PAGE_BIT_ACCESSED 18 /* bit of _PAGE_ACCESSED */ +#define _PAGE_BIT_SPLITTING 21 /* bit of _PAGE_SPLITTING */ +#define _PAGE_BIT_DEVMAP 22 /* bit of _PAGE_DEVMAP */ +/* + * NOTE! 
The "accessed" bit isn't necessarily exact: it can be kept exactly + * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it. + * Under Linux/sw64, the "accessed" bit just means "read", and I'll just use + * the KRE/URE bits to watch for it. That way we don't need to overload the + * KWE/UWE bits with both handling dirty and accessed. + * + * Note that the kernel uses the accessed bit just to check whether to page + * out a page or not, so it doesn't have to be exact anyway. + */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE) +#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) + +#define _PFN_SHIFT 28 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) + +#define page_valid_kern(x) (0) + +#elif defined(CONFIG_SUBARCH_C4) + +#define _PAGE_VALID 0x0001 +#define _PAGE_PRESENT _PAGE_VALID +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_FIXED 0x0010 +#define _PAGE_CONT 0x0020 /* used for 512M page size bit*/ +#define _PAGE_LEAF 0x0040 /* used for huge page bit */ +#define _PAGE_PCD 0x0080 /* used for page cache disabled */ + +/* and these are sw definition */ +#define _PAGE_WCD 0x0100 +#define _PAGE_ACCESSED 0x0200 +#define _PAGE_SPLITTING 0x0400 /* For Transparent Huge Page */ +#define _PAGE_SPECIAL 0x0800 +#define _PAGE_DEVMAP 0x1000 /* For ZONE DEVICE page */ +#define _PAGE_KERN 0x2000 +#define _PAGE_DIRTY _BITUL(62) +#define _PAGE_PROTNONE _BITUL(63) +#define _PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */ +#define _PAGE_BIT_ACCESSED 9 /* bit of _PAGE_ACCESSED */ +#define _PAGE_BIT_SPLITTING 10 /* bit of _PAGE_SPLITTING */ +#define _PAGE_BIT_DEVMAP 12 /* bit of _PAGE_DEVMAP */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS _PAGE_DIRTY +#define __ACCESS_BITS _PAGE_ACCESSED + +#define _PFN_SHIFT 24 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. 
They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_LEAF | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF | (x)) + +#define page_valid_kern(x) ((x & (_PAGE_VALID | _PAGE_KERN)) == (_PAGE_VALID | _PAGE_KERN)) +#endif + +#define PFN_PTE_SHIFT _PFN_SHIFT + +#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT) +#define _PFN_MASK (GENMASK(_PFN_BITS - 1, 0) << _PFN_SHIFT) + +#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) +#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_SPECIAL | _PAGE_LEAF | _PAGE_CONT) + +#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW) +#define _PAGE_S(x) _PAGE_NORMAL(x) + +/* + * pgprot_noncached() is only for infiniband pci support, and a real + * implementation for RAM would be more complicated. + */ +#define pgprot_noncached(prot) (prot) + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; + + if (page_valid_kern(pte_val(pteval))) { + mb(); + if ((pte_val(pteval) & _PAGE_FOE) == 0) + imemb(); + } +} + +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + pte_t pte; + + pte_val(pte) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pte; +} + +static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot) +{ + pmd_t pmd; + + pmd_val(pmd) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pmd; +} +static inline pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot) +{ + pud_t pud; + + pud_val(pud) = (pfn << _PFN_SHIFT) | pgprot_val(pgprot); + return pud; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pmd; +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
+ */ +#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define p4d_pfn(p4d) ((p4d_val(p4d) & _PFN_MASK) >> _PFN_SHIFT) +#define pud_pfn(pud) ((pud_val(pud) & _PFN_MASK) >> _PFN_SHIFT) +#define pmd_pfn(pmd) ((pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT) +#define pte_pfn(pte) ((pte_val(pte) & _PFN_MASK) >> _PFN_SHIFT) + +#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d)) +#define pud_page(pud) pfn_to_page(pud_pfn(pud)) +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) + +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)pfn_to_virt(pud_pfn(pud)); +} + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + return (pud_t *)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long p4d_page_vaddr(p4d_t p4d) +{ + return (unsigned long)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)pfn_to_virt(pud_pfn(pud)); +} + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)pfn_to_virt(pmd_pfn(pmd)); +} + +static inline int pte_none(pte_t pte) +{ + return !pte_val(pte); +} + +static inline int pte_valid(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_VALID); +} + +static inline int pte_present(pte_t pte) +{ + return !!(pte_val(pte) & (_PAGE_VALID | _PAGE_PROTNONE)); +} + +static inline int pte_huge(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_LEAF); +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_val(*ptep) = 0; +} + +#define pte_accessible(mm, pte) \ + (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) + +static inline int pmd_none(pmd_t pmd) +{ + return !pmd_val(pmd); +} + +static inline int pmd_bad(pmd_t pmd) +{ + return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pmd_present(pmd_t pmd) +{ + /* + * Checking for _PAGE_LEAF is needed too because + * split_huge_page will temporarily clear the valid bit (but + * the _PAGE_LEAF flag will remain set at all times while the + * _PAGE_VALID bit is clear). 
+ */ + return !!(pmd_val(pmd) & (_PAGE_VALID | _PAGE_PROTNONE | _PAGE_LEAF)); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = 0; +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DIRTY); +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_ACCESSED); +} + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return !(pmd_val(pmd) & _PAGE_FOW); +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_VALID | _PAGE_PROTNONE); + return pmd; +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__DIRTY_BITS); + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__ACCESS_BITS); + return pmd; +} + +static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd_val(pmd) |= __DIRTY_BITS; + return pmd; +} + +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_DEVMAP; + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd_val(pmd) |= __ACCESS_BITS; + return pmd; +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_LEAF; + return pmd; +} + +static inline pmd_t pmd_mkcont(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_CONT; + return pmd; +} + +static inline int pud_none(pud_t pud) +{ + return !pud_val(pud); +} + +static inline int pud_bad(pud_t pud) +{ + return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pud_present(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_VALID); +} + +static inline void pud_clear(pud_t *pudp) +{ + pud_val(*pudp) = 0; +} + +static inline pud_t pud_mkhuge(pud_t pud) +{ + pud_val(pud) |= _PAGE_LEAF; + return pud; +} + +static inline int p4d_none(p4d_t p4d) +{ + return !p4d_val(p4d); +} + +static inline int p4d_bad(p4d_t p4d) +{ + return (p4d_val(p4d) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int p4d_present(p4d_t p4d) +{ + return !!(p4d_val(p4d) & _PAGE_VALID); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + p4d_val(*p4dp) = 0; +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
+ */ +static inline int pte_write(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_FOW); +} + +static inline int pte_dirty(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_DIRTY); +} + +static inline int pte_young(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_ACCESSED); +} + +static inline int pte_special(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SPECIAL); +} + +static inline int pte_cont(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_CONT); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(__DIRTY_BITS); + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(__ACCESS_BITS); + return pte; +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= __DIRTY_BITS; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= __ACCESS_BITS; + return pte; +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_LEAF; + return pte; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +static inline pte_t pte_mkdevmap(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +#ifdef CONFIG_NUMA_BALANCING +/* + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return (pmd_val(pmd) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} +#endif + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pte_devmap(pte_t a) +{ + return (pte_val(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ +#define pmdp_establish generic_pmdp_establish + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_SPLITTING); +} + +static inline int pmd_trans_cont(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_CONT); +} + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_LEAF); +} + +static inline int has_transparent_hugepage(void) +{ + return 1; +} + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pmd_devmap(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DEVMAP); +} + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline int pud_devmap(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_DEVMAP); +} +#else +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +#endif + +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + unsigned long pmd_val = xchg(&pmdp->pmd, 0); + pmd_t pmd = (pmd_t){pmd_val}; + return pmd; +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + set_bit(_PAGE_BIT_FOW, (unsigned long *)pmdp); +} + +#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), (prot)) + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define 
__HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +extern pgd_t swapper_pg_dir[1024]; + +/* + * The sw64 doesn't have any external MMU info: the kernel page + * tables contain all the necessary information. + */ +#define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) +{ +} + +#if defined(CONFIG_SUBARCH_C3B) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bit 7: _PAGE_PROTNONE (must be zero) + * bits 8-15: swap type + * bits 16-63: swap offset + */ +#define __SWP_TYPE_SHIFT 8 +#define __SWP_TYPE_BITS 8 + +#elif defined(CONFIG_SUBARCH_C4) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bits 7-11: swap type + * bits 12-58: swap offset + * bit 63: _PAGE_PROTNONE (must be zero) + */ +#define __SWP_TYPE_SHIFT 7 +#define __SWP_TYPE_BITS 5 + +#endif + +#define __SWP_OFFSET_BITS 47 +#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) +#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) + +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) +#define __swp_entry(type, offset) \ + ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +static inline int pte_swp_exclusive(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SWP_EXCLUSIVE); +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} + +#define kern_addr_valid(addr) (1) + +#define pte_ERROR(e) \ + pr_err("%s: %d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + pr_err("%s: %d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pud_ERROR(e) \ + pr_err("%s: %d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s: %d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) +extern void paging_init(void); + +/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. 
*/
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#endif /* _ASM_SW64_PGTABLE_H */
diff --git a/arch/sw_64/include/asm/sparsemem.h b/arch/sw_64/include/asm/sparsemem.h
new file mode 100644
index 000000000000..a60e757f3838
--- /dev/null
+++ b/arch/sw_64/include/asm/sparsemem.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SPARSEMEM_H
+#define _ASM_SW64_SPARSEMEM_H
+
+#include
+
+#define SECTION_SIZE_BITS 28
+
+#endif /* _ASM_SW64_SPARSEMEM_H */
diff --git a/arch/sw_64/include/asm/tlb.h b/arch/sw_64/include/asm/tlb.h
new file mode 100644
index 000000000000..08c8f4f97de1
--- /dev/null
+++ b/arch/sw_64/include/asm/tlb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_TLB_H
+#define _ASM_SW64_TLB_H
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
+#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
+
+#endif /* _ASM_SW64_TLB_H */
diff --git a/arch/sw_64/include/asm/tlbflush.h b/arch/sw_64/include/asm/tlbflush.h
new file mode 100644
index 000000000000..73995d9663a6
--- /dev/null
+++ b/arch/sw_64/include/asm/tlbflush.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_TLBFLUSH_H
+#define _ASM_SW64_TLBFLUSH_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static inline void local_flush_tlb_all(void)
+{
+	tbiv();
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	int cpu;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+	if (!asid_valid(mm, cpu)) {
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+		goto out;
+	}
+
+	if (current->mm == mm) {
+		__get_new_mm_context(mm, cpu);
+		wrasid(cpu_asid(cpu, mm));
+	} else {
+		mm->context.asid[cpu] = 0;
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+	}
+out:
+	local_irq_restore(flags);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	int cpu;
+	struct mm_struct *mm;
+
+	cpu = smp_processor_id();
+	mm = vma->vm_mm;
+
+	if (asid_valid(mm, cpu))
+		tbisasid(cpu_asid(cpu, mm), addr);
+	else
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+}
+
+/*
+ * It flushes the whole user tlb now.
+ */
+static inline void
+local_flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
+{
+	local_flush_tlb_mm(vma->vm_mm);
+}
+
+/*
+ * There is no way to invalidate kernel pages only, so it has to
+ * invalidate all mappings.
+ */ +static inline void +local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + local_flush_tlb_all(); +} + + +#ifdef CONFIG_SMP +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +#else +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) +#define flush_tlb_range(vma, start, end) local_flush_tlb_range(vma, start, end) +#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, end) + +#endif /* CONFIG_SMP */ + +#endif /* _ASM_SW64_TLBFLUSH_H */ diff --git a/arch/sw_64/include/asm/vmalloc.h b/arch/sw_64/include/asm/vmalloc.h new file mode 100644 index 000000000000..a76d1133d6c6 --- /dev/null +++ b/arch/sw_64/include/asm/vmalloc.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_VMALLOC_H +#define _ASM_SW64_VMALLOC_H + +#endif /* _ASM_SW64_VMALLOC_H */ diff --git a/arch/sw_64/mm/Makefile b/arch/sw_64/mm/Makefile new file mode 100644 index 000000000000..8b9d6e4d2ebf --- /dev/null +++ b/arch/sw_64/mm/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux sw_64-specific parts of the memory manager. +# + +#ccflags-y := -Werror + +obj-y := init.o fault.o physaddr.o mmap.o extable.o + +obj-$(CONFIG_NUMA) += numa.o +ifeq ($(CONFIG_SUBARCH_C4),y) +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage_c4.o +else +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +endif +obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += thp.o diff --git a/arch/sw_64/mm/extable.c b/arch/sw_64/mm/extable.c new file mode 100644 index 000000000000..d2678e12a1b1 --- /dev/null +++ b/arch/sw_64/mm/extable.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +int fixup_exception(struct pt_regs *regs, unsigned long pc) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(pc); + if (fixup) { + unsigned int valreg = fixup->fixup.bits.valreg; + unsigned int errreg = fixup->fixup.bits.errreg; + + if (valreg != 31) + regs->regs[valreg] = 0; + if (errreg != 31) + regs->regs[errreg] = -EFAULT; + pc += fixup->fixup.bits.nextinsn; + regs->pc = pc; + + return 1; + } + return 0; +} diff --git a/arch/sw_64/mm/fault.c b/arch/sw_64/mm/fault.c new file mode 100644 index 000000000000..e76560a7edca --- /dev/null +++ b/arch/sw_64/mm/fault.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include + +#include + +__read_mostly bool segv_debug_enabled; + +#ifdef CONFIG_KPROBES +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + int ret = 0; + /* kprobe_running() needs smp_processor_id() */ + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, mmcsr)) + ret = 1; + preempt_enable(); + } + return ret; +} +#else +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + return 0; +} +#endif + +extern void die(char *, struct pt_regs *, long); +extern void show_regs(struct pt_regs *regs); + +void show_all_vma(void) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + MA_STATE(mas, 0, 0, 0); + + if (!mm) + return; + + mas.tree = 
&mm->mm_mt;
+
+	for (int i = 0; (vma = mas_find(&mas, ULONG_MAX)) != NULL; i++) {
+		unsigned long start = vma->vm_start;
+		unsigned long end = vma->vm_end;
+		struct file *file = vma->vm_file;
+
+		if (file)
+			pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, file = %s, name = %s\n",
+				i, start, end, (end - start), vma->vm_flags,
+				file->f_path.dentry->d_name.name, current->comm);
+		else
+			pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, name = %s\n",
+				i, start, end, (end - start), vma->vm_flags, current->comm);
+	}
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to handle_mm_fault().
+ *
+ * mmcsr:
+ *	0 = translation not valid
+ *	1 = access violation
+ *	2 = fault-on-read
+ *	3 = fault-on-execute
+ *	4 = fault-on-write
+ *
+ * cause:
+ *	-1 = instruction fetch
+ *	0 = load
+ *	1 = store
+ *
+ * Registers $9 through $15 are saved in a block just prior to `regs' and
+ * are saved and restored around the call to allow exception code to
+ * modify them.
+ */
+
+unsigned long show_va_to_pa(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd = NULL;
+	p4d_t *p4d = NULL;
+	pud_t *pud = NULL;
+	pmd_t *pmd = NULL;
+	pte_t *pte = NULL;
+	unsigned long ret = 0UL;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_none(*pgd)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx\n", addr, pgd_val(*pgd));
+		goto out;
+	}
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx, p4d = %#lx\n",
+			 addr, pgd_val(*pgd), p4d_val(*p4d));
+		goto out;
+	}
+	pud = pud_offset(p4d, addr);
+	if (pud_none(*pud)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx\n",
+			 addr, pgd_val(*pgd), pud_val(*pud));
+		goto out;
+	}
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx\n",
+			 addr, pgd_val(*pgd), pud_val(*pud), pmd_val(*pmd));
+		goto out;
+	}
+	pte = pte_offset_map(pmd, addr);
+	if (!pte)	/* raced with a concurrent page table update */
+		goto out;
+	if (pte_present(*pte)) {
+		ret = (unsigned long)pfn_to_virt(pte_pfn(*pte));
+		pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx, pte = %#lx, ret = %#lx\n",
+			 addr, *(unsigned long *)pgd, *(unsigned long *)pud,
+			 *(unsigned long *)pmd, *(unsigned long *)pte, ret);
+	}
+	pte_unmap(pte);
+out:
+	return ret;
+}
+
+extern int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs);
+
+asmlinkage void notrace
+do_page_fault(unsigned long address, unsigned long mmcsr,
+	      long cause, struct pt_regs *regs)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	int si_code = SEGV_MAPERR;
+	vm_fault_t fault;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
+
+	if (notify_page_fault(regs, mmcsr))
+		return;
+
+	if (unlikely(mmcsr >= MMCSR__DA_MATCH)) {
+		if (do_match(address, mmcsr, cause, regs) == 1)
+			return;
+	}
+
+	if (unlikely(mmcsr == MMCSR__ACV1)) {
+		if (!user_mode(regs))
+			goto no_context;
+		/* the mmap lock has not been taken yet at this point */
+		goto bad_area_nosemaphore;
+	}
+
+	/*
+	 * If we're in an interrupt context, or have no user context,
+	 * we must not take the fault.
+	 */
+	if (!mm || faulthandler_disabled())
+		goto no_context;
+
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+retry:
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (!vma)
+		goto bad_area_nosemaphore;
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it.
+ */ + si_code = SEGV_ACCERR; + if (cause < 0) { + if (!(vma->vm_flags & VM_EXEC)) + goto bad_area; + } else if (!cause) { + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) + goto bad_area; + } else { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + flags |= FAULT_FLAG_WRITE; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; + return; + } + + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + + if (fault & VM_FAULT_MAJOR) { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, + regs, address); + current->maj_flt++; + } else { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, + regs, address); + current->min_flt++; + } + + if (fault & VM_FAULT_RETRY) { + flags |= FAULT_FLAG_TRIED; + + /* No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + + mmap_read_unlock(mm); + + return; + + /* + * Something tried to access memory that isn't in our memory map. + * Fix it, but check if it's kernel or user first. + */ + bad_area: + mmap_read_unlock(mm); + + bad_area_nosemaphore: + if (user_mode(regs)) + goto do_sigsegv; + + no_context: + /* Are we prepared to handle this fault as an exception? */ + if (fixup_exception(regs, regs->pc)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + pr_alert("Unable to handle kernel paging request at virtual address %016lx\n", + address); + die("Oops", regs, cause); + make_task_dead(SIGKILL); + + /* + * We ran out of memory, or some other thing happened to us that + * made us unable to handle the page fault gracefully. + */ + out_of_memory: + mmap_read_unlock(mm); + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); + return; + + do_sigbus: + mmap_read_unlock(mm); + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address); + if (!user_mode(regs)) + goto no_context; + return; + + do_sigsegv: + force_sig_fault(SIGSEGV, si_code, (void __user *) address); + + if (unlikely(segv_debug_enabled)) { + pr_info("fault: want to send_segv: pid %d, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + current->pid, cause, mmcsr, address, regs->pc); + show_regs(regs); + show_all_vma(); + } +} diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c new file mode 100644 index 000000000000..ca761b602ab6 --- /dev/null +++ b/arch/sw_64/mm/init.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +/* 2.3.x zone allocator, 1999 Andrea Arcangeli */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct mem_desc_t mem_desc; +#ifndef CONFIG_NUMA +struct numa_node_desc_t numa_nodes_desc[1]; +#endif /* CONFIG_NUMA */ + +/* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. 
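+ *
+ * Read faults on anonymous mappings can all map this one zeroed
+ * page read-only; a private page is only allocated when the
+ * mapping is first written to.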
+ */
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+pg_data_t *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+
+pgd_t swapper_pg_dir[1024] __aligned(PAGE_SIZE);
+static pud_t vmalloc_pud[1024] __aligned(PAGE_SIZE);
+
+static phys_addr_t mem_start;
+static phys_addr_t mem_size_limit;
+
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+unsigned long memory_block_size_bytes(void)
+{
+ if (is_in_guest())
+ return MIN_MEMORY_BLOCK_SIZE_VM_MEMHP;
+ else
+ return MIN_MEMORY_BLOCK_SIZE;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+static int __init setup_mem_size(char *p)
+{
+ char *oldp;
+ unsigned long start, size;
+
+ start = 0;
+ oldp = p;
+ size = memparse(p, &p);
+ if (p == oldp)
+ return -EINVAL;
+
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+
+ mem_start = start;
+ mem_size_limit = size;
+ return 0;
+}
+early_param("mem", setup_mem_size);
+
+#if defined(CONFIG_SUBARCH_C3B)
+pgd_t *
+pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret, *init;
+
+ ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ init = pgd_offset(&init_mm, 0UL);
+ if (ret)
+ pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
+
+ return ret;
+}
+#elif defined(CONFIG_SUBARCH_C4)
+pgd_t *
+pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret;
+
+ ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+ return ret;
+}
+#endif
+
+/* Set up initial PCB, VPTB, and other such niceties. */
+
+static inline void
+switch_to_system_map(void)
+{
+ memset(swapper_pg_dir, 0, PAGE_SIZE);
+ update_ptbr_sys(virt_to_phys(swapper_pg_dir));
+ tbiv();
+}
+
+void __init callback_init(void)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+
+ switch_to_system_map();
+
+ /* Allocate one PGD and one PUD. */
+ pgd = pgd_offset_k(VMALLOC_START);
+ p4d = p4d_offset(pgd, VMALLOC_START);
+ p4d_populate(&init_mm, p4d, (pud_t *)vmalloc_pud);
+}
+
+void __init zone_sizes_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn);
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(max_zone_pfns);
+}
+
+/*
+ * paging_init() sets up the memory map.
+ */
+void __init paging_init(void)
+{
+}
+
+void __init mem_detect(void)
+{
+ int i;
+
+ mem_desc.phys_base = 0;
+ for (i = 0; i < MAX_NUMSOCKETS; i++) {
+ if (socket_desc[i].is_online)
+ mem_desc.phys_size += socket_desc[i].socket_mem;
+ }
+
+ if (mem_start >= NODE0_START) {
+ mem_desc.base = mem_start;
+ } else {
+ mem_desc.base = NODE0_START;
+ mem_size_limit -= NODE0_START - mem_start;
+ }
+
+ if (mem_size_limit && mem_size_limit < mem_desc.phys_size - NODE0_START)
+ mem_desc.size = mem_size_limit;
+ else
+ mem_desc.size = mem_desc.phys_size - NODE0_START;
+}
+
+void __init sw64_memblock_init(void)
+{
+ memblock_add(mem_desc.base, mem_desc.size);
+
+ memblock_remove(1ULL << MAX_PHYSMEM_BITS, PHYS_ADDR_MAX);
+
+ max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+
+ memblock_allow_resize();
+ memblock_initialized = true;
+ process_memmap();
+
+ /* Make sure kernel text is in memory range. */
+ memblock_add(__pa_symbol(_text), _end - _text);
+ memblock_reserve(__pa_symbol(_text), _end - _text);
+
+ /* Make sure initrd is in memory range. 
*/ + if (sunway_boot_params->initrd_start) { + phys_addr_t base = __boot_pa(sunway_boot_params->initrd_start); + phys_addr_t size = sunway_boot_params->initrd_size; + + memblock_add(base, size); + memblock_reserve(base, size); + } + + /* end of DRAM range may have been changed */ + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); +} + +#ifndef CONFIG_NUMA +void __init sw64_numa_init(void) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + memblock_set_node(mem_desc.base, mem_desc.size, &memblock.memory, 0); + nd_pa = memblock_phys_alloc(nd_size, SMP_CACHE_BYTES); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != 0) + pr_info("NODE_DATA(%d) on node %d\n", 0, tnid); + + node_data[0] = nd; + memset(NODE_DATA(0), 0, sizeof(pg_data_t)); + NODE_DATA(0)->node_id = 0; + NODE_DATA(0)->node_start_pfn = mem_desc.base >> PAGE_SHIFT; + NODE_DATA(0)->node_spanned_pages = mem_desc.size >> PAGE_SHIFT; + node_set_online(0); +} +#endif /* CONFIG_NUMA */ + +void __init +mem_init(void) +{ + set_max_mapnr(max_low_pfn); + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); +#ifdef CONFIG_SWIOTLB + swiotlb_init(true, SWIOTLB_VERBOSE); +#endif + memblock_free_all(); +} + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + return vmemmap_populate_basepages(start, end, node, altmap); +} + +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) +{ +} +#endif + +#ifdef CONFIG_HAVE_MEMBLOCK +#ifndef MIN_MEMBLOCK_ADDR +#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) +#endif +#ifndef MAX_MEMBLOCK_ADDR +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) +#endif +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + const u64 phys_offset = MIN_MEMBLOCK_ADDR; + + if (acpi_disabled) { + if (!PAGE_ALIGNED(base)) { + if (size < PAGE_SIZE - (base & ~PAGE_MASK)) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + size -= PAGE_SIZE - (base & ~PAGE_MASK); + base = PAGE_ALIGN(base); + } + size &= PAGE_MASK; + + if (base > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base + size - 1 > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); + size = MAX_MEMBLOCK_ADDR - base + 1; + } + + if (base + size < phys_offset) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base < phys_offset) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + base, phys_offset); + size -= phys_offset - base; + base = phys_offset; + } + memblock_add(base, size); + } else + return; +} +#endif + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + int ret; + + ret = __add_pages(nid, start_pfn, nr_pages, params); + if (ret) + pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n", + __func__, ret); + + return ret; +} + +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + 
__remove_pages(start_pfn, nr_pages, altmap); +} +#endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW), + [VM_WRITE] = _PAGE_P(_PAGE_FOE), + [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE), + [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR), + [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW), + [VM_EXEC | VM_WRITE] = _PAGE_P(0), + [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0), + [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW), + [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR), + [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW), + [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0) +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/sw_64/mm/mmap.c b/arch/sw_64/mm/mmap.c new file mode 100644 index 000000000000..a7a189fc36d6 --- /dev/null +++ b/arch/sw_64/mm/mmap.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; + unsigned long limit; + + /* Support 32 bit heap. */ + if (current->personality & ADDR_LIMIT_32BIT) + limit = 0x80000000; + else + limit = TASK_SIZE; + + if (len > limit) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (addr + len > TASK_SIZE) + return -EINVAL; + + return addr; + } + + if (addr) { + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = limit; + info.align_mask = 0; + info.align_offset = pgoff << PAGE_SHIFT; + + return vm_unmapped_area(&info); +} + +unsigned long arch_mmap_rnd(void) +{ + unsigned long rnd; + + /* 8MB for 32bit, 256MB for 64bit */ + if (current->personality & ADDR_LIMIT_32BIT) + rnd = get_random_long() & 0x7ffffful; + else + rnd = get_random_long() & 0xffffffful; + + return rnd << PAGE_SHIFT; +} + +/* + * This function, called very early during the creation of a new process VM + * image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + + /* + * Fall back to the standard layout if the personality bit is set, or + * if the expected stack growth is unlimited: + */ + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; +} + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, off) +{ + unsigned long ret = -EINVAL; + + if ((off + PAGE_ALIGN(len)) < off) + goto out; + if (off & ~PAGE_MASK) + goto out; + ret = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); + out: + return ret; +} diff --git a/arch/sw_64/mm/physaddr.c b/arch/sw_64/mm/physaddr.c new file mode 100644 index 000000000000..3c6ecb8ee86a --- /dev/null +++ b/arch/sw_64/mm/physaddr.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: 
GPL-2.0 +#include +#include +#include +#include + +unsigned long __phys_addr(unsigned long addr) +{ + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + return addr; +} +EXPORT_SYMBOL(__phys_addr); + +bool __virt_addr_valid(unsigned long addr) +{ + if (addr < PAGE_OFFSET) + return false; + addr &= ~PAGE_OFFSET; + return pfn_valid(addr >> PAGE_SHIFT); +} +EXPORT_SYMBOL(__virt_addr_valid); + +#ifdef CONFIG_SUBARCH_C3B +#define LEGACY_BOOT_VA 0xffffffff80000000 +unsigned long __boot_phys_addr(unsigned long addr) +{ + if (addr >= LEGACY_BOOT_VA) { + addr &= ~LEGACY_BOOT_VA; + VIRTUAL_BUG_ON(addr >= KERNEL_IMAGE_SIZE); + } else { + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + } + return addr; +} +#endif -- Gitee From c264d1aa881220524b058b9be3e06039ac07cc4e Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:14 +0800 Subject: [PATCH 298/953] anolis: sw64: add hugetlb support ANBZ: #4688 Add hugetlb support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/hugetlb.h | 43 +++ arch/sw_64/mm/hugetlbpage.c | 313 +++++++++++++++++++++ arch/sw_64/mm/hugetlbpage_c4.c | 452 +++++++++++++++++++++++++++++++ arch/sw_64/mm/thp.c | 55 ++++ 4 files changed, 863 insertions(+) create mode 100644 arch/sw_64/include/asm/hugetlb.h create mode 100644 arch/sw_64/mm/hugetlbpage.c create mode 100644 arch/sw_64/mm/hugetlbpage_c4.c create mode 100644 arch/sw_64/mm/thp.c diff --git a/arch/sw_64/include/asm/hugetlb.h b/arch/sw_64/include/asm/hugetlb.h new file mode 100644 index 000000000000..f4c8cbe0891a --- /dev/null +++ b/arch/sw_64/include/asm/hugetlb.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HUGETLB_H +#define _ASM_SW64_HUGETLB_H + +#include + +#ifdef CONFIG_SUBARCH_C4 +#define __HAVE_ARCH_HUGE_PTE_CLEAR +extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz); + +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz); + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +extern void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, pte_t pte, int dirty); + +#define arch_make_huge_pte arch_make_huge_pte +extern pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, + vm_flags_t flags); + +#define set_huge_swap_pte_at set_huge_swap_pte_at +extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz); +#endif + +#include + +#endif /* _ASM_SW64_HUGETLB_H */ diff --git a/arch/sw_64/mm/hugetlbpage.c b/arch/sw_64/mm/hugetlbpage.c new file mode 100644 index 000000000000..fae1fa8bf7df --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 Huge TLB Page Support for Kernel. 
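+ *
+ * This is the C3B variant selected by arch/sw_64/mm/Makefile. Besides
+ * ordinary PMD-sized huge pages, a 256M huge page is assembled from 32
+ * identical PMD entries tagged with _PAGE_CONT, as set up in
+ * sw64_256m_hugepte_alloc() below.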
+ */ + +#include +#include +#include +#include + +#include +#include + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. + */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_VALID | _PAGE_LEAF)) != _PAGE_VALID; +} + +int pud_huge(pud_t pud) +{ + return 0; +} + +pte_t *sw64_256m_hugepte_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr) +{ + int i; + struct page *page; + pmd_t *pmd; + pte_t *pte = NULL; + + pmd = pmd_alloc(mm, pud, addr); + if (pmd == NULL) + return NULL; + + pte = pte_alloc_map(mm, pmd, addr); + if (pte == NULL) + return NULL; + + page = virt_to_page(pte); + pmd_val(*pmd) = pmd_val(*pmd) | _PAGE_LEAF | _PAGE_CONT; + for (i = 1; i < 32; i++) + pmd_val(*(pmd+i)) = pmd_val(*pmd); + return pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (pud) { + if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE << 5)) { + pte = sw64_256m_hugepte_alloc(mm, pud, addr); + } else { + pr_warn("Unsupported page size %lx\n", sz); + return NULL; + } + } + BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (p4d_present(*p4d)) { + pud = pud_offset(p4d, addr); + if (pud_present(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + return NULL; + if (pmd_val(*pmd) & _PAGE_CONT) + pte = pte_offset_map(pmd, addr); + else + pte = (pte_t *) pmd; + } + } + } + return pte; +} + +static inline int sw64_huge_pmd_bad(pmd_t pmd) +{ + return !(((pmd_val(pmd) & ~_PFN_MASK) == _PAGE_TABLE) || + ((pmd_val(pmd) & _PAGE_CONT) == _PAGE_CONT)); +} + +static inline int sw64_huge_pmd_none_or_clear_bad(pmd_t *pmd) +{ + if (pmd_none(*pmd)) + return 1; + if (unlikely(sw64_huge_pmd_bad(*pmd))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +static void sw64_huge_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long addr) +{ + if ((((unsigned long)pmd & 0xffUL) == 0) && + ((pmd_val(*pmd) & _PAGE_CONT) == _PAGE_CONT)) { + pgtable_t token = pmd_pgtable(*pmd); + + pmd_clear(pmd); + pte_free_tlb(tlb, token, addr); + mm_dec_nr_ptes(tlb->mm); + } else { + pmd_clear(pmd); + } +} + +static inline void sw64_huge_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pmd_t *pmd; + unsigned long next; + unsigned long start; + + start = addr; + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (sw64_huge_pmd_none_or_clear_bad(pmd)) + continue; + sw64_huge_free_pte_range(tlb, pmd, addr); + } while (pmd++, addr = next, addr != end); + + start &= PUD_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PUD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pmd = pmd_offset(pud, start); + pud_clear(pud); + 
pmd_free_tlb(tlb, pmd, start); + mm_dec_nr_pmds(tlb->mm); +} + +static inline void sw64_huge_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pud_t *pud; + unsigned long next; + unsigned long start; + + start = addr; + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + sw64_huge_free_pmd_range(tlb, pud, addr, next, floor, ceiling); + } while (pud++, addr = next, addr != end); + + start &= PGDIR_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PGDIR_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pud = pud_offset(p4d, start); + p4d_clear(p4d); + pud_free_tlb(tlb, pud, start); + mm_dec_nr_puds(tlb->mm); +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
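+ *
+ * vm_unmapped_area() returns either a page-aligned address or a
+ * negative errno, so the "addr & ~PAGE_MASK" test below is a cheap
+ * way to detect failure before retrying bottom-up.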
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} + +#if (defined(CONFIG_FORCE_MAX_ZONEORDER) && (CONFIG_FORCE_MAX_ZONEORDER >= 16)) +static __init int sw64_256m_hugetlb_init(void) +{ + if (!size_to_hstate(1UL << (PMD_SHIFT + 5))) + hugetlb_add_hstate(PMD_SHIFT + 5 - PAGE_SHIFT); + return 0; +} +arch_initcall(sw64_256m_hugetlb_init); +#endif +#endif /* CONFIG_HUGETLB_PAGE */ + +bool __init arch_hugetlb_valid_size(unsigned long size) +{ + switch (size) { + case PMD_SIZE: + case (PMD_SIZE<<5): + return true; + } + + return false; +} diff --git a/arch/sw_64/mm/hugetlbpage_c4.c b/arch/sw_64/mm/hugetlbpage_c4.c new file mode 100644 index 000000000000..913389cd2577 --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage_c4.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW_64 Huge TLB Page Support for Kernel. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. 
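+ *
+ * The test below encodes exactly that: the entry must not be a plain
+ * page-table pointer, i.e. either _PAGE_LEAF is set (a normal huge
+ * mapping) or _PAGE_PRESENT is clear (a migration or hwpoisoned
+ * entry).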
+ */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} + +int pud_huge(pud_t pud) +{ + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} +EXPORT_SYMBOL(pud_huge); + +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} + +static inline int num_contig_ptes(unsigned long size, size_t *pgsize) +{ + int contig_ptes = 0; + + *pgsize = size; + + switch (size) { + case PUD_SIZE: + case PMD_SIZE: + contig_ptes = 1; + break; + case CONT_PMD_SIZE: + *pgsize = PMD_SIZE; + contig_ptes = CONT_PMDS; + break; + default: + break; + } + + return contig_ptes; +} + +static pte_t get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long pgsize, unsigned long ncontig) +{ + pte_t orig_pte = huge_ptep_get(ptep); + unsigned long i; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} + +static pte_t get_clear_contig_flush(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pgsize, + unsigned long ncontig) +{ + pte_t orig_pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long i, saddr = addr; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); + + flush_tlb_range(&vma, saddr, addr); + return orig_pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return NULL; + + if (sz == PUD_SIZE) { + pte = (pte_t *)pud; + } else if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE * CONT_PMDS)) { + pte = (pte_t *)pmd_alloc(mm, pud, addr); + WARN_ON(addr & (sz - 1)); + } + + WARN_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (!pgd_present(*pgd)) + return NULL; + + p4d = p4d_offset(pgd, addr); + if (!p4d_present(*p4d)) + return NULL; + + pud = pud_offset(p4d, addr); + + if (sz != PUD_SIZE && pud_none(*pud)) + return NULL; + /* hugepage or swap? 
*/ + if (pud_huge(*pud) || !pud_present(*pud)) + return (pte_t *)pud; + /* table; check the next level */ + + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + pmd = pmd_offset(pud, addr); + if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && + pmd_none(*pmd)) + return NULL; + if (pmd_huge(*pmd) || !pmd_present(*pmd)) + return (pte_t *)pmd; + + return NULL; +} + +pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) +{ + size_t pagesize = 1UL << shift; + + if (pagesize == CONT_PMD_SIZE) { + entry = pmd_pte(pmd_mkcont(pte_pmd(entry))); + } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + } + return entry; +} + +void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); +} + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + size_t pgsize; + int i; + int ncontig; + unsigned long pfn; + pgprot_t hugeprot; + + /* + * Code needs to be expanded to handle huge swap and migration + * entries. Needed for HUGETLB and MEMORY_FAILURE. + */ + WARN_ON(!pte_present(pte)); + + if (!pte_cont(pte)) { + set_pte_at(mm, addr, ptep, pte); + return; + } + + ncontig = num_contig_ptes(sz, &pgsize); + pfn = pte_pfn(pte); + hugeprot = pte_pgprot(pte); + + get_and_clear(mm, addr, ptep, pgsize, ncontig); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, ptep++) + set_pte(ptep, pte); +} + +void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long pfn; + pgprot_t hugeprot; + int ncontig, i; + size_t pgsize; + pte_t pte; + + if (!pte_cont(READ_ONCE(*ptep))) { + ptep_set_wrprotect(mm, addr, ptep); + return; + } + + ncontig = CONT_PMDS; + + pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + pte = pte_wrprotect(pte); + + hugeprot = pte_pgprot(pte); + pfn = pte_pfn(pte); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + int ncontig; + size_t pgsize; + pte_t orig_pte = huge_ptep_get(ptep); + + if (!pte_cont(orig_pte)) + return ptep_get_and_clear(mm, addr, ptep); + + ncontig = CONT_PMDS; + + return get_and_clear(mm, addr, ptep, pgsize, ncontig); +} + +pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + struct mm_struct *mm = vma->vm_mm; + size_t pgsize; + int ncontig; + + if (!pte_cont(READ_ONCE(*ptep))) + return ptep_clear_flush(vma, addr, ptep); + + ncontig = CONT_PMDS; + return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig); +} + +static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) +{ + int i; + + if (pte_write(pte) != pte_write(huge_ptep_get(ptep))) + return 1; + + for (i = 0; i < ncontig; i++) { + pte_t orig_pte = huge_ptep_get(ptep + i); + + if (pte_dirty(pte) != pte_dirty(orig_pte)) + return 1; + + if (pte_young(pte) != pte_young(orig_pte)) + return 1; + } + + return 0; +} + 
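+/*
+ * For a contiguous (CONT_PMDS) mapping, access flags have to change over
+ * the whole range: the entries are cleared with get_and_clear(), dirty
+ * and young bits that raced in are folded back into the new pte, and
+ * every slot is then rewritten, so the hardware never observes a mix of
+ * old and new permissions across the block.
+ */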
+int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + int ncontig, i; + size_t pgsize = 0; + unsigned long pfn = pte_pfn(pte); + pgprot_t hugeprot; + pte_t orig_pte; + + if (!pte_cont(pte)) + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + + ncontig = CONT_PMDS; + + if (!__cont_access_flags_changed(ptep, pte, ncontig)) + return 0; + + orig_pte = get_and_clear(vma->vm_mm, addr, ptep, pgsize, ncontig); + flush_tlb_fix_spurious_fault(vma, addr, ptep); + + /* Make sure we don't lose the dirty or young state */ + if (pte_dirty(orig_pte)) + pte = pte_mkdirty(pte); + + if (pte_young(orig_pte)) + pte = pte_mkyoung(pte); + + hugeprot = pte_pgprot(pte); + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot)); + + return 1; +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + + unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} +#endif /* CONFIG_HUGETLB_PAGE */ + +static __init int setup_hugepagesz(char *opt) +{ + unsigned long ps = memparse(opt, &opt); + + switch (ps) { + case PUD_SIZE: + case PMD_SIZE * CONT_PMDS: + case PMD_SIZE: + hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT); + return 1; + } + + pr_err("hugepagesz: Unsupported page size %lu M\n", + ps >> 20); + return 0; +} +__setup("hugepagesz=", setup_hugepagesz); diff --git a/arch/sw_64/mm/thp.c b/arch/sw_64/mm/thp.c new file mode 100644 index 000000000000..833bb59f79d0 --- /dev/null +++ b/arch/sw_64/mm/thp.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + int changed = !pmd_same(*pmdp, entry); + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + if (changed && dirty) { + *pmdp = entry; + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + + return changed; +} +int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + int ret = 0; + + if (pmd_young(*pmdp)) + ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, + (unsigned long *)pmdp); + return ret; +} + +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int young; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + young = pmdp_test_and_clear_young(vma, address, pmdp); + if (young) + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + + return young; +} +void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int set; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + set = !test_and_set_bit(_PAGE_BIT_SPLITTING, (unsigned long *)pmdp); + if (set) { + /* need tlb flush only to serialize against gup-fast */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } +} -- Gitee From b557530b3ea1d95249007f1355bab4c4a5cf993f Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:37 +0800 Subject: [PATCH 299/953] anolis: sw64: add system call support ANBZ: #4688 Add system call support for SW64. 
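The sw64 convention returns the syscall result in $0 and uses $19 as
an error flag (see syscall_set_return_value() in asm/syscall.h); a few
historical calls such as getxpid and pipe return a second value in $20
(see sys_sw64.c).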
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/syscall.h | 82 ++++ arch/sw_64/include/asm/uaccess.h | 311 +++++++++++++++ arch/sw_64/include/asm/unistd.h | 27 ++ arch/sw_64/include/uapi/asm/unistd.h | 12 + arch/sw_64/kernel/sys_sw64.c | 151 +++++++ arch/sw_64/kernel/syscalls/Makefile | 32 ++ arch/sw_64/kernel/syscalls/syscall.tbl | 528 +++++++++++++++++++++++++ arch/sw_64/kernel/systbls.S | 15 + 8 files changed, 1158 insertions(+) create mode 100644 arch/sw_64/include/asm/syscall.h create mode 100644 arch/sw_64/include/asm/uaccess.h create mode 100644 arch/sw_64/include/asm/unistd.h create mode 100644 arch/sw_64/include/uapi/asm/unistd.h create mode 100644 arch/sw_64/kernel/sys_sw64.c create mode 100644 arch/sw_64/kernel/syscalls/Makefile create mode 100644 arch/sw_64/kernel/syscalls/syscall.tbl create mode 100644 arch/sw_64/kernel/systbls.S diff --git a/arch/sw_64/include/asm/syscall.h b/arch/sw_64/include/asm/syscall.h new file mode 100644 index 000000000000..a821bf68be16 --- /dev/null +++ b/arch/sw_64/include/asm/syscall.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SYSCALL_H +#define _ASM_SW64_SYSCALL_H + +#include + +#ifndef __ASSEMBLY__ + +typedef long (*syscall_fn_t)(ulong, ulong, ulong, ulong, ulong, ulong); + +extern syscall_fn_t sys_call_table[]; + +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + return regs->regs[19] ? -regs->regs[0] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + regs->regs[0] = -error; + regs->regs[19] = 1; + } else { + regs->regs[0] = val; + regs->regs[19] = 0; + } +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + *args++ = regs->regs[16]; + *args++ = regs->regs[17]; + *args++ = regs->regs[18]; + *args++ = regs->regs[19]; + *args++ = regs->regs[20]; + *args = regs->regs[21]; +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + regs->regs[16] = *args++; + regs->regs[17] = *args++; + regs->regs[18] = *args++; + regs->regs[19] = *args++; + regs->regs[20] = *args++; + regs->regs[21] = *args; +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_SW64; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_SW64_SYSCALL_H */ diff --git a/arch/sw_64/include/asm/uaccess.h b/arch/sw_64/include/asm/uaccess.h new file mode 100644 index 000000000000..f6b119f7fa78 --- /dev/null +++ b/arch/sw_64/include/asm/uaccess.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UACCESS_H +#define _ASM_SW64_UACCESS_H + +#include + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. 
+ * + * Or at least it did once upon a time. Nowadays it is a mask that + * defines which bits of the address space are off limits. This is a + * wee bit faster than the above. + * + * For historical reasons, these macros are grossly misnamed. + */ + +#define KERNEL_DS ((mm_segment_t) { 0UL }) +#define USER_DS ((mm_segment_t) { -0x10000000000000UL }) + +#define get_fs() (current_thread_info()->addr_limit) +#define get_ds() (KERNEL_DS) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * As the sw64 uses the same address space for kernel and user + * data, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) + * + * Careful to not + * (a) re-use the arguments for side effects (sizeof/typeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * The "__xxx" versions do not do address space checking, useful when + * doing multiple accesses to the same area (the programmer has to do the + * checks by hand with "access_ok()") + */ +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +/* + * The "ldi %1, 2b-1b(%0)" bits are magic to get the assembler to + * encode the bits we need for resolving the exception. See the + * more extensive comments with fixup_inline_exception below for + * more information. 
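+ *
+ * Concretely, each __ex_table entry pairs the faulting instruction's
+ * PC with a fixup word encoding valreg, errreg and nextinsn;
+ * fixup_exception() in arch/sw_64/mm/extable.c zeroes valreg, writes
+ * -EFAULT into errreg, and resumes at label 2, the instruction after
+ * the failed access.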
+ */ + +extern void __get_user_unknown(void); + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err = 0; \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __get_user_8(ptr); \ + break; \ + case 2: \ + __get_user_16(ptr); \ + break; \ + case 4: \ + __get_user_32(ptr); \ + break; \ + case 8: \ + __get_user_64(ptr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + if (__access_ok(__gu_addr, size)) { \ + __gu_err = 0; \ + switch (size) { \ + case 1: \ + __get_user_8(__gu_addr); \ + break; \ + case 2: \ + __get_user_16(__gu_addr); \ + break; \ + case 4: \ + __get_user_32(__gu_addr); \ + break; \ + case 8: \ + __get_user_64(__gu_addr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_64(addr) \ + __asm__("1: ldl %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_32(addr) \ + __asm__("1: ldw %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_16(addr) \ + __asm__("1: ldhu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_8(addr) \ + __asm__("1: ldbu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +extern void __put_user_unknown(void); + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __put_user_8(x, ptr); \ + break; \ + case 2: \ + __put_user_16(x, ptr); \ + break; \ + case 4: \ + __put_user_32(x, ptr); \ + break; \ + case 8: \ + __put_user_64(x, ptr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (__access_ok(__pu_addr, size)) { \ + __pu_err = 0; \ + switch (size) { \ + case 1: \ + __put_user_8(x, __pu_addr); \ + break; \ + case 2: \ + __put_user_16(x, __pu_addr); \ + break; \ + case 4: \ + __put_user_32(x, __pu_addr); \ + break; \ + case 8: \ + __put_user_64(x, __pu_addr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + } \ + __pu_err; \ +}) + +/* + * The "__put_user_xx()" macros tell gcc they read from memory + * instead of writing: this is because they do not write to + * any memory gcc knows about, so there are no aliasing issues + */ +#define __put_user_64(x, addr) \ +__asm__ __volatile__("1: stl %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m" (__m(addr)), "rJ" 
(x), "0"(__pu_err)) + +#define __put_user_32(x, addr) \ +__asm__ __volatile__("1: stw %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_16(x, addr) \ +__asm__ __volatile__("1: sth %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_8(x, addr) \ +__asm__ __volatile__("1: stb %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +/* + * Complex access routines + */ + +extern long __copy_user(void *to, const void *from, long len); + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long len) +{ + return __copy_user(to, (__force const void *)from, len); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long len) +{ + return __copy_user((__force void *)to, from, len); +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +extern long __clear_user(void __user *to, long len); + +static inline long +clear_user(void __user *to, long len) +{ + if (__access_ok(to, len)) + len = __clear_user(to, len); + return len; +} + +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) + +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +struct page; +void memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len); +extern unsigned long __must_check __copy_user_flushcache(void *to, + const void __user *from, unsigned long n); + +static inline int +__copy_from_user_flushcache(void *dst, const void __user *src, unsigned long size) +{ + kasan_check_write(dst, size); + return __copy_user_flushcache(dst, src, size); +} +#endif + +#include +#endif /* _ASM_SW64_UACCESS_H */ diff --git a/arch/sw_64/include/asm/unistd.h b/arch/sw_64/include/asm/unistd.h new file mode 100644 index 000000000000..6d1b8d1e2011 --- /dev/null +++ b/arch/sw_64/include/asm/unistd.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNISTD_H +#define _ASM_SW64_UNISTD_H + +#include + +#define NR_SYSCALLS __NR_syscalls +#define NR_syscalls NR_SYSCALLS + +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_CLONE3 + +#endif /* _ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/include/uapi/asm/unistd.h b/arch/sw_64/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..be844b2be9d5 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/unistd.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_UNISTD_H +#define 
_UAPI_ASM_SW64_UNISTD_H
+
+/*
+ * These are traditionally the names used for generic system calls
+ */
+#define __NR_umount __NR_umount2
+
+#include 
+
+#endif /* _UAPI_ASM_SW64_UNISTD_H */
diff --git a/arch/sw_64/kernel/sys_sw64.c b/arch/sw_64/kernel/sys_sw64.c
new file mode 100644
index 000000000000..d0198aef554d
--- /dev/null
+++ b/arch/sw_64/kernel/sys_sw64.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include 
+#include 
+
+SYSCALL_DEFINE5(getsysinfo, unsigned long, op, void __user *, buffer,
+ unsigned long, nbytes, int __user *, start, void __user *, arg)
+{
+ unsigned long w;
+
+ switch (op) {
+ case GSI_IEEE_FP_CONTROL:
+ /* Return current software fp control & status bits. */
+ /* Note that DU doesn't verify available space here. */
+
+ w = current_thread_info()->ieee_state & IEEE_SW_MASK;
+ w = swcr_update_status(w, rdfpcr());
+ if (put_user(w, (unsigned long __user *) buffer))
+ return -EFAULT;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+SYSCALL_DEFINE5(setsysinfo, unsigned long, op, void __user *, buffer,
+ unsigned long, nbytes, int __user *, start, void __user *, arg)
+{
+ switch (op) {
+ case SSI_IEEE_FP_CONTROL: {
+ unsigned long swcr, fpcr;
+ unsigned int *state;
+
+ /*
+ * Sw_64 Architecture Handbook 4.7.7.3:
+ * To be fully IEEE compliant, we must track the current IEEE
+ * exception state in software, because spurious bits can be
+ * set in the trap shadow of a software-complete insn.
+ */
+
+ if (get_user(swcr, (unsigned long __user *)buffer))
+ return -EFAULT;
+ state = &current_thread_info()->ieee_state;
+
+ /* Update software trap enable bits. */
+ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
+
+ /* Update the real fpcr. */
+ fpcr = rdfpcr() & FPCR_DYN_MASK;
+ fpcr |= ieee_swcr_to_fpcr(swcr);
+ wrfpcr(fpcr);
+
+ return 0;
+ }
+
+ case SSI_IEEE_RAISE_EXCEPTION: {
+ unsigned long exc, swcr, fpcr, fex;
+ unsigned int *state;
+
+ if (get_user(exc, (unsigned long __user *)buffer))
+ return -EFAULT;
+ state = &current_thread_info()->ieee_state;
+ exc &= IEEE_STATUS_MASK;
+
+ /* Update software trap enable bits. */
+ swcr = (*state & IEEE_SW_MASK) | exc;
+ *state |= exc;
+
+ /* Update the real fpcr. */
+ fpcr = rdfpcr();
+ fpcr |= ieee_swcr_to_fpcr(swcr);
+ wrfpcr(fpcr);
+
+ /* If any exceptions are set by this call and are unmasked,
+ * send a signal. Old exceptions are not signaled.
+ */
+ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
+ if (fex) {
+ int si_code = FPE_FLTUNK;
+
+ if (fex & IEEE_TRAP_ENABLE_DNO)
+ si_code = FPE_FLTUND;
+ if (fex & IEEE_TRAP_ENABLE_INE)
+ si_code = FPE_FLTRES;
+ if (fex & IEEE_TRAP_ENABLE_UNF)
+ si_code = FPE_FLTUND;
+ if (fex & IEEE_TRAP_ENABLE_OVF)
+ si_code = FPE_FLTOVF;
+ if (fex & IEEE_TRAP_ENABLE_DZE)
+ si_code = FPE_FLTDIV;
+ if (fex & IEEE_TRAP_ENABLE_INV)
+ si_code = FPE_FLTINV;
+
+ send_sig_fault(SIGFPE, si_code, (void __user *)NULL, current);
+ }
+ return 0;
+ }
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+SYSCALL_DEFINE2(odd_getpriority, int, which, int, who)
+{
+ int prio = sys_getpriority(which, who);
+
+ if (prio >= 0) {
+ /* Return value is the unbiased priority, i.e. 20 - prio.
+ * This does result in negative return values, so signal
+ * no error.
+ 
+ */ + force_successful_syscall_return(); + prio = 20 - prio; + } + return prio; +} + +SYSCALL_DEFINE0(getxuid) +{ + current_pt_regs()->regs[20] = sys_geteuid(); + return sys_getuid(); +} + +SYSCALL_DEFINE0(getxgid) +{ + current_pt_regs()->regs[20] = sys_getegid(); + return sys_getgid(); +} + +SYSCALL_DEFINE0(getxpid) +{ + current_pt_regs()->regs[20] = sys_getppid(); + return sys_getpid(); +} + +SYSCALL_DEFINE0(sw64_pipe) +{ + int fd[2]; + int res = do_pipe_flags(fd, 0); + + if (!res) { + /* The return values are in $0 and $20. */ + current_pt_regs()->regs[20] = fd[1]; + res = fd[0]; + } + return res; +} diff --git a/arch/sw_64/kernel/syscalls/Makefile b/arch/sw_64/kernel/syscalls/Makefile new file mode 100644 index 000000000000..cdfe761d7282 --- /dev/null +++ b/arch/sw_64/kernel/syscalls/Makefile @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +$(shell mkdir -p $(uapi) $(kapi)) + +syscall := $(src)/syscall.tbl +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ + +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE + $(call if_changed,syshdr) + +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE + $(call if_changed,systbl) + +uapisyshdr-y += unistd_64.h +kapisyshdr-y += syscall_table.h + +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) + +PHONY += all +all: $(uapisyshdr-y) $(kapisyshdr-y) + @: diff --git a/arch/sw_64/kernel/syscalls/syscall.tbl b/arch/sw_64/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..fdf9e4cb03eb --- /dev/null +++ b/arch/sw_64/kernel/syscalls/syscall.tbl @@ -0,0 +1,528 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for sw64 +# +# The format is: +# +# +# The is always "common" for this file +# +#0 is unused +1 common exit sys_exit +2 common fork sys_fork +3 common read sys_read +4 common write sys_write +#5 is unused +6 common close sys_close +#7 is unused +#8 is unused +9 common link sys_link +10 common unlink sys_unlink +#11 is unused +12 common chdir sys_chdir +13 common fchdir sys_fchdir +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common chown sys_chown +17 common brk sys_brk +#18 is unused +19 common lseek sys_lseek +20 common getxpid sys_getxpid +#21 is unused +22 common umount2 sys_umount +23 common setuid sys_setuid +24 common getxuid sys_getxuid +#25 is unused +26 common ptrace sys_ptrace +#27 is unused +#28 is unused +#29 is unused +#30 is unused +#31 is unused +#32 is unused +33 common access sys_access +#34 is unused +#35 is unused +36 common sync sys_sync +37 common kill sys_kill +#38 is unused +39 common setpgid sys_setpgid +#40 is unused +41 common dup sys_dup +42 common pipe sys_sw64_pipe +#43 is unused +#44 is unused +45 common open sys_open +#46 is unused +47 common getxgid sys_getxgid +48 common odd_sigprocmask sys_odd_sigprocmask +#49 is unused +#50 is unused +51 common acct sys_acct +52 common sigpending sys_sigpending +#53 is unused +54 common ioctl sys_ioctl +#55 is unused +#56 is unused +57 common symlink sys_symlink +58 common readlink sys_readlink +59 common execve sys_execve +60 common umask sys_umask +61 common 
chroot sys_chroot +#62 is unused +63 common getpgrp sys_getpgrp +#64 is unused +#65 is unused +66 common vfork sys_vfork +67 common stat sys_newstat +68 common lstat sys_newlstat +#69 is unused +#70 is unused +71 common mmap sys_mmap +#72 is unused +73 common munmap sys_munmap +74 common mprotect sys_mprotect +75 common madvise sys_madvise +76 common vhangup sys_vhangup +#77 is unused +#78 is unused +79 common getgroups sys_getgroups +80 common setgroups sys_setgroups +#81 is unused +82 common setpgrp sys_setpgid +#83 is unused +#84 is unused +#85 is unused +#86 is unused +87 common gethostname sys_gethostname +88 common sethostname sys_sethostname +#89 is unused +90 common dup2 sys_dup2 +91 common fstat sys_newfstat +92 common fcntl sys_fcntl +#93 is unused +94 common poll sys_poll +95 common fsync sys_fsync +96 common setpriority sys_setpriority +97 common socket sys_socket +98 common connect sys_connect +99 common accept sys_accept +100 common odd_getpriority sys_odd_getpriority +101 common send sys_send +102 common recv sys_recv +103 common sigreturn sys_sigreturn +104 common bind sys_bind +105 common setsockopt sys_setsockopt +106 common listen sys_listen +#107 is unused +#108 is unused +#109 is unused +#110 is unused +111 common sigsuspend sys_sigsuspend +#112 is unused +113 common recvmsg sys_recvmsg +114 common sendmsg sys_sendmsg +#115 is unused +#116 is unused +#117 is unused +118 common getsockopt sys_getsockopt +119 common socketcall sys_socketcall +120 common readv sys_readv +121 common writev sys_writev +#122 is unused +123 common fchown sys_fchown +124 common fchmod sys_fchmod +125 common recvfrom sys_recvfrom +126 common setreuid sys_setreuid +127 common setregid sys_setregid +128 common rename sys_rename +129 common truncate sys_truncate +130 common ftruncate sys_ftruncate +131 common flock sys_flock +132 common setgid sys_setgid +133 common sendto sys_sendto +134 common shutdown sys_shutdown +135 common socketpair sys_socketpair +136 common mkdir sys_mkdir +137 common rmdir sys_rmdir +#138 is unused +#139 is unused +#140 is unused +141 common getpeername sys_getpeername +#142 is unused +#143 is unused +144 common getrlimit sys_getrlimit +145 common setrlimit sys_setrlimit +#146 is unused +147 common setsid sys_setsid +148 common quotactl sys_quotactl +#149 is unused +150 common getsockname sys_getsockname +#151 is unused +#152 is unused +#153 is unused +#154 is unused +#155 is unused +156 common sigaction sys_odd_sigaction +#157 is unused +#158 is unused +#159 is unused +#160 is unused +#161 is unused +#162 is unused +#163 is unused +#164 is unused +#165 is unused +166 common setdomainname sys_setdomainname +#167 is unused +#168 is unused +#169 is unused +170 common bpf sys_bpf +171 common userfaultfd sys_userfaultfd +172 common membarrier sys_membarrier +173 common mlock2 sys_mlock2 +174 common getpid sys_getpid +175 common getppid sys_getppid +176 common getuid sys_getuid +177 common geteuid sys_geteuid +178 common getgid sys_getgid +179 common getegid sys_getegid +180 common epoll_pwait2 sys_epoll_pwait2 +181 common mount_setattr sys_mount_setattr +182 common quotactl_fd sys_quotactl_fd +183 common landlock_create_ruleset sys_landlock_create_ruleset +184 common landlock_add_rule sys_landlock_add_rule +185 common landlock_restrict_self sys_landlock_restrict_self +# 186 reserved for memfd_secret +187 common process_mrelease sys_process_mrelease +188 common futex_waitv sys_futex_waitv +189 common set_mempolicy_home_node sys_ni_syscall +190 common cachestat sys_cachestat 
+191 common fchmodat2 sys_fchmodat2 +#192 is unused +#193 is unused +#194 is unused +#195 is unused +#196 is unused +#197 is unused +#198 is unused +#199 is unused +200 common msgctl sys_old_msgctl +201 common msgget sys_msgget +202 common msgrcv sys_msgrcv +203 common msgsnd sys_msgsnd +204 common semctl sys_old_semctl +205 common semget sys_semget +206 common semop sys_semop +#207 is unused +208 common lchown sys_lchown +209 common shmat sys_shmat +210 common shmctl sys_old_shmctl +211 common shmdt sys_shmdt +212 common shmget sys_shmget +#213 is unused +#214 is unused +#215 is unused +#216 is unused +217 common msync sys_msync +#218 is unused +#219 is unused +#220 is unused +#221 is unused +#222 is unused +#223 is unused +#224 is unused +#225 is unused +#226 is unused +#227 is unused +#228 is unused +229 common statfs64 sys_statfs64 +230 common fstatfs64 sys_fstatfs64 +#231 is unused +#232 is unused +233 common getpgid sys_getpgid +234 common getsid sys_getsid +235 common sigaltstack sys_sigaltstack +#236 is unused +#237 is unused +#238 is unused +#239 is unused +#240 is unused +#241 is unused +#242 is unused +#243 is unused +#244 is unused +#245 is unused +#246 is unused +#247 is unused +#248 is unused +#249 is unused +#250 is unused +#251 is unused +#252 is unused +#253 is unused +254 common sysfs sys_sysfs +#255 is unused +256 common getsysinfo sys_getsysinfo +257 common setsysinfo sys_setsysinfo +#258 is unused +#259 is unused +#260 is unused +#261 is unused +#262 is unused +#263 is unused +#264 is unused +#265 is unused +#266 is unused +#267 is unused +#268 is unused +#269 is unused +#270 is unused +271 common pidfd_send_signal sys_pidfd_send_signal +272 common io_uring_setup sys_io_uring_setup +273 common io_uring_enter sys_io_uring_enter +274 common io_uring_register sys_io_uring_register +275 common open_tree sys_open_tree +276 common move_mount sys_move_mount +277 common fsopen sys_fsopen +278 common fsconfig sys_fsconfig +279 common fsmount sys_fsmount +280 common fspick sys_fspick +281 common pidfd_open sys_pidfd_open +282 common clone3 sys_clone3 +283 common close_range sys_close_range +284 common openat2 sys_openat2 +285 common pidfd_getfd sys_pidfd_getfd +286 common faccessat2 sys_faccessat2 +287 common process_madvise sys_process_madvise +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +#291 is unused +#292 is unused +#293 is unused +#294 is unused +#295 is unused +#296 is unused +#297 is unused +298 common getpriority sys_getpriority +299 common sigprocmask sys_sigprocmask +300 common bdflush sys_ni_syscall +#301 is unused +302 common mount sys_mount +#303 is unused +304 common swapoff sys_swapoff +305 common getdents sys_getdents +306 common create_module sys_ni_syscall +307 common init_module sys_init_module +308 common delete_module sys_delete_module +309 common get_kernel_syms sys_ni_syscall +310 common syslog sys_syslog +311 common reboot sys_reboot +312 common clone sys_clone +313 common uselib sys_uselib +314 common mlock sys_mlock +315 common munlock sys_munlock +316 common mlockall sys_mlockall +317 common munlockall sys_munlockall +318 common sysinfo sys_sysinfo +#319 is unused +#320 is unused +321 common oldumount sys_oldumount +322 common swapon sys_swapon +323 common times sys_times +324 common personality sys_personality +325 common setfsuid sys_setfsuid +326 common setfsgid sys_setfsgid +327 common ustat sys_ustat +328 common statfs sys_statfs +329 common fstatfs sys_fstatfs +330 common 
sched_setparam sys_sched_setparam +331 common sched_getparam sys_sched_getparam +332 common sched_setscheduler sys_sched_setscheduler +333 common sched_getscheduler sys_sched_getscheduler +334 common sched_yield sys_sched_yield +335 common sched_get_priority_max sys_sched_get_priority_max +336 common sched_get_priority_min sys_sched_get_priority_min +337 common sched_rr_get_interval sys_sched_rr_get_interval +338 common afs_syscall sys_ni_syscall +339 common uname sys_newuname +340 common nanosleep sys_nanosleep +341 common mremap sys_mremap +342 common nfsservctl sys_ni_syscall +343 common setresuid sys_setresuid +344 common getresuid sys_getresuid +345 common pciconfig_read sys_pciconfig_read +346 common pciconfig_write sys_pciconfig_write +347 common query_module sys_ni_syscall +348 common prctl sys_prctl +349 common pread64 sys_pread64 +350 common pwrite64 sys_pwrite64 +351 common rt_sigreturn sys_rt_sigreturn +352 common rt_sigaction sys_rt_sigaction +353 common rt_sigprocmask sys_rt_sigprocmask +354 common rt_sigpending sys_rt_sigpending +355 common rt_sigtimedwait sys_rt_sigtimedwait +356 common rt_sigqueueinfo sys_rt_sigqueueinfo +357 common rt_sigsuspend sys_rt_sigsuspend +358 common select sys_select +359 common gettimeofday sys_gettimeofday +360 common settimeofday sys_settimeofday +361 common getitimer sys_getitimer +362 common setitimer sys_setitimer +363 common utimes sys_utimes +364 common getrusage sys_getrusage +365 common wait4 sys_wait4 +366 common adjtimex sys_adjtimex +367 common getcwd sys_getcwd +368 common capget sys_capget +369 common capset sys_capset +370 common sendfile sys_sendfile64 +371 common setresgid sys_setresgid +372 common getresgid sys_getresgid +373 common dipc sys_ni_syscall +374 common pivot_root sys_pivot_root +375 common mincore sys_mincore +376 common pciconfig_iobase sys_pciconfig_iobase +377 common getdents64 sys_getdents64 +378 common gettid sys_gettid +379 common readahead sys_readahead +#380 is unused +381 common tkill sys_tkill +382 common setxattr sys_setxattr +383 common lsetxattr sys_lsetxattr +384 common fsetxattr sys_fsetxattr +385 common getxattr sys_getxattr +386 common lgetxattr sys_lgetxattr +387 common fgetxattr sys_fgetxattr +388 common listxattr sys_listxattr +389 common llistxattr sys_llistxattr +390 common flistxattr sys_flistxattr +391 common removexattr sys_removexattr +392 common lremovexattr sys_lremovexattr +393 common fremovexattr sys_fremovexattr +394 common futex sys_futex +395 common sched_setaffinity sys_sched_setaffinity +396 common sched_getaffinity sys_sched_getaffinity +397 common tuxcall sys_ni_syscall +398 common io_setup sys_io_setup +399 common io_destroy sys_io_destroy +400 common io_getevents sys_io_getevents +401 common io_submit sys_io_submit +402 common io_cancel sys_io_cancel +403 common io_pgetevents sys_io_pgetevents +404 common rseq sys_rseq +405 common exit_group sys_exit_group +406 common lookup_dcookie sys_lookup_dcookie +407 common epoll_create sys_epoll_create +408 common epoll_ctl sys_epoll_ctl +409 common epoll_wait sys_epoll_wait +410 common remap_file_pages sys_remap_file_pages +411 common set_tid_address sys_set_tid_address +412 common restart_syscall sys_restart_syscall +413 common fadvise64 sys_fadvise64 +414 common timer_create sys_timer_create +415 common timer_settime sys_timer_settime +416 common timer_gettime sys_timer_gettime +417 common timer_getoverrun sys_timer_getoverrun +418 common timer_delete sys_timer_delete +419 common clock_settime sys_clock_settime +420 common 
clock_gettime sys_clock_gettime +421 common clock_getres sys_clock_getres +422 common clock_nanosleep sys_clock_nanosleep +423 common semtimedop sys_semtimedop +424 common tgkill sys_tgkill +425 common stat64 sys_stat64 +426 common lstat64 sys_lstat64 +427 common fstat64 sys_fstat64 +428 common vserver sys_ni_syscall +429 common mbind sys_mbind +430 common get_mempolicy sys_get_mempolicy +431 common set_mempolicy sys_set_mempolicy +432 common mq_open sys_mq_open +433 common mq_unlink sys_mq_unlink +434 common mq_timedsend sys_mq_timedsend +435 common mq_timedreceive sys_mq_timedreceive +436 common mq_notify sys_mq_notify +437 common mq_getsetattr sys_mq_getsetattr +438 common waitid sys_waitid +439 common add_key sys_add_key +440 common request_key sys_request_key +441 common keyctl sys_keyctl +442 common ioprio_set sys_ioprio_set +443 common ioprio_get sys_ioprio_get +444 common inotify_init sys_inotify_init +445 common inotify_add_watch sys_inotify_add_watch +446 common inotify_rm_watch sys_inotify_rm_watch +447 common fdatasync sys_fdatasync +448 common kexec_load sys_kexec_load +449 common migrate_pages sys_migrate_pages +450 common openat sys_openat +451 common mkdirat sys_mkdirat +452 common mknodat sys_mknodat +453 common fchownat sys_fchownat +454 common futimesat sys_futimesat +455 common fstatat64 sys_fstatat64 +456 common unlinkat sys_unlinkat +457 common renameat sys_renameat +458 common linkat sys_linkat +459 common symlinkat sys_symlinkat +460 common readlinkat sys_readlinkat +461 common fchmodat sys_fchmodat +462 common faccessat sys_faccessat +463 common pselect6 sys_pselect6 +464 common ppoll sys_ppoll +465 common unshare sys_unshare +466 common set_robust_list sys_set_robust_list +467 common get_robust_list sys_get_robust_list +468 common splice sys_splice +469 common sync_file_range sys_sync_file_range +470 common tee sys_tee +471 common vmsplice sys_vmsplice +472 common move_pages sys_move_pages +473 common getcpu sys_getcpu +474 common epoll_pwait sys_epoll_pwait +475 common utimensat sys_utimensat +476 common signalfd sys_signalfd +477 common timerfd sys_ni_syscall +478 common eventfd sys_eventfd +479 common recvmmsg sys_recvmmsg +480 common fallocate sys_fallocate +481 common timerfd_create sys_timerfd_create +482 common timerfd_settime sys_timerfd_settime +483 common timerfd_gettime sys_timerfd_gettime +484 common signalfd4 sys_signalfd4 +485 common eventfd2 sys_eventfd2 +486 common epoll_create1 sys_epoll_create1 +487 common dup3 sys_dup3 +488 common pipe2 sys_pipe2 +489 common inotify_init1 sys_inotify_init1 +490 common preadv sys_preadv +491 common pwritev sys_pwritev +492 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +493 common perf_event_open sys_perf_event_open +494 common fanotify_init sys_fanotify_init +495 common fanotify_mark sys_fanotify_mark +496 common prlimit64 sys_prlimit64 +497 common name_to_handle_at sys_name_to_handle_at +498 common open_by_handle_at sys_open_by_handle_at +499 common clock_adjtime sys_clock_adjtime +500 common syncfs sys_syncfs +501 common setns sys_setns +502 common accept4 sys_accept4 +503 common sendmmsg sys_sendmmsg +504 common process_vm_readv sys_process_vm_readv +505 common process_vm_writev sys_process_vm_writev +506 common kcmp sys_kcmp +507 common finit_module sys_finit_module +508 common sched_setattr sys_sched_setattr +509 common sched_getattr sys_sched_getattr +510 common renameat2 sys_renameat2 +511 common getrandom sys_getrandom +512 common memfd_create sys_memfd_create +513 common execveat sys_execveat +514 
common seccomp sys_seccomp +515 common copy_file_range sys_copy_file_range +516 common preadv2 sys_preadv2 +517 common pwritev2 sys_pwritev2 +518 common statx sys_statx diff --git a/arch/sw_64/kernel/systbls.S b/arch/sw_64/kernel/systbls.S new file mode 100644 index 000000000000..010ca3f8e016 --- /dev/null +++ b/arch/sw_64/kernel/systbls.S @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/kernel/systbls.S + * + * The system call table. + */ + +#include + +#define __SYSCALL(nr, entry) .quad entry + .data + .align 3 + .globl sys_call_table +sys_call_table: +#include -- Gitee From 8d957d0592f334fbc1c9ebedd06a7f5224b1d362 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:34 +0800 Subject: [PATCH 300/953] anolis: sw64: add signal handling support ANBZ: #4688 Add ucontext/sigcontext definition and signal handling support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/signal.h | 28 ++ arch/sw_64/include/uapi/asm/sigcontext.h | 34 ++ arch/sw_64/include/uapi/asm/siginfo.h | 10 + arch/sw_64/include/uapi/asm/signal.h | 119 +++++++ arch/sw_64/include/uapi/asm/ucontext.h | 14 + arch/sw_64/kernel/signal.c | 378 +++++++++++++++++++++++ 6 files changed, 583 insertions(+) create mode 100644 arch/sw_64/include/asm/signal.h create mode 100644 arch/sw_64/include/uapi/asm/sigcontext.h create mode 100644 arch/sw_64/include/uapi/asm/siginfo.h create mode 100644 arch/sw_64/include/uapi/asm/signal.h create mode 100644 arch/sw_64/include/uapi/asm/ucontext.h create mode 100644 arch/sw_64/kernel/signal.c diff --git a/arch/sw_64/include/asm/signal.h b/arch/sw_64/include/asm/signal.h new file mode 100644 index 000000000000..4dc3b6510b86 --- /dev/null +++ b/arch/sw_64/include/asm/signal.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SIGNAL_H +#define _ASM_SW64_SIGNAL_H + +#include + +/* Digital Unix defines 64 signals. Most things should be clean enough + * to redefine this at will, if care is taken to make libc match. + */ + +#define _NSIG 64 +#define _NSIG_BPW 64 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +struct odd_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + int sa_flags; +}; + +#include +#endif /* _ASM_SW64_SIGNAL_H */ diff --git a/arch/sw_64/include/uapi/asm/sigcontext.h b/arch/sw_64/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..08a081470383 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sigcontext.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SIGCONTEXT_H +#define _UAPI_ASM_SW64_SIGCONTEXT_H + +/* + * Signal context structure + * + * The context is saved before a signal handler is invoked, and it is + * restored by sys_sigreturn / sys_rt_sigreturn. 
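+ *
+ * Note that the layout below is userspace ABI: libc and unwinders
+ * depend on these offsets (kernel/signal.c later asserts that
+ * uc_mcontext sits at a fixed offset inside struct rt_sigframe), so
+ * fields must not be reordered or resized.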
+ */
+struct sigcontext {
+	long sc_onstack;
+	long sc_mask;
+	long sc_pc;
+	long sc_ps;
+	long sc_regs[32];
+	long sc_ownedfp;
+	long sc_fpregs[128];	/* SIMD-FP */
+	unsigned long sc_fpcr;
+	/* TODO: Following are unused, to be removed and synced with libc */
+	unsigned long sc_fp_control;
+	unsigned long sc_reserved1, sc_reserved2;
+	unsigned long sc_ssize;
+	char *sc_sbase;
+	unsigned long sc_traparg_a0;
+	unsigned long sc_traparg_a1;
+	unsigned long sc_traparg_a2;
+	unsigned long sc_fp_trap_pc;
+	unsigned long sc_fp_trigger_sum;
+	unsigned long sc_fp_trigger_inst;
+};
+
+
+#endif /* _UAPI_ASM_SW64_SIGCONTEXT_H */
diff --git a/arch/sw_64/include/uapi/asm/siginfo.h b/arch/sw_64/include/uapi/asm/siginfo.h
new file mode 100644
index 000000000000..f47fb917c9b2
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/siginfo.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SIGINFO_H
+#define _UAPI_ASM_SW64_SIGINFO_H
+
+#define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+
+#include
+
+
+#endif /* _UAPI_ASM_SW64_SIGINFO_H */
diff --git a/arch/sw_64/include/uapi/asm/signal.h b/arch/sw_64/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..0d7a935fe37c
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/signal.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SIGNAL_H
+#define _UAPI_ASM_SW64_SIGNAL_H
+
+#include
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG		32
+typedef unsigned long	sigset_t;
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * Linux/sw64 has different signal numbers than Linux/i386.
+ */
+#define SIGHUP		 1
+#define SIGINT		 2
+#define SIGQUIT		 3
+#define SIGILL		 4
+#define SIGTRAP		 5
+#define SIGABRT		 6
+#define SIGEMT		 7
+#define SIGFPE		 8
+#define SIGKILL		 9
+#define SIGBUS		10
+#define SIGSEGV		11
+#define SIGSYS		12
+#define SIGPIPE		13
+#define SIGALRM		14
+#define SIGTERM		15
+#define SIGURG		16
+#define SIGSTOP		17
+#define SIGTSTP		18
+#define SIGCONT		19
+#define SIGCHLD		20
+#define SIGTTIN		21
+#define SIGTTOU		22
+#define SIGIO		23
+#define SIGXCPU		24
+#define SIGXFSZ		25
+#define SIGVTALRM	26
+#define SIGPROF		27
+#define SIGWINCH	28
+#define SIGINFO		29
+#define SIGUSR1		30
+#define SIGUSR2		31
+
+#define SIGPOLL	SIGIO
+#define SIGPWR	SIGINFO
+#define SIGIOT	SIGABRT
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN	32
+#define SIGRTMAX	_NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
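+ *
+ * SA_SIGINFO requests the three-argument handler form; on SW64 the
+ * handler then receives the siginfo pointer in a1 and the ucontext
+ * pointer in a2 (see setup_rt_frame() in kernel/signal.c).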
+ */ + +#define SA_ONSTACK 0x00000001 +#define SA_RESTART 0x00000002 +#define SA_NOCLDSTOP 0x00000004 +#define SA_NODEFER 0x00000008 +#define SA_RESETHAND 0x00000010 +#define SA_NOCLDWAIT 0x00000020 +#define SA_SIGINFO 0x00000040 + +#define SA_ONESHOT SA_RESETHAND +#define SA_NOMASK SA_NODEFER + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 16384 + +#define SIG_BLOCK 1 /* for blocking signals */ +#define SIG_UNBLOCK 2 /* for unblocking signals */ +#define SIG_SETMASK 3 /* for setting the signal mask */ + +#include + +#ifndef __KERNEL__ +/* Here we must cater to libcs that poke about in kernel headers. */ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int sig, struct siginfo *info, void *ucontext); + } _u; + sigset_t sa_mask; + int sa_flags; +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#endif /* _UAPI_ASM_SW64_SIGNAL_H */ diff --git a/arch/sw_64/include/uapi/asm/ucontext.h b/arch/sw_64/include/uapi/asm/ucontext.h new file mode 100644 index 000000000000..c5d6e24e3e5f --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ucontext.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_UCONTEXT_H +#define _UAPI_ASM_SW64_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + old_sigset_t uc_old_sigmask; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* _UAPI_ASM_SW64_UCONTEXT_H */ diff --git a/arch/sw_64/kernel/signal.c b/arch/sw_64/kernel/signal.c new file mode 100644 index 000000000000..496f33bb1c89 --- /dev/null +++ b/arch/sw_64/kernel/signal.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/signal.c + * + * Copyright (C) 1995 Linus Torvalds + * + * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "proto.h" + + +#define DEBUG_SIG 0 + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +SYSCALL_DEFINE2(odd_sigprocmask, int, how, unsigned long, newmask) +{ + sigset_t oldmask; + sigset_t mask; + unsigned long res; + + siginitset(&mask, newmask & _BLOCKABLE); + res = sigprocmask(how, &mask, &oldmask); + if (!res) { + force_successful_syscall_return(); + res = oldmask.sig[0]; + } + return res; +} + +SYSCALL_DEFINE3(odd_sigaction, int, sig, + const struct odd_sigaction __user *, act, + struct odd_sigaction __user *, oact) +{ + struct k_sigaction new_ka, old_ka; + old_sigset_t mask; + int ret; + + if (act) { + if (!access_ok(act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || + __get_user(mask, &act->sa_mask)) + return -EFAULT; + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (!access_ok(oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) + return -EFAULT; + } + + return ret; +} + +/* + * Do a signal return; undo the signal stack. 
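+ *
+ * Both sigreturn flavours below validate the user-supplied context,
+ * reinstall the saved signal mask and register state, force SIGSEGV
+ * if the context cannot be read back safely, and re-deliver SIGTRAP
+ * when a ptrace single-step breakpoint was pending.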
+ */ + +#if _NSIG_WORDS > 1 +# error "Non SA_SIGINFO frame needs rearranging" +#endif + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; +}; + +/* + * If this changes, userland unwinders that Know Things about our signal + * frame will break. Do not undertake lightly. It also implies an ABI + * change wrt the size of siginfo_t, which may cause some pain. + */ +extern char compile_time_assert + [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1]; + +static long +restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) +{ + long err = __get_user(regs->pc, &sc->sc_pc); + + err |= __copy_from_user(regs, sc->sc_regs, sizeof_field(struct pt_regs, regs)); + /* simd-fp */ + err |= __copy_from_user(¤t->thread.fpstate, &sc->sc_fpregs, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __get_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); + + if (likely(!err)) + __fpstate_restore(current); + + return err; +} + +/* + * Note that this syscall is also used by setcontext(3) to install + * a given sigcontext. This because it's impossible to set *all* + * registers and transfer control from userland. + */ + +SYSCALL_DEFINE1(sigreturn, struct sigcontext __user *, sc) +{ + struct pt_regs *regs = current_pt_regs(); + sigset_t set; + + force_successful_syscall_return(); + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* Verify that it's a good sigcontext before using it */ + if (!access_ok(sc, sizeof(*sc))) + goto give_sigsegv; + if (__get_user(set.sig[0], &sc->sc_mask)) + goto give_sigsegv; + + set_current_blocked(&set); + + if (restore_sigcontext(sc, regs)) + goto give_sigsegv; + + /* Send SIGTRAP if we're single-stepping: */ + if (ptrace_cancel_bpt(current)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc); + } + return regs->regs[0]; + +give_sigsegv: + force_sig(SIGSEGV); + return 0; +} + +SYSCALL_DEFINE1(rt_sigreturn, struct rt_sigframe __user *, frame) +{ + struct pt_regs *regs = current_pt_regs(); + sigset_t set; + + force_successful_syscall_return(); + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* Verify that it's a good ucontext_t before using it */ + if (!access_ok(&frame->uc, sizeof(frame->uc))) + goto give_sigsegv; + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto give_sigsegv; + + set_current_blocked(&set); + + if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) + goto give_sigsegv; + + if (restore_altstack(&frame->uc.uc_stack)) + goto give_sigsegv; + + /* Send SIGTRAP if we're single-stepping: */ + if (ptrace_cancel_bpt(current)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc); + } + return regs->regs[0]; + +give_sigsegv: + force_sig(SIGSEGV); + return 0; +} + + +/* + * Set up a signal frame. 
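+ *
+ * The frame is placed on the current (or alternate) signal stack,
+ * aligned down to 32 bytes, and the return address is pointed at the
+ * vDSO rt_sigreturn trampoline so that the handler returns through
+ * sys_rt_sigreturn().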
+ */ + +static inline void __user * +get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size) +{ + return (void __user *)((sigsp(sp, ksig) - frame_size) & -32ul); +} + +static long +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + unsigned long mask) +{ + long err = 0; + + err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); + err |= __put_user(mask, &sc->sc_mask); + err |= __put_user(regs->pc, &sc->sc_pc); + err |= __put_user(8, &sc->sc_ps); + + err |= __copy_to_user(sc->sc_regs, regs, sizeof_field(struct pt_regs, regs)); + err |= __put_user(0, sc->sc_regs+31); + /* simd-fp */ + __fpstate_save(current); + err |= __copy_to_user(&sc->sc_fpregs, ¤t->thread.fpstate, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __put_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); + + return err; +} + +static int +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) +{ + unsigned long err = 0; + struct rt_sigframe __user *frame; + + frame = get_sigframe(ksig, regs->regs[30], sizeof(*frame)); + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(set->sig[0], &frame->uc.uc_old_sigmask); + err |= __save_altstack(&frame->uc.uc_stack, regs->regs[30]); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + return -EFAULT; + + /* "Return" to the handler */ + regs->regs[26] = VDSO_SYMBOL(current->mm->context.vdso, rt_sigreturn); + regs->regs[27] = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; + regs->regs[16] = ksig->sig; /* a0: signal number */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + /* a1: siginfo pointer, a2: ucontext pointer */ + regs->regs[17] = (unsigned long) &frame->info; + regs->regs[18] = (unsigned long) &frame->uc; + } else { + /* a1: exception code, a2: sigcontext pointer */ + regs->regs[17] = 0; + regs->regs[18] = (unsigned long) &frame->uc.uc_mcontext; + } + regs->regs[30] = (unsigned long) frame; + +#if DEBUG_SIG + pr_info("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->pc, regs->regs[26]); +#endif + + return 0; +} + +/* + * OK, we're invoking a handler. + */ +static inline void +handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + rseq_signal_deliver(ksig, regs); + + ret = setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, 0); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +static void +do_signal(struct pt_regs *regs) +{ + unsigned long single_stepping = ptrace_cancel_bpt(current); + struct ksignal ksig; + + /* This lets the debugger run, ... */ + if (get_signal(&ksig)) { + /* ... so re-check the single stepping. */ + single_stepping |= ptrace_cancel_bpt(current); + /* Whee! Actually deliver the signal. 
*/ + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + if (!(ksig.ka.sa.sa_flags & SA_RESTART)) { + regs->regs[0] = EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + /* reset v0 and a3 and replay syscall */ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + regs->regs[0] = EINTR; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + handle_signal(&ksig, regs); + } else { + single_stepping |= ptrace_cancel_bpt(current); + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + /* Reset v0 and a3 and replay syscall. */ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTART_RESTARTBLOCK: + /* Set v0 to the restart_syscall and replay */ + regs->regs[0] = __NR_restart_syscall; + regs->pc -= 4; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + restore_saved_sigmask(); + } + if (single_stepping) + ptrace_set_bpt(current); /* re-set breakpoint */ +} + +asmlinkage void +do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) +{ + do { + local_irq_enable(); + + if (thread_flags & _TIF_NEED_RESCHED) + schedule(); + + if (thread_flags & _TIF_UPROBE) { + unsigned long pc = regs->pc; + + uprobe_notify_resume(regs); + sw64_fix_uretprobe(regs, pc - 4); + } + + if (thread_flags & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + + local_irq_disable(); + thread_flags = READ_ONCE(current_thread_info()->flags); + } while (thread_flags & _TIF_WORK_MASK); +} -- Gitee From 87e9be0a94e6f6e50c8b3ac979690ce8ec582805 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:08 +0800 Subject: [PATCH 301/953] anolis: sw64: add FPU support ANBZ: #4688 Add FPU and floating-point emulation support for SW64. 
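The new files provide the FPCR access helpers and the software IEEE
status word (asm/fpu.h, uapi/asm/fpu.h), soft-fp glue
(asm/sfp-machine.h), FPU register save/restore (kernel/fpu.S) and the
math-emu code used for software completion of trapping FP
instructions.

As a rough illustrative sketch (not part of the patch; all names are
from uapi/asm/fpu.h below), the per-thread IEEE trap enables are
software state that ieee_swcr_to_fpcr() folds into the hardware FPCR
as *disable* bits:

	/* Hypothetical usage of the helpers added below. */
	static unsigned long example_fpcr(void)
	{
		unsigned long swcr = IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE;

		/* In the result, FPCR_INVD/FPCR_DZED stay clear (those traps
		 * are taken) while FPCR_OVFD, FPCR_UNFD and FPCR_INED are set
		 * (disabled); ieee_fpcr_to_swcr() recovers the enable bits. */
		return ieee_swcr_to_fpcr(swcr);
	}
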
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/fpu.h | 91 ++ arch/sw_64/include/asm/sfp-machine.h | 69 + arch/sw_64/include/uapi/asm/fpu.h | 233 +++ arch/sw_64/kernel/fpu.S | 111 ++ arch/sw_64/math-emu/Makefile | 10 + arch/sw_64/math-emu/math.c | 2255 ++++++++++++++++++++++++++ arch/sw_64/math-emu/qrnnd.S | 133 ++ arch/sw_64/math-emu/sfp-util.h | 41 + 8 files changed, 2943 insertions(+) create mode 100644 arch/sw_64/include/asm/fpu.h create mode 100644 arch/sw_64/include/asm/sfp-machine.h create mode 100644 arch/sw_64/include/uapi/asm/fpu.h create mode 100644 arch/sw_64/kernel/fpu.S create mode 100644 arch/sw_64/math-emu/Makefile create mode 100644 arch/sw_64/math-emu/math.c create mode 100644 arch/sw_64/math-emu/qrnnd.S create mode 100644 arch/sw_64/math-emu/sfp-util.h diff --git a/arch/sw_64/include/asm/fpu.h b/arch/sw_64/include/asm/fpu.h new file mode 100644 index 000000000000..a0b0ff5af168 --- /dev/null +++ b/arch/sw_64/include/asm/fpu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FPU_H +#define _ASM_SW64_FPU_H + +#include +#ifdef __KERNEL__ + +/* + * The following two functions don't need trapb/excb instructions + * around the mf_fpcr/mt_fpcr instructions because (a) the kernel + * never generates arithmetic faults and (b) sys_call instructions + * are implied trap barriers. + */ + +static inline unsigned long +rdfpcr(void) +{ + unsigned long ret; + unsigned long fp[4] __aligned(32); + + __asm__ __volatile__ ( + " vstd $f0, %0\n\t" + " rfpcr $f0\n\t" + " fimovd $f0, %1\n\t" + " vldd $f0, %0\n\t" + : "=m"(*fp), "=r"(ret)); + + return ret; +} + +static inline void +wrfpcr(unsigned long val) +{ + unsigned long tmp; + unsigned long fp[4] __aligned(32); + + __asm__ __volatile__ ( + " vstd $f0, %0\n\t" + " ifmovd %2, $f0\n\t" + " wfpcr $f0\n\t" + " and %2, 0x3, %1\n\t" + " beq %1, 1f\n\t" + " subl %1, 1, %1\n\t" + " beq %1, 2f\n\t" + " subl %1, 1, %1\n\t" + " beq %1, 3f\n\t" + " setfpec3\n\t" + " br 6f\n\t" + "1: setfpec0\n\t" + " br 6f\n\t" + "2: setfpec1\n\t" + " br 6f\n\t" + "3: setfpec2\n\t" + "6: vldd $f0, %0\n\t" + : "=m"(*fp), "=&r"(tmp) : "r"(val)); +} + +static inline unsigned long +swcr_update_status(unsigned long swcr, unsigned long fpcr) +{ + /* + * SW64 implements most of the bits in hardware. Collect + * the acrued exception bits from the real fpcr. 
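+	 * The shifts below line up the per-lane FPCR_STATUS_MASK0..3
+	 * groups with the software IEEE_STATUS_MASK0..3 groups: lanes
+	 * 0 and 1 live above the software bits, lanes 2 and 3 below.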
+ */
+	swcr &= ~(IEEE_STATUS_MASK0 | IEEE_STATUS_MASK1
+			| IEEE_STATUS_MASK2 | IEEE_STATUS_MASK3);
+	swcr |= (fpcr >> 35) & IEEE_STATUS_MASK0;
+	swcr |= (fpcr >> 13) & IEEE_STATUS_MASK1;
+	swcr |= (fpcr << 14) & IEEE_STATUS_MASK2;
+	swcr |= (fpcr << 36) & IEEE_STATUS_MASK3;
+	return swcr;
+}
+
+extern unsigned long sw64_read_fp_reg(unsigned long reg);
+extern void sw64_write_fp_reg(unsigned long reg, unsigned long val);
+extern unsigned long sw64_read_fp_reg_s(unsigned long reg);
+extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val);
+
+
+extern void sw64_write_simd_fp_reg_s(unsigned long reg,
+		unsigned long f0, unsigned long f1);
+extern void sw64_write_simd_fp_reg_d(unsigned long reg,
+		unsigned long f0, unsigned long f1,
+		unsigned long f2, unsigned long f3);
+extern void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a);
+extern void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long *fp_value);
+extern void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SW64_FPU_H */
diff --git a/arch/sw_64/include/asm/sfp-machine.h b/arch/sw_64/include/asm/sfp-machine.h
new file mode 100644
index 000000000000..156bebc9c515
--- /dev/null
+++ b/arch/sw_64/include/asm/sfp-machine.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Machine-dependent software floating-point definitions.
+ * sw64 kernel version.
+ * Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
+ * This file is part of the GNU C Library.
+ * Contributed by Richard Henderson (rth@cygnus.com),
+ *		  Jakub Jelinek (jakub@redhat.com) and
+ *		  David S. Miller (davem@redhat.com).
+ */
+
+#ifndef _ASM_SW64_SFP_MACHINE_H
+#define _ASM_SW64_SFP_MACHINE_H
+
+#define _FP_W_TYPE_SIZE		64
+#define _FP_W_TYPE		unsigned long
+#define _FP_WS_TYPE		signed long
+#define _FP_I_TYPE		long
+
+#define _FP_MUL_MEAT_S(R, X, Y)					\
+	_FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S, R, X, Y)
+#define _FP_MUL_MEAT_D(R, X, Y)					\
+	_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm)
+#define _FP_MUL_MEAT_Q(R, X, Y)					\
+	_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q, R, X, Y, umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R, X, Y)	_FP_DIV_MEAT_1_imm(S, R, X, Y, _FP_DIV_HELP_imm)
+#define _FP_DIV_MEAT_D(R, X, Y)	_FP_DIV_MEAT_1_udiv(D, R, X, Y)
+#define _FP_DIV_MEAT_Q(R, X, Y)	_FP_DIV_MEAT_2_udiv(Q, R, X, Y)
+
+#define _FP_NANFRAC_S		_FP_QNANBIT_S
+#define _FP_NANFRAC_D		_FP_QNANBIT_D
+#define _FP_NANFRAC_Q		_FP_QNANBIT_Q
+#define _FP_NANSIGN_S		1
+#define _FP_NANSIGN_D		1
+#define _FP_NANSIGN_Q		1
+
+#define _FP_KEEPNANFRACP 1
+
+/* Sw_64 Architecture Handbook, 4.7.10.4 says that
+ * we should prefer any type of NaN in Fb, then Fa.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
+do {								\
+	R##_s = Y##_s;						\
+	_FP_FRAC_COPY_##wc(R, X);				\
+	R##_c = FP_CLS_NAN;					\
+} while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE	mode
+#define FP_RND_NEAREST	(FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT)
+#define FP_RND_ZERO	(FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT)
+#define FP_RND_PINF	(FPCR_DYN_PLUS >> FPCR_DYN_SHIFT)
+#define FP_RND_MINF	(FPCR_DYN_MINUS >> FPCR_DYN_SHIFT)
+
+/* Exception flags.
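+ * These map one-to-one onto the IEEE trap-enable bits from
+ * uapi/asm/fpu.h, so a raised _fex can be tested directly against
+ * the per-thread trap-enable word.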
*/ +#define FP_EX_INVALID IEEE_TRAP_ENABLE_INV +#define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF +#define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF +#define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE +#define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE +#define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO + +#define FP_DENORM_ZERO (swcr & IEEE_MAP_DMZ) + +/* We write the results always */ +#define FP_INHIBIT_RESULTS 0 + +#endif /* _ASM_SW64_SFP_MACHINE_H */ diff --git a/arch/sw_64/include/uapi/asm/fpu.h b/arch/sw_64/include/uapi/asm/fpu.h new file mode 100644 index 000000000000..8945816c542b --- /dev/null +++ b/arch/sw_64/include/uapi/asm/fpu.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_FPU_H +#define _UAPI_ASM_SW64_FPU_H + +/* + * SW-64 floating-point control register defines: + */ +#define FPCR_DNOD (1UL << 47) /* denorm INV trap disable */ +#ifdef CONFIG_SUBARCH_C3B +#define FPCR_DNZ (1UL << 48) /* denorms to zero */ +#else +#define FPCR_DNOE (1UL << 48) /* hardware denormal support */ +#endif +#define FPCR_INVD (1UL << 49) /* invalid op disable (opt.) */ +#define FPCR_DZED (1UL << 50) /* division by zero disable (opt.) */ +#define FPCR_OVFD (1UL << 51) /* overflow disable (optional) */ +#define FPCR_INV (1UL << 52) /* invalid operation */ +#define FPCR_DZE (1UL << 53) /* division by zero */ +#define FPCR_OVF (1UL << 54) /* overflow */ +#define FPCR_UNF (1UL << 55) /* underflow */ +#define FPCR_INE (1UL << 56) /* inexact */ +#define FPCR_IOV (1UL << 57) /* integer overflow */ +#define FPCR_UNDZ (1UL << 60) /* underflow to zero (opt.) */ +#define FPCR_UNFD (1UL << 61) /* underflow disable (opt.) */ +#define FPCR_INED (1UL << 62) /* inexact disable (opt.) */ +#define FPCR_SUM (1UL << 63) /* summary bit */ + +#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */ +#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */ +#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */ +#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */ +#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */ +#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT) + +#define FPCR_MASK 0xffff800000000000L + +#ifdef CONFIG_SUBARCH_C3B +#define FPCR_INIT FPCR_DYN_NORMAL +#else +#define FPCR_INIT (FPCR_DYN_NORMAL | FPCR_DNOE) +#endif + +/* status bit coming from hardware fpcr . 
defined by fire3 */
+#define FPCR_STATUS_INV0	(1UL << 52)
+#define FPCR_STATUS_DZE0	(1UL << 53)
+#define FPCR_STATUS_OVF0	(1UL << 54)
+#define FPCR_STATUS_UNF0	(1UL << 55)
+#define FPCR_STATUS_INE0	(1UL << 56)
+#define FPCR_STATUS_OVI0	(1UL << 57)
+
+#define FPCR_STATUS_INV1	(1UL << 36)
+#define FPCR_STATUS_DZE1	(1UL << 37)
+#define FPCR_STATUS_OVF1	(1UL << 38)
+#define FPCR_STATUS_UNF1	(1UL << 39)
+#define FPCR_STATUS_INE1	(1UL << 40)
+#define FPCR_STATUS_OVI1	(1UL << 41)
+
+#define FPCR_STATUS_INV2	(1UL << 20)
+#define FPCR_STATUS_DZE2	(1UL << 21)
+#define FPCR_STATUS_OVF2	(1UL << 22)
+#define FPCR_STATUS_UNF2	(1UL << 23)
+#define FPCR_STATUS_INE2	(1UL << 24)
+#define FPCR_STATUS_OVI2	(1UL << 25)
+
+#define FPCR_STATUS_INV3	(1UL << 4)
+#define FPCR_STATUS_DZE3	(1UL << 5)
+#define FPCR_STATUS_OVF3	(1UL << 6)
+#define FPCR_STATUS_UNF3	(1UL << 7)
+#define FPCR_STATUS_INE3	(1UL << 8)
+#define FPCR_STATUS_OVI3	(1UL << 9)
+
+#define FPCR_STATUS_MASK0	(FPCR_STATUS_INV0 | FPCR_STATUS_DZE0 |	\
+				 FPCR_STATUS_OVF0 | FPCR_STATUS_UNF0 |	\
+				 FPCR_STATUS_INE0 | FPCR_STATUS_OVI0)
+
+#define FPCR_STATUS_MASK1	(FPCR_STATUS_INV1 | FPCR_STATUS_DZE1 |	\
+				 FPCR_STATUS_OVF1 | FPCR_STATUS_UNF1 |	\
+				 FPCR_STATUS_INE1 | FPCR_STATUS_OVI1)
+
+#define FPCR_STATUS_MASK2	(FPCR_STATUS_INV2 | FPCR_STATUS_DZE2 |	\
+				 FPCR_STATUS_OVF2 | FPCR_STATUS_UNF2 |	\
+				 FPCR_STATUS_INE2 | FPCR_STATUS_OVI2)
+
+#define FPCR_STATUS_MASK3	(FPCR_STATUS_INV3 | FPCR_STATUS_DZE3 |	\
+				 FPCR_STATUS_OVF3 | FPCR_STATUS_UNF3 |	\
+				 FPCR_STATUS_INE3 | FPCR_STATUS_OVI3)
+
+
+/*
+ * IEEE trap enables are implemented in software. These per-thread
+ * bits are stored in the "ieee_state" field of "struct thread_info".
+ * Thus, the bits are defined so as not to conflict with the
+ * floating-point enable bit (which is architected).
+ */
+#define IEEE_TRAP_ENABLE_INV	(1UL << 1)	/* invalid op */
+#define IEEE_TRAP_ENABLE_DZE	(1UL << 2)	/* division by zero */
+#define IEEE_TRAP_ENABLE_OVF	(1UL << 3)	/* overflow */
+#define IEEE_TRAP_ENABLE_UNF	(1UL << 4)	/* underflow */
+#define IEEE_TRAP_ENABLE_INE	(1UL << 5)	/* inexact */
+#define IEEE_TRAP_ENABLE_DNO	(1UL << 6)	/* denorm */
+#define IEEE_TRAP_ENABLE_MASK	(IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
+				 IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
+				 IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
+
+/* Denorm and Underflow flushing */
+#define IEEE_MAP_DMZ		(1UL << 12)	/* Map denorm inputs to zero */
+#define IEEE_MAP_UMZ		(1UL << 13)	/* Map underflowed outputs to zero */
+
+#define IEEE_MAP_MASK		(IEEE_MAP_DMZ | IEEE_MAP_UMZ)
+
+/* status bits coming from fpcr: */
+#define IEEE_STATUS_INV		(1UL << 17)
+#define IEEE_STATUS_DZE		(1UL << 18)
+#define IEEE_STATUS_OVF		(1UL << 19)
+#define IEEE_STATUS_UNF		(1UL << 20)
+#define IEEE_STATUS_INE		(1UL << 21)
+#define IEEE_STATUS_DNO		(1UL << 22)
+
+
+#define IEEE_STATUS_MASK	(IEEE_STATUS_INV | IEEE_STATUS_DZE |	\
+				 IEEE_STATUS_OVF | IEEE_STATUS_UNF |	\
+				 IEEE_STATUS_INE | IEEE_STATUS_DNO)
+
+#define IEEE_SW_MASK		(IEEE_TRAP_ENABLE_MASK |		\
+				 IEEE_STATUS_MASK | IEEE_MAP_MASK)
+
+#define IEEE_CURRENT_RM_SHIFT	32
+#define IEEE_CURRENT_RM_MASK	(3UL << IEEE_CURRENT_RM_SHIFT)
+
+#define IEEE_STATUS_TO_EXCSUM_SHIFT	16
+
+#define IEEE_INHERIT	(1UL << 63)	/* inherit on thread create? */
+
+/* ieee_state expanded to support simd, added by fire3 */
+
+#define IEEE_STATUS_INV0	(1UL << 17)
+#define IEEE_STATUS_DZE0	(1UL << 18)
+#define IEEE_STATUS_OVF0	(1UL << 19)
+#define IEEE_STATUS_UNF0	(1UL << 20)
+#define IEEE_STATUS_INE0	(1UL << 21)
+#define IEEE_STATUS_DNO0	(1UL << 22)
+#define IEEE_STATUS_MASK0	(IEEE_STATUS_INV0 | IEEE_STATUS_DZE0 |	\
+				 IEEE_STATUS_OVF0 | IEEE_STATUS_UNF0 |	\
+				 IEEE_STATUS_INE0 | IEEE_STATUS_DNO0)
+
+#define IEEE_STATUS0_TO_EXCSUM_SHIFT	16
+
+#define IEEE_STATUS_INV1	(1UL << 23)
+#define IEEE_STATUS_DZE1	(1UL << 24)
+#define IEEE_STATUS_OVF1	(1UL << 25)
+#define IEEE_STATUS_UNF1	(1UL << 26)
+#define IEEE_STATUS_INE1	(1UL << 27)
+#define IEEE_STATUS_DNO1	(1UL << 28)
+#define IEEE_STATUS_MASK1	(IEEE_STATUS_INV1 | IEEE_STATUS_DZE1 |	\
+				 IEEE_STATUS_OVF1 | IEEE_STATUS_UNF1 |	\
+				 IEEE_STATUS_INE1 | IEEE_STATUS_DNO1)
+
+#define IEEE_STATUS1_TO_EXCSUM_SHIFT	22
+
+#define IEEE_STATUS_INV2	(1UL << 34)
+#define IEEE_STATUS_DZE2	(1UL << 35)
+#define IEEE_STATUS_OVF2	(1UL << 36)
+#define IEEE_STATUS_UNF2	(1UL << 37)
+#define IEEE_STATUS_INE2	(1UL << 38)
+#define IEEE_STATUS_DNO2	(1UL << 39)
+#define IEEE_STATUS_MASK2	(IEEE_STATUS_INV2 | IEEE_STATUS_DZE2 |	\
+				 IEEE_STATUS_OVF2 | IEEE_STATUS_UNF2 |	\
+				 IEEE_STATUS_INE2 | IEEE_STATUS_DNO2)
+
+#define IEEE_STATUS2_TO_EXCSUM_SHIFT	33
+
+#define IEEE_STATUS_INV3	(1UL << 40)
+#define IEEE_STATUS_DZE3	(1UL << 41)
+#define IEEE_STATUS_OVF3	(1UL << 42)
+#define IEEE_STATUS_UNF3	(1UL << 43)
+#define IEEE_STATUS_INE3	(1UL << 44)
+#define IEEE_STATUS_DNO3	(1UL << 45)
+#define IEEE_STATUS_MASK3	(IEEE_STATUS_INV3 | IEEE_STATUS_DZE3 |	\
+				 IEEE_STATUS_OVF3 | IEEE_STATUS_UNF3 |	\
+				 IEEE_STATUS_INE3 | IEEE_STATUS_DNO3)
+
+#define IEEE_STATUS3_TO_EXCSUM_SHIFT	39
+
+
+/*
+ * Convert the software IEEE trap enable and status bits into the
+ * hardware fpcr format.
+ *
+ * Digital Unix engineers receive my thanks for not defining the
+ * software bits identical to the hardware bits. The chip designers
+ * receive my thanks for making all the not-implemented fpcr bits
+ * RAZ, forcing us to use system calls to read/write this value.
+ */
+static inline unsigned long
+ieee_swcr_to_fpcr(unsigned long sw)
+{
+	unsigned long fp;
+
+	fp = (sw & IEEE_STATUS_MASK0) << 35;
+	fp |= (sw & IEEE_STATUS_MASK1) << 13;
+	fp |= (sw & IEEE_STATUS_MASK2) >> 14;
+	fp |= (sw & IEEE_STATUS_MASK3) >> 36;
+
+	fp |= (sw & IEEE_MAP_DMZ) << 36;
+	fp |= (sw & IEEE_STATUS_MASK0 ? FPCR_SUM : 0);
+	fp |= (sw & IEEE_STATUS_MASK1 ? FPCR_SUM : 0);
+	fp |= (sw & IEEE_STATUS_MASK2 ? FPCR_SUM : 0);
+	fp |= (sw & IEEE_STATUS_MASK3 ? FPCR_SUM : 0);
+	fp |= (~sw & (IEEE_TRAP_ENABLE_INV
+		      | IEEE_TRAP_ENABLE_DZE
+		      | IEEE_TRAP_ENABLE_OVF)) << 48;
+	fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
+	fp |= (sw & IEEE_MAP_UMZ ? 
FPCR_UNDZ | FPCR_UNFD : 0); + fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41; + return fp; +} + +static inline unsigned long +ieee_fpcr_to_swcr(unsigned long fp) +{ + unsigned long sw; + + sw = (fp >> 35) & IEEE_STATUS_MASK; + sw |= (fp >> 36) & IEEE_MAP_DMZ; + sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV + | IEEE_TRAP_ENABLE_DZE + | IEEE_TRAP_ENABLE_OVF); + sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE); + sw |= (fp >> 47) & IEEE_MAP_UMZ; + sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO; + return sw; +} +#endif /* _UAPI_ASM_SW64_FPU_H */ diff --git a/arch/sw_64/kernel/fpu.S b/arch/sw_64/kernel/fpu.S new file mode 100644 index 000000000000..ddc988681fdd --- /dev/null +++ b/arch/sw_64/kernel/fpu.S @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include + + .text + .set noat +ENTRY(__fpstate_save) + /* a0: prev task */ +#ifdef CONFIG_SUBARCH_C4 + csrr $1, CSR_WR_FREGS + beq $1, out +#endif + vstd $f0, TASK_THREAD_F0(a0) + vstd $f1, TASK_THREAD_F1(a0) + vstd $f2, TASK_THREAD_F2(a0) + vstd $f3, TASK_THREAD_F3(a0) + vstd $f4, TASK_THREAD_F4(a0) + vstd $f5, TASK_THREAD_F5(a0) + vstd $f6, TASK_THREAD_F6(a0) + vstd $f7, TASK_THREAD_F7(a0) + vstd $f8, TASK_THREAD_F8(a0) + vstd $f9, TASK_THREAD_F9(a0) + vstd $f10, TASK_THREAD_F10(a0) + vstd $f11, TASK_THREAD_F11(a0) + vstd $f12, TASK_THREAD_F12(a0) + vstd $f13, TASK_THREAD_F13(a0) + vstd $f14, TASK_THREAD_F14(a0) + vstd $f15, TASK_THREAD_F15(a0) + vstd $f16, TASK_THREAD_F16(a0) + vstd $f17, TASK_THREAD_F17(a0) + vstd $f18, TASK_THREAD_F18(a0) + vstd $f19, TASK_THREAD_F19(a0) + vstd $f20, TASK_THREAD_F20(a0) + vstd $f21, TASK_THREAD_F21(a0) + vstd $f22, TASK_THREAD_F22(a0) + vstd $f23, TASK_THREAD_F23(a0) + vstd $f24, TASK_THREAD_F24(a0) + vstd $f25, TASK_THREAD_F25(a0) + vstd $f26, TASK_THREAD_F26(a0) + vstd $f27, TASK_THREAD_F27(a0) + rfpcr $f0 + vstd $f28, TASK_THREAD_F28(a0) + vstd $f29, TASK_THREAD_F29(a0) + vstd $f30, TASK_THREAD_F30(a0) + fstd $f0, TASK_THREAD_FPCR(a0) + vldd $f0, TASK_THREAD_F0(a0) +out: + ret +END(__fpstate_save) + +ENTRY(__fpstate_restore) + /* a0: next task */ + fldd $f0, TASK_THREAD_FPCR(a0) + wfpcr $f0 + fimovd $f0, t1 + and t1, 0x3, t1 + beq t1, $setfpec_0 + subl t1, 0x1, t1 + beq t1, $setfpec_1 + subl t1, 0x1, t1 + beq t1, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f0, TASK_THREAD_F0(a0) + vldd $f1, TASK_THREAD_F1(a0) + vldd $f2, TASK_THREAD_F2(a0) + vldd $f3, TASK_THREAD_F3(a0) + vldd $f4, TASK_THREAD_F4(a0) + vldd $f5, TASK_THREAD_F5(a0) + vldd $f6, TASK_THREAD_F6(a0) + vldd $f7, TASK_THREAD_F7(a0) + vldd $f8, TASK_THREAD_F8(a0) + vldd $f9, TASK_THREAD_F9(a0) + vldd $f10, TASK_THREAD_F10(a0) + vldd $f11, TASK_THREAD_F11(a0) + vldd $f12, TASK_THREAD_F12(a0) + vldd $f13, TASK_THREAD_F13(a0) + vldd $f14, TASK_THREAD_F14(a0) + vldd $f15, TASK_THREAD_F15(a0) + vldd $f16, TASK_THREAD_F16(a0) + vldd $f17, TASK_THREAD_F17(a0) + vldd $f18, TASK_THREAD_F18(a0) + vldd $f19, TASK_THREAD_F19(a0) + vldd $f20, TASK_THREAD_F20(a0) + vldd $f21, TASK_THREAD_F21(a0) + vldd $f22, TASK_THREAD_F22(a0) + vldd $f23, TASK_THREAD_F23(a0) + vldd $f24, TASK_THREAD_F24(a0) + vldd $f25, TASK_THREAD_F25(a0) + vldd $f26, TASK_THREAD_F26(a0) + vldd $f27, TASK_THREAD_F27(a0) + vldd $f28, TASK_THREAD_F28(a0) + vldd $f29, TASK_THREAD_F29(a0) + vldd $f30, TASK_THREAD_F30(a0) +#ifdef CONFIG_SUBARCH_C4 + csrw $31, CSR_WR_FREGS +#endif + ret +END(__fpstate_restore) diff 
--git a/arch/sw_64/math-emu/Makefile b/arch/sw_64/math-emu/Makefile new file mode 100644 index 000000000000..72e750d138e6 --- /dev/null +++ b/arch/sw_64/math-emu/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the FPU instruction emulation. +# + +ccflags-y := -w + +obj-$(CONFIG_MATHEMU) += math-emu.o + +math-emu-objs := math.o qrnnd.o diff --git a/arch/sw_64/math-emu/math.c b/arch/sw_64/math-emu/math.c new file mode 100644 index 000000000000..b578752f0730 --- /dev/null +++ b/arch/sw_64/math-emu/math.c @@ -0,0 +1,2255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Modify History + * + * who when what + * --- ---- ---- + * stone 2004-09-02 Add SIMD floating emulation code + * fire3 2008-12-27 Add SIMD floating emulation code for SW64 + */ + +#include + +#include + +#include "sfp-util.h" + +#include +#include +#include + +/* + * This is for sw64 + */ + +#define IEEE_E_STATUS_MASK IEEE_STATUS_MASK +#define IEEE_E_STATUS_TO_EXCSUM_SHIFT 0 +#define SW64_FP_DENOMAL 1 /* A denormal data */ +#define SW64_FP_NORMAL 0 /* A denormal data */ +#define SW64_FP_NAN 2 + +#define SW64_FP_NAN_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 255: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + +#define SW64_FP_NAN_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + (union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 2047: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + + +#define SW64_FP_NORMAL_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +#define SW64_FP_NORMAL_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + (union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +/* Operation Code for SW64 */ +#define OP_SIMD_1 0x1A +#define OP_SIMD_2 0x1B +#define OP_SIMD_MUL_ADD 0x1B +#define OP_SIMD_NORMAL 0x1A +#define OP_MUL_ADD 0x19 + +#define FNC_FMAS 0x0 +#define FNC_FMAD 0x1 +#define FNC_FMSS 0x2 +#define FNC_FMSD 0x3 +#define FNC_FNMAS 0x4 +#define FNC_FNMAD 0x5 +#define FNC_FNMSS 0x6 +#define FNC_FNMSD 0x7 + +#define FNC_VADDS 0x80 +#define FNC_VADDD 0x81 +#define FNC_VSUBS 0x82 +#define FNC_VSUBD 0x83 +#define FNC_VMULS 0x84 +#define FNC_VMULD 0x85 +#define FNC_VDIVS 0x86 +#define FNC_VDIVD 0x87 +#define FNC_VSQRTS 0x88 +#define FNC_VSQRTD 0x89 + +#define FNC_VFCMPEQ 0x8c +#define FNC_VFCMPLE 0x8d +#define FNC_VFCMPLT 0x8e +#define FNC_VFCMPUN 0x8f + +#define FNC_VCPYS 0x90 +#define FNC_VCPYSE 0x91 +#define FNC_VCPYSN 0x92 + +#define FNC_VMAS 0x0 +#define 
FNC_VMAD 0x1 +#define FNC_VMSS 0x2 +#define FNC_VMSD 0x3 +#define FNC_VNMAS 0x4 +#define FNC_VNMAD 0x5 +#define FNC_VNMSS 0x6 +#define FNC_VNMSD 0x7 + +long simd_fp_emul_s(unsigned long pc); +long simd_fp_emul_d(unsigned long pc); +long mul_add_fp_emul(unsigned long pc); +long simd_cmp_emul_d(unsigned long pc); + +long simd_mul_add_fp_emul_d(unsigned long pc); +long simd_mul_add_fp_emul_s(unsigned long pc); + +void read_fp_reg_s(unsigned long reg, unsigned long *p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void read_fp_reg_d(unsigned long reg, unsigned long *val_p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void write_fp_reg_s(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +void write_fp_reg_d(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +#define LOW_64_WORKING 1 +#define HIGH_64_WORKING 2 + +/* + * End for sw64 + */ + +#define OPC_HMC 0x00 +#define OPC_INTA 0x10 +#define OPC_INTL 0x11 +#define OPC_INTS 0x12 +#define OPC_INTM 0x13 +#define OPC_FLTC 0x14 +#define OPC_FLTV 0x15 +#define OPC_FLTI 0x16 +#define OPC_FLTL 0x17 +#define OPC_MISC 0x18 +#define OPC_JSR 0x1a + +#define FOP_SRC_S 0 +#define FOP_SRC_T 2 +#define FOP_SRC_Q 3 + +#define FOP_FNC_ADDx 0 +#define FOP_FNC_CVTQL 0 +#define FOP_FNC_SUBx 1 +#define FOP_FNC_MULx 2 +#define FOP_FNC_DIVx 3 +#define FOP_FNC_CMPxUN 4 +#define FOP_FNC_CMPxEQ 5 +#define FOP_FNC_CMPxLT 6 +#define FOP_FNC_CMPxLE 7 +#define FOP_FNC_SQRTx 11 +#define FOP_FNC_CVTxS 12 +#define FOP_FNC_CVTxT 14 +#define FOP_FNC_CVTxQ 15 + +/* this is for sw64 added by fire3*/ +#define FOP_FNC_ADDS 0 +#define FOP_FNC_ADDD 1 +#define FOP_FNC_SUBS 2 +#define FOP_FNC_SUBD 3 +#define FOP_FNC_MULS 4 +#define FOP_FNC_MULD 5 +#define FOP_FNC_DIVS 6 +#define FOP_FNC_DIVD 7 +#define FOP_FNC_SQRTS 8 +#define FOP_FNC_SQRTD 9 + +#define FOP_FNC_CMPEQ 0x10 +#define FOP_FNC_CMPLE 0x11 +#define FOP_FNC_CMPLT 0x12 +#define FOP_FNC_CMPUN 0x13 + +#define FOP_FNC_CVTSD 0x20 +#define FOP_FNC_CVTDS 0x21 +#define FOP_FNC_CVTLS 0x2D +#define FOP_FNC_CVTLD 0x2F +#define FOP_FNC_CVTDL 0x27 +#define FOP_FNC_CVTDL_G 0x22 +#define FOP_FNC_CVTDL_P 0x23 +#define FOP_FNC_CVTDL_Z 0x24 +#define FOP_FNC_CVTDL_N 0x25 + +#define FOP_FNC_CVTWL 0x28 +#define FOP_FNC_CVTLW 0x29 + +/* fire3 added end */ + + +#define MISC_TRAPB 0x0000 +#define MISC_EXCB 0x0400 + +extern unsigned long sw64_read_fp_reg(unsigned long reg); +extern void sw64_write_fp_reg(unsigned long reg, unsigned long val); +extern unsigned long sw64_read_fp_reg_s(unsigned long reg); +extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val); + + +#ifdef MODULE + +MODULE_DESCRIPTION("FP Software completion module"); + +extern long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +extern long (*sw64_fp_emul)(unsigned long pc); + +static long (*save_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +static long (*save_emul)(unsigned long pc); + +long do_sw_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask); +long do_sw_fp_emul(unsigned long pc); + +int init_module(void) +{ + save_emul_imprecise = sw64_fp_emul_imprecise; + save_emul = sw64_fp_emul; + sw64_fp_emul_imprecise = do_sw_fp_emul_imprecise; + sw64_fp_emul = do_sw_fp_emul; + return 0; +} + +void cleanup_module(void) +{ + sw64_fp_emul_imprecise = save_emul_imprecise; + sw64_fp_emul = save_emul; +} + +#undef sw64_fp_emul_imprecise +#define sw64_fp_emul_imprecise do_sw_fp_emul_imprecise 
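+/* Rebinding both names makes the definitions below compile as the
+ * do_sw_* variants that init_module() installed into the kernel's
+ * emulation hooks. */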
+#undef sw64_fp_emul +#define sw64_fp_emul do_sw_fp_emul + +#endif /* MODULE */ + + +/* + * Emulate the floating point instruction at address PC. Returns -1 if the + * instruction to be emulated is illegal (such as with the opDEC trap), else + * the SI_CODE for a SIGFPE signal, else 0 if everything's ok. + * + * Notice that the kernel does not and cannot use FP regs. This is good + * because it means that instead of saving/restoring all fp regs, we simply + * stick the result of the operation into the appropriate register. + */ +long sw64_fp_emul(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); + + unsigned long fa, fb, fc, func, mode, mode_bk, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long opcode; + + get_user(insn, (__u32 *)pc); + opcode = (insn >> 26) & 0x3f; + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + pr_debug("======= Entering Floating mathe emulation =====\n"); + pr_debug("Floating math emulation insn = %#lx, opcode=%d, func=%d\n", insn, opcode, func); + pr_debug("SW64 hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("SW64 software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + + if (opcode == OP_SIMD_NORMAL) { /* float simd math */ + if (func == FNC_VADDS || func == FNC_VSUBS || func == FNC_VSQRTS + || func == FNC_VMULS || func == FNC_VDIVS) + si_code = simd_fp_emul_s(pc); + if (func == FNC_VADDD || func == FNC_VSUBD || func == FNC_VSQRTD + || func == FNC_VMULD || func == FNC_VDIVD) + si_code = simd_fp_emul_d(pc); + if (func == FNC_VFCMPUN || func == FNC_VFCMPLT || func == FNC_VFCMPLE + || func == FNC_VFCMPEQ) + si_code = simd_cmp_emul_d(pc); + return si_code; + } + if (opcode == OP_SIMD_MUL_ADD) {/* simd mul and add */ + func = (insn >> 10) & 0x3f; + if (func == FNC_VMAS || func == FNC_VMSS || func == FNC_VNMAS + || func == FNC_VNMSS) { + si_code = simd_mul_add_fp_emul_s(pc); + return si_code; + } + + if (func == FNC_VMAD || func == FNC_VMSD || func == FNC_VNMAD + || func == FNC_VNMSD) { + si_code = simd_mul_add_fp_emul_d(pc); + return si_code; + } + func = (insn >> 5) & 0xff; + } + + if (opcode == OP_MUL_ADD) { + si_code = mul_add_fp_emul(pc); + return si_code; + } + switch (func) { + case FOP_FNC_SUBS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_SUB_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_SUBD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_SUB_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_ADDS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_ADD_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_ADDD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_ADD_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_MULS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_MUL_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_MULD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_MUL_D(DR, DA, 
DB); + goto pack_d; + + case FOP_FNC_DIVS: + pr_debug("FOP_FNC_DIVS\n"); + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_DIV_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_DIVD: + pr_debug("FOP_FNC_DIVD\n"); + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_DIV_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_SQRTS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_SQRT_S(SR, SB); + goto pack_s; + case FOP_FNC_SQRTD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_SQRT_D(DR, DB); + goto pack_d; + } + + + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + if ((func & ~0xf) == FOP_FNC_CMPEQ) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + + FP_UNPACK_RAW_DP(DA, &va); + FP_UNPACK_RAW_DP(DB, &vb); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + switch (func) { + case FOP_FNC_CMPUN: + if (res != 3) + vc = 0; + break; + case FOP_FNC_CMPEQ: + if (res) + vc = 0; + break; + case FOP_FNC_CMPLT: + if (res != -1) + vc = 0; + break; + case FOP_FNC_CMPLE: + if ((long)res > 0) + vc = 0; + break; + } + goto done_d; + } + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + if (func == FOP_FNC_CVTSD) { + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SB, &vb); + DR_c = DB_c; + DR_s = DB_s; + DR_e = DB_e + (1024 - 128); + DR_f = SB_f << (52 - 23); + goto pack_d; + } + + if (func == FOP_FNC_CVTDS) { + FP_CONV(S, D, 1, 1, SR, DB); + goto pack_s; + } + + if (func == FOP_FNC_CVTDL || func == FOP_FNC_CVTDL_G || func == FOP_FNC_CVTDL_P + || func == FOP_FNC_CVTDL_Z || func == FOP_FNC_CVTDL_N) { + mode_bk = mode; + if (func == FOP_FNC_CVTDL_Z) + mode = 0x0UL; + else if (func == FOP_FNC_CVTDL_N) + mode = 0x1UL; + else if (func == FOP_FNC_CVTDL_G) + mode = 0x2UL; + else if (func == FOP_FNC_CVTDL_P) + mode = 0x3UL; + + if (DB_c == FP_CLS_NAN && (_FP_FRAC_HIGH_RAW_D(DB) & _FP_QNANBIT_D)) { + /* AAHB Table B-2 says QNaN should not trigger INV */ + vc = 0; + } else + FP_TO_INT_ROUND_D(vc, DB, 64, 2); + mode = mode_bk; + goto done_d; + } + + vb = sw64_read_fp_reg(fb); + + switch (func) { + case FOP_FNC_CVTLW: + /* + * Notice: We can get here only due to an integer + * overflow. Such overflows are reported as invalid + * ops. We return the result the hw would have + * computed. 
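+		 *
+		 * Concretely, the 32-bit result is scattered into the
+		 * canonical longword-in-quadword layout: bits 31..30 (sign
+		 * and msb) land in bits 63..62 and bits 29..0 land in bits
+		 * 58..29, which is what the mask-and-shift expression below
+		 * computes.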
+ */ + vc = ((vb & 0xc0000000) << 32 | /* sign and msb */ + (vb & 0x3fffffff) << 29); /* rest of the int */ + FP_SET_EXCEPTION(FP_EX_INVALID); + goto done_d; + + case FOP_FNC_CVTLS: + FP_FROM_INT_S(SR, ((long)vb), 64, long); + goto pack_s; + + case FOP_FNC_CVTLD: + FP_FROM_INT_D(DR, ((long)vb), 64, long); + goto pack_d; + } + goto bad_insn; + + +pack_s: + FP_PACK_SP(&vc, SR); + + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); + sw64_write_fp_reg_s(fc, vc); + goto done; + +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +done_d: + sw64_write_fp_reg(fc, vc); + goto done; + + /* + * Take the appropriate action for each possible + * floating-point result: + * + * - Set the appropriate bits in the FPCR + * - If the specified exception is enabled in the FPCR, + * return. The caller (entArith) will dispatch + * the appropriate signal to the translated program. + * + * In addition, properly track the exception state in software + * as described in the SW64 Architecture Handbook section 4.7.7.3. + */ +done: + if (_fex) { + /* Record exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + pr_debug("SW64 before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. + */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask) +{ + unsigned long trigger_pc = regs->pc - 4; + unsigned long insn, opcode, rc, si_code = 0; + + + /* + * Turn off the bits corresponding to registers that are the + * target of instructions that set bits in the exception + * summary register. We have some slack doing this because a + * register that is the target of a trapping instruction can + * be written at most once in the trap shadow. + * + * Branches, jumps, TRAPBs, EXCBs and calls to HMcode all + * bound the trap shadow, so we need not look any further than + * up to the first occurrence of such an instruction. + */ + while (write_mask) { + get_user(insn, (__u32 *)(trigger_pc)); + opcode = insn >> 26; + rc = insn & 0x1f; + + switch (opcode) { + case OPC_HMC: + case OPC_JSR: + case 0x30 ... 
0x3f: /* branches */ + goto egress; + + case OPC_MISC: + switch (insn & 0xffff) { + case MISC_TRAPB: + case MISC_EXCB: + goto egress; + + default: + break; + } + break; + + case OPC_INTA: + case OPC_INTL: + case OPC_INTS: + case OPC_INTM: + write_mask &= ~(1UL << rc); + break; + + case OPC_FLTC: + case OPC_FLTV: + case OPC_FLTI: + case OPC_FLTL: + write_mask &= ~(1UL << (rc + 32)); + break; + } + if (!write_mask) { + /* Re-execute insns in the trap-shadow. */ + regs->pc = trigger_pc + 4; + si_code = sw64_fp_emul(trigger_pc); + goto egress; + } + trigger_pc -= 4; + } + +egress: + return si_code; +} + +#define WORKING_PART_0 0 +#define WORKING_PART_1 1 +#define WORKING_PART_2 2 +#define WORKING_PART_3 3 + + +/* + * This is for sw64 + */ + +long simd_cmp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC); + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD floating-CMP math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + break; + } + pr_debug("Before unpack va:%#lx, vb:%#lx\n", va, vb); + FP_UNPACK_RAW_DP(DA, &va); + FP_UNPACK_RAW_DP(DB, &vb); + pr_debug("DA_e:%d, _FP_FRAC_ZEROP_1(DA):%d\n", DA_e, _FP_FRAC_ZEROP_1(DA)); + pr_debug("DB_e:%d, _FP_FRAC_ZEROP_1(DB):%d\n", DA_e, _FP_FRAC_ZEROP_1(DA)); + pr_debug("DA iszero:%d, DB iszero:%d\n", ((!DA_e && _FP_FRAC_ZEROP_1(DA)) ? 
1 : 0), + ((!DB_e && _FP_FRAC_ZEROP_1(DB)))); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + pr_debug("res:%d\n", res); + switch (func) { + case FNC_VFCMPUN: + if (res != 3) + vc = 0; + break; + case FNC_VFCMPEQ: + if (res) + vc = 0; + break; + case FNC_VFCMPLT: + if (res != -1) + vc = 0; + break; + case FNC_VFCMPLE: + if ((long)res > 0) + vc = 0; + break; + } +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? 
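+	 * Each lane's accrued exception bits are ANDed with the software
+	 * trap-enable mask and the per-lane results are ORed together, so
+	 * an enabled exception in any of the four lanes is enough to make
+	 * the whole SIMD compare raise a signal.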
*/ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC); + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD D-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == 
SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + switch (func) { + case FNC_VSUBD: + pr_debug("FNC_VSUBD\n"); + FP_SUB_D(DR, DA, DB); + goto pack_d; + case FNC_VMULD: + pr_debug("FNC_VMULD\n"); + FP_MUL_D(DR, DA, DB); + goto pack_d; + case FNC_VADDD: + pr_debug("FNC_VADDD\n"); + FP_ADD_D(DR, DA, DB); + goto pack_d; + case FNC_VDIVD: + pr_debug("FNC_VDIVD\n"); + FP_DIV_D(DR, DA, DB); + goto pack_d; + case FNC_VSQRTD: + pr_debug("FNC_VSQRTD\n"); + FP_SQRT_D(DR, DB); + goto pack_d; + } +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_d, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
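+			 * As for the other lanes: the status bits are
+			 * cleared from the saved FPCR (keeping the dynamic
+			 * rounding-mode field) and this lane's software
+			 * status word is translated back into hardware FPCR
+			 * bits via ieee_swcr_to_fpcr().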
*/ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD S-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART0: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + 
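+			/*
+			 * No accrued status for this lane: when both source
+			 * operands are normal the hardware result can stand
+			 * and the lane is skipped; otherwise it falls through
+			 * to the software emulation below.
+			 */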
SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART1: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART2: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART3: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + + } + + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + + switch (func) { + case FNC_VSUBS: + pr_debug("FNC_VSUBS\n"); + FP_SUB_S(SR, SA, SB); + goto pack_s; + case FNC_VMULS: + pr_debug("FNC_VMULS\n"); + FP_MUL_S(SR, SA, SB); + goto pack_s; + case FNC_VADDS: + pr_debug("FNC_VADDS\n"); + FP_ADD_S(SR, SA, SB); + goto pack_s; + case FNC_VDIVS: + pr_debug("FNC_VDIVS\n"); + FP_DIV_S(SR, SA, SB); + goto pack_s; + case FNC_VSQRTS: + pr_debug("FNC_VSQRTS\n"); + FP_SQRT_S(SR, SB); + goto pack_s; + } +pack_s: + FP_PACK_SP(&vc, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_s, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + pr_debug("fex_p0: fpcr_p0:%#lx\n", fpcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + pr_debug("fex_p1: fpcr_p1:%#lx\n", fpcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + pr_debug("fex_p2: fpcr_p2:%#lx\n", fpcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + pr_debug("fex_p3: fpcr_p3:%#lx\n", fpcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_s(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. 
si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; + +} + +static inline unsigned long negative_value(unsigned long va) +{ + return (va ^ 0x8000000000000000UL); +} + +static inline unsigned long s_negative_value(unsigned long va) +{ + return (va ^ 0x80000000UL); +} + +/* + * sw64 mul-add floating emulation + */ +long mul_add_fp_emul(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_S(S_ZERO); + FP_DECL_D(D_ZERO); + FP_DECL_S(S_TMP2); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("===== Entering SW64 MUL-ADD Emulation =====\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) { + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + vc = sw64_read_fp_reg_s(fc); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_UNPACK_SP(SC, &vc); + FP_UNPACK_SP(S_ZERO, &vzero); + } + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + vc = sw64_read_fp_reg(fc); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + } + pr_debug("va = %#lx, vb = %#lx, vc = %#lx\n", va, vb, vc); + switch (func) { + case FNC_FMAS: + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMSS: + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMAS: /* (-va*vb) + vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMSS: /* (-va*vb) - vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + default: + goto bad_insn; + + } +pack_s: + FP_PACK_SP(&vd, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg_s(fd, vd); + goto done; + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg(fd, vd); + +done: + pr_debug("vd = %#lx\n", vd); + if (_fex) { + /* Record exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
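+	 * Note the ordering that follows: wrfpcr() clobbers the vector
+	 * registers (see the comment at its call site), so the destination
+	 * register is deliberately written again after the FPCR update
+	 * instead of relying on the store done in pack_s/pack_d.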
*/ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + wrfpcr(fpcr); /** wrfpcr will destroy vector register! */ + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) + sw64_write_fp_reg_s(fd, vd); + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) + sw64_write_fp_reg(fd, vd); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. + */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_mul_add_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_S(S_ZERO); + FP_DECL_S(S_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD S-floating mul-add emulation =======\n"); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("hardware fpcr = %#lx\n", fpcr); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_s(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + pr_debug("FPCR_STATUS_MASK0 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK0, fpcr); + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto 
next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + pr_debug("FPCR_STATUS_MASK1 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK0, fpcr); + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + } + + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_UNPACK_SP(SC, &vc); + FP_UNPACK_SP(S_ZERO, &vzero); + switch (func) { + case FNC_FMAS: + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMSS: + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMAS: /* (-va*vb) + vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMSS: /* (-va*vb) - vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + default: + goto bad_insn; + } + +pack_s: + FP_PACK_SP(&vd, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vd_p0 = vd; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vd_p1 = vd; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vd_p2 = vd; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vd_p3 = vd; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state 
+ |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_s(fd, vd_p0, vd_p1, vd_p2, vd_p3); /* write to fd */ + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + + } + pr_debug("SIMD finished.. 
si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_mul_add_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_D(D_ZERO); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD D-floating mul-add emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_d(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + vd = vd_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + vd = vd_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + vd = vd_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", 
DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + vd = vd_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + + switch (func) { + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + + goto pack_d; + default: + goto bad_insn; + } + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vd_p0 = vd; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vd_p1 = vd; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vd_p2 = vd; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vd_p3 = vd; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fd, vd_p0, vd_p1, vd_p2, vd_p3); /* write to fd */ + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +void read_fp_reg_s(unsigned long reg, unsigned long *val_p0, + unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3) +{ + unsigned long fp[2]; + + sw64_read_simd_fp_m_s(reg, fp); + *val_p0 = fp[0] & 0xffffffffUL; + *val_p1 = (fp[0] >> 32) & 0xffffffffUL; + *val_p2 = fp[1] & 0xffffffffUL; + *val_p3 = (fp[1] >> 32) & 0xffffffffUL; +} + +void read_fp_reg_d(unsigned long reg, unsigned long *val_p0, + unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3) +{ + unsigned long fp[4]; + + sw64_read_simd_fp_m_d(reg, fp); + *val_p0 = fp[0]; + *val_p1 = fp[1]; + *val_p2 = fp[2]; + *val_p3 = fp[3]; +} + +void write_fp_reg_s(unsigned long reg, unsigned long val_p0, + unsigned long val_p1, unsigned long val_p2, unsigned long val_p3) +{ + unsigned long fp[2]; + + fp[0] = ((val_p1 & 0xffffffffUL) << 32) | (val_p0 & 0xffffffffUL); + fp[1] = ((val_p3 & 0xffffffffUL) << 32) | (val_p2 & 0xffffffffUL); + sw64_write_simd_fp_reg_s(reg, fp[0], fp[1]); +} + +void write_fp_reg_d(unsigned long reg, unsigned long val_p0, + unsigned long val_p1, unsigned long val_p2, unsigned long val_p3) +{ + sw64_write_simd_fp_reg_d(reg, val_p0, val_p1, val_p2, val_p3); +} diff --git a/arch/sw_64/math-emu/qrnnd.S b/arch/sw_64/math-emu/qrnnd.S new file mode 100644 index 000000000000..1e732f2e68c0 --- /dev/null +++ b/arch/sw_64/math-emu/qrnnd.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + # __udiv_qrnnd + # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc. + + # This file is part of GCC. 
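+
+	# __udiv_qrnnd(rem_ptr, n1, n0, d) divides the two-word value
+	# (n1:n0) by d, returning the quotient and storing the remainder
+	# through rem_ptr.  Judging from the code below, the arguments
+	# arrive in $16..$19 and the quotient is returned in $0.  A rough
+	# C equivalent of the shift-and-subtract loop (illustrative sketch
+	# of the small-divisor path only, assuming n1 < d and the top bit
+	# of d clear):
+	#
+	#	unsigned long q = 0, r = n1;
+	#	int i;
+	#
+	#	for (i = 0; i < 64; i++) {
+	#		r = (r << 1) | (n0 >> 63);  /* shift in next dividend bit */
+	#		n0 <<= 1;
+	#		q <<= 1;
+	#		if (r >= d) {               /* divisor fits: subtract it */
+	#			r -= d;
+	#			q |= 1;
+	#		}
+	#	}
+	#	*rem_ptr = r;
+	#	return q;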
+ + .set noreorder + .set noat + + .text + + .globl __udiv_qrnnd + .ent __udiv_qrnnd +__udiv_qrnnd: + .frame $30, 0, $26, 0 + .prologue 0 + + # ldiq $2,16 + ldi $2, 16($31) + blt $19, $largedivisor + +$loop1: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop1 + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$largedivisor: + and $18, 1, $4 + + srl $18, 1, $18 + sll $17, 63, $3 + or $3, $18, $18 + srl $17, 1, $17 + + and $19, 1, $6 + srl $19, 1, $5 + addl $5, $6, $5 + +$loop2: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop2 + + addl $17, $17, $17 + addl $4, $17, $17 + bne $6, $Odd + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$Odd: + # q' in $18. 
r' in $17
+	addl	$17, $18, $17
+
+	cmpult	$17, $18, $3	# $3 := carry from addl
+	subl	$17, $19, $at
+	addl	$18, $3, $18
+	selne	$3, $at, $17, $17
+
+	cmpult	$17, $19, $3
+	addl	$18, 1, $at
+	seleq	$3, $at, $18, $18
+	subl	$17, $19, $at
+	seleq	$3, $at, $17, $17
+
+	stl	$17, 0($16)
+	bis	$31, $18, $0
+	ret	$31, ($26), 1
+
+	.end	__udiv_qrnnd
diff --git a/arch/sw_64/math-emu/sfp-util.h b/arch/sw_64/math-emu/sfp-util.h
new file mode 100644
index 000000000000..0769c0223e0d
--- /dev/null
+++ b/arch/sw_64/math-emu/sfp-util.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SW64_MATH_EMU_SFP_UTIL_H
+#define _SW64_MATH_EMU_SFP_UTIL_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/fpu.h>
+
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+	((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al)))
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+	((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl)))
+
+#define umul_ppmm(wh, wl, u, v) \
+	__asm__ ("mull %2, %3, %1; umulh %2, %3, %0" \
+		: "=r" ((UDItype)(wh)), \
+		  "=&r" ((UDItype)(wl)) \
+		: "r" ((UDItype)(u)), \
+		  "r" ((UDItype)(v)))
+
+#define udiv_qrnnd(q, r, n1, n0, d) \
+do { unsigned long __r; \
+	(q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
+	(r) = __r; \
+} while (0)
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long,
+				  unsigned long, unsigned long);
+
+#define UDIV_NEEDS_NORMALIZATION 1
+
+#define abort() goto bad_insn
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN -1
+#endif
+#define __BYTE_ORDER __LITTLE_ENDIAN
+
+#endif /* _SW64_MATH_EMU_SFP_UTIL_H */
--
Gitee

From 34473621c6b924baface67ec7e6e2377d4fd862b Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:15 +0800
Subject: [PATCH 302/953] anolis: sw64: add basic IO support

ANBZ: #4688

Add basic IO support for SW64.
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/early_ioremap.h | 29 ++ arch/sw_64/include/asm/io.h | 288 ++++++++++++++++ arch/sw_64/include/asm/sw64io.h | 109 ++++++ arch/sw_64/include/asm/uncore_io_junzhang.h | 201 +++++++++++ .../include/asm/uncore_io_ops_junzhang.h | 39 +++ .../sw_64/include/asm/uncore_io_ops_xuelang.h | 65 ++++ arch/sw_64/include/asm/uncore_io_xuelang.h | 323 ++++++++++++++++++ 7 files changed, 1054 insertions(+) create mode 100644 arch/sw_64/include/asm/early_ioremap.h create mode 100644 arch/sw_64/include/asm/io.h create mode 100644 arch/sw_64/include/asm/sw64io.h create mode 100644 arch/sw_64/include/asm/uncore_io_junzhang.h create mode 100644 arch/sw_64/include/asm/uncore_io_ops_junzhang.h create mode 100644 arch/sw_64/include/asm/uncore_io_ops_xuelang.h create mode 100644 arch/sw_64/include/asm/uncore_io_xuelang.h diff --git a/arch/sw_64/include/asm/early_ioremap.h b/arch/sw_64/include/asm/early_ioremap.h new file mode 100644 index 000000000000..172b96a401cb --- /dev/null +++ b/arch/sw_64/include/asm/early_ioremap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EARLY_IOREMAP_H +#define _ASM_SW64_EARLY_IOREMAP_H + +#include +#include + +static inline void __iomem * +early_ioremap(unsigned long phys_addr, unsigned long size) +{ + unsigned long y = 0; + + if (phys_addr >= __START_KERNEL_map) { + y = (unsigned long) phys_to_virt(__pa(phys_addr)); + } else { + y = phys_addr; + y |= PAGE_OFFSET; + } + + return (void __iomem *) y; +} +#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) + +static inline void early_iounmap(volatile void __iomem *addr, unsigned long size) +{ +} +#define early_memunmap(addr, size) early_iounmap(addr, size) + +#endif /* _ASM_SW64_EARLY_IOREMAP_H */ diff --git a/arch/sw_64/include/asm/io.h b/arch/sw_64/include/asm/io.h new file mode 100644 index 000000000000..2b045be5257e --- /dev/null +++ b/arch/sw_64/include/asm/io.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IO_H +#define _ASM_SW64_IO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* The generic header contains only prototypes. Including it ensures that + * the implementation we have here matches that interface. + */ +#include + +/* We don't use IO slowdowns on the sw64, but.. */ +#define __SLOW_DOWN_IO do { } while (0) +#define SLOW_DOWN_IO do { } while (0) + +#define page_to_phys(page) page_to_pa(page) + +/* Maximum PIO space address supported? */ +#define IO_SPACE_LIMIT 0xffffffffffffffff + +/* + * Generic IO read/write. These perform native-endian accesses. 
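+ * Each of the __raw accessors below is a single load or store
+ * instruction (ldbu/ldhu/ldw/ldl and stb/sth/stw/stl); no ordering is
+ * implied at this level, the barriers are added by the non-relaxed
+ * readX()/writeX() wrappers further down.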
+ */ + +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("stb %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("sth %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("stw %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) +{ + asm volatile("stl %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + + asm volatile("ldbu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + + asm volatile("ldhu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + + asm volatile("ldw %0, 0(%1)\n" + "zapnot %0, 0xf, %0\n" + : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + u64 val; + + asm volatile("ldl %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +/* IO barriers */ + +#define __iormb() rmb() +#define __iowmb() wmb() +#define mmiowb() do { } while (0) + +/* + * Relaxed I/O memory access primitives. These follow the Device memory + * ordering rules but do not guarantee any ordering relative to Normal memory + * accesses. + */ +#define readb_relaxed(c) __raw_readb(c) +#define readw_relaxed(c) __raw_readw(c) +#define readl_relaxed(c) __raw_readl(c) +#define readq_relaxed(c) __raw_readq(c) + +#define writeb_relaxed(v, c) __raw_writeb((v), (c)) +#define writew_relaxed(v, c) __raw_writew((v), (c)) +#define writel_relaxed(v, c) __raw_writel((v), (c)) +#define writeq_relaxed(v, c) __raw_writeq((v), (c)) + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. Writes are ordered relative to any prior + * Normal memory access. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) + +#define writeb(v, c) ({ __iowmb(); writeb_relaxed((v), (c)); }) +#define writew(v, c) ({ __iowmb(); writew_relaxed((v), (c)); }) +#define writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) +#define writeq(v, c) ({ __iowmb(); writeq_relaxed((v), (c)); }) +/* + * We always have external versions of these routines. 
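+ *
+ * SW64 has no dedicated port-I/O instructions, so these are implemented
+ * out of line on top of the memory-mapped legacy I/O windows rather
+ * than as inline asm.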
+ */ +extern u8 inb(unsigned long port); +extern u16 inw(unsigned long port); +extern u32 inl(unsigned long port); +extern void outb(u8 b, unsigned long port); +extern void outw(u16 b, unsigned long port); +extern void outl(u32 b, unsigned long port); +#define inb inb +#define inw inw +#define inl inl +#define outb outb +#define outw outw +#define outl outl + +static inline void __iomem *__ioremap(phys_addr_t addr, size_t size, + pgprot_t prot) +{ + unsigned long tmp = addr | PAGE_OFFSET; + + return (void __iomem *)(tmp); +} + +#define ioremap(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_nocache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_cache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_uc ioremap_nocache + +#define ioport_map ioport_map +#define ioport_unmap ioport_unmap + +static inline void __iounmap(volatile void __iomem *addr) +{ +} + +#define iounmap __iounmap + +#define ioread16be(p) be16_to_cpu(ioread16(p)) +#define ioread32be(p) be32_to_cpu(ioread32(p)) +#define iowrite16be(v, p) iowrite16(cpu_to_be16(v), (p)) +#define iowrite32be(v, p) iowrite32(cpu_to_be32(v), (p)) + +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl + + +/* + * String version of IO memory access ops: + */ +#define memcpy_fromio memcpy_fromio +extern void memcpy_fromio(void *buffer, const volatile void __iomem *addr, long len); + +#define memcpy_toio memcpy_toio +extern void memcpy_toio(volatile void __iomem *addr, const void *buffer, long len); + +extern void _memset_c_io(volatile void __iomem *addr, unsigned long c, long len); + +#define memset_io memset_io +static inline void memset_io(volatile void __iomem *addr, u8 c, long len) +{ + _memset_c_io(addr, 0x0101010101010101UL * c, len); +} + +static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) +{ + _memset_c_io(addr, 0x0001000100010001UL * c, len); +} + +/* + * String versions of in/out ops: + */ +extern void insb(unsigned long port, void *dst, unsigned long count); +extern void insw(unsigned long port, void *dst, unsigned long count); +extern void insl(unsigned long port, void *dst, unsigned long count); +extern void outsb(unsigned long port, const void *src, unsigned long count); +extern void outsw(unsigned long port, const void *src, unsigned long count); +extern void outsl(unsigned long port, const void *src, unsigned long count); + +#define insb insb +#define insw insw +#define insl insl +#define outsb outsb +#define outsw outsw +#define outsl outsl + +/* + * These defines will override the defaults when doing RTC queries + */ + +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 0 + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +/* + * These get provided from since sw64 does not + * select GENERIC_IOMAP. 
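+ *
+ * Defining each name to itself marks the routine as already provided,
+ * so the generic header's fallback definitions are suppressed.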
+ */ +#define ioread8 ioread8 +#define ioread16 ioread16 +#define ioread32 ioread32 +#define ioread64 ioread64 +#define iowrite8 iowrite8 +#define iowrite16 iowrite16 +#define iowrite32 iowrite32 +#define iowrite64 iowrite64 +#define ioread64be ioread64be +#define iowrite64be iowrite64be +#define ioread8_rep ioread8_rep +#define ioread16_rep ioread16_rep +#define ioread32_rep ioread32_rep +#define iowrite8_rep iowrite8_rep +#define iowrite16_rep iowrite16_rep +#define iowrite32_rep iowrite32_rep +#define pci_iounmap pci_iounmap + +#include + +/* + * Change addresses as seen by the kernel (virtual) to addresses as + * seen by a device (bus), and vice versa. + * + * Note that this only works for a limited range of kernel addresses, + * and very well may not span all memory. Consider this interface + * deprecated in favour of the DMA-mapping API. + */ +static inline unsigned long __deprecated virt_to_bus(void *address) +{ + return virt_to_phys(address); +} +#define isa_virt_to_bus virt_to_bus + +static inline void * __deprecated bus_to_virt(unsigned long address) +{ + void *virt; + + /* This check is a sanity check but also ensures that bus address 0 + * maps to virtual address 0 which is useful to detect null pointers + * (the NCR driver is much simpler if NULL pointers are preserved). + */ + virt = phys_to_virt(address); + return (long)address <= 0 ? NULL : virt; +} +#define isa_bus_to_virt bus_to_virt + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_IO_H */ diff --git a/arch/sw_64/include/asm/sw64io.h b/arch/sw_64/include/asm/sw64io.h new file mode 100644 index 000000000000..d52cd8cc86bf --- /dev/null +++ b/arch/sw_64/include/asm/sw64io.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64IO_H +#define _ASM_SW64_SW64IO_H + +#include +#include + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#define MK_RC_CFG(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG) +#define MK_PIU_IOR0(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE) +#define MK_PIU_IOR1(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE) + +static inline unsigned int +read_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + return readl(addr); +} + +static inline void +write_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset, unsigned int data) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + writel(data, addr); +} + +static inline unsigned long +read_piu_ior0(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior0(unsigned long node, unsigned long rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +read_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +sw64_io_read(unsigned long node, unsigned long reg) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | 
reg); + return readq(addr); +} + +static inline void +sw64_io_write(unsigned long node, unsigned long reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | reg); + writeq(data, addr); +} + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#endif /* _ASM_SW64_SW64IO_H */ diff --git a/arch/sw_64/include/asm/uncore_io_junzhang.h b/arch/sw_64/include/asm/uncore_io_junzhang.h new file mode 100644 index 000000000000..37cfe1fd6807 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_junzhang.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_JUNZHANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_VT_MEMIO (0xc0000000UL) +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 +#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | SPBU_BASE | 0x40000UL) + +/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0xfff00000UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define SPBU_BASE (0x3UL << 36) +#define INTPU_BASE (0x3aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) + +#define PIUCONFIG0_INIT_VAL 0x38016 + +/*-----------------------addr-----------------------*/ +/* INTPU REG */ +enum { + DEVINT_MISS = INTPU_BASE | 0x100UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + SPBUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR0 = INTPU_BASE | 0x1080UL, + IINT_MISS_VECTOR1 = INTPU_BASE | 0x1100UL, + IINT_MISS = INTPU_BASE | 0x1180UL, + IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN 
= INTPU_BASE | 0x1500UL, + ADR_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* SPBU CSR */ +enum { + SMP_INFO = SPBU_BASE | 0x80UL, + INIT_CTL = SPBU_BASE | 0x680UL, + CORE_ONLINE = SPBU_BASE | 0x780UL, + DLI_RLTD_FAULT = SPBU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = SPBU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = SPBU_BASE | 0xa80UL, + CFG_INFO = SPBU_BASE | 0x1100UL, + IO_START = SPBU_BASE | 0x1300UL, + I2C0_SRST_L = SPBU_BASE | 0x1900UL, + I2C1_SRST_L = SPBU_BASE | 0x1980UL, + I2C2_SRST_L = SPBU_BASE | 0x1a00UL, + MCU_DVC_INT = SPBU_BASE | 0x3000UL, + MCU_DVC_INT_EN = SPBU_BASE | 0x3080UL, + SI_FAULT_STAT = SPBU_BASE | 0x3100UL, + SI_FAULT_STAT_EN = SPBU_BASE | 0x3180UL, + SI_FAULT_INT_EN = SPBU_BASE | 0x3200UL, + ADR_CTL = SPBU_BASE | 0x3600UL, + MC_ONLINE = SPBU_BASE | 0x3780UL, + PIU_TOP0_CONFIG = SPBU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = SPBU_BASE | 0x4d00UL, + SOFT_INFO0 = SPBU_BASE | 0xa000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h new file mode 100644 index 000000000000..95a3b5c80531 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long cfg_info; + + cfg_info = sw64_io_read(0, CFG_INFO); + cfg_info = (cfg_info >> 33) & 0x3; + cpus = 1 << cfg_info; + + return cpus; +} + 
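+/*
+ * Decoding note (inferred from the shifts and masks used below):
+ * CFG_INFO bits [18:3] give the total memory size in units of 256MB
+ * (1UL << 28), which __get_node_mem() then splits evenly across the
+ * chips counted by __get_cpu_nums().
+ */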
+static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long total_mem; + + total_mem = sw64_io_read(node, CFG_INFO) >> 3; + total_mem = (total_mem & 0xffff) << 28; + node_mem = total_mem / __get_cpu_nums(); + + return node_mem; +} + +#define __io_read_longtime(node) (0UL) +#define __io_write_longtime(node, data) do { } while (0) +#define __io_write_longtime_start_en(node, data) do { } while (0) + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, FAULT_INT_CONFIG, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_xuelang.h b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h new file mode 100644 index 000000000000..9336e473211d --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_XUELANG_H +#define _ASM_SW64_UNCORE_IO_OPS_XUELANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long trkmode; + + trkmode = sw64_io_read(0, TRKMODE); + trkmode = (trkmode >> 6) & 0x3; + cpus = 1 << trkmode; + + return cpus; +} + +static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long mc_config; + unsigned long mc_online; + unsigned long mc_cap; + unsigned long mc_num; + + mc_config = sw64_io_read(node, MC_CAP_CFG) & 0xf; + mc_cap = (1UL << mc_config) << 28; + mc_online = sw64_io_read(node, MC_ONLINE) & 0xff; + mc_num = __kernel_ctpop(mc_online); + node_mem = mc_cap * mc_num; + + return node_mem; +} + +static inline unsigned long +__io_read_longtime(int node) +{ + return sw64_io_read(node, LONG_TIME); +} + +static inline void +__io_write_longtime(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME, data); +} + +static inline void +__io_write_longtime_start_en(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME_START_EN, data); +} + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, DUAL_CG0_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG1_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG2_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG3_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG4_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG5_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG6_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG7_FAULT_INTEN, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_XUELANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_xuelang.h b/arch/sw_64/include/asm/uncore_io_xuelang.h new file mode 100644 index 000000000000..aeaadec5be16 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_xuelang.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_XUELANG_H +#define _ASM_SW64_UNCORE_IO_XUELANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 +#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define 
VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL) + +/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0x91abc0UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define MCU_BASE (0x3UL << 36) +#define CAB0_BASE (0x10UL << 32) +#define INTPU_BASE (0x2aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define DLIA_BASE (0x20UL << 32) +#define DLIB_BASE (0x21UL << 32) +#define DLIC_BASE (0x22UL << 32) +#define DLI_PHY_CTL (0x10UL << 24) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) + +#define PIUCONFIG0_INIT_VAL 0x38056 + +/*-----------------------addr-----------------------*/ +/* CAB0 REG */ +enum { + TRKMODE = CAB0_BASE | 0x80UL, +}; + +/* DLIA IO REG */ +enum { + DLIA_BWTEST_PAT = DLIA_BASE | 0x100980UL, + DLIA_PHY_VLDLANE = DLIA_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIB IO REG */ +enum { + DLIB_BWTEST_PAT = DLIB_BASE | 0x100980UL, + DLIB_PHY_VLDLANE = DLIB_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIC IO REG */ +enum { + DLIC_BWTEST_PAT = DLIC_BASE | 0x100980UL, + DLIC_PHY_VLDLANE = DLIC_BASE | DLI_PHY_CTL | 0x300UL, +}; +/* INTPU REG */ +enum { + LCORE_SLEEPY = INTPU_BASE | 0x0UL, + LCORE_SLEEP = INTPU_BASE | 0x80UL, + DEVICE_MISS = INTPU_BASE | 0x100UL, + LONG_TIME = INTPU_BASE | 0x180UL, + LCORE_IDLE = INTPU_BASE | 0x280UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + MCUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR = INTPU_BASE | 0x1100UL, + IINT_MIS = INTPU_BASE | 0x1180UL, + IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN = INTPU_BASE | 0x1500UL, + NMI_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* MC IO REG */ +enum { + CFGDEC = 0x400UL, + CFGCR = 0x480UL, + INIT_CTRL = 0x580UL, + CFGERR = 0xd00UL, + FSMSTAT = 0xe00UL, + PUB_INTERFACE = 0x1000UL, + POWERCTRL = 0x1080UL, + CFGMR0 = 0x1280UL, + CFGMR1 = 0x1300UL, + CFGMR2 = 0x1380UL, + CFGMR3 = 0x1400UL, + PERF_CTRL = 0x1480UL, + MC_PERF0 = 0x1500UL, + CFGMR4 = 0x1800UL, + CFGMR5 = 0x1880UL, + CFGMR6 = 0x1900UL, + MC_CTRL = 0x1c00UL, + MEMSERR_P = 0x1c80UL, + MEMSERR = 0x1d00UL, +}; + +/* MCU CSR */ +enum { + SMP_INFO = MCU_BASE | 0x80UL, + INIT_CTL = MCU_BASE | 0x680UL, + MT_STATE = MCU_BASE | 0x700UL, + CORE_ONLINE = MCU_BASE | 0x780UL, + 
MT_INT = MCU_BASE | 0x800UL, + MT_INT_END = MCU_BASE | 0x880UL, + CPU_ID = MCU_BASE | 0x900UL, + DLI_RLTD_FAULT = MCU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = MCU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = MCU_BASE | 0xa80UL, + FAULT_SOURCE = MCU_BASE | 0xb00UL, + INT_SOURCE = MCU_BASE | 0xb80UL, + CORE_STATE0 = MCU_BASE | 0xc00UL, + CORE_STATE1 = MCU_BASE | 0xc80UL, + CFG_INFO = MCU_BASE | 0x1100UL, + MC_CAP_CFG = MCU_BASE | 0x1180UL, + IO_START = MCU_BASE | 0x1300UL, + UART_ONLINE = MCU_BASE | 0x1780UL, + I2C0_SRST_L = MCU_BASE | 0x1900UL, + I2C1_SRST_L = MCU_BASE | 0x1980UL, + I2C2_SRST_L = MCU_BASE | 0x1a00UL, + MCU_DVC_INT = MCU_BASE | 0x3000UL, + MCU_DVC_INT_EN = MCU_BASE | 0x3080UL, + SI_FAULT_STAT = MCU_BASE | 0x3100UL, + SI_FAULT_EN = MCU_BASE | 0x3180UL, + SI_FAULT_INT_EN = MCU_BASE | 0x3200UL, + FIFO_SYNSEL = MCU_BASE | 0x3400UL, + CPU_INFO = MCU_BASE | 0x3480UL, + WAKEUP_CTL = MCU_BASE | 0x3500UL, + FLAGREG = MCU_BASE | 0x3580UL, + NMI_CTL = MCU_BASE | 0x3600UL, + PIUPLL_CNT = MCU_BASE | 0x3680UL, + MC_ONLINE = MCU_BASE | 0x3780UL, + FLASH_INFO = MCU_BASE | 0x3800UL, + RTPUSROMCNT = MCU_BASE | 0x3880UL, + CLU_LV1_SEL = MCU_BASE | 0x3a80UL, + CLU_LV2_SEL = MCU_BASE | 0x3b00UL, + CLK_CTL = MCU_BASE | 0x3b80UL, + SLEEP_WAIT_CNT = MCU_BASE | 0x4980UL, + CHIP_ID = MCU_BASE | 0x4b00UL, + PIU_TOP0_CONFIG = MCU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = MCU_BASE | 0x4d00UL, + LVDS_CTL = MCU_BASE | 0x4d80UL, + LPC_DMAREQ_TOTH = MCU_BASE | 0x5100UL, + DLI_ONLINE = MCU_BASE | 0x6180UL, + LPC_DMAREQ_HADR = MCU_BASE | 0x6200UL, + PIU_PHY_SRST_H = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE0 = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE1 = MCU_BASE | 0x6300UL, + CLK_SEL_PCIE2 = MCU_BASE | 0x6380UL, + CLK_SEL_PCIE3 = MCU_BASE | 0x6400UL, + CLK_SEL_PCIE4 = MCU_BASE | 0x6480UL, + CLK_SEL_PCIE5 = MCU_BASE | 0x6500UL, + PERST_N_PCIE0 = MCU_BASE | 0x6680UL, + PERST_N_PCIE1 = MCU_BASE | 0x6700UL, + PERST_N_PCIE2 = MCU_BASE | 0x6780UL, + PERST_N_PCIE3 = MCU_BASE | 0x6800UL, + PERST_N_PCIE4 = MCU_BASE | 0x6880UL, + PERST_N_PCIE5 = MCU_BASE | 0x6900UL, + BUTTON_RST_N_PCIE0 = MCU_BASE | 0x6a80UL, + BUTTON_RST_N_PCIE1 = MCU_BASE | 0x6b00UL, + BUTTON_RST_N_PCIE2 = MCU_BASE | 0x6b80UL, + BUTTON_RST_N_PCIE3 = MCU_BASE | 0x6c00UL, + BUTTON_RST_N_PCIE4 = MCU_BASE | 0x6c80UL, + BUTTON_RST_N_PCIE5 = MCU_BASE | 0x6d00UL, + DUAL_CG0_FAULT = MCU_BASE | 0x6d80UL, + DUAL_CG1_FAULT = MCU_BASE | 0x6e00UL, + DUAL_CG2_FAULT = MCU_BASE | 0x6e80UL, + DUAL_CG3_FAULT = MCU_BASE | 0x6f00UL, + DUAL_CG4_FAULT = MCU_BASE | 0x6f80UL, + DUAL_CG5_FAULT = MCU_BASE | 0x7000UL, + DUAL_CG6_FAULT = MCU_BASE | 0x7080UL, + DUAL_CG7_FAULT = MCU_BASE | 0x7100UL, + DUAL_CG0_FAULT_EN = MCU_BASE | 0x7180UL, + DUAL_CG1_FAULT_EN = MCU_BASE | 0x7200UL, + DUAL_CG2_FAULT_EN = MCU_BASE | 0x7280UL, + DUAL_CG3_FAULT_EN = MCU_BASE | 0x7300UL, + DUAL_CG4_FAULT_EN = MCU_BASE | 0x7380UL, + DUAL_CG5_FAULT_EN = MCU_BASE | 0x7400UL, + DUAL_CG6_FAULT_EN = MCU_BASE | 0x7480UL, + DUAL_CG7_FAULT_EN = MCU_BASE | 0x7500UL, + DUAL_CG0_FAULT_INTEN = MCU_BASE | 0x7580UL, + DUAL_CG1_FAULT_INTEN = MCU_BASE | 0x7600UL, + DUAL_CG2_FAULT_INTEN = MCU_BASE | 0x7680UL, + DUAL_CG3_FAULT_INTEN = MCU_BASE | 0x7700UL, + DUAL_CG4_FAULT_INTEN = MCU_BASE | 0x7780UL, + DUAL_CG5_FAULT_INTEN = MCU_BASE | 0x7800UL, + DUAL_CG6_FAULT_INTEN = MCU_BASE | 0x7880UL, + DUAL_CG7_FAULT_INTEN = MCU_BASE | 0x7900UL, + SOFT_INFO0 = MCU_BASE | 0x7f00UL, + LONG_TIME_START_EN = MCU_BASE | 0x9000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + 
EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_XUELANG_H */ -- Gitee From 6c8d5d12af16c5c9400200e7b524423fbb95d1b2 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:27 +0800 Subject: [PATCH 303/953] anolis: sw64: add module support ANBZ: #4688 Add kernel module support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/module.h | 17 ++ arch/sw_64/kernel/module.c | 279 ++++++++++++++++++++++++++++++++ 2 files changed, 296 insertions(+) create mode 100644 arch/sw_64/include/asm/module.h create mode 100644 arch/sw_64/kernel/module.c diff --git a/arch/sw_64/include/asm/module.h b/arch/sw_64/include/asm/module.h new file mode 100644 index 000000000000..d1663aab4097 --- /dev/null +++ b/arch/sw_64/include/asm/module.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MODULE_H +#define _ASM_SW64_MODULE_H + +#include + +struct mod_arch_specific { + unsigned int gotsecindex; +}; + +#define ARCH_SHF_SMALL SHF_SW64_GPREL + +#ifdef MODULE +asm(".section .got, \"aw\", @progbits; .align 3; .previous"); +#endif + +#endif /* _ASM_SW64_MODULE_H */ diff --git a/arch/sw_64/kernel/module.c b/arch/sw_64/kernel/module.c new file mode 100644 index 000000000000..67264e3644a7 --- /dev/null +++ b/arch/sw_64/kernel/module.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#define DEBUGP(fmt...) + +/* Allocate the GOT at the end of the core sections. 
*/ + +struct got_entry { + struct got_entry *next; + Elf64_Sxword r_addend; + int got_offset; +}; + +static inline void +process_reloc_for_got(Elf64_Rela *rela, + struct got_entry *chains, Elf64_Xword *poffset) +{ + unsigned long r_sym = ELF64_R_SYM(rela->r_info); + unsigned long r_type = ELF64_R_TYPE(rela->r_info); + Elf64_Sxword r_addend = rela->r_addend; + struct got_entry *g; + + if (r_type != R_SW64_LITERAL) + return; + + for (g = chains + r_sym; g ; g = g->next) + if (g->r_addend == r_addend) { + if (g->got_offset == 0) { + g->got_offset = *poffset; + *poffset += 8; + } + goto found_entry; + } + + g = kmalloc(sizeof(*g), GFP_KERNEL); + g->next = chains[r_sym].next; + g->r_addend = r_addend; + g->got_offset = *poffset; + *poffset += 8; + chains[r_sym].next = g; + + found_entry: + /* + * Trick: most of the ELF64_R_TYPE field is unused. There are + * 42 valid relocation types, and a 32-bit field. Co-opt the + * bits above 256 to store the got offset for this reloc. + */ + rela->r_info |= g->got_offset << 8; +} + +int +module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, + char *secstrings, struct module *me) +{ + struct got_entry *chains; + Elf64_Rela *rela; + Elf64_Shdr *esechdrs, *symtab, *s, *got; + unsigned long nsyms, nrela, i; + + esechdrs = sechdrs + hdr->e_shnum; + symtab = got = NULL; + + /* Find out how large the symbol table is. Allocate one got_entry + * head per symbol. Normally this will be enough, but not always. + * We'll chain different offsets for the symbol down each head. + */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_SYMTAB) + symtab = s; + else if (!strcmp(".got", secstrings + s->sh_name)) { + got = s; + me->arch.gotsecindex = s - sechdrs; + } + + if (!symtab) { + pr_err("module %s: no symbol table\n", me->name); + return -ENOEXEC; + } + if (!got) { + pr_err("module %s: no got section\n", me->name); + return -ENOEXEC; + } + + nsyms = symtab->sh_size / sizeof(Elf64_Sym); + chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL); + if (!chains) { + pr_err("module %s: no memory for symbol chain buffer\n", + me->name); + return -ENOMEM; + } + + got->sh_size = 0; + got->sh_addralign = 8; + got->sh_type = SHT_NOBITS; + + /* Examine all LITERAL relocations to find out what GOT entries + * are required. This sizes the GOT section as well. + */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_RELA) { + nrela = s->sh_size / sizeof(Elf64_Rela); + rela = (void *)hdr + s->sh_offset; + for (i = 0; i < nrela; ++i) + process_reloc_for_got(rela+i, chains, + &got->sh_size); + } + + /* Free the memory we allocated. */ + for (i = 0; i < nsyms; ++i) { + struct got_entry *g, *n; + + for (g = chains[i].next; g ; g = n) { + n = g->next; + kfree(g); + } + } + kfree(chains); + + return 0; +} + +int +apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr; + unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela); + Elf64_Sym *symtab, *sym; + void *base, *location; + unsigned long got, gp; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + + base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; + symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; + + /* The small sections were sorted to the end of the segment. + * The following should definitely cover them. 
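+ *
+ * gp is biased to the middle of the GOT (got + 0x8000) so that the
+ * signed 16-bit displacements used by the relocations below can reach
+ * the whole 64KB window, i.e. offsets in [-0x8000, 0x7fff] from gp.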
+ */ + got = sechdrs[me->arch.gotsecindex].sh_addr; + gp = got + 0x8000; + + for (i = 0; i < n; i++) { + unsigned long r_sym = ELF64_R_SYM(rela[i].r_info); + unsigned long r_type = ELF64_R_TYPE(rela[i].r_info); + unsigned long r_got_offset = r_type >> 8; + unsigned long value, hi, lo; + + r_type &= 0xff; + + /* This is where to make the change. */ + location = base + rela[i].r_offset; + + /* This is the symbol it is referring to. Note that all + * unresolved symbols have been resolved. + */ + sym = symtab + r_sym; + value = sym->st_value + rela[i].r_addend; + + switch (r_type) { + case R_SW64_NONE: + break; + case R_SW64_REFLONG: + *(u32 *)location = value; + break; + case R_SW64_REFQUAD: + /* BUG() can produce misaligned relocations. */ + ((u32 *)location)[0] = value; + ((u32 *)location)[1] = value >> 32; + break; + case R_SW64_GPREL32: + value -= gp; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_LITERAL: + hi = got + r_got_offset; + lo = hi - gp; + if ((short)lo != lo) { + unsigned long over_offset = (lo + 0x8000) >> 16; + + if ((over_offset & 0x8000) == 0) { + *(u16 *)(location - 0x4) = over_offset; + *(u16 *)location = lo - ((over_offset << 16) + gp); + *(u64 *)hi = value; + } else { + goto reloc_overflow; + } + } else { + *(u16 *)location = lo; + *(u64 *)hi = value; + } + break; + case R_SW64_LITERAL_GOT: + /* empty for now need to fill */ + break; + case R_SW64_LITUSE: + break; + case R_SW64_GPDISP: + value = gp - (u64)location; + lo = (short)value; + hi = (int)(value - lo); + if (hi + lo != value) + goto reloc_overflow; + *(u16 *)location = hi >> 16; + *(u16 *)(location + rela[i].r_addend) = lo; + break; + case R_SW64_BRSGP: + /* + * BRSGP is only allowed to bind to local symbols. + * If the section is undef, this means that the + * value was resolved from somewhere else. + */ + if (sym->st_shndx == SHN_UNDEF) + goto reloc_overflow; + if ((sym->st_other & STO_SW64_STD_GPLOAD) == + STO_SW64_STD_GPLOAD) + /* Omit the prologue. 
*/ + value += 8; + fallthrough; + case R_SW64_BRADDR: + value -= (u64)location + 4; + if (value & 3) + goto reloc_overflow; + value = (long)value >> 2; + if (value + (1<<21) >= 1<<22) + goto reloc_overflow; + value &= 0x1fffff; + value |= *(u32 *)location & ~0x1fffff; + *(u32 *)location = value; + break; + case R_SW64_HINT: + break; + case R_SW64_SREL32: + value -= (u64)location; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_SREL64: + value -= (u64)location; + *(u64 *)location = value; + break; + case R_SW64_GPRELHIGH: + value = (long)(value - gp + 0x8000) >> 16; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + case R_SW64_GPRELLOW: + value -= gp; + *(u16 *)location = value; + break; + case R_SW64_GPREL16: + value -= gp; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + default: + pr_err("module %s: Unknown relocation: %lu\n", me->name, r_type); + return -ENOEXEC; +reloc_overflow: + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) + pr_err("module %s: Relocation (type %lu) overflow vs section %d\n", + me->name, r_type, sym->st_shndx); + else + pr_err("module %s: Relocation (type %lu) overflow vs %s\n", + me->name, r_type, strtab + sym->st_name); + return -ENOEXEC; + } + } + + return 0; +} -- Gitee From 91ae6aa5c2354137e0d648ad78287ecda2ccbdbe Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:33 +0800 Subject: [PATCH 304/953] anolis: sw64: add some common routines ANBZ: #4688 Add some other common routines for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/debug.h | 38 ++++ arch/sw_64/include/asm/extable.h | 47 +++++ arch/sw_64/include/asm/futex.h | 168 +++++++++++++++++ arch/sw_64/kernel/asm-offsets.c | 240 ++++++++++++++++++++++++ arch/sw_64/kernel/audit.c | 61 ++++++ arch/sw_64/kernel/early_printk.c | 183 ++++++++++++++++++ arch/sw_64/kernel/entry.S | 306 +++++++++++++++++++++++++++++++ 7 files changed, 1043 insertions(+) create mode 100644 arch/sw_64/include/asm/debug.h create mode 100644 arch/sw_64/include/asm/extable.h create mode 100644 arch/sw_64/include/asm/futex.h create mode 100644 arch/sw_64/kernel/asm-offsets.c create mode 100644 arch/sw_64/kernel/audit.c create mode 100644 arch/sw_64/kernel/early_printk.c create mode 100644 arch/sw_64/kernel/entry.S diff --git a/arch/sw_64/include/asm/debug.h b/arch/sw_64/include/asm/debug.h new file mode 100644 index 000000000000..8db5a8bb9ab7 --- /dev/null +++ b/arch/sw_64/include/asm/debug.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Mao Minkai + * Author: Mao Minkai + * + * This code is taken from arch/mips/include/asm/debug.h + * Copyright (C) 2015 Imagination Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _ASM_SW64_DEBUG_H +#define _ASM_SW64_DEBUG_H + +#include + +/* + * sw64_debugfs_dir corresponds to the "sw_64" directory at the top level + * of the DebugFS hierarchy. SW64-specific DebugFS entries should be + * placed beneath this directory. 
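+ *
+ * Entries are typically created with the debugfs_create_*() helpers,
+ * passing sw64_debugfs_dir as the parent dentry.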
+ */ +extern struct dentry *sw64_debugfs_dir; + +#define UNA_MAX_ENTRIES 64 + +struct unaligned_stat { + unsigned long pc; + unsigned long va; +}; + +extern char unaligned_task[]; +extern unsigned long unaligned_count; +extern struct unaligned_stat unaligned[]; + +#endif /* _ASM_SW64_DEBUG_H */ diff --git a/arch/sw_64/include/asm/extable.h b/arch/sw_64/include/asm/extable.h new file mode 100644 index 000000000000..42f872ce6c3b --- /dev/null +++ b/arch/sw_64/include/asm/extable.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EXTABLE_H +#define _ASM_SW64_EXTABLE_H + +/* + * About the exception table: + * + * - insn is a 32-bit pc-relative offset from the faulting insn. + * - nextinsn is a 16-bit offset off of the faulting instruction + * (not off of the *next* instruction as branches are). + * - errreg is the register in which to place -EFAULT. + * - valreg is the final target register for the load sequence + * and will be zeroed. + * + * Either errreg or valreg may be $31, in which case nothing happens. + * + * The exception fixup information "just so happens" to be arranged + * as in a MEM format instruction. This lets us emit our three + * values like so: + * + * lda valreg, nextinsn(errreg) + * + */ + +struct exception_table_entry { + signed int insn; + union exception_fixup { + unsigned int unit; + struct { + signed int nextinsn : 16; + unsigned int errreg : 5; + unsigned int valreg : 5; + } bits; + } fixup; +}; + +#define ARCH_HAS_RELATIVE_EXTABLE + +extern int fixup_exception(struct pt_regs *regs, unsigned long pc); + +#define swap_ex_entry_fixup(a, b, tmp, delta) \ + do { \ + (a)->fixup.unit = (b)->fixup.unit; \ + (b)->fixup.unit = (tmp).fixup.unit; \ + } while (0) + +#endif /* _ASM_SW64_EXTABLE_H */ diff --git a/arch/sw_64/include/asm/futex.h b/arch/sw_64/include/asm/futex.h new file mode 100644 index 000000000000..783799813980 --- /dev/null +++ b/arch/sw_64/include/asm/futex.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FUTEX_H +#define _ASM_SW64_FUTEX_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifdef CONFIG_SUBARCH_C3B + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + __asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + " ldi %2, 1\n" \ + " wr_f %2\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: br 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " wr_f %2\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " rd_f %3\n" + " beq %2, 3f\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: br 1b\n" + " .previous\n" + " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#else /* !CONFIG_SUBARCH_C3B */ + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + 
__asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: lbr 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " beq %2, 3f\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: lbr 1b\n" + " .previous\n" + " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#endif /* CONFIG_SUBARCH_C3B */ + +static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + u32 __user *uaddr) +{ + int oldval = 0, ret; + unsigned long tmp; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("addw %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("andnot %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_FUTEX_H */ diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c new file mode 100644 index 000000000000..41310a8a7af1 --- /dev/null +++ b/arch/sw_64/kernel/asm-offsets.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. 
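+ *
+ * Each DEFINE()/OFFSET() invocation below emits an asm statement
+ * carrying a marker string and the constant; Kbuild compiles this file
+ * to assembly and post-processes the markers into
+ * include/generated/asm-offsets.h for use by assembly sources.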
+ */ + +#define GENERATING_ASM_OFFSETS /* asm/smp.h */ +#include +#include +#include +#include + +#include +#include + +#include "traps.c" +#include "signal.c" + +void foo(void) +{ + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + BLANK(); + + DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); + DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); + DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); + DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); + DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); + DEFINE(TASK_STACK, offsetof(struct task_struct, stack)); +#ifdef CONFIG_SMP + DEFINE(TASK_CPU, offsetof(struct task_struct, thread_info.cpu)); +#endif + BLANK(); + + OFFSET(PSTATE_REGS, processor_state, regs); + OFFSET(PSTATE_FPREGS, processor_state, fpregs); + OFFSET(PSTATE_FPCR, processor_state, fpcr); + OFFSET(PSTATE_KTP, processor_state, ktp); +#ifdef CONFIG_HIBERNATION + OFFSET(PSTATE_SP, processor_state, sp); +#endif + OFFSET(PBE_ADDR, pbe, address); + OFFSET(PBE_ORIG_ADDR, pbe, orig_address); + OFFSET(PBE_NEXT, pbe, next); + OFFSET(CALLEE_R9, callee_saved_regs, r9); + OFFSET(CALLEE_R10, callee_saved_regs, r10); + OFFSET(CALLEE_R11, callee_saved_regs, r11); + OFFSET(CALLEE_R12, callee_saved_regs, r12); + OFFSET(CALLEE_R13, callee_saved_regs, r13); + OFFSET(CALLEE_R14, callee_saved_regs, r14); + OFFSET(CALLEE_R15, callee_saved_regs, r15); + OFFSET(CALLEE_RA, callee_saved_regs, ra); + OFFSET(CALLEE_F2, callee_saved_fpregs, f2); + OFFSET(CALLEE_F3, callee_saved_fpregs, f3); + OFFSET(CALLEE_F4, callee_saved_fpregs, f4); + OFFSET(CALLEE_F5, callee_saved_fpregs, f5); + OFFSET(CALLEE_F6, callee_saved_fpregs, f6); + OFFSET(CALLEE_F7, callee_saved_fpregs, f7); + OFFSET(CALLEE_F8, callee_saved_fpregs, f8); + OFFSET(CALLEE_F9, callee_saved_fpregs, f9); + BLANK(); + DEFINE(CRED_UID, offsetof(struct cred, uid)); + DEFINE(CRED_EUID, offsetof(struct cred, euid)); + DEFINE(CRED_GID, offsetof(struct cred, gid)); + DEFINE(CRED_EGID, offsetof(struct cred, egid)); + BLANK(); + + DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); + DEFINE(PT_REGS_R0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS_R1, offsetof(struct pt_regs, regs[1])); + DEFINE(PT_REGS_R2, offsetof(struct pt_regs, regs[2])); + DEFINE(PT_REGS_R3, offsetof(struct pt_regs, regs[3])); + DEFINE(PT_REGS_R4, offsetof(struct pt_regs, regs[4])); + DEFINE(PT_REGS_R5, offsetof(struct pt_regs, regs[5])); + DEFINE(PT_REGS_R6, offsetof(struct pt_regs, regs[6])); + DEFINE(PT_REGS_R7, offsetof(struct pt_regs, regs[7])); + DEFINE(PT_REGS_R8, offsetof(struct pt_regs, regs[8])); + DEFINE(PT_REGS_R9, offsetof(struct pt_regs, regs[9])); + DEFINE(PT_REGS_R10, offsetof(struct pt_regs, regs[10])); + DEFINE(PT_REGS_R11, offsetof(struct pt_regs, regs[11])); + DEFINE(PT_REGS_R12, offsetof(struct pt_regs, regs[12])); + DEFINE(PT_REGS_R13, offsetof(struct pt_regs, regs[13])); + DEFINE(PT_REGS_R14, offsetof(struct pt_regs, regs[14])); + DEFINE(PT_REGS_R15, offsetof(struct pt_regs, regs[15])); + DEFINE(PT_REGS_R16, offsetof(struct pt_regs, regs[16])); + DEFINE(PT_REGS_R17, offsetof(struct pt_regs, regs[17])); + DEFINE(PT_REGS_R18, offsetof(struct pt_regs, regs[18])); + DEFINE(PT_REGS_R19, offsetof(struct pt_regs, regs[19])); + DEFINE(PT_REGS_R20, offsetof(struct pt_regs, regs[20])); + DEFINE(PT_REGS_R21, offsetof(struct pt_regs, regs[21])); + DEFINE(PT_REGS_R22, offsetof(struct pt_regs, regs[22])); + DEFINE(PT_REGS_R23, offsetof(struct pt_regs, regs[23])); + 
DEFINE(PT_REGS_R24, offsetof(struct pt_regs, regs[24])); + DEFINE(PT_REGS_R25, offsetof(struct pt_regs, regs[25])); + DEFINE(PT_REGS_R26, offsetof(struct pt_regs, regs[26])); + DEFINE(PT_REGS_R27, offsetof(struct pt_regs, regs[27])); + DEFINE(PT_REGS_R28, offsetof(struct pt_regs, regs[28])); + DEFINE(PT_REGS_GP, offsetof(struct pt_regs, regs[29])); + DEFINE(PT_REGS_SP, offsetof(struct pt_regs, regs[30])); + DEFINE(PT_REGS_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_REGS_PS, offsetof(struct pt_regs, ps)); + DEFINE(PT_REGS_ORIG_R0, offsetof(struct pt_regs, orig_r0)); + DEFINE(PT_REGS_ORIG_R19, offsetof(struct pt_regs, orig_r19)); + DEFINE(PT_REGS_HM_PS, offsetof(struct pt_regs, hm_ps)); + DEFINE(PT_REGS_HM_PC, offsetof(struct pt_regs, hm_pc)); + DEFINE(PT_REGS_HM_GP, offsetof(struct pt_regs, hm_gp)); + DEFINE(PT_REGS_HM_R16, offsetof(struct pt_regs, hm_r16)); + DEFINE(PT_REGS_HM_R17, offsetof(struct pt_regs, hm_r17)); + DEFINE(PT_REGS_HM_R18, offsetof(struct pt_regs, hm_r18)); + BLANK(); + + DEFINE(KVM_REGS_SIZE, sizeof(struct kvm_regs)); + DEFINE(KVM_REGS_R0, offsetof(struct kvm_regs, r0)); + DEFINE(KVM_REGS_R1, offsetof(struct kvm_regs, r1)); + DEFINE(KVM_REGS_R2, offsetof(struct kvm_regs, r2)); + DEFINE(KVM_REGS_R3, offsetof(struct kvm_regs, r3)); + DEFINE(KVM_REGS_R4, offsetof(struct kvm_regs, r4)); + DEFINE(KVM_REGS_R5, offsetof(struct kvm_regs, r5)); + DEFINE(KVM_REGS_R6, offsetof(struct kvm_regs, r6)); + DEFINE(KVM_REGS_R7, offsetof(struct kvm_regs, r7)); + DEFINE(KVM_REGS_R8, offsetof(struct kvm_regs, r8)); + DEFINE(KVM_REGS_R9, offsetof(struct kvm_regs, r9)); + DEFINE(KVM_REGS_R10, offsetof(struct kvm_regs, r10)); + DEFINE(KVM_REGS_R11, offsetof(struct kvm_regs, r11)); + DEFINE(KVM_REGS_R12, offsetof(struct kvm_regs, r12)); + DEFINE(KVM_REGS_R13, offsetof(struct kvm_regs, r13)); + DEFINE(KVM_REGS_R14, offsetof(struct kvm_regs, r14)); + DEFINE(KVM_REGS_R15, offsetof(struct kvm_regs, r15)); + DEFINE(KVM_REGS_R19, offsetof(struct kvm_regs, r19)); + DEFINE(KVM_REGS_R20, offsetof(struct kvm_regs, r20)); + DEFINE(KVM_REGS_R21, offsetof(struct kvm_regs, r21)); + DEFINE(KVM_REGS_R22, offsetof(struct kvm_regs, r22)); + DEFINE(KVM_REGS_R23, offsetof(struct kvm_regs, r23)); + DEFINE(KVM_REGS_R24, offsetof(struct kvm_regs, r24)); + DEFINE(KVM_REGS_R25, offsetof(struct kvm_regs, r25)); + DEFINE(KVM_REGS_R26, offsetof(struct kvm_regs, r26)); + DEFINE(KVM_REGS_R27, offsetof(struct kvm_regs, r27)); + DEFINE(KVM_REGS_R28, offsetof(struct kvm_regs, r28)); + DEFINE(KVM_REGS_FPCR, offsetof(struct kvm_regs, fpcr)); + DEFINE(KVM_REGS_F0, offsetof(struct kvm_regs, fp[0 * 4])); + DEFINE(KVM_REGS_F1, offsetof(struct kvm_regs, fp[1 * 4])); + DEFINE(KVM_REGS_F2, offsetof(struct kvm_regs, fp[2 * 4])); + DEFINE(KVM_REGS_F3, offsetof(struct kvm_regs, fp[3 * 4])); + DEFINE(KVM_REGS_F4, offsetof(struct kvm_regs, fp[4 * 4])); + DEFINE(KVM_REGS_F5, offsetof(struct kvm_regs, fp[5 * 4])); + DEFINE(KVM_REGS_F6, offsetof(struct kvm_regs, fp[6 * 4])); + DEFINE(KVM_REGS_F7, offsetof(struct kvm_regs, fp[7 * 4])); + DEFINE(KVM_REGS_F8, offsetof(struct kvm_regs, fp[8 * 4])); + DEFINE(KVM_REGS_F9, offsetof(struct kvm_regs, fp[9 * 4])); + DEFINE(KVM_REGS_F10, offsetof(struct kvm_regs, fp[10 * 4])); + DEFINE(KVM_REGS_F11, offsetof(struct kvm_regs, fp[11 * 4])); + DEFINE(KVM_REGS_F12, offsetof(struct kvm_regs, fp[12 * 4])); + DEFINE(KVM_REGS_F13, offsetof(struct kvm_regs, fp[13 * 4])); + DEFINE(KVM_REGS_F14, offsetof(struct kvm_regs, fp[14 * 4])); + DEFINE(KVM_REGS_F15, offsetof(struct kvm_regs, fp[15 * 4])); + 
DEFINE(KVM_REGS_F16, offsetof(struct kvm_regs, fp[16 * 4])); + DEFINE(KVM_REGS_F17, offsetof(struct kvm_regs, fp[17 * 4])); + DEFINE(KVM_REGS_F18, offsetof(struct kvm_regs, fp[18 * 4])); + DEFINE(KVM_REGS_F19, offsetof(struct kvm_regs, fp[19 * 4])); + DEFINE(KVM_REGS_F20, offsetof(struct kvm_regs, fp[20 * 4])); + DEFINE(KVM_REGS_F21, offsetof(struct kvm_regs, fp[21 * 4])); + DEFINE(KVM_REGS_F22, offsetof(struct kvm_regs, fp[22 * 4])); + DEFINE(KVM_REGS_F23, offsetof(struct kvm_regs, fp[23 * 4])); + DEFINE(KVM_REGS_F24, offsetof(struct kvm_regs, fp[24 * 4])); + DEFINE(KVM_REGS_F25, offsetof(struct kvm_regs, fp[25 * 4])); + DEFINE(KVM_REGS_F26, offsetof(struct kvm_regs, fp[26 * 4])); + DEFINE(KVM_REGS_F27, offsetof(struct kvm_regs, fp[27 * 4])); + DEFINE(KVM_REGS_F28, offsetof(struct kvm_regs, fp[28 * 4])); + DEFINE(KVM_REGS_F29, offsetof(struct kvm_regs, fp[29 * 4])); + DEFINE(KVM_REGS_F30, offsetof(struct kvm_regs, fp[30 * 4])); + DEFINE(KVM_REGS_PS, offsetof(struct kvm_regs, ps)); + DEFINE(KVM_REGS_PC, offsetof(struct kvm_regs, pc)); + DEFINE(KVM_REGS_GP, offsetof(struct kvm_regs, gp)); + DEFINE(KVM_REGS_R16, offsetof(struct kvm_regs, r16)); + DEFINE(KVM_REGS_R17, offsetof(struct kvm_regs, r17)); + DEFINE(KVM_REGS_R18, offsetof(struct kvm_regs, r18)); + BLANK(); + + DEFINE(VCPU_RET_SIZE, sizeof(struct vcpu_run_ret_stack)); + DEFINE(VCPU_RET_RA, offsetof(struct vcpu_run_ret_stack, ra)); + DEFINE(VCPU_RET_R0, offsetof(struct vcpu_run_ret_stack, r0)); + BLANK(); + + DEFINE(HOST_INT_SIZE, sizeof(struct host_int_args)); + DEFINE(HOST_INT_R18, offsetof(struct host_int_args, r18)); + DEFINE(HOST_INT_R17, offsetof(struct host_int_args, r17)); + DEFINE(HOST_INT_R16, offsetof(struct host_int_args, r16)); + BLANK(); + + OFFSET(TASK_THREAD, task_struct, thread); + OFFSET(TASK_THREAD_F0, task_struct, thread.fpstate.fp[0]); + OFFSET(TASK_THREAD_F1, task_struct, thread.fpstate.fp[1]); + OFFSET(TASK_THREAD_F2, task_struct, thread.fpstate.fp[2]); + OFFSET(TASK_THREAD_F3, task_struct, thread.fpstate.fp[3]); + OFFSET(TASK_THREAD_F4, task_struct, thread.fpstate.fp[4]); + OFFSET(TASK_THREAD_F5, task_struct, thread.fpstate.fp[5]); + OFFSET(TASK_THREAD_F6, task_struct, thread.fpstate.fp[6]); + OFFSET(TASK_THREAD_F7, task_struct, thread.fpstate.fp[7]); + OFFSET(TASK_THREAD_F8, task_struct, thread.fpstate.fp[8]); + OFFSET(TASK_THREAD_F9, task_struct, thread.fpstate.fp[9]); + OFFSET(TASK_THREAD_F10, task_struct, thread.fpstate.fp[10]); + OFFSET(TASK_THREAD_F11, task_struct, thread.fpstate.fp[11]); + OFFSET(TASK_THREAD_F12, task_struct, thread.fpstate.fp[12]); + OFFSET(TASK_THREAD_F13, task_struct, thread.fpstate.fp[13]); + OFFSET(TASK_THREAD_F14, task_struct, thread.fpstate.fp[14]); + OFFSET(TASK_THREAD_F15, task_struct, thread.fpstate.fp[15]); + OFFSET(TASK_THREAD_F16, task_struct, thread.fpstate.fp[16]); + OFFSET(TASK_THREAD_F17, task_struct, thread.fpstate.fp[17]); + OFFSET(TASK_THREAD_F18, task_struct, thread.fpstate.fp[18]); + OFFSET(TASK_THREAD_F19, task_struct, thread.fpstate.fp[19]); + OFFSET(TASK_THREAD_F20, task_struct, thread.fpstate.fp[20]); + OFFSET(TASK_THREAD_F21, task_struct, thread.fpstate.fp[21]); + OFFSET(TASK_THREAD_F22, task_struct, thread.fpstate.fp[22]); + OFFSET(TASK_THREAD_F23, task_struct, thread.fpstate.fp[23]); + OFFSET(TASK_THREAD_F24, task_struct, thread.fpstate.fp[24]); + OFFSET(TASK_THREAD_F25, task_struct, thread.fpstate.fp[25]); + OFFSET(TASK_THREAD_F26, task_struct, thread.fpstate.fp[26]); + OFFSET(TASK_THREAD_F27, task_struct, thread.fpstate.fp[27]); + OFFSET(TASK_THREAD_F28, 
task_struct, thread.fpstate.fp[28]);
+	OFFSET(TASK_THREAD_F29, task_struct, thread.fpstate.fp[29]);
+	OFFSET(TASK_THREAD_F30, task_struct, thread.fpstate.fp[30]);
+	OFFSET(TASK_THREAD_FPCR, task_struct, thread.fpstate.fpcr);
+	BLANK();
+	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
+	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
+	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
+	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
+	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
+	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
+	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
+	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
+	BLANK();
+	DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
+	BLANK();
+	DEFINE(RT_SIGFRAME_SIZE, sizeof(struct rt_sigframe));
+	OFFSET(RT_SIGFRAME_MCTX, rt_sigframe, uc.uc_mcontext);
+}
diff --git a/arch/sw_64/kernel/audit.c b/arch/sw_64/kernel/audit.c
new file mode 100644
index 000000000000..dcf58deee3e2
--- /dev/null
+++ b/arch/sw_64/kernel/audit.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/audit.h>
+
+#include <asm/unistd.h>
+
+static unsigned int dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned int read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned int write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned int chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned int signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_arch(int arch)
+{
+	return 0;
+}
+
+int audit_classify_syscall(int abi, unsigned int syscall)
+{
+	switch (syscall) {
+	case __NR_open:
+		return 2;
+	case __NR_openat:
+		return 3;
+	case __NR_execve:
+		return 5;
+	default:
+		return 0;
+	}
+}
+
+static int __init audit_classes_init(void)
+{
+	audit_register_class(AUDIT_CLASS_WRITE, write_class);
+	audit_register_class(AUDIT_CLASS_READ, read_class);
+	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+	audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+	return 0;
+}
+
+device_initcall(audit_classes_init);
diff --git a/arch/sw_64/kernel/early_printk.c b/arch/sw_64/kernel/early_printk.c
new file mode 100644
index 000000000000..66af1165e89b
--- /dev/null
+++ b/arch/sw_64/kernel/early_printk.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/console.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+static unsigned long early_serial_base; /* ttyS0 */
+
+#define XMTRDY		0x20
+
+#define DLAB		0x80
+
+#define TXR		0	/* Transmit register (WRITE) */
+#define RXR		0	/* Receive register (READ) */
+#define IER		1	/* Interrupt Enable */
+#define IIR		2	/* Interrupt ID */
+#define FCR		2	/* FIFO control */
+#define LCR		3	/* Line control */
+#define MCR		4	/* Modem control */
+#define LSR		5	/* Line Status */
+#define MSR		6	/* Modem Status */
+#define DLL		0	/* Divisor Latch Low */
+#define DLH		1	/* Divisor latch High */
+
+static void mem32_serial_out(unsigned long addr, int offset, int value)
+{
+	void __iomem *vaddr = (void __iomem *)addr;
+
+	offset = offset << 9;
+
+	writel(value, vaddr + offset);
+}
+
+static unsigned int mem32_serial_in(unsigned long addr, int offset)
+{
+	void __iomem *vaddr = (void __iomem *)addr;
+
+	offset = offset << 9;
+
+	return readl(vaddr + offset);
+}
+
+static unsigned int (*serial_in)(unsigned long addr, int offset) = mem32_serial_in;
+static void (*serial_out)(unsigned long addr, int offset, int value) = mem32_serial_out;
+
+static int early_serial_putc(unsigned char ch)
+{
+	unsigned int timeout = 0xffff;
+
+	while ((serial_in(early_serial_base, LSR) & 
XMTRDY) == 0 && --timeout) + cpu_relax(); + serial_out(early_serial_base, TXR, ch); + + return timeout ? 0 : -1; +} + +static void early_serial_write(struct console *con, const char *s, unsigned int n) +{ + while (*s && n-- > 0) { + if (*s == '\n') + early_serial_putc('\r'); + early_serial_putc(*s); + s++; + } +} + +static unsigned int uart_get_refclk(void) +{ + return 24000000UL; +} + +static unsigned int uart_calculate_baudrate_divisor(unsigned long baudrate) +{ + unsigned int refclk = uart_get_refclk(); + + return (1 + (2 * refclk) / (baudrate * 16)) / 2; +} + +static __init void early_serial_hw_init(unsigned long baud) +{ + unsigned char c; + unsigned long divisor = uart_calculate_baudrate_divisor(baud); + + serial_out(early_serial_base, LCR, 0x3); /* 8n1 */ + serial_out(early_serial_base, IER, 0); /* no interrupt */ + serial_out(early_serial_base, FCR, 0); /* no fifo */ + serial_out(early_serial_base, MCR, 0x3); /* DTR + RTS */ + + c = serial_in(early_serial_base, LCR); + serial_out(early_serial_base, LCR, c | DLAB); + serial_out(early_serial_base, DLL, divisor & 0xff); + serial_out(early_serial_base, DLH, (divisor >> 8) & 0xff); + serial_out(early_serial_base, LCR, c & ~DLAB); +} + +#define DEFAULT_BAUD 115200 + +static __init void early_serial_init(char *s) +{ + unsigned long baud = DEFAULT_BAUD; + int err; + + if (*s == ',') + ++s; + + if (*s) { + unsigned int port; + static const long bases[] __initconst = { 0xfff0803300000000ULL, + 0xfff0903300000000ULL }; + + if (!strncmp(s, "ttyS", 4)) + s += 4; + err = kstrtouint(s, 10, &port); + if (err || port > 1) + port = 0; + early_serial_base = bases[port]; + s += strcspn(s, ","); + if (*s == ',') + s++; + } + + if (*s) { + err = kstrtoul(s, 0, &baud); + if (err || baud == 0) + baud = DEFAULT_BAUD; + } + + /* These will always be IO based ports */ + serial_in = mem32_serial_in; + serial_out = mem32_serial_out; + + /* Set up the HW */ + early_serial_hw_init(baud); +} + +static struct console early_serial_console = { + .name = "early", + .write = early_serial_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +static void early_console_register(struct console *con, int keep_early) +{ + if (con->index != -1) { + pr_crit("ERROR: earlyprintk= %s already used\n", + con->name); + return; + } + early_console = con; + + if (keep_early) + early_console->flags &= ~CON_BOOT; + else + early_console->flags |= CON_BOOT; + + register_console(early_console); +} + +static int __init setup_early_printk(char *buf) +{ + int keep; + + if (!buf) + return 0; + + if (early_console) + return 0; + + keep = (strstr(buf, "keep") != NULL); + + if (!strncmp(buf, "serial", 6)) { + buf += 6; + early_serial_init(buf); + early_console_register(&early_serial_console, keep); + if (!strncmp(buf, ",ttyS", 5)) + buf += 5; + } + + return 0; +} + +early_param("earlyprintk", setup_early_printk); diff --git a/arch/sw_64/kernel/entry.S b/arch/sw_64/kernel/entry.S new file mode 100644 index 000000000000..59c2ff4eb915 --- /dev/null +++ b/arch/sw_64/kernel/entry.S @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel entry-points. + */ + +#include +#include +#include +#include +#include +#include + + .text + .set noat +/* + * This defines the normal kernel pt-regs layout. + * + * regs 9-15 preserved by C code, saving to pt_regs will make + * them easier to be accessed in an unified way. 
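One detail of the early console code above that is easy to misread: uart_calculate_baudrate_divisor() rounds refclk/(16 * baud) to the nearest integer, not down. A standalone sketch of the same arithmetic (illustrative code, not part of the patch; the 24 MHz value matches uart_get_refclk() above):

#include <stdio.h>

/* Round-to-nearest divisor, same integer trick as
 * uart_calculate_baudrate_divisor() above. */
static unsigned int divisor(unsigned long refclk, unsigned long baud)
{
	return (1 + (2 * refclk) / (baud * 16)) / 2;
}

int main(void)
{
	printf("115200 -> %u\n", divisor(24000000UL, 115200UL));	/* 13 */
	printf("57600  -> %u\n", divisor(24000000UL, 57600UL));		/* 26 */
	return 0;
}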
+ * regs 16-18 saved by HMcode + * regs 29-30 saved and set up by HMcode + */ + + .macro SAVE_ALL + ldi $sp, -PT_REGS_HM_PS($sp) + stl $0, PT_REGS_R0($sp) + stl $1, PT_REGS_R1($sp) + stl $2, PT_REGS_R2($sp) + stl $3, PT_REGS_R3($sp) + stl $4, PT_REGS_R4($sp) + stl $28, PT_REGS_R28($sp) + stl $5, PT_REGS_R5($sp) + stl $6, PT_REGS_R6($sp) + stl $7, PT_REGS_R7($sp) + stl $8, PT_REGS_R8($sp) + stl $9, PT_REGS_R9($sp) + stl $10, PT_REGS_R10($sp) + stl $11, PT_REGS_R11($sp) + stl $12, PT_REGS_R12($sp) + stl $13, PT_REGS_R13($sp) + stl $14, PT_REGS_R14($sp) + stl $15, PT_REGS_R15($sp) + stl $19, PT_REGS_R19($sp) + stl $20, PT_REGS_R20($sp) + stl $21, PT_REGS_R21($sp) + stl $22, PT_REGS_R22($sp) + stl $23, PT_REGS_R23($sp) + stl $24, PT_REGS_R24($sp) + stl $25, PT_REGS_R25($sp) + stl $26, PT_REGS_R26($sp) + stl $27, PT_REGS_R27($sp) + ldl $1, PT_REGS_HM_R16($sp) + ldl $2, PT_REGS_HM_R17($sp) + ldl $3, PT_REGS_HM_R18($sp) + ldl $4, PT_REGS_HM_GP($sp) + ldl $5, PT_REGS_HM_PC($sp) + ldl $6, PT_REGS_HM_PS($sp) + stl $1, PT_REGS_R16($sp) + stl $2, PT_REGS_R17($sp) + stl $3, PT_REGS_R18($sp) + stl $4, PT_REGS_GP($sp) + stl $5, PT_REGS_PC($sp) + stl $6, PT_REGS_PS($sp) + and $6, 0x8, $7 + beq $7, 1f + sys_call HMC_rdusp + br 2f +1: ldi $0, PT_REGS_SIZE($sp) +2: stl $0, PT_REGS_SP($sp) + ldi $1, NO_SYSCALL + stl $1, PT_REGS_ORIG_R0($sp) + sys_call HMC_rdktp + .endm + + .macro RESTORE_ALL + ldl $16, PT_REGS_SP($sp) + /* skip wrusp if returning to kernel */ + blt $16, 1f + sys_call HMC_wrusp +1: ldl $1, PT_REGS_R16($sp) + ldl $2, PT_REGS_R17($sp) + ldl $3, PT_REGS_R18($sp) + ldl $4, PT_REGS_GP($sp) + ldl $5, PT_REGS_PC($sp) + ldl $6, PT_REGS_PS($sp) + stl $1, PT_REGS_HM_R16($sp) + stl $2, PT_REGS_HM_R17($sp) + stl $3, PT_REGS_HM_R18($sp) + stl $4, PT_REGS_HM_GP($sp) + stl $5, PT_REGS_HM_PC($sp) + stl $6, PT_REGS_HM_PS($sp) + ldl $0, PT_REGS_R0($sp) + ldl $1, PT_REGS_R1($sp) + ldl $2, PT_REGS_R2($sp) + ldl $3, PT_REGS_R3($sp) + ldl $4, PT_REGS_R4($sp) + ldl $5, PT_REGS_R5($sp) + ldl $6, PT_REGS_R6($sp) + ldl $7, PT_REGS_R7($sp) + ldl $8, PT_REGS_R8($sp) + ldl $9, PT_REGS_R9($sp) + ldl $10, PT_REGS_R10($sp) + ldl $11, PT_REGS_R11($sp) + ldl $12, PT_REGS_R12($sp) + ldl $13, PT_REGS_R13($sp) + ldl $14, PT_REGS_R14($sp) + ldl $15, PT_REGS_R15($sp) + ldl $19, PT_REGS_R19($sp) + ldl $20, PT_REGS_R20($sp) + ldl $21, PT_REGS_R21($sp) + ldl $22, PT_REGS_R22($sp) + ldl $23, PT_REGS_R23($sp) + ldl $24, PT_REGS_R24($sp) + ldl $25, PT_REGS_R25($sp) + ldl $26, PT_REGS_R26($sp) + ldl $27, PT_REGS_R27($sp) + ldl $28, PT_REGS_R28($sp) + ldi $sp, PT_REGS_HM_PS($sp) + .endm + +/* + * Non-syscall kernel entry points. + */ + + .align 4 + .globl entInt + .ent entInt +entInt: + SAVE_ALL + mov $sp, $19 + call $26, do_entInt + br ret_from_sys_call + .end entInt + + .align 4 + .globl entArith + .ent entArith +entArith: + SAVE_ALL + mov $sp, $18 + call $26, do_entArith + br ret_from_sys_call + .end entArith + + .align 4 + .globl entMM + .ent entMM +entMM: + SAVE_ALL + mov $sp, $19 + call $26, do_page_fault + br ret_from_sys_call + .end entMM + + .align 4 + .globl entIF + .ent entIF +entIF: + SAVE_ALL + mov $sp, $18 + call $26, do_entIF + br ret_from_sys_call + .end entIF + +/* + * Handle unalignment exception. + * We don't handle the "gp" register correctly, but if we fault on a + * gp-register unaligned load/store, something is _very_ wrong in the + * kernel anyway. + */ + .align 4 + .globl entUna + .ent entUna +entUna: + SAVE_ALL + mov $sp, $19 + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 /* user mode ? 
*/ + beq $0, 1f + call $26, do_entUnaUser /* return to ret_from_syscall */ + br ret_from_sys_call +1: ldl $9, PT_REGS_GP($sp) + call $26, do_entUna + stl $9, PT_REGS_GP($sp) + RESTORE_ALL + sys_call HMC_rti + .end entUna + +/* + * The system call entry point is special. Most importantly, it looks + * like a function call to userspace as far as clobbered registers. We + * do preserve the argument registers (for syscall restarts) and $26 + * (for leaf syscall functions). + * + * So much for theory. We don't take advantage of this yet. + * + * Note that a0-a2 are not saved by HMcode as with the other entry points. + */ + + .align 4 + .globl entSys + .ent entSys +entSys: + SAVE_ALL + stl $16, PT_REGS_R16($sp) + stl $17, PT_REGS_R17($sp) + stl $18, PT_REGS_R18($sp) + mov $sp, $16 + call $26, do_entSys + br ret_from_sys_call + .end entSys + + .align 4 + .globl ret_from_sys_call + .ent ret_from_sys_call +ret_from_sys_call: +#ifdef CONFIG_SUBARCH_C3B + fillcs 0($sp) /* prefetch */ + fillcs 128($sp) /* prefetch */ +#endif + br $27, 1f +1: ldgp $29, 0($27) + /* Make sure need_resched and sigpending don't change between + sampling and the rti. */ + ldi $16, 7 + sys_call HMC_swpipl + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 + beq $0, restore_all +ret_to_user: + ldw $17, TI_FLAGS($8) + and $17, _TIF_WORK_MASK, $2 + beq $2, restore_all + mov $sp, $16 + call $26, do_notify_resume +restore_all: + RESTORE_ALL + sys_call HMC_rti + .end ret_from_sys_call + +/* + * Integer register context switch + * The callee-saved registers must be saved and restored. + * + * a0: previous task_struct (must be preserved across the switch) + * a1: next task_struct + * + * The value of a0 must be preserved by this function, as that's how + * arguments are passed to schedule_tail. + */ + .align 4 + .globl __switch_to + .ent __switch_to +__switch_to: + .prologue 0 + /* Save context into prev->thread */ + stl $26, TASK_THREAD_RA($16) + stl $30, TASK_THREAD_SP($16) + stl $9, TASK_THREAD_S0($16) + stl $10, TASK_THREAD_S1($16) + stl $11, TASK_THREAD_S2($16) + stl $12, TASK_THREAD_S3($16) + stl $13, TASK_THREAD_S4($16) + stl $14, TASK_THREAD_S5($16) + stl $15, TASK_THREAD_S6($16) + /* Restore context from next->thread */ + ldl $26, TASK_THREAD_RA($17) + ldl $30, TASK_THREAD_SP($17) + ldl $9, TASK_THREAD_S0($17) + ldl $10, TASK_THREAD_S1($17) + ldl $11, TASK_THREAD_S2($17) + ldl $12, TASK_THREAD_S3($17) + ldl $13, TASK_THREAD_S4($17) + ldl $14, TASK_THREAD_S5($17) + ldl $15, TASK_THREAD_S6($17) + mov $17, $8 + sys_call HMC_wrktp + mov $16, $0 + ret + .end __switch_to + +/* + * New processes begin life here. + */ + + .globl ret_from_fork + .align 4 + .ent ret_from_fork +ret_from_fork: + call $26, schedule_tail + br ret_from_sys_call + .end ret_from_fork + +/* + * ... and new kernel threads - here + */ + .align 4 + .globl ret_from_kernel_thread + .ent ret_from_kernel_thread +ret_from_kernel_thread: + call $26, schedule_tail + mov $9, $27 + mov $10, $16 + call $26, ($9) + br ret_to_user + .end ret_from_kernel_thread -- Gitee From 1e96fbe0653fd249fe3e02b8a98360708409ba5f Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:34 +0800 Subject: [PATCH 305/953] anolis: sw64: add some other routines ANBZ: #4688 Add some uncommon routines for SW64. 
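The ret_from_kernel_thread path above relies on callee-saved registers surviving __switch_to: the new thread's entry point rides in $9 and its argument in $10 until the trampoline calls through them. A userspace sketch of that shape (worker and trampoline are illustrative names, not kernel symbols):

#include <stdio.h>

typedef int (*thread_fn)(void *);

static int worker(void *arg)
{
	printf("thread body got \"%s\"\n", (const char *)arg);
	return 0;
}

/* Mirrors "mov $9, $27; mov $10, $16; call $26, ($9)" above: the
 * saved registers carry the function and its argument across. */
static void trampoline(thread_fn fn, void *arg)
{
	fn(arg);
}

int main(void)
{
	trampoline(worker, "hello");
	return 0;
}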
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/dup_print.c | 88 +++++++++++++++++++++++++++++++++++ arch/sw_64/kernel/proc_misc.c | 25 ++++++++++ arch/sw_64/kernel/proto.h | 18 +++++++ arch/sw_64/kernel/segvdbg.c | 26 +++++++++++ arch/sw_64/kernel/tc.c | 36 ++++++++++++++ arch/sw_64/kernel/termios.c | 62 ++++++++++++++++++++++++ 6 files changed, 255 insertions(+) create mode 100644 arch/sw_64/kernel/dup_print.c create mode 100644 arch/sw_64/kernel/proc_misc.c create mode 100644 arch/sw_64/kernel/proto.h create mode 100644 arch/sw_64/kernel/segvdbg.c create mode 100644 arch/sw_64/kernel/tc.c create mode 100644 arch/sw_64/kernel/termios.c diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c new file mode 100644 index 000000000000..439ac75feb01 --- /dev/null +++ b/arch/sw_64/kernel/dup_print.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#ifdef CONFIG_SW64_RRK + +#define KERNEL_PRINTK_BUFF_BASE (0x700000UL + __START_KERNEL_map) + +static DEFINE_SPINLOCK(printk_lock); + +unsigned long sw64_printk_offset; +#define PRINTK_SIZE 0x100000UL + +int sw64_printk(const char *fmt, va_list args) +{ + char *sw64_printk_buf; + int printed_len = 0; + unsigned long flags; + + spin_lock_irqsave(&printk_lock, flags); + + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + + if (sw64_printk_offset >= (PRINTK_SIZE-1024)) { //printk wrapped + sw64_printk_offset = 0; + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + memset(sw64_printk_buf, 0, PRINTK_SIZE); + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + } else { + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + if (is_in_emul()) { + void __iomem *addr = __va(QEMU_PRINTF_BUFF_BASE); + u64 data = ((u64)sw64_printk_buf & 0xffffffffUL) + | ((u64)printed_len << 32); + *(u64 *)addr = data; + } + } + sw64_printk_offset += printed_len; + spin_unlock_irqrestore(&printk_lock, flags); + return printed_len; +} +#endif + +#ifdef CONFIG_SW64_RRU +#include + +static DEFINE_SPINLOCK(printf_lock); +#define USER_PRINT_BUFF_BASE (0x600000UL + __START_KERNEL_map) +#define USER_PRINT_BUFF_LEN 0x100000UL +#define USER_MESSAGE_MAX_LEN 0x100000UL +unsigned long sw64_printf_offset; +int sw64_user_printf(const char __user *buf, int len) +{ + static char *user_printf_buf; + unsigned long flags; + + if (current->pid <= 0) + return 0; + + /* + * do not write large (fake) message which may not be from + * STDOUT/STDERR any more as file descriptor could be duplicated + * in a pipe. 
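The comment above describes a deliberately simple overwrite-on-wrap buffer. A self-contained model of the offset handling in sw64_user_printf() below (a sketch: the 32-byte size is illustrative, while the patch itself uses a 1 MiB buffer at a fixed kernel virtual address):

#include <stdio.h>
#include <string.h>

#define BUF_LEN 32UL

static char buf[BUF_LEN];
static unsigned long off;

/* Wrap policy of sw64_user_printf(): restart from offset 0 and clear
 * the whole buffer once an append would run past the end. */
static void buf_append(const char *msg, unsigned long len)
{
	if (off + len > BUF_LEN) {
		off = 0;
		memset(buf, 0, BUF_LEN);
	}
	memcpy(buf + off, msg, len);
	off += len;
}

int main(void)
{
	buf_append("0123456789abcdef", 16);
	buf_append("0123456789abcdef", 16);	/* fills the buffer exactly */
	buf_append("wrap", 4);			/* 32 + 4 > 32: restart */
	printf("off=%lu buf=%s\n", off, buf);	/* off=4 buf=wrap */
	return 0;
}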
+ */ + if (len > USER_MESSAGE_MAX_LEN) + return 0; + + spin_lock_irqsave(&printf_lock, flags); + user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset); + + if (sw64_printf_offset == 0) + memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN); + + if ((sw64_printf_offset + len) > USER_PRINT_BUFF_LEN) { + sw64_printf_offset = 0; + user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset); + memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN); + } + copy_from_user(user_printf_buf, buf, len); + sw64_printf_offset += len; + spin_unlock_irqrestore(&printf_lock, flags); + return 0; +} +#endif diff --git a/arch/sw_64/kernel/proc_misc.c b/arch/sw_64/kernel/proc_misc.c new file mode 100644 index 000000000000..ca107ec1e05e --- /dev/null +++ b/arch/sw_64/kernel/proc_misc.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +extern const struct seq_operations cpu_active_mask_op; +static int cpu_active_mask_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &cpu_active_mask_op); +} + +static const struct file_operations proc_cpu_active_mask_operations = { + .open = cpu_active_mask_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_cpu_active_mask_init(void) +{ + proc_create("cpu_active_mask", 0, NULL, &proc_cpu_active_mask_operations); + return 0; +} +fs_initcall(proc_cpu_active_mask_init); diff --git a/arch/sw_64/kernel/proto.h b/arch/sw_64/kernel/proto.h new file mode 100644 index 000000000000..d7222334d1b9 --- /dev/null +++ b/arch/sw_64/kernel/proto.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_KERNEL_PROTO_H +#define _SW64_KERNEL_PROTO_H + +#include +#include +#include +#include + +/* ptrace.c */ +extern int ptrace_set_bpt(struct task_struct *child); +extern int ptrace_cancel_bpt(struct task_struct *child); + +/* traps.c */ +extern void show_regs(struct pt_regs *regs); +extern void die(char *str, struct pt_regs *regs, long err); + +#endif /* _SW64_KERNEL_PROTO_H */ diff --git a/arch/sw_64/kernel/segvdbg.c b/arch/sw_64/kernel/segvdbg.c new file mode 100644 index 000000000000..148d639a08db --- /dev/null +++ b/arch/sw_64/kernel/segvdbg.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Zhi Tongze + * Author: Zhi Tongze + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
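For the debugfs knob created below, a userspace toggle might look like the following sketch; the exact path is an assumption, since sw64_debugfs_dir is created elsewhere in the port and debugfs is only conventionally mounted at /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
	/* Path is a guess based on the usual debugfs mount point and
	 * an assumed "sw_64" directory for sw64_debugfs_dir. */
	FILE *f = fopen("/sys/kernel/debug/sw_64/segv_debug", "w");

	if (!f) {
		perror("segv_debug");
		return 1;
	}
	fputs("1\n", f);	/* debugfs bool attributes accept 0/1/y/n */
	fclose(f);
	return 0;
}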
+ */ + +#include + +#include + +extern bool segv_debug_enabled; + +static int __init segv_debug_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + + debugfs_create_bool("segv_debug", 0644, + sw64_debugfs_dir, &segv_debug_enabled); + return 0; +} +late_initcall(segv_debug_init); diff --git a/arch/sw_64/kernel/tc.c b/arch/sw_64/kernel/tc.c new file mode 100644 index 000000000000..f2de5ac3d9dc --- /dev/null +++ b/arch/sw_64/kernel/tc.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, serveros, linyue + */ + + +#include +#include + +/* + * Entry/exit counters that make sure that both CPUs + * run the measurement code at once: + */ +unsigned long time_sync; + +DEFINE_PER_CPU(u64, tc_offset); + +void tc_sync_clear(void) +{ + time_sync = 0; +} + +void tc_sync_ready(void *ignored) +{ + /* make sure we can see time_sync been set to 0 */ + smp_mb(); + while (!time_sync) + cpu_relax(); + + __this_cpu_write(tc_offset, time_sync - rdtc()); +} + +void tc_sync_set(void) +{ + time_sync = rdtc() + __this_cpu_read(tc_offset); +} diff --git a/arch/sw_64/kernel/termios.c b/arch/sw_64/kernel/termios.c new file mode 100644 index 000000000000..5c76a513c896 --- /dev/null +++ b/arch/sw_64/kernel/termios.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ + +int user_termio_to_kernel_termios(struct ktermios *a_termios, struct termio __user *u_termio) +{ + struct ktermios *k_termios = (a_termios); + struct termio k_termio; + int canon, ret; + + ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio)); + if (!ret) { + /* Overwrite only the low bits. */ + *(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; + *(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; + *(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; + *(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; + canon = k_termio.c_lflag & ICANON; + + k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR]; + k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT]; + k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE]; + k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL]; + k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2]; + k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC]; + k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; + k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; + } + return ret; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. + * + * Note the "fun" _VMIN overloading. + */ +int kernel_termios_to_user_termio(struct termio __user *u_termio, struct ktermios *a_termios) +{ + struct ktermios *k_termios = (a_termios); + struct termio k_termio; + int canon; + + k_termio.c_iflag = k_termios->c_iflag; + k_termio.c_oflag = k_termios->c_oflag; + k_termio.c_cflag = k_termios->c_cflag; + canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON; + + k_termio.c_line = k_termios->c_line; + k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR]; + k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT]; + k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE]; + k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL]; + k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN]; + k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ? 
VEOL : VTIME]; + k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2]; + k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC]; + + return copy_to_user(u_termio, &k_termio, sizeof(k_termio)); +} -- Gitee From 83f0691ab1ed0538a1a72789aeba16687ddc20df Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:25 +0800 Subject: [PATCH 306/953] anolis: sw64: add some library functions ANBZ: #4688 Add some library functions for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/checksum.h | 126 ++++ arch/sw_64/include/asm/delay.h | 11 + arch/sw_64/include/asm/string.h | 54 ++ arch/sw_64/include/asm/xor.h | 857 +++++++++++++++++++++ arch/sw_64/include/uapi/asm/swab.h | 43 ++ arch/sw_64/lib/Kconfig | 47 ++ arch/sw_64/lib/Makefile | 53 ++ arch/sw_64/lib/checksum.c | 147 ++++ arch/sw_64/lib/clear_page.S | 46 ++ arch/sw_64/lib/clear_user.S | 102 +++ arch/sw_64/lib/copy_page.S | 71 ++ arch/sw_64/lib/copy_user.S | 106 +++ arch/sw_64/lib/csum_ipv6_magic.S | 113 +++ arch/sw_64/lib/csum_partial_copy.c | 154 ++++ arch/sw_64/lib/deep-clear_page.S | 53 ++ arch/sw_64/lib/deep-clear_user.S | 52 ++ arch/sw_64/lib/deep-copy_page.S | 60 ++ arch/sw_64/lib/deep-copy_template.S | 301 ++++++++ arch/sw_64/lib/deep-copy_template_c4.S | 108 +++ arch/sw_64/lib/deep-copy_user.S | 53 ++ arch/sw_64/lib/deep-memcpy.S | 24 + arch/sw_64/lib/deep-memset.S | 97 +++ arch/sw_64/lib/deep-set_template.S | 133 ++++ arch/sw_64/lib/deep-set_template_c4.S | 93 +++ arch/sw_64/lib/divide.S | 190 +++++ arch/sw_64/lib/fls.c | 33 + arch/sw_64/lib/fpreg.c | 992 +++++++++++++++++++++++++ arch/sw_64/lib/iomap.c | 477 ++++++++++++ arch/sw_64/lib/iomap_copy.c | 52 ++ arch/sw_64/lib/memcpy.S | 201 +++++ arch/sw_64/lib/memmove.S | 148 ++++ arch/sw_64/lib/memset.S | 153 ++++ arch/sw_64/lib/strcpy.S | 131 ++++ arch/sw_64/lib/strncpy.S | 156 ++++ arch/sw_64/lib/uaccess_flushcache.c | 42 ++ arch/sw_64/lib/udelay.c | 59 ++ 36 files changed, 5538 insertions(+) create mode 100644 arch/sw_64/include/asm/checksum.h create mode 100644 arch/sw_64/include/asm/delay.h create mode 100644 arch/sw_64/include/asm/string.h create mode 100644 arch/sw_64/include/asm/xor.h create mode 100644 arch/sw_64/include/uapi/asm/swab.h create mode 100644 arch/sw_64/lib/Kconfig create mode 100644 arch/sw_64/lib/Makefile create mode 100644 arch/sw_64/lib/checksum.c create mode 100644 arch/sw_64/lib/clear_page.S create mode 100644 arch/sw_64/lib/clear_user.S create mode 100644 arch/sw_64/lib/copy_page.S create mode 100644 arch/sw_64/lib/copy_user.S create mode 100644 arch/sw_64/lib/csum_ipv6_magic.S create mode 100644 arch/sw_64/lib/csum_partial_copy.c create mode 100644 arch/sw_64/lib/deep-clear_page.S create mode 100644 arch/sw_64/lib/deep-clear_user.S create mode 100644 arch/sw_64/lib/deep-copy_page.S create mode 100644 arch/sw_64/lib/deep-copy_template.S create mode 100644 arch/sw_64/lib/deep-copy_template_c4.S create mode 100644 arch/sw_64/lib/deep-copy_user.S create mode 100644 arch/sw_64/lib/deep-memcpy.S create mode 100644 arch/sw_64/lib/deep-memset.S create mode 100644 arch/sw_64/lib/deep-set_template.S create mode 100644 arch/sw_64/lib/deep-set_template_c4.S create mode 100644 arch/sw_64/lib/divide.S create mode 100644 arch/sw_64/lib/fls.c create mode 100644 arch/sw_64/lib/fpreg.c create mode 100644 arch/sw_64/lib/iomap.c create mode 100644 arch/sw_64/lib/iomap_copy.c create mode 100644 arch/sw_64/lib/memcpy.S create mode 
100644 arch/sw_64/lib/memmove.S create mode 100644 arch/sw_64/lib/memset.S create mode 100644 arch/sw_64/lib/strcpy.S create mode 100644 arch/sw_64/lib/strncpy.S create mode 100644 arch/sw_64/lib/uaccess_flushcache.c create mode 100644 arch/sw_64/lib/udelay.c diff --git a/arch/sw_64/include/asm/checksum.h b/arch/sw_64/include/asm/checksum.h new file mode 100644 index 000000000000..7f3768290402 --- /dev/null +++ b/arch/sw_64/include/asm/checksum.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CHECKSUM_H +#define _ASM_SW64_CHECKSUM_H + +#include + +#define extll(x, y, z) \ + ({__asm__ __volatile__("extll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define exthl(x, y, z) \ + ({__asm__ __volatile__("exthl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskll(x, y, z) \ + ({__asm__ __volatile__("maskll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskhl(x, y, z) \ + ({__asm__ __volatile__("maskhl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define insll(x, y, z) \ + ({__asm__ __volatile__("insll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define inshl(x, y, z) \ + ({__asm__ __volatile__("inshl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define _HAVE_ARCH_CSUM_AND_COPY +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); + +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern __sum16 ip_compute_csum(const void *buff, int len); + +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} + +#define _HAVE_ARCH_IPV6_CSUM +extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, __u32 len, + __u8 proto, __wsum sum); + +static inline unsigned short from64to16(unsigned long x) +{ + /* + * Using extract instructions is a bit more efficient + * than the original shift/bitmask version. 
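The extract-based fold that from64to16() below performs can also be written with plain shifts; this sketch (not part of the patch) mirrors the same three-stage reduction and is handy for checking the union version against known inputs:

#include <stdio.h>

/* Portable model of from64to16() below: sum the two 32-bit halves,
 * then the three possibly non-zero 16-bit chunks, then the final
 * carry, exactly as the union version does with ui[]/us[]. */
static unsigned short fold64to16(unsigned long long x)
{
	unsigned long long t = (x & 0xffffffffULL) + (x >> 32);	/* <= 33 bits */
	unsigned long long o = (t & 0xffff) + ((t >> 16) & 0xffff)
			       + ((t >> 32) & 0xffff);		/* us[3] is 0 */
	return (unsigned short)((o & 0xffff) + (o >> 16));
}

int main(void)
{
	printf("0x%04x\n", fold64to16(0x1ffff0000ULL));	/* 0x0001 */
	printf("0x%04x\n", fold64to16(0x00010002ULL));	/* 0x0003 */
	return 0;
}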
+ */ + + union { + unsigned long ul; + unsigned int ui[2]; + unsigned short us[4]; + } in_v, tmp_v, out_v; + + in_v.ul = x; + tmp_v.ul = (unsigned long)in_v.ui[0] + (unsigned long)in_v.ui[1]; + + /* + * Since the bits of tmp_v.sh[3] are going to always be zero, + * we don't have to bother to add that in. + */ + out_v.ul = (unsigned long)tmp_v.us[0] + (unsigned long)tmp_v.us[1] + + (unsigned long)tmp_v.us[2]; + + /* Similarly, out_v.us[2] is always zero for the final add. */ + return out_v.us[0] + out_v.us[1]; +} + +#endif /* _ASM_SW64_CHECKSUM_H */ diff --git a/arch/sw_64/include/asm/delay.h b/arch/sw_64/include/asm/delay.h new file mode 100644 index 000000000000..f4080753e954 --- /dev/null +++ b/arch/sw_64/include/asm/delay.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DELAY_H +#define _ASM_SW64_DELAY_H + +extern void __delay(unsigned long loops); +extern void udelay(unsigned long usecs); + +extern void ndelay(unsigned long nsecs); +#define ndelay ndelay + +#endif /* _ASM_SW64_DELAY_H */ diff --git a/arch/sw_64/include/asm/string.h b/arch/sw_64/include/asm/string.h new file mode 100644 index 000000000000..87d93f4cd4d5 --- /dev/null +++ b/arch/sw_64/include/asm/string.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_STRING_H +#define _ASM_SW64_STRING_H + +#ifdef __KERNEL__ + +/* + * GCC of any recent vintage doesn't do stupid things with bcopy. + * EGCS 1.1 knows all about expanding memcpy inline, others don't. + * + * Similarly for a memset with data = 0. + */ + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *dest, const void *src, size_t n); +/* For backward compatibility with modules. Unused otherwise. */ +extern void *__memcpy(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMSET +extern void *__constant_c_memset(void *s, unsigned long c, size_t n); +extern void *___memset(void *s, int c, size_t n); +extern void *__memset(void *s, int c, size_t n); +extern void *memset(void *s, int c, size_t n); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *dest, const char *src); + +#define __HAVE_ARCH_STRNCPY +extern char *strncpy(char *dest, const char *src, size_t n); + +/* The following routine is like memset except that it writes 16-bit + * aligned values. The DEST and COUNT parameters must be even for + * correct operation. + */ + +#define __HAVE_ARCH_MEMSETW +extern void *__memsetw(void *dest, unsigned short c, size_t count); + +#define memsetw(s, c, n) \ +(__builtin_constant_p(c) \ + ? __constant_c_memset((s), 0x0001000100010001UL * (unsigned short)(c), (n)) \ + : __memsetw((s), (c), (n))) + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +#define __HAVE_ARCH_MEMCPY_FLUSHCACHE +void memcpy_flushcache(void *dst, const void *src, size_t cnt); +#endif + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_STRING_H */ diff --git a/arch/sw_64/include/asm/xor.h b/arch/sw_64/include/asm/xor.h new file mode 100644 index 000000000000..0aff8804f503 --- /dev/null +++ b/arch/sw_64/include/asm/xor.h @@ -0,0 +1,857 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized RAID-5 checksumming functions. 
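Underneath the instruction scheduling, the assembly below is a wide XOR accumulate. A plain C reference for the two-source case (a sketch: the real routines require the byte count to be a multiple of their 64-byte unroll, while this version only assumes a multiple of 8):

#include <stdio.h>
#include <stddef.h>

/* Reference semantics of xor_sw64_2() below: p1[i] ^= p2[i] across
 * the whole block, eight bytes at a time. */
static void xor_ref_2(size_t bytes, unsigned long *restrict p1,
		      const unsigned long *restrict p2)
{
	size_t i;

	for (i = 0; i < bytes / sizeof(unsigned long); i++)
		p1[i] ^= p2[i];
}

int main(void)
{
	unsigned long a[8] = { 0xff00ff00ff00ff00UL }, b[8] = { ~0UL };

	xor_ref_2(sizeof(a), a, b);
	printf("%016lx\n", a[0]);	/* 00ff00ff00ff00ff */
	return 0;
}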
+ */ + +#ifndef _ASM_SW64_XOR_H +#define _ASM_SW64_XOR_H + +extern void xor_sw64_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +extern void xor_sw64_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +extern void xor_sw64_prefetch_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_prefetch_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_prefetch_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +extern void xor_sw64_prefetch_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +asm(" \n\ + .text \n\ + .align 3 \n\ + .ent xor_sw64_2 \n\ +xor_sw64_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + \n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + xor $0, $1, $0 # 7 cycles from $1 load \n\ + \n\ + ldl $27, 56($18) \n\ + xor $2, $3, $2 \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + \n\ + ret \n\ + .end xor_sw64_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_3 \n\ +xor_sw64_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 6 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from $2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles 
from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + xor $0, $1, $1 # 4 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + stl $2, 40($17) \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + \n\ + stl $5, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $20, 56($17) \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_4 \n\ +xor_sw64_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl $3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl $7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + \n\ + ldl $25, 48($19) \n\ + xor $3, $5, $5 \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + ldl $3, 56($20) \n\ + \n\ + stl $5, 32($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + xor $7, $22, $22 \n\ + xor $23, $24, $24 # 5 cycles from $24 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $25, $27, $27 # 5 cycles from $27 load \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 5 cycles from $1 load \n\ + \n\ + stl $27, 48($17) \n\ + xor $2, $3, $3 # 4 cycles from $3 load \n\ + xor $1, $3, $3 \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_5 \n\ +xor_sw64_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, 
$4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + ldl $4, 24($20) \n\ + xor $28, $0, $0 # 7 cycles from $0 load \n\ + \n\ + ldl $5, 24($21) \n\ + xor $27, $0, $0 \n\ + ldl $6, 32($17) \n\ + ldl $7, 32($18) \n\ + \n\ + stl $0, 16($17) \n\ + xor $1, $2, $2 # 6 cycles from $2 load \n\ + ldl $22, 32($19) \n\ + xor $3, $4, $4 # 4 cycles from $4 load \n\ + \n\ + ldl $23, 32($20) \n\ + xor $2, $4, $4 \n\ + ldl $24, 32($21) \n\ + ldl $25, 40($17) \n\ + \n\ + ldl $27, 40($18) \n\ + ldl $28, 40($19) \n\ + ldl $0, 40($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $5, 24($17) \n\ + xor $6, $7, $7 # 7 cycles from $7 load \n\ + ldl $1, 40($21) \n\ + ldl $2, 48($17) \n\ + \n\ + ldl $3, 48($18) \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + ldl $4, 48($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + ldl $5, 48($20) \n\ + xor $22, $24, $24 \n\ + ldl $6, 48($21) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + \n\ + stl $24, 32($17) \n\ + xor $27, $28, $28 # 8 cycles from $28 load \n\ + ldl $7, 56($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + \n\ + ldl $22, 56($18) \n\ + ldl $23, 56($19) \n\ + ldl $24, 56($20) \n\ + ldl $25, 56($21) \n\ + \n\ + xor $28, $1, $1 \n\ + xor $2, $3, $3 # 9 cycles from $3 load \n\ + xor $3, $4, $4 # 9 cycles from $4 load \n\ + xor $5, $6, $6 # 8 cycles from $6 load \n\ + \n\ + stl $1, 40($17) \n\ + xor $4, $6, $6 \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + stl $6, 48($17) \n\ + xor $22, $24, $24 \n\ + subl $16, 1, $16 \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $25, 56($17) \n\ + addl $21, 64, $21 \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 5b \n\ + ret \n\ + .end xor_sw64_5 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_2 \n\ +xor_sw64_prefetch_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + \n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + ldl $27, 56($18) \n\ + \n\ + fillde 256($17) \n\ + xor $0, $1, $0 # 8 cycles from $1 load \n\ + fillde 256($18) \n\ + xor $2, $3, $2 \n\ + \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + ret \n\ + .end xor_sw64_prefetch_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_3 \n\ 
+xor_sw64_prefetch_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from $2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + fillde 256($17) \n\ + fillde 256($18) \n\ + fillde 256($19) \n\ + \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + stl $2, 40($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $5, 48($17) \n\ + addl $19, 64, $19 \n\ + stl $20, 56($17) \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_prefetch_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_4 \n\ +xor_sw64_prefetch_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + fillde 192($20) \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl $3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl 
$7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + xor $3, $5, $5 \n\ + \n\ + ldl $24, 48($18) \n\ + ldl $25, 48($19) \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + ldl $3, 56($20) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + \n\ + fillde 256($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + fillde 256($18) \n\ + xor $7, $22, $22 \n\ + \n\ + fillde 256($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + fillde 256($20) \n\ + xor $25, $27, $27 # 6 cycles from $27 load \n\ + \n\ + stl $5, 32($17) \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 7 cycles from $1 load \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $1, $3, $3 \n\ + stl $27, 48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_prefetch_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_5 \n\ +xor_sw64_prefetch_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + fillde 0($21) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + fillde 64($21) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + fillde 128($21) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + fillde 192($20) \n\ + fillde 192($21) \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + ldl $4, 24($20) \n\ + xor $28, $0, $0 # 7 cycles from $0 load \n\ + \n\ + ldl $5, 24($21) \n\ + xor $27, $0, $0 \n\ + ldl $6, 32($17) \n\ + ldl $7, 32($18) \n\ + \n\ + stl $0, 16($17) \n\ + xor $1, $2, $2 # 6 cycles from $2 load \n\ + ldl $22, 32($19) \n\ + xor $3, $4, $4 # 4 cycles from $4 load \n\ + \n\ + ldl $23, 32($20) \n\ + xor $2, $4, $4 \n\ + ldl $24, 32($21) \n\ + ldl $25, 40($17) \n\ + \n\ + ldl $27, 40($18) \n\ + ldl $28, 40($19) \n\ + ldl $0, 40($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $5, 24($17) \n\ + xor $6, $7, $7 # 7 cycles from $7 load \n\ + ldl $1, 40($21) \n\ + ldl $2, 48($17) \n\ + \n\ + ldl $3, 48($18) \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + ldl $4, 48($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + ldl $5, 48($20) \n\ + xor $22, $24, $24 \n\ + 
ldl $6, 48($21) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + \n\ + stl $24, 32($17) \n\ + xor $27, $28, $28 # 8 cycles from $28 load \n\ + ldl $7, 56($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + \n\ + ldl $22, 56($18) \n\ + ldl $23, 56($19) \n\ + ldl $24, 56($20) \n\ + ldl $25, 56($21) \n\ + \n\ + fillde 256($17) \n\ + xor $28, $1, $1 \n\ + fillde 256($18) \n\ + xor $2, $3, $3 # 9 cycles from $3 load \n\ + \n\ + fillde 256($19) \n\ + xor $3, $4, $4 # 9 cycles from $4 load \n\ + fillde 256($20) \n\ + xor $5, $6, $6 # 8 cycles from $6 load \n\ + \n\ + stl $1, 40($17) \n\ + xor $4, $6, $6 \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + stl $6, 48($17) \n\ + xor $22, $24, $24 \n\ + fillde 256($21) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $25, 56($17) \n\ + subl $16, 1, $16 \n\ + addl $21, 64, $21 \n\ + addl $20, 64, $20 \n\ + \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 5b \n\ + \n\ + ret \n\ + .end xor_sw64_prefetch_5 \n\ +"); + +static struct xor_block_template xor_block_sw64 = { + .name = "sw64", + .do_2 = xor_sw64_2, + .do_3 = xor_sw64_3, + .do_4 = xor_sw64_4, + .do_5 = xor_sw64_5, +}; + +static struct xor_block_template xor_block_sw64_prefetch = { + .name = "sw64 prefetch", + .do_2 = xor_sw64_prefetch_2, + .do_3 = xor_sw64_prefetch_3, + .do_4 = xor_sw64_prefetch_4, + .do_5 = xor_sw64_prefetch_5, +}; + +/* For grins, also test the generic routines. */ +#include + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_sw64); \ + xor_speed(&xor_block_sw64_prefetch); \ + } while (0) + +/* Force the use of sw64_prefetch as it is significantly + * faster in the cold cache case. + */ +#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sw64_prefetch) + +#endif /* _ASM_SW64_XOR_H */ diff --git a/arch/sw_64/include/uapi/asm/swab.h b/arch/sw_64/include/uapi/asm/swab.h new file mode 100644 index 000000000000..275661b346ac --- /dev/null +++ b/arch/sw_64/include/uapi/asm/swab.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SWAB_H +#define _UAPI_ASM_SW64_SWAB_H + +#include +#include +#include + +#ifdef __GNUC__ + +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) +{ + /* + * Unfortunately, we can't use the 6 instruction sequence + * on sw64 since the latency of the UNPKBW is 3, which is + * pretty hard to hide. 
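The inshw/inslh sequence used by __arch_swab32() below can be reproduced in portable C; the shifts here yield exactly the intermediate values annotated in its comments (a verification sketch, not the uapi header itself):

#include <stdio.h>

static unsigned int swab32_ref(unsigned int x)
{
	unsigned long long t0, t1, t2, t3;

	t0 = (unsigned long long)x >> 8;		/* 0000000000AABBCC */
	t1 = (unsigned long long)(x & 0xffff) << 24;	/* 000000CCDD000000 */
	t1 |= t0;					/* 000000CCDDAABBCC */
	t2 = t1 >> 16;					/* 0000000000CCDDAA */
	t0 = t1 & 0xff00ff00;				/* 00000000DD00BB00 */
	t3 = t2 & 0x00ff00ff;				/* 0000000000CC00AA */
	t1 = t0 + t3;					/* ........DDCCBBAA */
	return (unsigned int)t1;
}

int main(void)
{
	printf("%08x\n", swab32_ref(0xAABBCCDDu));	/* ddccbbaa */
	return 0;
}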
Just in case a future implementation + * has a lower latency, here's the sequence (also by Mike Burrows) + * + * UNPKBW a0, v0 v0: 00AA00BB00CC00DD + * SLL v0, 24, a0 a0: BB00CC00DD000000 + * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD + * EXTWL a0, 6, v0 v0: 000000000000BBAA + * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000 + * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA + */ + + __u64 t0, t1, t2, t3; + + t0 = __kernel_inshw(x, 7); /* t0 : 0000000000AABBCC */ + t1 = __kernel_inslh(x, 3); /* t1 : 000000CCDD000000 */ + t1 |= t0; /* t1 : 000000CCDDAABBCC */ + t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */ + t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */ + t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */ + t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */ + + return t1; +} +#define __arch_swab32 __arch_swab32 + +#endif /* __GNUC__ */ + +#endif /* _UAPI_ASM_SW64_SWAB_H */ diff --git a/arch/sw_64/lib/Kconfig b/arch/sw_64/lib/Kconfig new file mode 100644 index 000000000000..e22751a457ce --- /dev/null +++ b/arch/sw_64/lib/Kconfig @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0 +menu "Library optimization options" + +config DEEP_CLEAR_PAGE + bool "Clear Page with SIMD optimization" + default y + help + This option enables the use of SIMD version of clear page routine. + Say N if you want to use the generic version. + +config DEEP_CLEAR_USER + bool "Clear User with SIMD optimization" + default y + help + This option enables the use of SIMD version of clear user routine. + Say N if you want to use the generic version. + +config DEEP_COPY_PAGE + bool "Copy Page with SIMD optimization" + default y + help + This option enables the use of SIMD version of copy page routine. + Say N if you want to use the generic version. + +config DEEP_COPY_USER + bool "Copy User with SIMD optimization" + default y + help + This option enables the use of SIMD version of copy user routine. + Say N if you want to use the generic version. + + +config DEEP_MEMCPY + bool "Memory Copy with SIMD optimization" + default y + help + This option enables the use of SIMD version of memory copy routine. + Say N if you want to use the generic version. + +config DEEP_MEMSET + bool "Memory Set with SIMD optimization" + default y + help + This option enables the use of SIMD version of memory set routine. + Say N if you want to use the generic version. + +endmenu diff --git a/arch/sw_64/lib/Makefile b/arch/sw_64/lib/Makefile new file mode 100644 index 000000000000..e6455bb51139 --- /dev/null +++ b/arch/sw_64/lib/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for sw-specific library files.. 
+# + +asflags-y := $(KBUILD_CFLAGS) +ccflags-y := -Werror + +lib-y = __divlu.o __remlu.o __divwu.o __remwu.o \ + udelay.o \ + memmove.o \ + checksum.o \ + csum_partial_copy.o \ + fpreg.o \ + strcpy.o \ + strncpy.o \ + fls.o \ + csum_ipv6_magic.o + +lib-clear_page-y := clear_page.o +lib-clear_page-$(CONFIG_DEEP_CLEAR_PAGE) := deep-clear_page.o + +lib-clear_user-y := clear_user.o +lib-clear_user-$(CONFIG_DEEP_CLEAR_USER) := deep-clear_user.o + +lib-copy_page-y := copy_page.o +lib-copy_page-$(CONFIG_DEEP_COPY_PAGE) := deep-copy_page.o + +lib-copy_user-y := copy_user.o +lib-copy_user-$(CONFIG_DEEP_COPY_USER) := deep-copy_user.o + +lib-memcpy-y := memcpy.o +lib-memcpy-$(CONFIG_DEEP_MEMCPY) := deep-memcpy.o + +lib-memset-y := memset.o +lib-memset-$(CONFIG_DEEP_MEMSET) := deep-memset.o + +lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o + +lib-y += $(lib-clear_page-y) $(lib-clear_user-y) $(lib-copy_page-y) $(lib-copy_user-y) $(lib-memcpy-y) $(lib-memset-y) + +obj-y = iomap.o +obj-y += iomap_copy.o + +# The division routines are built from single source, with different defines. +AFLAGS___divlu.o = -DDIV +AFLAGS___remlu.o = -DREM +AFLAGS___divwu.o = -DDIV -DINTSIZE +AFLAGS___remwu.o = -DREM -DINTSIZE + +$(addprefix $(obj)/,__divlu.o __remlu.o __divwu.o __remwu.o): \ + $(src)/divide.S FORCE + $(call if_changed_rule,as_o_S) diff --git a/arch/sw_64/lib/checksum.c b/arch/sw_64/lib/checksum.c new file mode 100644 index 000000000000..d1314caa15bf --- /dev/null +++ b/arch/sw_64/lib/checksum.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains network checksum routines that are better done + * in an architecture-specific manner due to speed.. + * Comments in other versions indicate that the algorithms are from RFC1071 + */ +#include +#include +#include +#include + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented. + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + return (__force __sum16)~from64to16( + (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8)); +} +EXPORT_SYMBOL(csum_tcpudp_magic); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + unsigned long result; + + result = (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8); + + /* + * Fold down to 32-bits so we don't lose in the typedef-less + * network stack. + * + * 64 to 33 + */ + result = (result & 0xffffffff) + (result >> 32); + /* 33 to 32 */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_tcpudp_nofold); + +/* + * Do a 64-bit checksum on an arbitrary memory area.. 
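do_csum() below is easier to follow against a simplified model: accumulate 64-bit words with end-around carry, then fold to 16 bits. This sketch ignores the alignment games (the real code reads whole aligned words and masks the tail) and assumes a little-endian host:

#include <stdio.h>
#include <string.h>

static unsigned short csum_ref(const unsigned char *buf, int len)
{
	unsigned long long sum = 0, word;

	while (len > 0) {
		word = 0;
		memcpy(&word, buf, len >= 8 ? 8 : len);	/* zero-padded tail */
		sum += word;
		sum += (sum < word);			/* end-around carry */
		buf += 8;
		len -= 8;
	}
	/* fold 64 -> 16 as in from64to16() */
	sum = (sum & 0xffffffffULL) + (sum >> 32);
	sum = (sum & 0xffffULL) + ((sum >> 16) & 0xffff) + ((sum >> 32) & 0xffff);
	return (unsigned short)((sum & 0xffff) + (sum >> 16));
}

int main(void)
{
	unsigned char data[4] = { 0x01, 0x02, 0x03, 0x04 };

	printf("0x%04x\n", csum_ref(data, 4));	/* 0x0403 + 0x0201 = 0x0604 */
	return 0;
}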
+ */ +static inline unsigned long do_csum(const unsigned char *buff, int len) +{ + const unsigned long *dst = (unsigned long *)buff; + unsigned long doff = 7 & (unsigned long) dst; + unsigned long checksum = 0; + unsigned long word, patch; + unsigned long partial_dest, second_dest; + + len -= 8; + + if (!doff) { + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + + checksum += word; + checksum += (checksum < word); + } else { + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + } + + return from64to16(checksum); +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +__sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return (__force __sum16)~do_csum(iph, ihl*4); +} +EXPORT_SYMBOL(ip_fast_csum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum) +{ + unsigned long result = do_csum(buff, len); + + /* add in old sum, and carry.. */ + result += (__force u32)sum; + /* 32+c bits -> 32 bits */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_partial); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +__sum16 ip_compute_csum(const void *buff, int len) +{ + return (__force __sum16)~from64to16(do_csum(buff, len)); +} +EXPORT_SYMBOL(ip_compute_csum); diff --git a/arch/sw_64/lib/clear_page.S b/arch/sw_64/lib/clear_page.S new file mode 100644 index 000000000000..e1cc7cddfd2f --- /dev/null +++ b/arch/sw_64/lib/clear_page.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. 
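Behaviourally, clear_page below is 64 iterations of sixteen 8-byte stores, i.e. one 8 KiB page; the stl_nc non-cacheable stores and the trailing memb barrier have no C equivalent, so this model (a sketch) captures only the data movement:

#include <stdint.h>

#define PAGE_BYTES 8192	/* implied by the 64 x 128-byte loop below */

static void clear_page_ref(void *page)
{
	uint64_t *p = page;
	int i, j;

	for (i = 0; i < PAGE_BYTES / 128; i++)	/* ldi $0, 64 */
		for (j = 0; j < 16; j++)	/* 16 stl_nc per iteration */
			*p++ = 0;
}

int main(void)
{
	static uint64_t page[PAGE_BYTES / 8];

	clear_page_ref(page);
	return (int)page[0];	/* 0 */
}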
+ */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + + stl_nc $31, 0x0($16) + stl_nc $31, 0x8($16) + stl_nc $31, 0x10($16) + stl_nc $31, 0x18($16) + + stl_nc $31, 0x20($16) + stl_nc $31, 0x28($16) + stl_nc $31, 0x30($16) + stl_nc $31, 0x38($16) + + stl_nc $31, 0x40($16) + stl_nc $31, 0x48($16) + stl_nc $31, 0x50($16) + stl_nc $31, 0x58($16) + + stl_nc $31, 0x60($16) + stl_nc $31, 0x68($16) + subl $0, 1, $0 + + stl_nc $31, 0x70($16) + stl_nc $31, 0x78($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/clear_user.S b/arch/sw_64/lib/clear_user.S new file mode 100644 index 000000000000..5ac77fc8ca0d --- /dev/null +++ b/arch/sw_64/lib/clear_user.S @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define EX(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $exception-99b($31); \ + .previous + + .set noat + .set noreorder + .align 4 + + .globl __clear_user + .ent __clear_user + .frame $30, 0, $26 + .prologue 0 +__clear_user: + and $17, $17, $0 + and $16, 7, $4 + beq $0, $zerolength + addl $0, $4, $1 + and $1, 7, $2 + srl $1, 3, $1 + beq $4, $loop + + subl $4, 8, $4 + addl $0, $4, $0 + beq $1, $oneword + +$head: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $head + subl $1, 1, $1 + br $loop + unop + +$oneword: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $oneword + clr $0 + +$zerolength: +$exception: + ret $31, ($26), 1 + +$loop: + and $1, 3, $4 + beq $4, 1f + +0: EX(stl $31, 0($16)) + subl $0, 8, $0 + subl $4, 1, $4 + addl $16, 8, $16 + bne $4, 0b + unop + +1: bic $1, 3, $1 + beq $1, $tail + +2: EX(stl $31, 0($16)) + subl $0, 8, $0 + EX(stl $31, 8($16)) + subl $0, 8, $0 + EX(stl $31, 16($16)) + subl $0, 8, $0 + EX(stl $31, 24($16)) + subl $0, 8, $0 + subl $1, 4, $1 + addl $16, 32, $16 + bne $1, 2b + +$tail: + bne $2, 1f + ret $31, ($26), 1 + +1: + EX(stb $31, 0($16)) + addl $16, 1, $16 + subl $2, 1, $2 + bne $2, 1b + clr $0 + ret $31, ($26), 1 + + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/copy_page.S b/arch/sw_64/lib/copy_page.S new file mode 100644 index 000000000000..898472c36c80 --- /dev/null +++ b/arch/sw_64/lib/copy_page.S @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. 
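+ *
+ * Same structure as clear_page.S: 64 iterations of 128 bytes, ldl loads
+ * paired with non-cacheable stl_nc stores, with ldwe touching a line
+ * 3 * 0x80 bytes ahead (apparently as a prefetch into $f31); memb
+ * orders the stores on exit.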
+ */ +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + ldl $0, 0($17) + ldl $1, 8($17) + ldl $2, 16($17) + ldl $3, 24($17) + + stl_nc $0, 0($16) + stl_nc $1, 8($16) + stl_nc $2, 16($16) + stl_nc $3, 24($16) + + ldl $4, 32($17) + ldl $5, 40($17) + ldl $6, 48($17) + ldl $7, 56($17) + + stl_nc $4, 32($16) + stl_nc $5, 40($16) + stl_nc $6, 48($16) + stl_nc $7, 56($16) + + ldl $0, 64($17) + ldl $1, 72($17) + ldl $2, 80($17) + ldl $3, 88($17) + + stl_nc $0, 64($16) + stl_nc $1, 72($16) + stl_nc $2, 80($16) + stl_nc $3, 88($16) + + ldl $4, 96($17) + ldl $5, 104($17) + ldl $6, 112($17) + ldl $7, 120($17) + + stl_nc $4, 96($16) + stl_nc $5, 104($16) + stl_nc $6, 112($16) + stl_nc $7, 120($16) + + ldwe $f31, 3 * 0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/copy_user.S b/arch/sw_64/lib/copy_user.S new file mode 100644 index 000000000000..2c3dd0b5656c --- /dev/null +++ b/arch/sw_64/lib/copy_user.S @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copy to/from user space, handling exceptions as we go.. This + * isn't exactly pretty. + * + * This is essentially the same as "memcpy()", but with a few twists. + * Notably, we have to make sure that $0 is always up-to-date and + * contains the right "bytes left to copy" value (and that it is updated + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define EXI(x,y...) \ + 99: x,##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitin-99b($31); \ + .previous + +#define EXO(x,y...) 
\ + 99: x, ##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitout-99b($31); \ + .previous + + .set noat + .align 4 + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + and $18, $18, $0 + and $16, 7, $3 + beq $0, $35 + beq $3, $36 + subl $3, 8, $3 + .align 4 +$37: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $3, 1, $3 + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + beq $0, $41 + bne $3, $37 +$36: + and $17, 7, $1 + bic $0, 7, $4 + beq $1, $43 + beq $4, $48 + EXI(ldl_u $3, 0($17)) + .align 4 +$50: + EXI(ldl_u $2, 8($17)) + subl $4, 8, $4 + extll $3, $17, $3 + exthl $2, $17, $1 + bis $3, $1, $1 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bis $2, $2, $3 + bne $4, $50 +$48: + beq $0, $41 + .align 4 +$57: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + bne $0, $57 + br $31, $41 + .align 4 +$43: + beq $4, $65 + .align 4 +$66: + EXI(ldl $1, 0($17)) + subl $4, 8, $4 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bne $4, $66 +$65: + beq $0, $41 + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $17, 1, $17 + addl $16, 1, $16 + subl $0, 1, $0 + br $31, $65 +$41: +$35: +$exitin: +$exitout: + ret $31, ($26), 1 + + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/csum_ipv6_magic.S b/arch/sw_64/lib/csum_ipv6_magic.S new file mode 100644 index 000000000000..755e1c13cb25 --- /dev/null +++ b/arch/sw_64/lib/csum_ipv6_magic.S @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * unsigned short csum_ipv6_magic(struct in6_addr *saddr, + * struct in6_addr *daddr, __u32 len, + * unsigned short proto, unsigned int csum); + * + * Misalignment handling (which costs 16 instructions / 8 cycles) + * added by Ivan Kokshaysky + */ +#include + .globl csum_ipv6_magic + .align 4 + .ent csum_ipv6_magic + .frame $30, 0, $26, 0 +csum_ipv6_magic: + .prologue 0 + + ldl_u $0, 0($16) + zapnot $20, 15, $20 + exthl $18, 1, $4 + ldl_u $21, 7($16) + + extlb $18, 1, $5 + ldl_u $1, 8($16) + extlb $18, 2, $6 + ldl_u $22, 15($16) + + extlb $18, 3, $18 + ldl_u $2, 0($17) + sra $4, 32, $4 + ldl_u $23, 7($17) + + extll $0, $16, $0 + ldl_u $3, 8($17) + exthl $21, $16, $21 + ldl_u $24, 15($17) + + sll $5, 16, $5 + or $0, $21, $0 + extll $1, $16, $1 + addl $20, $0, $20 + + exthl $22, $16, $22 + cmpult $20, $0, $0 + sll $6, 8, $6 + or $1, $22, $1 + + extll $2, $17, $2 + or $4, $18, $18 + exthl $23, $17, $23 + or $5, $6, $5 + + extll $3, $17, $3 + or $2, $23, $2 + exthl $24, $17, $24 + or $18, $5, $18 + + exthh $19, 7, $7 + or $3, $24, $3 + extlb $19, 1, $19 + addl $20, $1, $20 + + or $19, $7, $19 + cmpult $20, $1, $1 + sll $19, 48, $19 + + sra $19, 32, $19 + addl $20, $2, $20 + cmpult $20, $2, $2 + addl $20, $3, $20 + + cmpult $20, $3, $3 + addl $20, $18, $20 + cmpult $20, $18, $18 + addl $20, $19, $20 + + cmpult $20, $19, $19 + addl $0, $1, $0 + addl $2, $3, $2 + addl $18, $19, $18 + + addl $0, $2, $0 + addl $20, $18, $20 + addl $0, $20, $0 + unop + + extlh $0, 2, $2 + zapnot $0, 3, $3 + extlh $0, 4, $1 + addl $2, $3, $3 + + extlh $0, 6, $0 + addl $3, $1, $3 + addl $0, $3, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + not $0, $0 + + zapnot $0, 3, $0 + ret + + .end csum_ipv6_magic + EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/sw_64/lib/csum_partial_copy.c b/arch/sw_64/lib/csum_partial_copy.c new file mode 
100644 index 000000000000..1a8c18757e09 --- /dev/null +++ b/arch/sw_64/lib/csum_partial_copy.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * csum_partial_copy - do IP checksumming and copy + * + * (C) Copyright 1996 Linus Torvalds + * + * Don't look at this too closely - you'll go mad. The things + * we do for performance.. + */ + +#include +#include +#include +#include + + +#define ldl_u(x, y) \ + __asm__ __volatile__("ldl_u %0, %1":"=r" (x):"m" (*(const unsigned long *)(y))) + +#define stl_u(x, y) \ + __asm__ __volatile__("stl_u %1, %0":"=m" (*(unsigned long *)(y)):"r" (x)) + +static inline void stll_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; doff < 8; i++, doff++) + *((char *)dst + i) = *((char *)&data + i); +} + +static inline void sthl_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; i < doff; i++) + *((char *)dst + 8 - doff + i) = *((char *)&data + 8 - doff + i); +} + +#define __get_word(insn, x, ptr) \ +({ \ + long __guu_err; \ + __asm__ __volatile__( \ + "1: "#insn" %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0,2b-1b(%1)\n" \ + ".previous" \ + : "=r"(x), "=r"(__guu_err) \ + : "m"(__m(ptr)), "1"(0)); \ + __guu_err; \ +}) + +static inline unsigned long +csum_partial_cfu_dest_aligned(const unsigned long __user *src, + unsigned long *dst, long len) +{ + unsigned long word; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + checksum += word; + checksum += (checksum < word); + + return checksum; +} + +static inline unsigned long +csum_partial_cfu_dest_unaligned(const unsigned long __user *src, + unsigned long *dst, unsigned long doff, long len) +{ + unsigned long word, patch; + unsigned long partial_dest, second_dest; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + + return checksum; +} + +static __wsum __csum_and_copy(const void __user *src, void *dst, int len) +{ + unsigned long checksum; + unsigned long doff = 7 & (unsigned long) dst; + + if (!doff) { + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); + } else { + checksum = csum_partial_cfu_dest_unaligned( + (const unsigned long __user *) src, + (unsigned long *) dst, doff, len-8); + } + return (__force __wsum)from64to16(checksum); +} + +__wsum +csum_and_copy_from_user(const void __user *src, void *dst, int len) +{ + if (!access_ok(src, len)) + return 0; + return __csum_and_copy(src, dst, len); +} +EXPORT_SYMBOL(csum_and_copy_from_user); + +__wsum +csum_partial_copy_nocheck(const void *src, void *dst, int len) +{ + return __csum_and_copy((__force const void __user *)src, 
+ dst, len); +} +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/sw_64/lib/deep-clear_page.S b/arch/sw_64/lib/deep-clear_page.S new file mode 100644 index 000000000000..52a3db33fc17 --- /dev/null +++ b/arch/sw_64/lib/deep-clear_page.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. + */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0,64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + +/* + stl_nc $31,0x0($16) + stl_nc $31,0x8($16) + stl_nc $31,0x10($16) + stl_nc $31,0x18($16) + + stl_nc $31,0x20($16) + stl_nc $31,0x28($16) + stl_nc $31,0x30($16) + stl_nc $31,0x38($16) + + stl_nc $31,0x40($16) + stl_nc $31,0x48($16) + stl_nc $31,0x50($16) + stl_nc $31,0x58($16) + + stl_nc $31,0x60($16) + stl_nc $31,0x68($16) + stl_nc $31,0x70($16) + stl_nc $31,0x78($16) +*/ + + vstd_nc $f31, 0x0($16) + vstd_nc $f31, 0x20($16) + subl $0, 1, $0 + vstd_nc $f31, 0x40($16) + + vstd_nc $f31, 0x60($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/deep-clear_user.S b/arch/sw_64/lib/deep-clear_user.S new file mode 100644 index 000000000000..c81418ed99a2 --- /dev/null +++ b/arch/sw_64/lib/deep-clear_user.S @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Mao Minkai + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define FIXUP_LDST(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $18: bytes left to copy + * + */ + .globl __clear_user + .ent __clear_user +__clear_user: + .prologue 0 + bis $31, $31, $7 + mov $17, $18 + bis $31, $31, $17 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-set_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-set_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/deep-copy_page.S b/arch/sw_64/lib/deep-copy_page.S new file mode 100644 index 000000000000..a9b9d97f318a --- /dev/null +++ b/arch/sw_64/lib/deep-copy_page.S @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. 
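+ *
+ * SIMD variant: $f16 is spilled to a 32-byte-aligned stack slot so the
+ * loop can move 32 bytes per vldd/vstd_nc pair (64 iterations of 128
+ * bytes); on C4 parts CSR_WR_FREGS is saved and restored around it.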
+ */ +#include +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + subl $sp, 0x60, $sp + ldi $4, 0x40($sp) + stl $4, 0($sp) + bic $4, 0x1f, $4 + vstd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrr $5, CSR_WR_FREGS +#endif + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + vldd $f16, 0($17) + vstd_nc $f16, 0($16) + + vldd $f16, 32($17) + vstd_nc $f16, 32($16) + + vldd $f16, 64($17) + vstd_nc $f16, 64($16) + + vldd $f16, 96($17) + vstd_nc $f16, 96($16) + + ldwe $f31, 5*0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ldl $4, 0($sp) + ldi $4, 0x40($sp) + bic $4, 0x1f, $4 + vldd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrw $5, CSR_WR_FREGS +#endif + addl $sp, 0x60, $sp + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/deep-copy_template.S b/arch/sw_64/lib/deep-copy_template.S new file mode 100644 index 000000000000..7705eb3f36d4 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template.S @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $4: 8-byte misalignment of src when dest is 8-byte aligned + * $5: 32-byte misalignment of src when dest is 32-byte aligned + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + +#define SAVE_SIMD_U_REGS \ + ldi $sp, -0xc0($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + vstd $f4, 0x40($23); \ + vstd $f5, 0x60($23); \ + vstd $f3, 0x80($23); \ + ldi $7, 2 + +#define RESTORE_SIMD_U_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + vldd $f4, 0x40($23); \ + vldd $f5, 0x60($23); \ + vldd $f3, 0x80($23); \ + ldi $sp, 0xc0($sp); \ + bis $31, $31, $7 + + ble $18, $out + and $16, 7, $1 + beq $1, $dest_aligned_8 + +$byte_loop_head: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + ble $18, $out + and $16, 7, $1 + bne $1, $byte_loop_head + +$dest_aligned_8: + and $17, 7, $4 + cmplt $18, 16, $1 + bne $1, $quad_loop_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + cmplt $18, 64, $1 + bne $1, $simd_end + bne $4, $quad_u_loop_head + +$quad_loop_head: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + cmplt $18, 64, $1 + bne $1, $simd_end + and $17, 31, $5 + bne $5, $prep_simd_u_loop + +$prep_simd_loop: + SAVE_SIMD_REGS + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 4 +$simd_loop_nc: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd_nc $f1, 0($16) ) + FIXUP_LDST( vstd_nc $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop_nc + memb # required for _nc store instructions + br $31, $simd_loop_end + + .align 4 
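+	# Cacheable-store variant of the 64-byte copy loop: used when the
+	# remaining length is at or below NC_STORE_THRESHOLD, where cache
+	# pollution is not a concern; larger copies take the _nc loop above.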
+$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + bne $4, $prep_quad_u_loop_tail + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + bne $4, $move_one_quad_u + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out + +/* misaligned src and dst */ +$quad_u_loop_head: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 7($17) ) + extll $2, $4, $2 + exthl $3, $4, $3 + bis $2, $3, $2 + FIXUP_LDST( stl $2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_u_loop_head + +$prep_simd_u_loop: + SAVE_SIMD_U_REGS + andnot $17, 31, $3 + ldi $2, 256($31) + sll $5, 3, $1 + subl $2, $1, $2 + sll $1, 29, $1 + sll $2, 29, $2 + ifmovd $1, $f1 + ifmovd $2, $f2 + FIXUP_LDST( vldd $f4, 0($3) ) + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_u_loop + + .align 4 +$simd_u_loop_nc: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f3, $f4, $f31, $f3 + FIXUP_LDST( vstd_nc $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f5 + FIXUP_LDST( vstd_nc $f5, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop_nc + memb # required for _nc store instructions + br $31, $simd_u_loop_end + + .align 4 +$simd_u_loop: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop + +$simd_u_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd_u + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + subl $18, 32, $18 + addl $3, 32, $3 + addl $16, 32, $16 + +$no_more_simd_u: + RESTORE_SIMD_U_REGS + bis $3, $5, $17 + br $31, $simd_end + +$prep_quad_u_loop_tail: + FIXUP_LDST( ldl_u $2, 0($17) ) + .align 4 +$quad_u_loop_tail: + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + FIXUP_LDST( ldl_u $2, 16($17) ) + extll $3, $4, $24 + exthl $2, $4, $25 + bis $24, $25, $24 + FIXUP_LDST( stl $24, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + 
addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_u_loop_tail + br $31, $quad_loop_end + +$move_one_quad_u: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + br $31, $byte_loop_tail diff --git a/arch/sw_64/lib/deep-copy_template_c4.S b/arch/sw_64/lib/deep-copy_template_c4.S new file mode 100644 index 000000000000..e0740874dfa3 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template_c4.S @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd and simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + + + ble $18, $out + + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + cmplt $18, 16, $1 + bne $1, $quad_loop_end + cmplt $18, 32, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + cmplt $18, 64, $1 + bne $1, $simd_loop_end + + .align 4 +$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/deep-copy_user.S b/arch/sw_64/lib/deep-copy_user.S new file mode 100644 index 000000000000..b79f8f3f0f4a --- /dev/null +++ b/arch/sw_64/lib/deep-copy_user.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +/* Allow an exception for an insn; exit if we get one. 
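+ *
+ * Each FIXUP_LDST use adds an __ex_table entry whose fixup branches to
+ * $out: a faulting access lands at the common exit, which restores any
+ * live SIMD registers and returns the remaining byte count ($18) in $0.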
*/ +#define FIXUP_LDST(x, y) \ + 99: x, y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status for C3B + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $7: SIMD status for C4 + * 0: not in simd loop + * 1: in simd and simd_u loop + * $18: bytes left to copy + * + */ + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + .set noreorder + bis $31, $31, $7 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + subl $7, 1, $7 + beq $7, $restore_simd + +#if defined(CONFIG_SUBARCH_C3B) +$restore_simd_u: + RESTORE_SIMD_U_REGS + br $31, $return +#endif + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/deep-memcpy.S b/arch/sw_64/lib/deep-memcpy.S new file mode 100644 index 000000000000..78a6bd85cf01 --- /dev/null +++ b/arch/sw_64/lib/deep-memcpy.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +#define FIXUP_LDST(x, y) \ + x, y + + .globl memcpy + .ent memcpy +memcpy: + .frame $30, 0, $26, 0 + .prologue 0 + mov $16, $0 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + ret + .end memcpy + EXPORT_SYMBOL(memcpy) +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/deep-memset.S b/arch/sw_64/lib/deep-memset.S new file mode 100644 index 000000000000..c6b5355beec6 --- /dev/null +++ b/arch/sw_64/lib/deep-memset.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized memset() for SW64 with SIMD instructions + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Fill SIZE bytes pointed to by SRC with CHAR. 
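+ *
+ * CHAR is first replicated across all eight byte lanes; in rough C
+ * terms (a sketch of the shift/or sequence in the body):
+ *
+ *	c &= 0xff;
+ *	c |= c << 8;
+ *	c |= c << 16;
+ *	c |= c << 32;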
+ * + * Input: + * $16: SRC, clobbered + * $17: CHAR, clobbered + * $18: SIZE, clobbered + * + * Output: + * $0: SRC + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr), tmp data + * $2: tmp data + * $3: tmp data + * $4: tmp data + * $5: compare result + * $f10: 32 bytes data (manually saved) + * + */ + +#include +#include + +#define FIXUP_LDST(x, y) \ + x, y + + .set noat + .set noreorder + .text + .align 4 + .globl memset + .globl __memset + .globl ___memset + .globl __memsetw + .globl __constant_c_memset + .ent ___memset +___memset: + .frame $30, 0, $26, 0 + .prologue 0 + +#ifdef CONFIG_SUBARCH_C4 + csrr $6, CSR_WR_FREGS +#endif +/* expand 1 byte data to 8 bytes */ + and $17, 0xff, $17 + sll $17, 8, $4 + bis $17, $4, $17 + sll $17, 16, $4 + bis $17, $4, $17 + sll $17, 32, $4 + bis $17, $4, $17 + +__constant_c_memset: + bis $31, $31, $7 + bis $31, $16, $0 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-set_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-set_template_c4.S" +#endif +$out: +#ifdef CONFIG_SUBARCH_C4 + csrw $6, CSR_WR_FREGS +#endif + ret + + .end ___memset + EXPORT_SYMBOL(___memset) + + .align 5 + .ent __memsetw +__memsetw: + .prologue 0 + + inslh $17, 0, $1 + inslh $17, 2, $2 + inslh $17, 4, $3 + bis $1, $2, $1 + inslh $17, 6, $4 + bis $1, $3, $1 + bis $1, $4, $17 + br $31, __constant_c_memset + + .end __memsetw + EXPORT_SYMBOL(__memsetw) + +memset = ___memset +EXPORT_SYMBOL(memset) +__memset = ___memset +EXPORT_SYMBOL(__memset) diff --git a/arch/sw_64/lib/deep-set_template.S b/arch/sw_64/lib/deep-set_template.S new file mode 100644 index 000000000000..f9073d638468 --- /dev/null +++ b/arch/sw_64/lib/deep-set_template.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $16: latest dest, clobbered + * $17: 8-byte data to set + * $18: bytes left to copy + * + */ + +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x40($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + vldd $f1, 0($23); \ + ldi $sp, 0x40($sp); \ + bis $31, $31, $7 + + ble $18, $out + and $16, 7, $1 + beq $1, $dest_aligned_8 + + .align 3 +$byte_loop_head: + FIXUP_LDST( stb $17, 0($16) ) + subl $18, 1, $18 + addl $16, 1, $16 + ble $18, $out + and $16, 7, $1 + bne $1, $byte_loop_head + +$dest_aligned_8: + cmplt $18, 16, $1 + bne $1, $quad_loop_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + cmplt $18, 64, $1 + bne $1, $simd_end + + .align 3 +$quad_loop_head: + FIXUP_LDST( stl $17, 0($16) ) + addl $16, 8, $16 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + cmplt $18, 64, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + ifmovd $17, $f1 + vcpyf $f1, $f1 + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 3 +$simd_loop_nc: + FIXUP_LDST( vstd_nc $f1, 0($16) ) + FIXUP_LDST( vstd_nc $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop_nc + memb # required for _nc store instructions + br $31, $simd_loop_end + + .align 3 +$simd_loop: + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vstd $f1, 0($16) ) + subl 
$18, 32, $18
+	addl	$16, 32, $16
+
+$no_more_simd:
+	RESTORE_SIMD_REGS
+
+$simd_end:
+	ble	$18, $out
+	cmplt	$18, 16, $1
+	bne	$1, $quad_loop_end
+
+	.align 3
+$quad_loop_tail:
+	FIXUP_LDST( stl $17, 0($16) )
+	FIXUP_LDST( stl $17, 8($16) )
+	subl	$18, 16, $18
+	addl	$16, 16, $16
+	cmplt	$18, 16, $1
+	beq	$1, $quad_loop_tail
+
+$quad_loop_end:
+	ble	$18, $out
+	cmplt	$18, 8, $1
+	bne	$1, $byte_loop_tail
+
+$move_one_quad:
+	FIXUP_LDST( stl $17, 0($16) )
+	subl	$18, 8, $18
+	addl	$16, 8, $16
+	ble	$18, $out
+
+	.align 3
+$byte_loop_tail:
+	FIXUP_LDST( stb $17, 0($16) )
+	subl	$18, 1, $18
+	addl	$16, 1, $16
+	bgt	$18, $byte_loop_tail
+	br	$31, $out
diff --git a/arch/sw_64/lib/deep-set_template_c4.S b/arch/sw_64/lib/deep-set_template_c4.S
new file mode 100644
index 000000000000..2b1bcab8fec9
--- /dev/null
+++ b/arch/sw_64/lib/deep-set_template_c4.S
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * template for memset and clear_user with SIMD
+ *
+ * $7:	SIMD status
+ *	0: not in simd loop
+ *	1: in simd loop
+ * $16:	latest dest, clobbered
+ * $17:	8-byte data to set
+ * $18:	bytes left to copy
+ *
+ */
+
+#define SAVE_SIMD_REGS \
+	ldi	$sp, -0x40($sp); \
+	addl	$sp, 0x1f, $23; \
+	bic	$23, 0x1f, $23; \
+	vstd	$f1, 0($23); \
+	ldi	$7, 1
+
+#define RESTORE_SIMD_REGS \
+	vldd	$f1, 0($23); \
+	ldi	$sp, 0x40($sp); \
+	bis	$31, $31, $7
+
+	ble	$18, $out
+
+	cmplt	$18, 8, $1
+	bne	$1, $byte_loop_tail
+	cmplt	$18, 16, $1
+	bne	$1, $quad_loop_end
+	cmplt	$18, 32, $1
+	bne	$1, $simd_end
+
+$prep_simd_loop:
+	SAVE_SIMD_REGS
+	ifmovd	$17, $f1
+	vcpyf	$f1, $f1
+	cmplt	$18, 64, $1
+	bne	$1, $simd_loop_end
+
+	.align 3
+$simd_loop:
+	FIXUP_LDST( vstd $f1, 0($16) )
+	FIXUP_LDST( vstd $f1, 32($16) )
+	subl	$18, 64, $18
+	addl	$16, 64, $16
+	cmplt	$18, 64, $1
+	beq	$1, $simd_loop
+
+$simd_loop_end:
+	cmplt	$18, 32, $1
+	bne	$1, $no_more_simd
+	FIXUP_LDST( vstd $f1, 0($16) )
+	subl	$18, 32, $18
+	addl	$16, 32, $16
+
+$no_more_simd:
+	RESTORE_SIMD_REGS
+
+$simd_end:
+	ble	$18, $out
+	cmplt	$18, 16, $1
+	bne	$1, $quad_loop_end
+
+	.align 3
+$quad_loop_tail:
+	FIXUP_LDST( stl $17, 0($16) )
+	FIXUP_LDST( stl $17, 8($16) )
+	subl	$18, 16, $18
+	addl	$16, 16, $16
+	cmplt	$18, 16, $1
+	beq	$1, $quad_loop_tail
+
+$quad_loop_end:
+	ble	$18, $out
+	cmplt	$18, 8, $1
+	bne	$1, $byte_loop_tail
+
+$move_one_quad:
+	FIXUP_LDST( stl $17, 0($16) )
+	subl	$18, 8, $18
+	addl	$16, 8, $16
+	ble	$18, $out
+
+	.align 3
+$byte_loop_tail:
+	FIXUP_LDST( stb $17, 0($16) )
+	subl	$18, 1, $18
+	addl	$16, 1, $16
+	bgt	$18, $byte_loop_tail
+	br	$31, $out
diff --git a/arch/sw_64/lib/divide.S b/arch/sw_64/lib/divide.S
new file mode 100644
index 000000000000..ceef343a6084
--- /dev/null
+++ b/arch/sw_64/lib/divide.S
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) 1995 Linus Torvalds
+ *
+ * The sw64 chip doesn't provide hardware division, so we have to do it
+ * by hand. The compiler expects the functions
+ *
+ *	__divlu: 64-bit unsigned long divide
+ *	__remlu: 64-bit unsigned long remainder
+ *	__divl/__reml: signed 64-bit
+ *	__divwu/__remwu: unsigned 32-bit
+ *	__divw/__remw: signed 32-bit
+ *
+ * These are not normal C functions: instead of the normal
+ * calling sequence, these expect their arguments in registers
+ * $24 and $25, and return the result in $27. Register $28 may
+ * be clobbered (assembly temporary), anything else must be saved.
+ *
+ * In short: painful.
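+ *
+ * In outline, the unsigned 64-bit path below is (a C sketch; "modulus"
+ * starts out as the dividend, and the 32-bit variant shifts by 3 bits
+ * at a time instead of 1):
+ *
+ *	mask = 1; quotient = 0;
+ *	while (divisor < modulus && !(divisor >> 63)) {
+ *		divisor <<= 1; mask <<= 1;
+ *	}
+ *	while (mask) {
+ *		if (divisor <= modulus) {
+ *			quotient += mask; modulus -= divisor;
+ *		}
+ *		mask >>= 1; divisor >>= 1;
+ *	}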
+ * + * This is a rather simple bit-at-a-time algorithm: it's very good + * at dividing random 64-bit numbers, but the more usual case where + * the divisor is small is handled better by the DEC algorithm + * using lookup tables. This uses much less memory, though, and is + * nicer on the cache.. Besides, I don't know the copyright status + * of the DEC code. + */ + +/* + * My temporaries: + * $0 - current bit + * $1 - shifted divisor + * $2 - modulus/quotient + * + * $23 - return address + * $24 - dividend + * $25 - divisor + * + * $27 - quotient/modulus + * $28 - compare status + */ +#include + +#define halt .long 0 + +/* + * Select function type and registers + */ +#define mask $0 +#define divisor $1 +#define compare $28 +#define tmp1 $3 +#define tmp2 $4 + +#ifdef DIV +#define DIV_ONLY(x,y...) x, ##y +#define MOD_ONLY(x,y...) +#define func(x) __div##x +#define modulus $2 +#define quotient $27 +#define GETSIGN(x) xor $24, $25, x +#define STACK 48 +#else +#define DIV_ONLY(x,y...) +#define MOD_ONLY(x,y...) x, ##y +#define func(x) __rem##x +#define modulus $27 +#define quotient $2 +#define GETSIGN(x) bis $24, $24, x +#define STACK 32 +#endif + +/* + * For 32-bit operations, we need to extend to 64-bit + */ +#ifdef INTSIZE +#define ufunction func(wu) +#define sfunction func(w) +#define LONGIFY(x) zapnot x, 15, x +#define SLONGIFY(x) addw x, 0, x +#else +#define ufunction func(lu) +#define sfunction func(l) +#define LONGIFY(x) +#define SLONGIFY(x) +#endif + +.set noat +.align 3 +.globl ufunction +.ent ufunction +ufunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + +7: stl $1, 0($30) + bis $25, $25, divisor + stl $2, 8($30) + bis $24, $24, modulus + stl $0, 16($30) + bis $31, $31, quotient + LONGIFY(divisor) + stl tmp1, 24($30) + LONGIFY(modulus) + bis $31, 1, mask + DIV_ONLY(stl tmp2, 32($30)) + beq divisor, 9f # div by zero + +#ifdef INTSIZE + /* + * shift divisor left, using 3-bit shifts for + * 32-bit divides as we can't overflow. Three-bit + * shifts will result in looping three times less + * here, but can result in two loops more later. + * Thus using a large shift isn't worth it (and + * s8add pairs better than a sll..) + */ +1: cmpult divisor, modulus, compare + s8addl divisor, $31, divisor + s8addl mask, $31, mask + bne compare, 1b +#else +1: cmpult divisor, modulus, compare + blt divisor, 2f + addl divisor, divisor, divisor + addl mask, mask, mask + bne compare, 1b +#endif + + /* ok, start to go right again.. */ +2: DIV_ONLY(addl quotient, mask, tmp2) + srl mask, 1, mask + cmpule divisor, modulus, compare + subl modulus, divisor, tmp1 + DIV_ONLY(selne compare, tmp2, quotient, quotient) + srl divisor, 1, divisor + selne compare, tmp1, modulus, modulus + bne mask, 2b + +9: ldl $1, 0($30) + ldl $2, 8($30) + ldl $0, 16($30) + ldl tmp1, 24($30) + DIV_ONLY(ldl tmp2, 32($30)) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end ufunction + EXPORT_SYMBOL(ufunction) +/* + * Uhh.. Ugly signed division. I'd rather not have it at all, but + * it's needed in some circumstances. There are different ways to + * handle this, really. This does: + * -a / b = a / -b = -(a / b) + * -a % b = -(a % b) + * a % -b = a % b + * which is probably not the best solution, but at least should + * have the property that (x/y)*y + (x%y) = x. 
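+ * For example: -7 / 2 = -3 and -7 % 2 = -1, and indeed
+ * (-3) * 2 + (-1) = -7.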
+ */ +.align 3 +.globl sfunction +.ent sfunction +sfunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + bis $24, $25, $28 + SLONGIFY($28) + bge $28, 7b + stl $24, 0($30) + subl $31, $24, $28 + stl $25, 8($30) + sellt $24, $28, $24, $24 # abs($24) + stl $23, 16($30) + subl $31, $25, $28 + stl tmp1, 24($30) + sellt $25, $28, $25, $25 # abs($25) + bsr $23, ufunction + ldl $24, 0($30) + ldl $25, 8($30) + GETSIGN($28) + subl $31, $27, tmp1 + SLONGIFY($28) + ldl $23, 16($30) + sellt $28, tmp1, $27, $27 + ldl tmp1, 24($30) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end sfunction + EXPORT_SYMBOL(sfunction) diff --git a/arch/sw_64/lib/fls.c b/arch/sw_64/lib/fls.c new file mode 100644 index 000000000000..aa4231f7e472 --- /dev/null +++ b/arch/sw_64/lib/fls.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* This is fls(x)-1, except zero is held to zero. This allows most + * efficient input into extbl, plus it allows easy handling of fls(0)=0. + */ + +const unsigned char __flsm1_tab[256] = { + 0, + 0, + 1, 1, + 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, +}; +EXPORT_SYMBOL(__flsm1_tab); diff --git a/arch/sw_64/lib/fpreg.c b/arch/sw_64/lib/fpreg.c new file mode 100644 index 000000000000..178870310908 --- /dev/null +++ b/arch/sw_64/lib/fpreg.c @@ -0,0 +1,992 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) Copyright 1998 Linus Torvalds + */ + +#include +#include + +#define STT(reg, val) \ + asm volatile("fimovd $f"#reg", %0" : "=r"(val)) +#define STS(reg, val) \ + asm volatile("fimovs $f"#reg", %0" : "=r"(val)) +#define LDT(reg, val) \ + asm volatile("ifmovd %0, $f"#reg : : "r"(val)) +#define LDS(reg, val) \ + asm volatile("ifmovs %0, $f"#reg : : "r"(val)) +#define VLDD(reg, val) \ + asm volatile("vldd $f"#reg", %0" : : "m"(val) : "memory") +#define VSTD(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") +#define VLDS(reg, val) \ + asm volatile("vlds $f"#reg", %0" : : "m"(val) : "memory") +#define LDWE(reg, val) \ + asm volatile("ldwe $f"#reg", %0" : : "m"(val) : "memory") +#define VSTS(reg, val) \ + asm volatile("vsts $f"#reg", %0" : "=m"(val) : : "memory") +#define STDH(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") + +void +sw64_write_simd_fp_reg_s(unsigned long reg, unsigned long f0, unsigned long f1) +{ + + unsigned long tmpa[4] __aligned(16); + + tmpa[0] = f0; + tmpa[1] = f1; + + switch (reg) { + case 0: + VLDS(0, *tmpa); + break; + case 1: + VLDS(1, *tmpa); + break; + case 2: + VLDS(2, *tmpa); + break; + case 3: + VLDS(3, *tmpa); + break; + case 4: + VLDS(4, *tmpa); + break; + case 5: + VLDS(5, *tmpa); + break; + case 6: + VLDS(6, *tmpa); + break; + case 7: + VLDS(7, *tmpa); + break; + case 8: + VLDS(8, *tmpa); + break; + case 9: + VLDS(9, *tmpa); + break; + case 10: 
+ VLDS(10, *tmpa); + break; + case 11: + VLDS(11, *tmpa); + break; + case 12: + VLDS(12, *tmpa); + break; + case 13: + VLDS(13, *tmpa); + break; + case 14: + VLDS(14, *tmpa); + break; + case 15: + VLDS(15, *tmpa); + break; + case 16: + VLDS(16, *tmpa); + break; + case 17: + VLDS(17, *tmpa); + break; + case 18: + VLDS(18, *tmpa); + break; + case 19: + VLDS(19, *tmpa); + break; + case 20: + VLDS(20, *tmpa); + break; + case 21: + VLDS(21, *tmpa); + break; + case 22: + VLDS(22, *tmpa); + break; + case 23: + VLDS(23, *tmpa); + break; + case 24: + VLDS(24, *tmpa); + break; + case 25: + VLDS(25, *tmpa); + break; + case 26: + VLDS(26, *tmpa); + break; + case 27: + VLDS(27, *tmpa); + break; + case 28: + VLDS(28, *tmpa); + break; + case 29: + VLDS(29, *tmpa); + break; + case 30: + VLDS(30, *tmpa); + break; + case 31: + break; + } + +} + + +void sw64_write_simd_fp_reg_d(unsigned long reg, unsigned long f0, + unsigned long f1, unsigned long f2, unsigned long f3) +{ + unsigned long tmpa[4] __aligned(32); + + tmpa[0] = f0; + tmpa[1] = f1; + tmpa[2] = f2; + tmpa[3] = f3; + + switch (reg) { + case 0: + VLDD(0, *tmpa); + break; + case 1: + VLDD(1, *tmpa); + break; + case 2: + VLDD(2, *tmpa); + break; + case 3: + VLDD(3, *tmpa); + break; + case 4: + VLDD(4, *tmpa); + break; + case 5: + VLDD(5, *tmpa); + break; + case 6: + VLDD(6, *tmpa); + break; + case 7: + VLDD(7, *tmpa); + break; + case 8: + VLDD(8, *tmpa); + break; + case 9: + VLDD(9, *tmpa); + break; + case 10: + VLDD(10, *tmpa); + break; + case 11: + VLDD(11, *tmpa); + break; + case 12: + VLDD(12, *tmpa); + break; + case 13: + VLDD(13, *tmpa); + break; + case 14: + VLDD(14, *tmpa); + break; + case 15: + VLDD(15, *tmpa); + break; + case 16: + VLDD(16, *tmpa); + break; + case 17: + VLDD(17, *tmpa); + break; + case 18: + VLDD(18, *tmpa); + break; + case 19: + VLDD(19, *tmpa); + break; + case 20: + VLDD(20, *tmpa); + break; + case 21: + VLDD(21, *tmpa); + break; + case 22: + VLDD(22, *tmpa); + break; + case 23: + VLDD(23, *tmpa); + break; + case 24: + VLDD(24, *tmpa); + break; + case 25: + VLDD(25, *tmpa); + break; + case 26: + VLDD(26, *tmpa); + break; + case 27: + VLDD(27, *tmpa); + break; + case 28: + VLDD(28, *tmpa); + break; + case 29: + VLDD(29, *tmpa); + break; + case 30: + VLDD(30, *tmpa); + break; + case 31: + break; + } + + +} + + +void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a) +{ + switch (reg) { + case 0: + LDWE(0, a); + break; + case 1: + LDWE(1, a); + break; + case 2: + LDWE(2, a); + break; + case 3: + LDWE(3, a); + break; + case 4: + LDWE(4, a); + break; + case 5: + LDWE(5, a); + break; + case 6: + LDWE(6, a); + break; + case 7: + LDWE(7, a); + break; + case 8: + LDWE(8, a); + break; + case 9: + LDWE(9, a); + break; + case 10: + LDWE(10, a); + break; + case 11: + LDWE(11, a); + break; + case 12: + LDWE(12, a); + break; + case 13: + LDWE(13, a); + break; + case 14: + LDWE(14, a); + break; + case 15: + LDWE(15, a); + break; + case 16: + LDWE(16, a); + break; + case 17: + LDWE(17, a); + break; + case 18: + LDWE(18, a); + break; + case 19: + LDWE(19, a); + break; + case 20: + LDWE(20, a); + break; + case 21: + LDWE(21, a); + break; + case 22: + LDWE(22, a); + break; + case 23: + LDWE(23, a); + break; + case 24: + LDWE(24, a); + break; + case 25: + LDWE(25, a); + break; + case 26: + LDWE(26, a); + break; + case 27: + LDWE(27, a); + break; + case 28: + LDWE(28, a); + break; + case 29: + LDWE(29, a); + break; + case 30: + LDWE(30, a); + break; + case 31: + break; + } +} + + +void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long 
*fp_value) +{ + volatile unsigned long tmpa[2] __aligned(16); + + switch (reg) { + case 0: + VSTS(0, *tmpa); + break; + case 1: + VSTS(1, *tmpa); + break; + case 2: + VSTS(2, *tmpa); + break; + case 3: + VSTS(3, *tmpa); + break; + case 4: + VSTS(4, *tmpa); + break; + case 5: + VSTS(5, *tmpa); + break; + case 6: + VSTS(6, *tmpa); + break; + case 7: + VSTS(7, *tmpa); + break; + case 8: + VSTS(8, *tmpa); + break; + case 9: + VSTS(9, *tmpa); + break; + case 10: + VSTS(10, *tmpa); + break; + case 11: + VSTS(11, *tmpa); + break; + case 12: + VSTS(12, *tmpa); + break; + case 13: + VSTS(13, *tmpa); + break; + case 14: + VSTS(14, *tmpa); + break; + case 15: + VSTS(15, *tmpa); + break; + case 16: + VSTS(16, *tmpa); + break; + case 17: + VSTS(17, *tmpa); + break; + case 18: + VSTS(18, *tmpa); + break; + case 19: + VSTS(19, *tmpa); + break; + case 20: + VSTS(20, *tmpa); + break; + case 21: + VSTS(21, *tmpa); + break; + case 22: + VSTS(22, *tmpa); + break; + case 23: + VSTS(23, *tmpa); + break; + case 24: + VSTS(24, *tmpa); + break; + case 25: + VSTS(25, *tmpa); + break; + case 26: + VSTS(26, *tmpa); + break; + case 27: + VSTS(27, *tmpa); + break; + case 28: + VSTS(28, *tmpa); + break; + case 29: + VSTS(29, *tmpa); + break; + case 30: + VSTS(30, *tmpa); + break; + case 31: + VSTS(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; +} + +void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value) +{ + volatile unsigned long tmpa[4] __aligned(32); + + switch (reg) { + case 0: + VSTD(0, *tmpa); + break; + case 1: + VSTD(1, *tmpa); + break; + case 2: + VSTD(2, *tmpa); + break; + case 3: + VSTD(3, *tmpa); + break; + case 4: + VSTD(4, *tmpa); + break; + case 5: + VSTD(5, *tmpa); + break; + case 6: + VSTD(6, *tmpa); + break; + case 7: + VSTD(7, *tmpa); + break; + case 8: + VSTD(8, *tmpa); + break; + case 9: + VSTD(9, *tmpa); + break; + case 10: + VSTD(10, *tmpa); + break; + case 11: + VSTD(11, *tmpa); + break; + case 12: + VSTD(12, *tmpa); + break; + case 13: + VSTD(13, *tmpa); + break; + case 14: + VSTD(14, *tmpa); + break; + case 15: + VSTD(15, *tmpa); + break; + case 16: + VSTD(16, *tmpa); + break; + case 17: + VSTD(17, *tmpa); + break; + case 18: + VSTD(18, *tmpa); + break; + case 19: + VSTD(19, *tmpa); + break; + case 20: + VSTD(20, *tmpa); + break; + case 21: + VSTD(21, *tmpa); + break; + case 22: + VSTD(22, *tmpa); + break; + case 23: + VSTD(23, *tmpa); + break; + case 24: + VSTD(24, *tmpa); + break; + case 25: + VSTD(25, *tmpa); + break; + case 26: + VSTD(26, *tmpa); + break; + case 27: + VSTD(27, *tmpa); + break; + case 28: + VSTD(28, *tmpa); + break; + case 29: + VSTD(29, *tmpa); + break; + case 30: + VSTD(30, *tmpa); + break; + case 31: + VSTD(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; + *(fp_value+2) = tmpa[2]; + *(fp_value+3) = tmpa[3]; +} + +unsigned long sw64_read_fp_reg(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STT(0, val); + break; + case 1: + STT(1, val); + break; + case 2: + STT(2, val); + break; + case 3: + STT(3, val); + break; + case 4: + STT(4, val); + break; + case 5: + STT(5, val); + break; + case 6: + STT(6, val); + break; + case 7: + STT(7, val); + break; + case 8: + STT(8, val); + break; + case 9: + STT(9, val); + break; + case 10: + STT(10, val); + break; + case 11: + STT(11, val); + break; + case 12: + STT(12, val); + break; + case 13: + STT(13, val); + break; + case 14: + STT(14, val); + break; + case 15: + STT(15, val); + break; + case 16: + STT(16, val); + break; + case 17: + 
STT(17, val); + break; + case 18: + STT(18, val); + break; + case 19: + STT(19, val); + break; + case 20: + STT(20, val); + break; + case 21: + STT(21, val); + break; + case 22: + STT(22, val); + break; + case 23: + STT(23, val); + break; + case 24: + STT(24, val); + break; + case 25: + STT(25, val); + break; + case 26: + STT(26, val); + break; + case 27: + STT(27, val); + break; + case 28: + STT(28, val); + break; + case 29: + STT(29, val); + break; + case 30: + STT(30, val); + break; + case 31: + STT(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg); + +void sw64_write_fp_reg(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDT(0, val); + break; + case 1: + LDT(1, val); + break; + case 2: + LDT(2, val); + break; + case 3: + LDT(3, val); + break; + case 4: + LDT(4, val); + break; + case 5: + LDT(5, val); + break; + case 6: + LDT(6, val); + break; + case 7: + LDT(7, val); + break; + case 8: + LDT(8, val); + break; + case 9: + LDT(9, val); + break; + case 10: + LDT(10, val); + break; + case 11: + LDT(11, val); + break; + case 12: + LDT(12, val); + break; + case 13: + LDT(13, val); + break; + case 14: + LDT(14, val); + break; + case 15: + LDT(15, val); + break; + case 16: + LDT(16, val); + break; + case 17: + LDT(17, val); + break; + case 18: + LDT(18, val); + break; + case 19: + LDT(19, val); + break; + case 20: + LDT(20, val); + break; + case 21: + LDT(21, val); + break; + case 22: + LDT(22, val); + break; + case 23: + LDT(23, val); + break; + case 24: + LDT(24, val); + break; + case 25: + LDT(25, val); + break; + case 26: + LDT(26, val); + break; + case 27: + LDT(27, val); + break; + case 28: + LDT(28, val); + break; + case 29: + LDT(29, val); + break; + case 30: + LDT(30, val); + break; + case 31: + LDT(31, val); + break; + } +} +EXPORT_SYMBOL(sw64_write_fp_reg); + +unsigned long sw64_read_fp_reg_s(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STS(0, val); + break; + case 1: + STS(1, val); + break; + case 2: + STS(2, val); + break; + case 3: + STS(3, val); + break; + case 4: + STS(4, val); + break; + case 5: + STS(5, val); + break; + case 6: + STS(6, val); + break; + case 7: + STS(7, val); + break; + case 8: + STS(8, val); + break; + case 9: + STS(9, val); + break; + case 10: + STS(10, val); + break; + case 11: + STS(11, val); + break; + case 12: + STS(12, val); + break; + case 13: + STS(13, val); + break; + case 14: + STS(14, val); + break; + case 15: + STS(15, val); + break; + case 16: + STS(16, val); + break; + case 17: + STS(17, val); + break; + case 18: + STS(18, val); + break; + case 19: + STS(19, val); + break; + case 20: + STS(20, val); + break; + case 21: + STS(21, val); + break; + case 22: + STS(22, val); + break; + case 23: + STS(23, val); + break; + case 24: + STS(24, val); + break; + case 25: + STS(25, val); + break; + case 26: + STS(26, val); + break; + case 27: + STS(27, val); + break; + case 28: + STS(28, val); + break; + case 29: + STS(29, val); + break; + case 30: + STS(30, val); + break; + case 31: + STS(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg_s); + +void sw64_write_fp_reg_s(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDS(0, val); + break; + case 1: + LDS(1, val); + break; + case 2: + LDS(2, val); + break; + case 3: + LDS(3, val); + break; + case 4: + LDS(4, val); + break; + case 5: + LDS(5, val); + break; + case 6: + LDS(6, val); + break; + case 7: + LDS(7, val); + break; + case 8: + LDS(8, 
val);
+		break;
+	case 9:
+		LDS(9, val);
+		break;
+	case 10:
+		LDS(10, val);
+		break;
+	case 11:
+		LDS(11, val);
+		break;
+	case 12:
+		LDS(12, val);
+		break;
+	case 13:
+		LDS(13, val);
+		break;
+	case 14:
+		LDS(14, val);
+		break;
+	case 15:
+		LDS(15, val);
+		break;
+	case 16:
+		LDS(16, val);
+		break;
+	case 17:
+		LDS(17, val);
+		break;
+	case 18:
+		LDS(18, val);
+		break;
+	case 19:
+		LDS(19, val);
+		break;
+	case 20:
+		LDS(20, val);
+		break;
+	case 21:
+		LDS(21, val);
+		break;
+	case 22:
+		LDS(22, val);
+		break;
+	case 23:
+		LDS(23, val);
+		break;
+	case 24:
+		LDS(24, val);
+		break;
+	case 25:
+		LDS(25, val);
+		break;
+	case 26:
+		LDS(26, val);
+		break;
+	case 27:
+		LDS(27, val);
+		break;
+	case 28:
+		LDS(28, val);
+		break;
+	case 29:
+		LDS(29, val);
+		break;
+	case 30:
+		LDS(30, val);
+		break;
+	case 31:
+		LDS(31, val);
+		break;
+	}
+}
+EXPORT_SYMBOL(sw64_write_fp_reg_s);
diff --git a/arch/sw_64/lib/iomap.c b/arch/sw_64/lib/iomap.c
new file mode 100644
index 000000000000..d9c66a89131e
--- /dev/null
+++ b/arch/sw_64/lib/iomap.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SW64 IO and memory functions.
+ */
+
+#include
+
+#include
+#include
+
+/*
+ * Here comes the sw64 implementation of the IOMAP interfaces.
+ */
+unsigned int ioread8(const void __iomem *addr)
+{
+	return readb(addr);
+}
+EXPORT_SYMBOL(ioread8);
+
+unsigned int ioread16(const void __iomem *addr)
+{
+	return readw(addr);
+}
+EXPORT_SYMBOL(ioread16);
+
+unsigned int ioread32(const void __iomem *addr)
+{
+	return readl(addr);
+}
+EXPORT_SYMBOL(ioread32);
+
+void iowrite8(u8 b, void __iomem *addr)
+{
+	writeb(b, addr);
+}
+EXPORT_SYMBOL(iowrite8);
+
+void iowrite16(u16 b, void __iomem *addr)
+{
+	writew(b, addr);
+}
+EXPORT_SYMBOL(iowrite16);
+
+void iowrite32(u32 b, void __iomem *addr)
+{
+	writel(b, addr);
+}
+EXPORT_SYMBOL(iowrite32);
+
+u8 inb(unsigned long port)
+{
+	return ioread8(ioport_map(port, 1));
+}
+EXPORT_SYMBOL(inb);
+
+u16 inw(unsigned long port)
+{
+	return ioread16(ioport_map(port, 2));
+}
+EXPORT_SYMBOL(inw);
+
+u32 inl(unsigned long port)
+{
+	return ioread32(ioport_map(port, 4));
+}
+EXPORT_SYMBOL(inl);
+
+void outb(u8 b, unsigned long port)
+{
+	iowrite8(b, ioport_map(port, 1));
+}
+EXPORT_SYMBOL(outb);
+
+void outw(u16 b, unsigned long port)
+{
+	iowrite16(b, ioport_map(port, 2));
+}
+EXPORT_SYMBOL(outw);
+
+void outl(u32 b, unsigned long port)
+{
+	iowrite32(b, ioport_map(port, 4));
+}
+EXPORT_SYMBOL(outl);
+
+
+/*
+ * Read COUNT 8-bit bytes from port PORT into memory starting at DST.
+ */
+void ioread8_rep(const void __iomem *port, void *dst, unsigned long count)
+{
+	while ((unsigned long)dst & 0x3) {
+		if (!count)
+			return;
+		count--;
+		*(unsigned char *)dst = ioread8(port);
+		dst += 1;
+	}
+
+	while (count >= 4) {
+		unsigned int w;
+
+		count -= 4;
+		w = ioread8(port);
+		w |= ioread8(port) << 8;
+		w |= ioread8(port) << 16;
+		w |= ioread8(port) << 24;
+		*(unsigned int *)dst = w;
+		dst += 4;
+	}
+
+	while (count) {
+		--count;
+		*(unsigned char *)dst = ioread8(port);
+		dst += 1;
+	}
+}
+EXPORT_SYMBOL(ioread8_rep);
+
+void insb(unsigned long port, void *dst, unsigned long count)
+{
+	ioread8_rep(ioport_map(port, 1), dst, count);
+}
+EXPORT_SYMBOL(insb);
+
+/*
+ * Read COUNT 16-bit words from port PORT into memory starting at
+ * DST. DST must be at least short aligned. This is used by the
+ * IDE driver to read disk sectors. Performance is important, but
+ * the interfaces seem to be slow: just using the inlined version
+ * of the inw() breaks things.
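+ *
+ * The body below first issues one 16-bit read if DST is only 2-byte
+ * aligned, then merges pairs of reads into single aligned 32-bit
+ * stores, with one trailing 16-bit read for an odd count.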
+ */
+void ioread16_rep(const void __iomem *port, void *dst, unsigned long count)
+{
+	if (unlikely((unsigned long)dst & 0x3)) {
+		if (!count)
+			return;
+		BUG_ON((unsigned long)dst & 0x1);
+		count--;
+		*(unsigned short *)dst = ioread16(port);
+		dst += 2;
+	}
+
+	while (count >= 2) {
+		unsigned int w;
+
+		count -= 2;
+		w = ioread16(port);
+		w |= ioread16(port) << 16;
+		*(unsigned int *)dst = w;
+		dst += 4;
+	}
+
+	if (count)
+		*(unsigned short *)dst = ioread16(port);
+}
+EXPORT_SYMBOL(ioread16_rep);
+
+void insw(unsigned long port, void *dst, unsigned long count)
+{
+	ioread16_rep(ioport_map(port, 2), dst, count);
+}
+EXPORT_SYMBOL(insw);
+
+
+/*
+ * Read COUNT 32-bit words from port PORT into memory starting at
+ * DST. Now works with any alignment in DST. Performance is important,
+ * but the interfaces seem to be slow: just using the inlined version
+ * of the inl() breaks things.
+ */
+void ioread32_rep(const void __iomem *port, void *dst, unsigned long count)
+{
+	if (unlikely((unsigned long)dst & 0x3)) {
+		while (count--) {
+			struct S { int x __packed; };
+			((struct S *)dst)->x = ioread32(port);
+			dst += 4;
+		}
+	} else {
+		/* Buffer 32-bit aligned. */
+		while (count--) {
+			*(unsigned int *)dst = ioread32(port);
+			dst += 4;
+		}
+	}
+}
+EXPORT_SYMBOL(ioread32_rep);
+
+void insl(unsigned long port, void *dst, unsigned long count)
+{
+	ioread32_rep(ioport_map(port, 4), dst, count);
+}
+EXPORT_SYMBOL(insl);
+
+
+/*
+ * Like insb but in the opposite direction.
+ * Don't worry as much about doing aligned memory transfers:
+ * doing byte reads the "slow" way isn't nearly as slow as
+ * doing byte writes the slow way (no r-m-w cycle).
+ */
+void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
+{
+	const unsigned char *src = xsrc;
+
+	while (count--)
+		iowrite8(*src++, port);
+}
+EXPORT_SYMBOL(iowrite8_rep);
+
+void outsb(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite8_rep(ioport_map(port, 1), src, count);
+}
+EXPORT_SYMBOL(outsb);
+
+
+/*
+ * Like insw but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Performance is important, but the
+ * interfaces seem to be slow: just using the inlined version of the
+ * outw() breaks things.
+ */
+void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
+{
+	if (unlikely((unsigned long)src & 0x3)) {
+		if (!count)
+			return;
+		BUG_ON((unsigned long)src & 0x1);
+		iowrite16(*(unsigned short *)src, port);
+		src += 2;
+		--count;
+	}
+
+	while (count >= 2) {
+		unsigned int w;
+
+		count -= 2;
+		w = *(unsigned int *)src;
+		src += 4;
+		iowrite16(w >> 0, port);
+		iowrite16(w >> 16, port);
+	}
+
+	if (count)
+		iowrite16(*(unsigned short *)src, port);
+}
+EXPORT_SYMBOL(iowrite16_rep);
+
+void outsw(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite16_rep(ioport_map(port, 2), src, count);
+}
+EXPORT_SYMBOL(outsw);
+
+
+/*
+ * Like insl but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Works with any alignment in SRC.
+ * Performance is important, but the interfaces seem to be slow:
+ * just using the inlined version of the outl() breaks things.
+ */
+void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
+{
+	if (unlikely((unsigned long)src & 0x3)) {
+		while (count--) {
+			struct S { int x __packed; };
+			iowrite32(((struct S *)src)->x, port);
+			src += 4;
+		}
+	} else {
+		/* Buffer 32-bit aligned.
*/ + while (count--) { + iowrite32(*(unsigned int *)src, port); + src += 4; + } + } +} +EXPORT_SYMBOL(iowrite32_rep); + +void outsl(unsigned long port, const void *src, unsigned long count) +{ + iowrite32_rep(ioport_map(port, 4), src, count); +} +EXPORT_SYMBOL(outsl); + + +/* + * Copy data from IO memory space to "real" memory space. + * This needs to be optimized. + */ +void memcpy_fromio(void *to, const volatile void __iomem *from, long count) +{ + /* + * Optimize co-aligned transfers. Everything else gets handled + * a byte at a time. + */ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + *(u64 *)to = __raw_readq(from); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + *(u32 *)to = __raw_readl(from); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + *(u16 *)to = __raw_readw(from); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + *(u8 *) to = __raw_readb(from); + count--; + to++; + from++; + } + mb(); +} +EXPORT_SYMBOL(memcpy_fromio); + + +/* + * Copy data from "real" memory space to IO memory space. + * This needs to be optimized. + */ +void memcpy_toio(volatile void __iomem *to, const void *from, long count) +{ + /* + * Optimize co-aligned transfers. Everything else gets handled + * a byte at a time. + * FIXME -- align FROM. + */ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + __raw_writeq(*(const u64 *)from, to); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + __raw_writel(*(const u32 *)from, to); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + __raw_writew(*(const u16 *)from, to); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + __raw_writeb(*(const u8 *) from, to); + count--; + to++; + from++; + } + mb(); +} +EXPORT_SYMBOL(memcpy_toio); + + +/* + * "memset" on IO memory space. + */ +void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) +{ + /* Handle any initial odd byte */ + if (count > 0 && ((u64)to & 1)) { + __raw_writeb(c, to); + to++; + count--; + } + + /* Handle any initial odd halfword */ + if (count >= 2 && ((u64)to & 2)) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* Handle any initial odd word */ + if (count >= 4 && ((u64)to & 4)) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* + * Handle all full-sized quadwords: we're aligned + * (or have a small count) + */ + count -= 8; + if (count >= 0) { + do { + __raw_writeq(c, to); + to += 8; + count -= 8; + } while (count >= 0); + } + count += 8; + + /* The tail is word-aligned if we still have count >= 4 */ + if (count >= 4) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* The tail is half-word aligned if we have count >= 2 */ + if (count >= 2) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* And finally, one last byte.. 
*/ + if (count) + __raw_writeb(c, to); + mb(); +} +EXPORT_SYMBOL(_memset_c_io); + +void __iomem *ioport_map(unsigned long port, unsigned int size) +{ + unsigned long io_offset; + + if (port < 0x100000) { + io_offset = is_in_host() ? LPC_LEGACY_IO : PCI_VT_LEGACY_IO; + port = port | io_offset; + } + + return __va(port); +} +EXPORT_SYMBOL(ioport_map); + +void ioport_unmap(void __iomem *addr) +{ +} +EXPORT_SYMBOL(ioport_unmap); diff --git a/arch/sw_64/lib/iomap_copy.c b/arch/sw_64/lib/iomap_copy.c new file mode 100644 index 000000000000..1c75bd602d7e --- /dev/null +++ b/arch/sw_64/lib/iomap_copy.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +/** + * __iowrite32_copy - copy data to MMIO space, in 32-bit units + * @to: destination, in MMIO space (must be 32-bit aligned) + * @from: source (must be 32-bit aligned) + * @count: number of 32-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in units of 32 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __iowrite32_copy(void __iomem *to, + const void *from, + size_t count) +{ + u32 __iomem *dst = to; + const u32 *src = from; + const u32 *end = src + count; + + while (src < end) { + __raw_writel(*src++, dst++); + mb(); + } + +} + +/** + * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units + * @to: destination, in MMIO space (must be 64-bit aligned) + * @from: source (must be 64-bit aligned) + * @count: number of 64-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __iowrite64_copy(void __iomem *to, + const void *from, + size_t count) +{ + u64 __iomem *dst = to; + const u64 *src = from; + const u64 *end = src + count; + + while (src < end) { + __raw_writeq(*src++, dst++); + mb(); + } +} diff --git a/arch/sw_64/lib/memcpy.S b/arch/sw_64/lib/memcpy.S new file mode 100644 index 000000000000..31c422b393ee --- /dev/null +++ b/arch/sw_64/lib/memcpy.S @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Reasonably optimized memcpy() routine for the sw64 + * + * - memory accessed as aligned quadwords only + * - uses bcmpge to compare 8 bytes in parallel + * + * Temp usage notes: + * $1, $2, - scratch + */ +#include + .set noreorder + .set noat + + .align 4 + .globl memcpy + .ent memcpy +memcpy: + .frame $30, 0, $26, 0 + .prologue 0 + + mov $16, $0 + ble $18, $nomoredata + xor $16, $17, $1 + and $1, 7, $1 + + bne $1, $misaligned + /* source and dest are same mod 8 address */ + and $16, 7, $1 + beq $1, $both_0mod8 + + /* + * source and dest are same misalignment. move a byte at a time + * until a 0mod8 alignment for both is reached. 
+ * At least one byte more to move + */ + +$head_align: + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + stb $1, 0($16) + addl $16, 1, $16 + and $16, 7, $1 + ble $18, $nomoredata + bne $1, $head_align + +$both_0mod8: + cmple $18, 127, $1 + bne $1, $no_unroll + and $16, 63, $1 + beq $1, $do_unroll + +$single_head_quad: + ldl $1, 0($17) + subl $18, 8, $18 + addl $17, 8, $17 + + stl $1, 0($16) + addl $16, 8, $16 + and $16, 63, $1 + bne $1, $single_head_quad + +$do_unroll: + addl $16, 64, $7 + cmple $18, 127, $1 + bne $1, $tail_quads + +$unroll_body: + #wh64 ($7) + fillde 0($7) + + ldl $6, 0($17) + + ldl $4, 8($17) + ldl $5, 16($17) + addl $7, 64, $7 + + ldl $3, 24($17) + addl $16, 64, $1 + + addl $17, 32, $17 + stl $6, 0($16) + + stl $4, 8($16) + stl $5, 16($16) + subl $18, 192, $2 + + stl $3, 24($16) + addl $16, 32, $16 + + ldl $6, 0($17) + ldl $4, 8($17) + #cmovlt $2, $1, $7 + sellt $2, $1, $7, $7 + + ldl $5, 16($17) + ldl $3, 24($17) + addl $16, 32, $16 + subl $18, 64, $18 + + addl $17, 32, $17 + stl $6, -32($16) + stl $4, -24($16) + cmple $18, 63, $1 + + stl $5, -16($16) + stl $3, -8($16) + beq $1, $unroll_body + +$tail_quads: +$no_unroll: + .align 4 + subl $18, 8, $18 + blt $18, $less_than_8 + +$move_a_quad: + ldl $1, 0($17) + subl $18, 8, $18 + addl $17, 8, $17 + + stl $1, 0($16) + addl $16, 8, $16 + bge $18, $move_a_quad + +$less_than_8: + .align 4 + addl $18, 8, $18 + ble $18, $nomoredata + + /* Trailing bytes */ +$tail_bytes: + subl $18, 1, $18 + ldbu $1, 0($17) + addl $17, 1, $17 + + stb $1, 0($16) + addl $16, 1, $16 + bgt $18, $tail_bytes + + /* branching to exit takes 3 extra cycles, so replicate exit here */ + ret $31, ($26), 1 + +$misaligned: + mov $0, $4 + and $0, 7, $1 + beq $1, $dest_0mod8 + +$aligndest: + ble $18, $nomoredata + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + + stb $1, 0($4) + addl $4, 1, $4 + and $4, 7, $1 + bne $1, $aligndest + + /* Source has unknown alignment, but dest is known to be 0mod8 */ +$dest_0mod8: + subl $18, 8, $18 + blt $18, $misalign_tail + ldl_u $3, 0($17) + +$mis_quad: + ldl_u $16, 8($17) + extll $3, $17, $3 + exthl $16, $17, $1 + bis $3, $1, $1 + + subl $18, 8, $18 + addl $17, 8, $17 + stl $1, 0($4) + mov $16, $3 + + addl $4, 8, $4 + bge $18, $mis_quad + +$misalign_tail: + addl $18, 8, $18 + ble $18, $nomoredata + +$misalign_byte: + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + + stb $1, 0($4) + addl $4, 1, $4 + bgt $18, $misalign_byte + + +$nomoredata: + ret $31, ($26), 1 + + .end memcpy + EXPORT_SYMBOL(memcpy) +/* For backwards module compatibility. */ +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/memmove.S b/arch/sw_64/lib/memmove.S new file mode 100644 index 000000000000..3e34fcd5b217 --- /dev/null +++ b/arch/sw_64/lib/memmove.S @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Barely optimized memmove routine for sw64. + * This is hand-massaged output from the original memcpy.c. We defer to + * memcpy whenever possible; the backwards copy loops are not unrolled. + */ +#include + .set noat + .set noreorder + .text + + .align 4 + .globl memmove + .ent memmove +memmove: + ldgp $29, 0($27) + unop + .prologue 1 + + addl $16, $18, $4 + addl $17, $18, $5 + cmpule $4, $17, $1 # dest + n <= src + cmpule $5, $16, $2 # dest >= src + n + + bis $1, $2, $1 + mov $16, $0 + xor $16, $17, $2 + bne $1, memcpy # samegp + + and $2, 7, $2 # Test for src/dest co-alignment. 
+ and $16, 7, $1 + cmpule $16, $17, $3 + bne $3, $memmove_up # dest < src + + and $4, 7, $1 + bne $2, $misaligned_dn + unop + beq $1, $skip_aligned_byte_loop_head_dn + +$aligned_byte_loop_head_dn: + ldi $4, -1($4) + ldi $5, -1($5) + unop + ble $18, $egress + + ldbu $1, 0($5) + ldi $18, -1($18) + stb $1, 0($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_dn + +$skip_aligned_byte_loop_head_dn: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_dn + +$aligned_word_loop_dn: + ldl $1, -8($5) + ldi $5, -8($5) + ldi $18, -8($18) + + stl $1, -8($4) + ldi $4, -8($4) + bge $18, $aligned_word_loop_dn + +$skip_aligned_word_loop_dn: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_dn + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_dn: + fnop + unop + beq $18, $egress + +$byte_loop_tail_dn: + ldbu $1, -1($5) + ldi $5, -1($5) + ldi $4, -1($4) + + ldi $18, -1($18) + stb $1, 0($4) + + bgt $18, $byte_loop_tail_dn + br $egress + +$memmove_up: + mov $16, $4 + mov $17, $5 + bne $2, $misaligned_up + beq $1, $skip_aligned_byte_loop_head_up + +$aligned_byte_loop_head_up: + unop + ble $18, $egress + ldbu $1, 0($5) + + ldi $18, -1($18) + + ldi $5, 1($5) + stb $1, 0($4) + ldi $4, 1($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_up + +$skip_aligned_byte_loop_head_up: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_up + +$aligned_word_loop_up: + ldl $1, 0($5) + ldi $5, 8($5) + ldi $18, -8($18) + + stl $1, 0($4) + ldi $4, 8($4) + bge $18, $aligned_word_loop_up + +$skip_aligned_word_loop_up: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_up + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_up: + fnop + unop + beq $18, $egress + +$byte_loop_tail_up: + ldbu $1, 0($5) + ldi $18, -1($18) + + stb $1, 0($4) + + ldi $5, 1($5) + ldi $4, 1($4) + bgt $18, $byte_loop_tail_up + +$egress: + ret $31, ($26), 1 + + .end memmove + EXPORT_SYMBOL(memmove) diff --git a/arch/sw_64/lib/memset.S b/arch/sw_64/lib/memset.S new file mode 100644 index 000000000000..dbc4d775c7ea --- /dev/null +++ b/arch/sw_64/lib/memset.S @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is an efficient (and small) implementation of the C library "memset()" + * function for the sw. + * + * (C) Copyright 1996 Linus Torvalds + * + * This routine is "moral-ware": you are free to use it any way you wish, and + * the only obligation I put on you is a moral one: if you make any improvements + * to the routine, please send me your improvements for me to use similarly. + * + * The scheduling comments are according to the documentation (and done by + * hand, so they might well be incorrect, please do tell me about it..) 
+ */ + +#include + + .set noat + .set noreorder +.text + .globl memset + .globl __memset + .globl ___memset + .globl __memsetw + .globl __constant_c_memset + + .ent ___memset +.align 5 +___memset: + .frame $30, 0, $26, 0 + .prologue 0 + + and $17, 255, $1 + inslb $17, 1, $17 + bis $17, $1, $17 + sll $17, 16, $1 + + bis $17, $1, $17 + sll $17, 32, $1 + bis $17, $1, $17 + ldl_u $31, 0($30) + +.align 5 +__constant_c_memset: + addl $18, $16, $6 + bis $16, $16, $0 + xor $16, $6, $1 + ble $18, end + + bic $1, 7, $1 + beq $1, within_one_quad + and $16, 7, $3 + beq $3, aligned + + bis $16, $16, $5 + subl $3, 8, $3 + addl $18, $3, $18 + subl $16, $3, $16 + + eqv $3, $31, $3 + addl $3, 1, $3 +unaligned_start_loop: + stb $17, 0($5) + subl $3, 1, $3 + addl $5, 1, $5 + bgt $3, unaligned_start_loop + + +.align 4 +aligned: + sra $18, 3, $3 + and $18, 7, $18 + bis $16, $16, $5 + beq $3, no_quad + +/*added by JJ*/ + ldi $3, -8($3) + blt $3, nounrol + +.align 3 +wloop: + fillde 256($5) + stl $17, 0($5) + stl $17, 8($5) + stl $17, 16($5) + stl $17, 24($5) + subl $3, 8, $3 + stl $17, 32($5) + stl $17, 40($5) + stl $17, 48($5) + stl $17, 56($5) + addl $5, 0x40, $5 + bge $3, wloop + +nounrol: + addl $3, 8, $3 + beq $3, no_quad +/*end JJ*/ + +.align 3 +loop: + stl $17, 0($5) + subl $3, 1, $3 + addl $5, 8, $5 + bne $3, loop + +no_quad: + bis $31, $31, $31 + beq $18, end + and $6, 7, $6 +no_quad_loop: + stb $17, 0($5) + subl $6, 1, $6 + addl $5, 1, $5 + bgt $6, no_quad_loop + ret $31, ($26), 1 + +.align 3 +within_one_quad: + bis $18, $18, $1 + bis $16, $16, $5 +within_one_quad_loop: + stb $17, 0($5) + subl $1, 1, $1 + addl $5, 1, $5 + bgt $1, within_one_quad_loop + +end: + ret $31, ($26), 1 + .end ___memset + EXPORT_SYMBOL(___memset) + + .align 5 + .ent __memsetw +__memsetw: + .prologue 0 + + inslh $17, 0, $1 + inslh $17, 2, $2 + inslh $17, 4, $3 + or $1, $2, $1 + inslh $17, 6, $4 + or $1, $3, $1 + or $1, $4, $17 + br __constant_c_memset + + .end __memsetw + EXPORT_SYMBOL(__memsetw) + +memset = ___memset +EXPORT_SYMBOL(memset) +__memset = ___memset +EXPORT_SYMBOL(__memset) diff --git a/arch/sw_64/lib/strcpy.S b/arch/sw_64/lib/strcpy.S new file mode 100644 index 000000000000..61b6141f88e2 --- /dev/null +++ b/arch/sw_64/lib/strcpy.S @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Optimized strcpy() for SW64 + + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Copy a null-terminated string from SRC to DST. 
+ *
+ * Input:
+ *	$16: DST, clobbered
+ *	$17: SRC, clobbered
+ *
+ * Output:
+ *	$0: DST
+ *
+ * Temporaries:
+ *	$1: unaligned parts of addr (0 means aligned addr)
+ *	$4: current data to copy (could have 1 byte or 8 bytes)
+ *	$5: parts of current data, compare result
+ *	$6: number of bytes left to copy
+ *
+ * Tag naming:
+ *	co: SRC and DST are co-aligned
+ *	mis: SRC and DST are not co-aligned
+ *	a: SRC or DST has aligned address
+ *	una: SRC or DST has unaligned address
+ *
+ */
+
+#include
+
+	.text
+	.align 4
+	.globl strcpy
+	.ent strcpy
+strcpy:
+	.frame $30, 0, $26
+	.prologue 0
+
+	bis $31, $16, $0	# set return value
+
+	xor $16, $17, $1
+	and $1, 7, $1
+	bne $1, $mis_aligned
+
+/* src and dst are co-aligned */
+	and $16, 7, $1
+	bne $1, $co_una_head
+
+/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */
+$co_a_loop:
+	ldl $4, 0($17)
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we found a null
+	stl $4, 0($16)
+	addl $17, 8, $17
+	addl $16, 8, $16
+	br $31, $co_a_loop
+
+/* src and dst are co-aligned but have unaligned address */
+$co_una_head:
+	ldl_u $4, 0($17)
+	extll $4, $16, $4
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we found a null
+	ldi $6, 8($31)
+	subl $6, $1, $6
+	addl $17, $6, $17	# prepare addr of middle part
+
+/* copy the unaligned part in loop */
+$co_una_head_loop:
+	stb $4, 0($16)
+	addl $16, 1, $16
+	subl $6, 1, $6
+	beq $6, $co_a_loop
+	addl $4, 1, $4
+	br $31, $co_una_head_loop
+
+/* src and dst are not co-aligned */
+$mis_aligned:
+	and $16, 7, $1
+	beq $1, $mis_a_dst
+	ldi $6, 8($31)
+	subl $6, $1, $6
+
+/* copy the first few bytes to make dst aligned */
+$mis_una_head_loop:
+	bis $31, $31, $6
+	ldbu $4, 0($17)
+	stb $4, 0($16)
+	beq $4, $out	# we have reached null, return
+	addl $17, 1, $17
+	addl $16, 1, $16
+	subl $6, 1, $6
+	beq $6, $mis_a_dst
+	br $31, $mis_una_head_loop
+
+/* dst has aligned addr */
+$mis_a_dst:
+	and $17, 7, $1
+
+$mis_a_dst_loop:
+	ldl_u $4, 0($17)
+	ldl_u $5, 7($17)
+	extll $4, $1, $4
+	exthl $5, $1, $5
+	bis $4, $5, $4
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we found a null
+	stl $4, 0($16)
+	addl $17, 8, $17
+	addl $16, 8, $16
+	br $31, $mis_a_dst_loop
+
+/* we have found a null in the last few bytes, copy one byte at a time */
+$tail_loop:
+	ldbu $4, 0($17)
+	stb $4, 0($16)
+	beq $4, $out	# we have reached null, return
+	addl $17, 1, $17
+	addl $16, 1, $16
+	br $31, $tail_loop
+
+/* copy is done, return */
+$out:
+	ret
+
+	.end strcpy
+	EXPORT_SYMBOL(strcpy)
diff --git a/arch/sw_64/lib/strncpy.S b/arch/sw_64/lib/strncpy.S
new file mode 100644
index 000000000000..f50c70599bb4
--- /dev/null
+++ b/arch/sw_64/lib/strncpy.S
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Optimized strncpy() for SW64
+
+ * Copyright (C) Mao Minkai
+ * Author: Mao Minkai
+ *
+ * Copy a string from SRC to DST. At most SIZE bytes are copied. 
+ *
+ * Input:
+ *	$16: DST, clobbered
+ *	$17: SRC, clobbered
+ *	$18: SIZE, clobbered
+ *
+ * Output:
+ *	$0: DST
+ *
+ * Temporaries:
+ *	$1: unaligned parts of addr (0 means aligned addr)
+ *	$4: current data to copy (could have 1 byte or 8 bytes)
+ *	$5: parts of current data, compare result
+ *	$6: number of bytes left to copy in head
+ *
+ * Tag naming:
+ *	co: SRC and DST are co-aligned
+ *	mis: SRC and DST are not co-aligned
+ *	a: SRC or DST has aligned address
+ *	una: SRC or DST has unaligned address
+ *
+ */
+
+#include
+
+	.text
+	.align 4
+	.globl strncpy
+	.ent strncpy
+strncpy:
+	.frame $30, 0, $26
+	.prologue 0
+
+	bis $31, $16, $0	# set return value
+	beq $18, $out	# return if size is 0
+	cmplt $18, 8, $5	# size less than 8, do 1-byte copy
+	bne $5, $tail_loop
+
+	xor $16, $17, $1
+	and $1, 7, $1
+	bne $1, $mis_aligned
+
+/* src and dst are co-aligned */
+	and $16, 7, $1
+	bne $1, $co_una_head
+
+/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */
+$co_a_loop:
+	ldl $4, 0($17)
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we found a null
+	subl $18, 8, $5
+	blt $5, $tail_loop	# we have fewer than 8 bytes to copy
+	stl $4, 0($16)
+	subl $18, 8, $18
+	beq $18, $out
+	addl $17, 8, $17
+	addl $16, 8, $16
+	br $31, $co_a_loop
+
+/* src and dst are co-aligned but have unaligned address */
+$co_una_head:
+	ldl_u $4, 0($17)
+	extll $4, $16, $4
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we found a null
+	ldi $6, 8($31)
+	subl $6, $1, $6
+	addl $17, $6, $17	# prepare addr of middle part
+	subl $18, $6, $18	# subtract the bytes about to be copied
+
+/* copy the unaligned part in loop */
+$co_una_head_loop:
+	stb $4, 0($16)
+	addl $16, 1, $16
+	subl $6, 1, $6
+	beq $6, $co_a_loop
+	addl $4, 1, $4
+	br $31, $co_una_head_loop
+
+/* src and dst are not co-aligned */
+$mis_aligned:
+	and $16, 7, $1
+	beq $1, $mis_a_dst
+
+$mis_una_head:
+	ldi $6, 8($31)
+	subl $6, $1, $6
+
+/* copy the first few bytes to make dst aligned */
+$mis_una_head_loop:
+	ldbu $4, 0($17)
+	stb $4, 0($16)
+	subl $18, 1, $18
+	beq $18, $out
+	beq $4, $null_padding	# we have reached null
+	addl $17, 1, $17
+	addl $16, 1, $16
+	subl $6, 1, $6
+	beq $6, $mis_a_dst
+	br $31, $mis_una_head_loop
+
+/* dst has aligned addr */
+$mis_a_dst:
+	and $17, 7, $1
+
+$mis_a_dst_loop:
+	ldl_u $4, 0($17)
+	ldl_u $5, 7($17)
+	extll $4, $1, $4
+	exthl $5, $1, $5
+	bis $4, $5, $4
+	cmpgeb $31, $4, $5
+	bne $5, $tail_loop	# we found a null
+	subl $18, 8, $5
+	blt $5, $tail_loop	# we have fewer than 8 bytes to copy
+	stl $4, 0($16)
+	subl $18, 8, $18
+	beq $5, $out
+	addl $17, 8, $17
+	addl $16, 8, $16
+	br $31, $mis_a_dst_loop
+
+/* we have found a null in the last few bytes, copy one byte at a time */
+$tail_loop:
+	ldbu $4, 0($17)
+	stb $4, 0($16)
+	subl $18, 1, $18
+	beq $18, $out
+	beq $4, $null_padding	# we have reached null
+	addl $17, 1, $17
+	addl $16, 1, $16
+	br $31, $tail_loop
+
+$null_padding:
+	addl $16, 1, $16
+	subl $18, 1, $18
+	stb $31, 0($16)
+	beq $18, $out
+	br $31, $null_padding
+
+/* copy is done, return */
+$out:
+	ret
+
+	.end strncpy
+	EXPORT_SYMBOL(strncpy)
diff --git a/arch/sw_64/lib/uaccess_flushcache.c b/arch/sw_64/lib/uaccess_flushcache.c
new file mode 100644
index 000000000000..353d5ac15248
--- /dev/null
+++ b/arch/sw_64/lib/uaccess_flushcache.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+
+void memcpy_flushcache(void *dst, const void *src, size_t cnt)
+{
+	memcpy(dst, src, cnt);
+	flush_cache_all();
+}
+EXPORT_SYMBOL_GPL(memcpy_flushcache);
+
+void 
memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len) +{ + memcpy_flushcache(to, page_address(page) + offset, len); +} + +unsigned long __copy_user_flushcache(void *to, const void __user *from, + unsigned long n) +{ + unsigned long rc = __copy_from_user(to, from, n); + + flush_cache_all(); + return rc; +} + +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size) +{ + flush_cache_all(); +} +EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); + +void arch_invalidate_pmem(void *addr, size_t size) +{ + flush_cache_all(); +} +EXPORT_SYMBOL_GPL(arch_invalidate_pmem); +#endif diff --git a/arch/sw_64/lib/udelay.c b/arch/sw_64/lib/udelay.c new file mode 100644 index 000000000000..59ca8a97d748 --- /dev/null +++ b/arch/sw_64/lib/udelay.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1993, 2000 Linus Torvalds + * + * Delay routines, using a pre-computed "loops_per_jiffy" value. + */ + +#include + +/* + * Use only for very small delays (< 1 msec). + * + * The active part of our cycle counter is only 32-bits wide, and + * we're treating the difference between two marks as signed. On + * a 1GHz box, that's about 2 seconds. + */ +void __delay(unsigned long loops) +{ + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(__delay); + +void udelay(unsigned long usecs) +{ + unsigned long loops = usecs * get_cpu_freq() / 1000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(udelay); + +void ndelay(unsigned long nsecs) +{ + unsigned long loops = nsecs * get_cpu_freq() / 1000000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(ndelay); -- Gitee From 3034a80849853774568ac8cf2c33bdc287655770 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:39 +0800 Subject: [PATCH 307/953] anolis: sw64: add VDSO support ANBZ: #4688 Add VDSO support for SW64. 
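
For reviewers: the vDSO exports __vdso_gettimeofday, __vdso_clock_gettime
and __vdso_rt_sigreturn. A minimal userspace sketch of how the timekeeping
entry points get exercised (assuming a libc that routes the standard POSIX
calls through the vDSO; nothing in this snippet is added by the patch
itself):

	#include <stdio.h>
	#include <time.h>
	#include <sys/time.h>

	int main(void)
	{
		struct timespec ts;
		struct timeval tv;

		/* Serviced by __vdso_clock_gettime once the vDSO is mapped. */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		/* Serviced by __vdso_gettimeofday. */
		gettimeofday(&tv, NULL);
		printf("mono %lld.%09ld wall %lld.%06ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec,
		       (long long)tv.tv_sec, (long)tv.tv_usec);
		return 0;
	}

The coarse clock IDs (CLOCK_REALTIME_COARSE, CLOCK_MONOTONIC_COARSE) are
served from the cached vdso_data values without reading the cycle counter;
unrecognized clock IDs fall back to the real clock_gettime syscall via
syscall_fallback().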
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/vdso.h | 116 ++++++++++++++ arch/sw_64/kernel/vdso.c | 143 ++++++++++++++++++ arch/sw_64/kernel/vdso/.gitignore | 4 + arch/sw_64/kernel/vdso/Makefile | 74 +++++++++ arch/sw_64/kernel/vdso/so2s.sh | 4 + arch/sw_64/kernel/vdso/vdso.S | 30 ++++ arch/sw_64/kernel/vdso/vdso.lds.S | 89 +++++++++++ arch/sw_64/kernel/vdso/vgettimeofday.c | 201 +++++++++++++++++++++++++ arch/sw_64/kernel/vdso/vrt_sigreturn.S | 68 +++++++++ 9 files changed, 729 insertions(+) create mode 100644 arch/sw_64/include/asm/vdso.h create mode 100644 arch/sw_64/kernel/vdso.c create mode 100644 arch/sw_64/kernel/vdso/.gitignore create mode 100644 arch/sw_64/kernel/vdso/Makefile create mode 100755 arch/sw_64/kernel/vdso/so2s.sh create mode 100644 arch/sw_64/kernel/vdso/vdso.S create mode 100644 arch/sw_64/kernel/vdso/vdso.lds.S create mode 100644 arch/sw_64/kernel/vdso/vgettimeofday.c create mode 100644 arch/sw_64/kernel/vdso/vrt_sigreturn.S diff --git a/arch/sw_64/include/asm/vdso.h b/arch/sw_64/include/asm/vdso.h new file mode 100644 index 000000000000..7a2e23c648f3 --- /dev/null +++ b/arch/sw_64/include/asm/vdso.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 SW64 Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_VDSO_H +#define _ASM_SW64_VDSO_H + +#ifdef __KERNEL__ + +/* + * Default link address for the vDSO. + * Since we randomise the VDSO mapping, there's little point in trying + * to prelink this. + */ +#define VDSO_LBASE 0x0 + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#define VDSO_SYMBOL(base, name) \ +({ \ + extern const unsigned long __vdso_##name; \ + ((unsigned long)(base) + __vdso_##name); \ +}) + + +struct vdso_data { + u64 xtime_sec; + u64 xtime_nsec; + u64 wall_to_mono_sec; + u64 wall_to_mono_nsec; + u32 cs_shift; + u32 cs_mult; + u64 cs_cycle_last; + u64 cs_mask; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 seq_count; +}; + +static inline unsigned long get_vdso_base(void) +{ + unsigned long addr, tmp; + __asm__ __volatile__( + " br %1, 1f\n" + "1: ldi %0, 0(%1)\n" + : "=r" (addr), "=&r" (tmp) + ::); + + addr &= ~(PAGE_SIZE - 1); + return addr; +} + +static inline const struct vdso_data *get_vdso_data(void) +{ + return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE); +} + +static inline u32 vdso_data_read_begin(const struct vdso_data *data) +{ + u32 seq; + + while (true) { + seq = READ_ONCE(data->seq_count); + if (likely(!(seq & 1))) { + /* Paired with smp_wmb() in vdso_data_write_*(). */ + smp_rmb(); + return seq; + } + + cpu_relax(); + } +} + +static inline bool vdso_data_read_retry(const struct vdso_data *data, + u32 start_seq) +{ + /* Paired with smp_wmb() in vdso_data_write_*(). 
*/ + smp_rmb(); + return unlikely(data->seq_count != start_seq); +} + +static inline void vdso_data_write_begin(struct vdso_data *data) +{ + ++data->seq_count; + + /* Ensure sequence update is written before other data page values. */ + smp_wmb(); +} + +static inline void vdso_data_write_end(struct vdso_data *data) +{ + /* Ensure data values are written before updating sequence again. */ + smp_wmb(); + ++data->seq_count; +} + + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_SW64_VDSO_H */ diff --git a/arch/sw_64/kernel/vdso.c b/arch/sw_64/kernel/vdso.c new file mode 100644 index 000000000000..b4126cbaa4bd --- /dev/null +++ b/arch/sw_64/kernel/vdso.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include + +#include + +extern char vdso_start, vdso_end; +static unsigned long vdso_pages; +static struct page **vdso_pagelist; + +/* + * The vDSO data page. + */ +static union { + struct vdso_data data; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; +struct vdso_data *vdso_data = &vdso_data_store.data; + +static struct vm_special_mapping vdso_spec[2]; + +static int __init vdso_init(void) +{ + int i; + + if (memcmp(&vdso_start, "\177ELF", 4)) { + pr_err("vDSO is not a valid ELF object!\n"); + return -EINVAL; + } + + vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; + pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", + vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); + + /* Allocate the vDSO pagelist, plus a page for the data. */ + vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), + GFP_KERNEL); + if (vdso_pagelist == NULL) + return -ENOMEM; + + /* Grab the vDSO data page. */ + vdso_pagelist[0] = virt_to_page(vdso_data); + + /* Grab the vDSO code pages. 
*/ + for (i = 0; i < vdso_pages; i++) + vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); + + /* Populate the special mapping structures */ + vdso_spec[0] = (struct vm_special_mapping) { + .name = "[vvar]", + .pages = vdso_pagelist, + }; + + vdso_spec[1] = (struct vm_special_mapping) { + .name = "[vdso]", + .pages = &vdso_pagelist[1], + }; + + return 0; +} +arch_initcall(vdso_init); + +int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp) +{ + struct mm_struct *mm = current->mm; + unsigned long vdso_base, vdso_text_len, vdso_mapping_len; + void *ret; + + vdso_text_len = vdso_pages << PAGE_SHIFT; + /* Be sure to map the data page */ + vdso_mapping_len = vdso_text_len + PAGE_SIZE; + + if (down_write_killable(&mm->mmap_lock)) + return -EINTR; + vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + ret = ERR_PTR(vdso_base); + goto up_fail; + } + ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + VM_READ|VM_MAYREAD, + &vdso_spec[0]); + if (IS_ERR(ret)) + goto up_fail; + + vdso_base += PAGE_SIZE; + mm->context.vdso = (void *)vdso_base; + ret = _install_special_mapping(mm, vdso_base, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_spec[1]); + if (IS_ERR(ret)) + goto up_fail; + + up_write(&mm->mmap_lock); + return 0; + +up_fail: + mm->context.vdso = NULL; + up_write(&mm->mmap_lock); + return PTR_ERR(ret); +} + +void update_vsyscall(struct timekeeper *tk) +{ + vdso_data_write_begin(vdso_data); + + vdso_data->xtime_sec = tk->xtime_sec; + vdso_data->xtime_nsec = tk->tkr_mono.xtime_nsec; + vdso_data->wall_to_mono_sec = tk->wall_to_monotonic.tv_sec; + vdso_data->wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec; + vdso_data->cs_shift = tk->tkr_mono.shift; + + vdso_data->cs_mult = tk->tkr_mono.mult; + vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; + vdso_data->cs_mask = tk->tkr_mono.mask; + + vdso_data_write_end(vdso_data); +} + +void update_vsyscall_tz(void) +{ + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; + vdso_data->tz_dsttime = sys_tz.tz_dsttime; +} diff --git a/arch/sw_64/kernel/vdso/.gitignore b/arch/sw_64/kernel/vdso/.gitignore new file mode 100644 index 000000000000..2b6a8b0ed7ca --- /dev/null +++ b/arch/sw_64/kernel/vdso/.gitignore @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +vdso.lds +vdso.so.dbg.tmp +vdso-syms.S diff --git a/arch/sw_64/kernel/vdso/Makefile b/arch/sw_64/kernel/vdso/Makefile new file mode 100644 index 000000000000..190cc345dbb9 --- /dev/null +++ b/arch/sw_64/kernel/vdso/Makefile @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0 +# Symbols present in the vdso +vdso-syms = rt_sigreturn gettimeofday + +# Files to link into the vdso +obj-vdso = $(patsubst %, v%.o, $(vdso-syms)) + +# Build rules +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S +obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) + +obj-y += vdso.o vdso-syms.o +extra-y += vdso.lds +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# vDSO code runs in userspace and -pg doesn't help with profiling anyway. +CFLAGS_REMOVE_vdso.o = -pg +CFLAGS_REMOVE_vrt_sigreturn.o = -pg +CFLAGS_REMOVE_vgettimeofday.o = -pg + +ifdef CONFIG_FEEDBACK_COLLECT +# vDSO code runs in userspace, not collecting feedback data. 
+CFLAGS_REMOVE_vdso.o = -ffeedback-generate +CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate +CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate +endif + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n + +# Force dependency +$(obj)/vdso.o: $(obj)/vdso.so + +# link rule for the .so file, .lds has to be first +SYSCFLAGS_vdso.so.dbg = $(c_flags) +$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold) +SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + $(call cc-ldoption, -Wl$(comma)--hash-style=both) + +$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE + $(call if_changed,so2s) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# actual build commands +# The DSO images are built using a special linker script +# Add -lgcc so tilepro gets static muldi3 and lshrdi3 definitions. +# Make sure only to export the intended __vdso_xxx symbol offsets. +quiet_cmd_vdsold = VDSOLD $@ + cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ + -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ + $(CROSS_COMPILE)objcopy \ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp + +# Extracts symbol offsets from the VDSO, converting them into an assembly file +# that contains the same symbols at the same offsets. +quiet_cmd_so2s = SO2S $@ + cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + + +vdso_install: vdso.so diff --git a/arch/sw_64/kernel/vdso/so2s.sh b/arch/sw_64/kernel/vdso/so2s.sh new file mode 100755 index 000000000000..e1763af8e730 --- /dev/null +++ b/arch/sw_64/kernel/vdso/so2s.sh @@ -0,0 +1,4 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ + +grep "__vdso_" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_.*\)*/.globl\t\2\n\2:\n.quad\t0x\1/' diff --git a/arch/sw_64/kernel/vdso/vdso.S b/arch/sw_64/kernel/vdso/vdso.S new file mode 100644 index 000000000000..edd9be27db9d --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.S @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include + + __PAGE_ALIGNED_DATA + + .globl vdso_start, vdso_end + .balign PAGE_SIZE +vdso_start: + .incbin "arch/sw_64/kernel/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous diff --git a/arch/sw_64/kernel/vdso/vdso.lds.S b/arch/sw_64/kernel/vdso/vdso.lds.S new file mode 100644 index 000000000000..de1782ccb7b6 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.lds.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GNU linker script for the VDSO library. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Heavily based on the vDSO linker scripts for other archs. + */ + +#include +#include +#include + +OUTPUT_FORMAT("elf64-sw_64") +OUTPUT_ARCH(sw_64) + +SECTIONS +{ + PROVIDE(_vdso_data = . - PAGE_SIZE); + . = VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + .text : { *(.text*) } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { *(.rodata*) } :text + + _end = .; + PROVIDE(end = .); + + /DISCARD/ : { + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + LINUX_2.6 { + global: + __vdso_rt_sigreturn; + __vdso_gettimeofday; + __vdso_clock_gettime; + local: *; + }; +} diff --git a/arch/sw_64/kernel/vdso/vgettimeofday.c b/arch/sw_64/kernel/vdso/vgettimeofday.c new file mode 100644 index 000000000000..0aa16e988e88 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vgettimeofday.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ */ + +#include + +#include +#include +#include +#include + +static __always_inline int syscall_fallback(clockid_t clkid, struct timespec64 *ts) +{ + register int r0 asm("$0"); + register unsigned long r19 asm("$19"); + asm volatile( + " mov %0, $16\n" + " mov %1, $17\n" + " ldi $0, %2\n" + " sys_call %3\n" + :: "r"(clkid), "r"(ts), "i"(__NR_clock_gettime), "i"(HMC_callsys) + : "$0", "$16", "$17", "$19"); + if (unlikely(r19)) + return -r0; + else + return r0; +} + +static __always_inline int do_realtime_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + } while (vdso_data_read_retry(data, start_seq)); + + return 0; +} + + +static __always_inline int do_monotonic_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + + to_mono_sec = data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + timespec64_add_ns(ts, to_mono_nsec); + + return 0; +} + +#if defined(CONFIG_SUBARCH_C3B) +static __always_inline u64 read_longtime(void) +{ + register unsigned long __r0 __asm__("$0"); + + __asm__ __volatile__( + "sys_call %1" : "=r"(__r0) : "i" (HMC_longtime)); + + return __r0; +} +#elif defined(CONFIG_SUBARCH_C4) +static __always_inline u64 read_longtime(void) +{ + return read_csr(CSR_SHTCLOCK); +} +#endif + +static __always_inline u64 get_ns(const struct vdso_data *data) +{ + u64 cycle_now, delta, nsec; + + cycle_now = read_longtime(); + delta = (cycle_now - data->cs_cycle_last) & data->cs_mask; + + nsec = (delta * data->cs_mult) + data->xtime_nsec; + nsec >>= data->cs_shift; + + return nsec; +} + + +static __always_inline int do_realtime(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns); + + return 0; +} + +static __always_inline int do_monotonic(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + + to_mono_sec = data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns + to_mono_nsec); + + return 0; +} + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) +{ + const struct vdso_data *data = get_vdso_data(); + struct timespec64 ts; + int ret; + + ret = do_realtime(&ts, data); + if (ret) + return ret; + + if (tv) { + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / 1000; + } + + if (tz) { + tz->tz_minuteswest = data->tz_minuteswest; + tz->tz_dsttime = data->tz_dsttime; + } + + return 0; +} + +int __vdso_clock_gettime(clockid_t clkid, struct timespec64 *ts) +{ + const struct vdso_data *data = get_vdso_data(); + int ret; + + switch (clkid) { + case CLOCK_REALTIME_COARSE: + ret = do_realtime_coarse(ts, data); + break; + case CLOCK_MONOTONIC_COARSE: + ret = 
do_monotonic_coarse(ts, data); + break; + case CLOCK_REALTIME: + ret = do_realtime(ts, data); + break; + case CLOCK_MONOTONIC: + ret = do_monotonic(ts, data); + break; + default: + /* fall back to a syscall */ + ret = syscall_fallback(clkid, ts); + } + + return ret; +} diff --git a/arch/sw_64/kernel/vdso/vrt_sigreturn.S b/arch/sw_64/kernel/vdso/vrt_sigreturn.S new file mode 100644 index 000000000000..cdbf6501ad64 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vrt_sigreturn.S @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Sigreturn trampoline for returning from a signal when the SA_RESTORER + * flag is not set. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include + + .text + + .macro SIGCONTEXT_REGS_I base, from = 0 + .cfi_offset \from, \base + (4 + \from) * 8 + .if 30 - \from + SIGCONTEXT_REGS_I \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_F base, from = 32 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + (\from - 32) * 32 + .if 62 - \from + SIGCONTEXT_REGS_F \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_V base, from = 67 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + ((\from - 67) & 0x1f) * 32 + (((\from - 67) >> 5) + 1) * 8 + .if 161 - \from + SIGCONTEXT_REGS_V \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS base + SIGCONTEXT_REGS_I \base + SIGCONTEXT_REGS_F \base + SIGCONTEXT_REGS_V \base + .cfi_offset 63, \base + (4 + 32 + 1) * 8 + 32 * 32 + .cfi_offset 64, \base + 2 * 8 + .endm + + .cfi_startproc + .cfi_return_column 64 + .cfi_signal_frame + SIGCONTEXT_REGS (-RT_SIGFRAME_SIZE + RT_SIGFRAME_MCTX) + .cfi_def_cfa_offset RT_SIGFRAME_SIZE + + nop +ENTRY(__vdso_rt_sigreturn) + mov $sp, $16 + ldi $0, __NR_rt_sigreturn + sys_call HMC_callsys +ENDPROC(__vdso_rt_sigreturn) + .cfi_endproc -- Gitee From 6caacba24daaaf7fb29e6885508ff4349028db87 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:35 +0800 Subject: [PATCH 308/953] anolis: sw64: add SMP support ANBZ: #4688 Add Symmetric Multi-Processing (SMP) support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/smp.h | 95 ++++++ arch/sw_64/kernel/smp.c | 578 +++++++++++++++++++++++++++++++++++ 2 files changed, 673 insertions(+) create mode 100644 arch/sw_64/include/asm/smp.h create mode 100644 arch/sw_64/kernel/smp.c diff --git a/arch/sw_64/include/asm/smp.h b/arch/sw_64/include/asm/smp.h new file mode 100644 index 000000000000..3a2fcf62b30c --- /dev/null +++ b/arch/sw_64/include/asm/smp.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SMP_H +#define _ASM_SW64_SMP_H + +#include +#include +#include +#include +#include + +#include +#include +#include + +/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. 
:-( */ + +extern cpumask_t core_start; + +static inline unsigned long +read_vpcr(void) +{ + register unsigned long __r0 __asm__("$0"); + __asm__ __volatile__( + "sys_call %1 #rvpcr" + : "=r"(__r0) + : "i" (0x39) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#ifdef CONFIG_SMP +/* SMP initialization hook for setup_arch */ +void __init setup_smp(void); + +#include + +/* smp reset control block */ +struct smp_rcb_struct { + void (*restart_entry)(unsigned long args); + unsigned long restart_args; + unsigned long ready; + unsigned long init_done; +}; + +#define INIT_SMP_RCB ((struct smp_rcb_struct *) __va(0x820000UL)) + + +#ifdef GENERATING_ASM_OFFSETS +#define raw_smp_processor_id() (0) +#else +#include +#define raw_smp_processor_id() (*((unsigned int *)((void *)current + TASK_CPU))) +#endif +#define hard_smp_processor_id() cpu_to_rcid(raw_smp_processor_id()) + +/* The map from sequential logical cpu number to hard cid. */ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[cpu] +#define cpu_physical_id(cpu) __cpu_to_rcid[cpu] + +extern unsigned long tidle_pcb[NR_CPUS]; +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void); +void __cpu_die(unsigned int cpu); +#endif /* CONFIG_HOTPLUG_CPU */ + +#else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 +#define smp_call_function_on_cpu(func, info, wait, cpu) ({ 0; }) +/* The map from sequential logical cpu number to hard cid. */ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[0] +#define cpu_physical_id(cpu) __cpu_to_rcid[0] +#endif /* CONFIG_SMP */ + +#define NO_PROC_ID (-1) + +static inline void send_ipi(int cpu, unsigned long type) +{ + int rcid; + + rcid = cpu_to_rcid(cpu); + + if (is_in_guest()) + hcall(HCALL_IVI, rcid, type, 0); + else + sendii(rcid, type, 0); +} + +#define reset_cpu(cpu) send_ipi((cpu), II_RESET) + +#endif /* _ASM_SW64_SMP_H */ diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c new file mode 100644 index 000000000000..6d1aab4be1c0 --- /dev/null +++ b/arch/sw_64/kernel/smp.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/smp.c + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +struct smp_rcb_struct *smp_rcb; + +extern struct cpuinfo_sw64 cpu_data[NR_CPUS]; + +int smp_booted; + +void *idle_task_pointer[NR_CPUS]; + +/* State of each CPU */ +DEFINE_PER_CPU(int, cpu_state) = { 0 }; + +/* A collection of single bit ipi messages. */ +static struct { + unsigned long bits ____cacheline_aligned; +} ipi_data[NR_CPUS] __cacheline_aligned; + +enum ipi_message_type { + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CPU_STOP, +}; + +int smp_num_cpus = 1; /* Number that came online. */ +EXPORT_SYMBOL(smp_num_cpus); + +#define send_sleep_interrupt(cpu) send_ipi((cpu), II_SLEEP) +#define send_wakeup_interrupt(cpu) send_ipi((cpu), II_WAKE) + +/* + * Where secondaries begin a life of C. + */ +void smp_callin(void) +{ + int cpuid = smp_processor_id(); + + local_irq_disable(); + + if (cpu_online(cpuid)) { + pr_err("??, cpu 0x%x already present??\n", cpuid); + BUG(); + } + set_cpu_online(cpuid, true); + + /* clear ksp, usp */ + wrksp(0); + wrusp(0); + + /* Set trap vectors. */ + trap_init(); + + /* Set interrupt vector. 
*/
+	if (is_in_host()) {
+		write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN);
+		write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN);
+		write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN);
+		write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN);
+	}
+	wrent(entInt, 0);
+
+	/* Get our local ticker going. */
+	sw64_setup_timer();
+
+	/* All kernel threads share the same mm context. */
+	mmgrab(&init_mm);
+	current->active_mm = &init_mm;
+	/* update csr:ptbr */
+	update_ptbr_sys(virt_to_phys(init_mm.pgd));
+
+	/* inform the notifiers about the new cpu */
+	notify_cpu_starting(cpuid);
+
+	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
+	per_cpu(hard_node_id, cpuid) = rcid_to_domain_id(cpu_to_rcid(cpuid));
+
+	/* Must have completely accurate bogos. */
+	local_irq_enable();
+
+	/* CPU 0 initializes preempt_count in start_kernel(); other SMP CPUs do it here. */
+	preempt_disable();
+
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+
+/*
+ * Set ready for secondary cpu.
+ */
+static inline void set_secondary_ready(int cpuid)
+{
+	smp_rcb->ready = cpuid;
+}
+
+/*
+ * Convince the hmcode to have a secondary cpu begin execution.
+ */
+static int secondary_cpu_start(int cpuid, struct task_struct *idle)
+{
+	unsigned long timeout;
+	/*
+	 * Precalculate the target ksp.
+	 */
+	idle_task_pointer[cpuid] = idle;
+
+	set_cpu_online(cpuid, false);
+	wmb();
+
+	set_secondary_ready(cpuid);
+
+	/* Wait 10 seconds for secondary cpu. */
+	timeout = jiffies + 10*HZ;
+	while (time_before(jiffies, timeout)) {
+		if (cpu_online(cpuid))
+			goto started;
+		udelay(10);
+		barrier();
+	}
+	pr_err("SMP: Processor %d failed to start.\n", cpuid);
+	return -1;
+
+started:
+	store_cpu_topology(cpuid);
+	numa_add_cpu(cpuid);
+	return 0;
+}
+
+/*
+ * Bring one cpu online.
+ */
+static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
+{
+	per_cpu(cpu_state, cpuid) = CPU_UP_PREPARE;
+
+	return secondary_cpu_start(cpuid, idle);
+}
+
+static void __init process_nr_cpu_ids(void)
+{
+	int i;
+
+	for (i = nr_cpu_ids; i < NR_CPUS; i++) {
+		set_cpu_possible(i, false);
+		set_cpu_present(i, false);
+	}
+
+	nr_cpu_ids = num_possible_cpus();
+}
+
+void __init smp_rcb_init(void)
+{
+	smp_rcb = INIT_SMP_RCB;
+	memset(smp_rcb, 0, sizeof(struct smp_rcb_struct));
+	/* Set up the SMP_RCB fields used to activate secondary CPUs */
+	smp_rcb->restart_entry = __smp_callin;
+	smp_rcb->init_done = 0xDEADBEEFUL;
+	mb();
+}
+
+/*
+ * Called from setup_arch. Detect an SMP system and which processors
+ * are present.
+ */
+void __init setup_smp(void)
+{
+	int i = 0, num = 0;
+
+	init_cpu_possible(cpu_none_mask);
+
+	/* For unified kernel, NR_CPUS is the maximum possible value */
+	for (; i < NR_CPUS; i++) {
+		if (cpu_to_rcid(i) != -1) {
+			set_cpu_possible(num, true);
+			store_cpu_data(num);
+			if (!cpumask_test_cpu(i, &cpu_offline))
+				set_cpu_present(num, true);
+			num++;
+		}
+	}
+
+	process_nr_cpu_ids();
+
+	pr_info("Detected %u possible CPU(s), %u CPU(s) are present\n",
+		nr_cpu_ids, num_present_cpus());
+
+	smp_rcb_init();
+}
+/*
+ * Called by smp_init to prepare the secondaries
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	unsigned int cpu;
+	/* Take care of some initial bookkeeping. */
+	memset(ipi_data, 0, sizeof(ipi_data));
+
+	init_cpu_topology();
+	store_cpu_topology(smp_processor_id());
+	numa_add_cpu(smp_processor_id());
+
+	for_each_possible_cpu(cpu) {
+		numa_store_cpu_info(cpu);
+	}
+
+	/* Nothing to do on a UP box, or when told not to. 
*/ + if (nr_cpu_ids == 1 || max_cpus == 0) { + init_cpu_possible(cpumask_of(0)); + init_cpu_present(cpumask_of(0)); + pr_info("SMP mode deactivated.\n"); + return; + } + + pr_info("SMP starting up secondaries.\n"); +} + +void smp_prepare_boot_cpu(void) +{ + int me = smp_processor_id(); + + per_cpu(cpu_state, me) = CPU_ONLINE; +} + +int vt_cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + pr_info("%s: cpu = %d\n", __func__, cpu); + + wmb(); + smp_rcb->ready = 0; + if (smp_booted) { + /* irq must be disabled before reset vCPU */ + reset_cpu(cpu); + } + smp_boot_one_cpu(cpu, tidle); + + return cpu_online(cpu) ? 0 : -EIO; +} + +#ifdef CONFIG_SUBARCH_C3B +DECLARE_STATIC_KEY_FALSE(use_tc_as_sched_clock); +#endif + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + if (is_in_guest()) + return vt_cpu_up(cpu, tidle); + + wmb(); + smp_rcb->ready = 0; + + /* send wake up signal */ + send_wakeup_interrupt(cpu); + /* send reset signal */ + if (smp_booted) { + if (is_in_host()) { + reset_cpu(cpu); + } else { + while (1) + cpu_relax(); + } + } + smp_boot_one_cpu(cpu, tidle); + +#ifdef CONFIG_SUBARCH_C3B + if (static_branch_likely(&use_tc_as_sched_clock)) { + if (smp_booted) { + tc_sync_clear(); + smp_call_function_single(cpu, tc_sync_ready, NULL, 0); + tc_sync_set(); + } + } +#endif + + return cpu_online(cpu) ? 0 : -EIO; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + smp_booted = 1; + pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + + +static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + mb(); + for_each_cpu(i, to_whom) + set_bit(operation, &ipi_data[i].bits); + + mb(); + for_each_cpu(i, to_whom) + send_ipi(i, II_II0); +} + +static void ipi_cpu_stop(int cpu) +{ + local_irq_disable(); + set_cpu_online(cpu, false); + while (1) + wait_for_interrupt(); +} + +void handle_ipi(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + unsigned long *pending_ipis = &ipi_data[cpu].bits; + unsigned long ops; + + mb(); /* Order interrupt and bit testing. */ + while ((ops = xchg(pending_ipis, 0)) != 0) { + mb(); /* Order bit clearing and data access. */ + do { + unsigned long which; + + which = ops & -ops; + ops &= ~which; + which = __ffs(which); + + switch (which) { + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + break; + + case IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; + + default: + pr_crit("Unknown IPI on CPU %d: %lu\n", cpu, which); + break; + } + } while (ops); + + mb(); /* Order data access and bit testing. 
*/
+	}
+
+	cpu_data[cpu].ipi_count++;
+}
+
+void arch_smp_send_reschedule(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+EXPORT_SYMBOL(arch_smp_send_reschedule);
+
+void smp_send_stop(void)
+{
+	unsigned long timeout;
+
+	if (num_online_cpus() > 1) {
+		cpumask_t mask;
+
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
+
+		if (system_state <= SYSTEM_RUNNING)
+			pr_crit("SMP: stopping secondary CPUs\n");
+		send_ipi_message(&mask, IPI_CPU_STOP);
+	}
+
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
+
+	if (num_online_cpus() > 1)
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(cpu_online_mask));
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+static void ipi_flush_tlb_all(void *ignored)
+{
+	local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+	/* Although we don't have any data to pass, we do want to
+	 * synchronize with the other processors.
+	 */
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+static void ipi_flush_tlb_mm(void *x)
+{
+	local_flush_tlb_mm((struct mm_struct *)x);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+
+	/* This happens as a result of exit_mmap().
+	 * Should we clear mm->context.asid[] here?
+	 */
+	if (atomic_read(&mm->mm_users) == 0)
+		return;
+
+	preempt_disable();
+
+	if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) {
+		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
+	} else {
+		int cpu, this_cpu = smp_processor_id();
+
+		for_each_online_cpu(cpu) {
+			if (cpu != this_cpu && mm->context.asid[cpu])
+				mm->context.asid[cpu] = 0;
+		}
+		local_flush_tlb_mm(mm);
+	}
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+struct flush_tlb_info {
+	struct vm_area_struct *vma;
+	unsigned long addr;
+#define start addr
+	unsigned long end;
+};
+
+static void ipi_flush_tlb_page(void *x)
+{
+	struct flush_tlb_info *info = x;
+
+	local_flush_tlb_page(info->vma, info->addr);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	preempt_disable();
+
+	if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) {
+		struct flush_tlb_info info = {
+			.vma = vma,
+			.addr = addr,
+		};
+		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_page, &info, 1);
+	} else {
+		int cpu, this_cpu = smp_processor_id();
+
+		for_each_online_cpu(cpu) {
+			if (cpu != this_cpu && mm->context.asid[cpu])
+				mm->context.asid[cpu] = 0;
+		}
+		local_flush_tlb_page(vma, addr);
+	}
+
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+/* For now this always flushes the whole user TLB. To be optimized. 
*/ +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + flush_tlb_mm(vma->vm_mm); +} +EXPORT_SYMBOL(flush_tlb_range); + +static void ipi_flush_tlb_kernel_range(void *x) +{ + struct flush_tlb_info *info = x; + + local_flush_tlb_kernel_range(info->start, info->end); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_tlb_info info = { + .start = start, + .end = end, + }; + + on_each_cpu(ipi_flush_tlb_kernel_range, &info, 1); +} +EXPORT_SYMBOL(flush_tlb_kernel_range); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ + int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + remove_cpu_topology(cpu); + numa_remove_cpu(cpu); + clear_tasks_mm_cpumask(cpu); + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + /* We don't do anything here: idle task is faking death itself. */ + unsigned int i; + + for (i = 0; i < 10; i++) { + /* They ack this in play_dead by setting CPU_DEAD */ + if (per_cpu(cpu_state, cpu) == CPU_DEAD) { + if (system_state == SYSTEM_RUNNING) + pr_info("CPU %u is now offline\n", cpu); + smp_rcb->ready = 0; + return; + } + msleep(100); + } + pr_err("CPU %u didn't die...\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + idle_task_exit(); + mb(); + __this_cpu_write(cpu_state, CPU_DEAD); + fixup_irqs(); + local_irq_disable(); + + if (is_in_guest()) { + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + hcall(HCALL_STOP, 0, 0, 0); + } else { + wrtimer(0); + } + +#ifdef CONFIG_SUSPEND + sleepen(); + send_sleep_interrupt(smp_processor_id()); + while (1) + asm("nop"); +#else + asm volatile("memb"); + asm volatile("halt"); +#endif +} +#endif -- Gitee From 6ece4b54e9f3dec8fd5ccdaa577b52960904632a Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:28 +0800 Subject: [PATCH 309/953] anolis: sw64: add NUMA support ANBZ: #4688 Add Non Uniform Memory Access (NUMA) support for SW64. 
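Node layout is discovered in order of preference: ACPI SRAT via
acpi_numa_init(), then the devicetree via of_numa_init(), and finally
manual_numa_init(), which derives nodes from the per-socket memory
table; booting with "numa=off" skips discovery and fakes a single node
spanning all of DRAM. Inter-node distances are kept in a flat
nr_node_ids * nr_node_ids array of u8, pre-filled with LOCAL_DISTANCE
on the diagonal and REMOTE_DISTANCE elsewhere.

As a hypothetical illustration (the values are not taken from any real
SW64 firmware table), a two-node board whose remote hop costs 20 would
be described through the helpers added here:

	numa_set_distance(0, 0, LOCAL_DISTANCE);
	numa_set_distance(1, 1, LOCAL_DISTANCE);
	numa_set_distance(0, 1, 20);
	numa_set_distance(1, 0, 20);

after which __node_distance(0, 1) reads the table back as 20.
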
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/numa.h | 36 +++ arch/sw_64/mm/numa.c | 466 ++++++++++++++++++++++++++++++++++ 2 files changed, 502 insertions(+) create mode 100644 arch/sw_64/include/asm/numa.h create mode 100644 arch/sw_64/mm/numa.c diff --git a/arch/sw_64/include/asm/numa.h b/arch/sw_64/include/asm/numa.h new file mode 100644 index 000000000000..a2e3171caff1 --- /dev/null +++ b/arch/sw_64/include/asm/numa.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_NUMA_H +#define _ASM_SW64_NUMA_H + +#include +#include + +#ifdef CONFIG_NUMA +extern nodemask_t numa_nodes_parsed __initdata; +extern int numa_off; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); +extern void numa_clear_node(unsigned int cpu); +extern void __init numa_set_distance(int from, int to, int distance); +extern void __init early_map_cpu_to_node(unsigned int cpu, int nid); + +#else /* CONFIG_NUMA */ + +static inline void numa_clear_node(unsigned int cpu) { } +static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_SW64_NUMA_H */ diff --git a/arch/sw_64/mm/numa.c b/arch/sw_64/mm/numa.c new file mode 100644 index 000000000000..fcf1f97a7840 --- /dev/null +++ b/arch/sw_64/mm/numa.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DISCONTIGMEM NUMA sw64 support. + */ + +#include +#include +#include +#include + +#include + +int cpu_to_node_map[NR_CPUS]; +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +struct numa_node_desc_t numa_nodes_desc[MAX_NUMNODES]; +nodemask_t numa_nodes_parsed __initdata; + +static int numa_distance_cnt; +static u8 *numa_distance; +int numa_off; + +static __init int numa_setup(char *opt) +{ + if (!opt) + return -EINVAL; + if (!strncmp(opt, "off", 3)) + numa_off = 1; + return 0; +} +early_param("numa", numa_setup); + +/* + * Allocate node_to_cpumask_map based on number of available nodes + * Requires node_possible_map to be valid. + * + * Note: cpumask_of_node() is not valid until after this is done. + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) + */ +static void __init setup_node_to_cpumask_map(void) +{ + int node; + + /* setup nr_node_ids if not done yet */ + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); + + /* allocate and clear the mapping */ + for (node = 0; node < nr_node_ids; node++) { + alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); + cpumask_clear(node_to_cpumask_map[node]); + } + + /* cpumask_of_node() will now work */ + pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); +} + +/** + * numa_add_memblk - Set node id to memblk + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * RETURNS: + * 0 on success, -errno on failure. 
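+ *
+ * The node is also recorded in numa_nodes_parsed so that
+ * numa_register_nodes() can set it online later.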
+ */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + int ret; + + ret = memblock_set_node(start, (end - start), &memblock.memory, nid); + if (ret < 0) { + pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", + start, (end - 1), nid); + return ret; + } + + node_set(nid, numa_nodes_parsed); + return ret; +} + +/** + * Initialize NODE_DATA for a node on the local memory + */ +static void __init setup_node_data(int nid, unsigned long start_pfn, unsigned long end_pfn) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + if (start_pfn >= end_pfn) + pr_info("Initmem setup node %d []\n", nid); + + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != nid) + pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); + + node_data[nid] = nd; + memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + NODE_DATA(nid)->node_id = nid; + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; +} + +/** + * numa_free_distance + * + * Free current distance table. + */ +void __init numa_free_distance(void) +{ + size_t size; + + if (!numa_distance) + return; + + size = numa_distance_cnt * numa_distance_cnt * + sizeof(numa_distance[0]); + + memblock_free(numa_distance, size); + numa_distance_cnt = 0; + numa_distance = NULL; +} + +/** + * + * Create a new NUMA distance table. + * + */ +static int __init numa_alloc_distance(void) +{ + size_t size; + phys_addr_t phys; + int i, j; + + size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (WARN_ON(!phys)) + return -ENOMEM; + + numa_distance = __va(phys); + numa_distance_cnt = nr_node_ids; + + /* fill with the default distances */ + for (i = 0; i < numa_distance_cnt; i++) + for (j = 0; j < numa_distance_cnt; j++) { + numa_distance[i * numa_distance_cnt + j] = i == j ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + } + + pr_info("Initialized distance table, cnt=%d\n", numa_distance_cnt); + + return 0; +} + +/** + * numa_set_distance - Set inter node NUMA distance from node to node. + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. + * If distance table doesn't exist, a warning is printed. + * + * If @from or @to is higher than the highest known node or lower than zero + * or @distance doesn't make sense, the call is ignored. + * + */ +void __init numa_set_distance(int from, int to, int distance) +{ + if (!numa_distance) { + pr_warn_once("Warning: distance table not allocated yet\n"); + return; + } + + if (from >= numa_distance_cnt || to >= numa_distance_cnt || + from < 0 || to < 0) { + pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + numa_distance[from * numa_distance_cnt + to] = distance; +} + +/** + * Return NUMA distance @from to @to + */ +int __node_distance(int from, int to) +{ + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? 
LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); + +static int __init numa_register_nodes(void) +{ + int nid; + struct memblock_region *mblk; + + /* Check that valid nid is set to memblks */ + for_each_mem_region(mblk) { + pr_info("memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) { + pr_warn("Warning: invalid memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + return -EINVAL; + } + } + + /* Finally register nodes */ + for_each_node_mask(nid, numa_nodes_parsed) { + unsigned long start_pfn, end_pfn; + + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + setup_node_data(nid, start_pfn, end_pfn); + node_set_online(nid); + } + + /* Setup online nodes to actual nodes */ + node_possible_map = numa_nodes_parsed; + + return 0; +} + +static int __init numa_init(int (*init_func)(void)) +{ + int ret; + + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + numa_free_distance(); + + ret = numa_alloc_distance(); + if (ret < 0) + return ret; + + ret = init_func(); + if (ret < 0) + return ret; + + if (nodes_empty(numa_nodes_parsed)) { + pr_info("No NUMA configuration found\n"); + return -EINVAL; + } + + ret = numa_register_nodes(); + if (ret < 0) + return ret; + + setup_node_to_cpumask_map(); + + return 0; +} + +static void __init get_numa_info_socket(void) +{ + int i; + + phys_addr_t base = 0; + + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + numa_nodes_desc[i].base = base; + numa_nodes_desc[i].size = socket_desc[i].socket_mem; + base += numa_nodes_desc[i].size; + } + } +} + +static int __init manual_numa_init(void) +{ + int ret, nid; + struct memblock_region *mblk; + phys_addr_t node_base, node_size, node_end; + + if (numa_off) { + pr_info("NUMA disabled\n"); /* Forced off on command line. 
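+				 * Every memblock region is then handed to
+				 * node 0 below.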
*/ + pr_info("Faking one node at [mem %#018llx-%#018llx]\n", + memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); + for_each_mem_region(mblk) { + ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); + if (!ret) + continue; + + pr_err("NUMA init failed\n"); + return ret; + } + } else { + get_numa_info_socket(); + + for (nid = 0; nid < MAX_NUMNODES; nid++) { + node_base = numa_nodes_desc[nid].base; + node_size = numa_nodes_desc[nid].size; + node_end = node_base + node_size; + ret = 0; + + if (!node_end) + continue; + + for_each_mem_region(mblk) { + if (mblk->base >= node_base && mblk->base < node_end) { + if (mblk->base + mblk->size < node_end) + ret = numa_add_memblk(nid, mblk->base, mblk->base + mblk->size); + else + ret = numa_add_memblk(nid, mblk->base, node_end); + } + } + + if (!node_size) { + memblock_add_node(node_base, node_size, nid, MEMBLOCK_NONE); + node_set(nid, numa_nodes_parsed); + pr_info("Setup empty node %d from %#llx\n", nid, node_base); + } + + if (!ret) + continue; + + pr_err("NUMA init failed for node %d, [mem %#018llx-%#018llx]", + nid, node_base, node_end - 1); + } + } + + return 0; +} + +void __init sw64_numa_init(void) +{ + if (!numa_off) { + if (!acpi_disabled && !numa_init(acpi_numa_init)) + return; + if (acpi_disabled && !numa_init(of_numa_init)) + return; + } + + numa_init(manual_numa_init); +} + +void cpu_set_node(void) +{ + int i; + + if (numa_off) { + for (i = 0; i < nr_cpu_ids; i++) + cpu_to_node_map[i] = 0; + } else { + int rr, default_node, cid; + + rr = first_node(node_online_map); + for (i = 0; i < nr_cpu_ids; i++) { + cid = cpu_to_rcid(i); + default_node = rcid_to_domain_id(cid); + if (node_online(default_node)) { + cpu_to_node_map[i] = default_node; + } else { + cpu_to_node_map[i] = rr; + rr = next_node(rr, node_online_map); + if (rr == MAX_NUMNODES) + rr = first_node(node_online_map); + } + } + } + /* + * Setup numa_node for cpu 0 before per_cpu area for booting. + * Actual setup of numa_node will be done in native_smp_prepare_cpus(). + */ + set_cpu_numa_node(0, cpu_to_node_map[0]); +} + +void numa_store_cpu_info(unsigned int cpu) +{ + set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); +} + +void __init early_map_cpu_to_node(unsigned int cpu, int nid) +{ + /* fallback to node 0 */ + if (nid < 0 || nid >= MAX_NUMNODES || numa_off) + nid = 0; + + cpu_to_node_map[cpu] = nid; + + /* + * We should set the numa node of cpu0 as soon as possible, because it + * has already been set up online before. cpu_to_node(0) will soon be + * called. + */ + if (!cpu) + set_cpu_numa_node(cpu, nid); +} + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +/* + * Returns a pointer to the bitmask of CPUs on Node 'node'. 
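+ *
+ * Degrades gracefully when called early or with a bad argument:
+ * NUMA_NO_NODE returns cpu_all_mask, an out-of-range node returns
+ * cpu_none_mask, and a not-yet-allocated map returns cpu_online_mask,
+ * each after printing a warning.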
+ */ +const struct cpumask *cpumask_of_node(int node) +{ + + if (node == NUMA_NO_NODE) { + pr_warn("%s: NUMA_NO_NODE\n", __func__); + return cpu_all_mask; + } + + if (WARN_ON(node < 0 || node >= nr_node_ids)) { + pr_warn("%s: invalid node %d\n", __func__, node); + return cpu_none_mask; + } + + if (WARN_ON(node_to_cpumask_map[node] == NULL)) { + pr_warn("%s: uninitialized node %d\n", __func__, node); + return cpu_online_mask; + } + + return node_to_cpumask_map[node]; +} +EXPORT_SYMBOL(cpumask_of_node); +#endif + +static void numa_update_cpu(unsigned int cpu, bool remove) +{ + int nid = cpu_to_node(cpu); + + if (nid == NUMA_NO_NODE) + return; + + if (remove) + cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); + else + cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); +} + +void numa_add_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, false); +} + +void numa_remove_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, true); +} + +void numa_clear_node(unsigned int cpu) +{ + numa_remove_cpu(cpu); + set_cpu_numa_node(cpu, NUMA_NO_NODE); +} -- Gitee From b16d1448f5558d37bcd001a70a15436e4f06cb75 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:05 +0800 Subject: [PATCH 310/953] anolis: sw64: add default configs ANBZ: #4688 Add default config files for SW64 based xuelang and junzhang platforms. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/configs/junzhang_defconfig | 667 +++++++++++++++++++++ arch/sw_64/configs/kata_guest_defconfig | 633 ++++++++++++++++++++ arch/sw_64/configs/kata_xuelang_defconfig | 616 ++++++++++++++++++++ arch/sw_64/configs/xuelang_defconfig | 668 ++++++++++++++++++++++ 4 files changed, 2584 insertions(+) create mode 100644 arch/sw_64/configs/junzhang_defconfig create mode 100644 arch/sw_64/configs/kata_guest_defconfig create mode 100644 arch/sw_64/configs/kata_xuelang_defconfig create mode 100644 arch/sw_64/configs/xuelang_defconfig diff --git a/arch/sw_64/configs/junzhang_defconfig b/arch/sw_64/configs/junzhang_defconfig new file mode 100644 index 000000000000..4f25770ca193 --- /dev/null +++ b/arch/sw_64/configs/junzhang_defconfig @@ -0,0 +1,667 @@ +CONFIG_LOCALVERSION="-junzhang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_SUBARCH_C4=y +CONFIG_SMP=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=64 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_ACPI_TAD=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y 
+CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +# CONFIG_COMPAT_BRK is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA_AREAS=7 +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m 
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m 
+CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m 
+CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NET_FC=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=y +CONFIG_I40EVF=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI is not set +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_DRM_AST=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m 
+CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/kata_guest_defconfig b/arch/sw_64/configs/kata_guest_defconfig new file mode 100644 index 000000000000..8122155c1276 --- /dev/null +++ b/arch/sw_64/configs/kata_guest_defconfig @@ -0,0 +1,633 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_USE_OF=y +CONFIG_SW64_BUILTIN_DTB=y +CONFIG_SW64_BUILTIN_DTB_NAME="chip_vt" +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_GOOGLE_FIRMWARE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_NONBOOT_CORE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_BOOTCORE=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y 
+CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m 
+CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m 
+CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=y +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +# CONFIG_WIRELESS is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_OF_OVERLAY=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y 
+CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_DEVPORT is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_SUNWAY_SUPERIO_AST2400=y +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_SQUASHFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_9P_FS=y +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# 
CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/kata_xuelang_defconfig b/arch/sw_64/configs/kata_xuelang_defconfig new file mode 100644 index 000000000000..f553f0e71dbf --- /dev/null +++ b/arch/sw_64/configs/kata_xuelang_defconfig @@ -0,0 +1,616 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +# CONFIG_SUSPEND is not set +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m 
+CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_NFCT=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m 
+CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +CONFIG_NET_DROP_MONITOR=m +# CONFIG_WIRELESS is not set +CONFIG_CAIF=m +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y 
+CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_SUNWAY=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=m +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m 
+CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/xuelang_defconfig b/arch/sw_64/configs/xuelang_defconfig new file mode 100644 index 000000000000..b1c0101d0089 --- /dev/null +++ b/arch/sw_64/configs/xuelang_defconfig @@ -0,0 +1,668 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_ACPI_TAD=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +# CONFIG_COMPAT_BRK is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA_AREAS=7 +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y 
+CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m 
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y 
+CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NET_FC=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E100=y 
+CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=y +CONFIG_I40EVF=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI is not set +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_CHIP3=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_DRM_AST=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_SUNWAY_IOMMU=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y 
+CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set -- Gitee From e83269e6395063584c52d3681d0f65d77b1c4398 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:29 +0800 Subject: [PATCH 311/953] anolis: sw64: add PCI support ANBZ: #4688 Add basic PCI support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/pci.h | 163 ++++++++++ arch/sw_64/include/asm/pci_impl.h | 27 ++ arch/sw_64/kernel/pci-noop.c | 138 ++++++++ arch/sw_64/pci/Makefile | 8 + arch/sw_64/pci/pci-legacy.c | 508 ++++++++++++++++++++++++++++++ arch/sw_64/pci/pci-sysfs.c | 359 +++++++++++++++++++++ arch/sw_64/pci/pci.c | 436 +++++++++++++++++++++++++ 7 files changed, 1639 insertions(+) create mode 100644 arch/sw_64/include/asm/pci.h create mode 100644 arch/sw_64/include/asm/pci_impl.h create mode 100644 arch/sw_64/kernel/pci-noop.c create mode 100644 arch/sw_64/pci/Makefile create mode 100644 arch/sw_64/pci/pci-legacy.c create mode 100644 arch/sw_64/pci/pci-sysfs.c create mode 100644 arch/sw_64/pci/pci.c diff --git a/arch/sw_64/include/asm/pci.h b/arch/sw_64/include/asm/pci.h new file mode 100644 index 000000000000..21bfcef21c5f --- /dev/null +++ b/arch/sw_64/include/asm/pci.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PCI_H +#define _ASM_SW64_PCI_H + +#ifdef __KERNEL__ + +#include +#include +#include + +/* + * The following structure is used to manage multiple PCI busses. + */ + +struct pci_dev; +struct pci_bus; +struct resource; +struct sunway_iommu; +struct page; + +struct piu_saved { + unsigned long piuconfig0; + unsigned long piuconfig1; + unsigned long epdmabar; + unsigned long msiaddr; + unsigned long msiconfig[256]; + unsigned long iommuexcpt_ctrl; + unsigned long dtbaseaddr; + unsigned long hpintconfig; + unsigned long pmeintconfig; + unsigned long aererrintconfig; + unsigned long intaconfig; + unsigned long intbconfig; + unsigned long intcconfig; + unsigned long intdconfig; +}; + +/* A controller. Used to manage multiple PCI busses. 
*/ +struct pci_controller { + struct pci_controller *next; + struct pci_bus *bus; + struct resource *io_space; + struct resource *mem_space; + struct resource *pre_mem_space; + struct resource *busn_space; + unsigned long sparse_mem_base; + unsigned long dense_mem_base; + unsigned long sparse_io_base; + unsigned long dense_io_base; + + /* This one's for the kernel only. It's in KSEG somewhere. */ + void __iomem *ep_config_space_base; + void __iomem *rc_config_space_base; + + unsigned long index; + unsigned long node; + DECLARE_BITMAP(piu_msiconfig, 256); + int int_irq; + int service_irq; + /* For compatibility with current (as of July 2003) pciutils + * and XFree86. Eventually will be removed. + */ + unsigned int need_domain_info; + bool iommu_enable; + struct sunway_iommu *pci_iommu; + int first_busno; + int last_busno; + int self_busno; + void *sysdata; +}; + +/* Override the logic in pci_scan_bus for skipping already-configured + * bus numbers. + */ + +#define pcibios_assign_all_busses() (pci_has_flag(PCI_REASSIGN_ALL_BUS)) + +#define PCIBIOS_MIN_IO 0 +#define PCIBIOS_MIN_MEM 0 + +extern void __init sw64_init_pci(void); +extern void __init sw64_device_interrupt(unsigned long vector); +extern void __init sw64_init_irq(void); +extern void __init sw64_init_arch(void); +extern struct pci_ops sw64_pci_ops; +extern int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); +extern struct pci_controller *hose_head; +#ifdef CONFIG_PCI_SW64 +extern void __init setup_chip_pci_ops(void); +#else +#define setup_chip_pci_ops() do { } while (0) +#endif + +extern struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus); +extern struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num); + +extern void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge); +extern void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge); +extern int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); + +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + return hose->need_domain_info; +} +#endif + +#ifdef CONFIG_NUMA +static inline int __pcibus_to_node(const struct pci_bus *bus) +{ + struct pci_controller *hose; + + hose = pci_bus_to_pci_controller(bus); + if (!node_online(hose->node)) + return next_node_in(hose->node, node_online_map); + else + return hose->node; +} +#define pcibus_to_node(bus) __pcibus_to_node(bus) +#endif + +#endif /* __KERNEL__ */ + +/* Values for the `which' argument to sys_pciconfig_iobase. 
 */
+#define IOBASE_HOSE 0
+#define IOBASE_SPARSE_MEM 1
+#define IOBASE_DENSE_MEM 2
+#define IOBASE_SPARSE_IO 3
+#define IOBASE_DENSE_IO 4
+#define IOBASE_ROOT_BUS 5
+#define IOBASE_FROM_HOSE 0x10000
+
+extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
+			   size_t count);
+extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
+			    size_t count);
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
+extern void pci_adjust_legacy_attr(struct pci_bus *bus,
+				   enum pci_mmap_state mmap_type);
+#define HAVE_PCI_LEGACY 1
+
+extern int pci_create_resource_files(struct pci_dev *dev);
+extern void pci_remove_resource_files(struct pci_dev *dev);
+extern void __init reserve_mem_for_pci(void);
+extern int chip_pcie_configure(struct pci_controller *hose);
+
+#define PCI_VENDOR_ID_JN 0x5656
+#define PCI_DEVICE_ID_SW64_ROOT_BRIDGE 0x3231
+#define PCI_DEVICE_ID_JN_PCIESW 0x1000
+#define PCI_DEVICE_ID_JN_PCIEUSIP 0x1200
+#define PCI_DEVICE_ID_JN_PCIE2PCI 0x1314
+
+#define NR_IRQ_VECTORS NR_IRQS
+
+#define LAST_DEVICE_VECTOR 31
+
+#define PCITODMA_OFFSET 0x0 /* 0 offset */
+
+#endif /* _ASM_SW64_PCI_H */
diff --git a/arch/sw_64/include/asm/pci_impl.h b/arch/sw_64/include/asm/pci_impl.h
new file mode 100644
index 000000000000..aa17a69b73f8
--- /dev/null
+++ b/arch/sw_64/include/asm/pci_impl.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains declarations and inline functions for interfacing
+ * with the PCI initialization routines.
+ */
+#ifndef _SW64_KERNEL_PCI_IMPL_H
+#define _SW64_KERNEL_PCI_IMPL_H
+
+#include
+
+struct pci_dev;
+struct pci_controller;
+
+/* The hose list. */
+extern struct pci_controller *hose_head, **hose_tail;
+
+extern void common_init_pci(void);
+extern struct pci_controller *alloc_pci_controller(void);
+extern struct resource *alloc_resource(void);
+
+extern unsigned long size_for_memory(unsigned long max);
+
+extern const struct dma_map_ops sw64_dma_direct_ops;
+
+extern struct cma *sw64_kvm_cma;
+extern struct gen_pool *sw64_kvm_pool;
+#endif /* _SW64_KERNEL_PCI_IMPL_H */
diff --git a/arch/sw_64/kernel/pci-noop.c b/arch/sw_64/kernel/pci-noop.c
new file mode 100644
index 000000000000..abfba92fa6a9
--- /dev/null
+++ b/arch/sw_64/kernel/pci-noop.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/sw_64/kernel/pci-noop.c
+ *
+ * Stub PCI interfaces for kernels built without PCI support.
+ */
+
+#include
+#include
+#include
+#include
+
+/*
+ * The PCI controller list.
+ */
+
+struct pci_controller *hose_head, **hose_tail = &hose_head;
+
+struct pci_controller * __init
+alloc_pci_controller(void)
+{
+	struct pci_controller *hose;
+
+	hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+
+	*hose_tail = hose;
+	hose_tail = &hose->next;
+
+	return hose;
+}
+
+struct resource * __init
+alloc_resource(void)
+{
+	struct resource *res;
+
+	res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+
+	return res;
+}
+
+asmlinkage long
+sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
+{
+	return -ENODEV;
+}
+
+asmlinkage long
+sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+		   unsigned long off, unsigned long len, void *buf)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	else
+		return -ENODEV;
+}
+
+asmlinkage long
+sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+		    unsigned long off, unsigned long len, void *buf)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	else
+		return -ENODEV;
+}
+
+static void *sw64_noop_alloc_coherent(struct device *dev, size_t size,
+				      dma_addr_t *dma_handle, gfp_t gfp,
+				      unsigned long attrs)
+{
+	void *ret;
+
+	if (!dev || *dev->dma_mask >= 0xffffffffUL)
+		gfp &= ~GFP_DMA;
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+	if (ret) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_phys(ret);
+	}
+	return ret;
+}
+
+static void sw64_noop_free_coherent(struct device *dev, size_t size,
+				    void *cpu_addr, dma_addr_t dma_addr,
+				    unsigned long attrs)
+{
+	free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t sw64_noop_map_page(struct device *dev, struct page *page,
+				     unsigned long offset, size_t size,
+				     enum dma_data_direction dir,
+				     unsigned long attrs)
+{
+	return page_to_pa(page) + offset;
+}
+
+static int sw64_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+			    enum dma_data_direction dir, unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sgl, sg, nents, i) {
+		void *va;
+
+		BUG_ON(!sg_page(sg));
+		va = sg_virt(sg);
+		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
+		sg_dma_len(sg) = sg->length;
+	}
+
+	return nents;
+}
+
+static int sw64_noop_supported(struct device *dev, u64 mask)
+{
+	return mask < 0x00ffffffUL ? 0 : 1;
+}
+
+const struct dma_map_ops sw64_noop_ops = {
+	.alloc = sw64_noop_alloc_coherent,
+	.free = sw64_noop_free_coherent,
+	.map_page = sw64_noop_map_page,
+	.map_sg = sw64_noop_map_sg,
+	.dma_supported = sw64_noop_supported,
+};
+
+const struct dma_map_ops *dma_ops = &sw64_noop_ops;
+EXPORT_SYMBOL(dma_ops);
+
+void __init common_init_pci(void)
+{
+}
+
+void __init sw64_init_arch(void) { }
+void __init sw64_init_irq(void) { }
diff --git a/arch/sw_64/pci/Makefile b/arch/sw_64/pci/Makefile
new file mode 100644
index 000000000000..327efb163b12
--- /dev/null
+++ b/arch/sw_64/pci/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+obj-y += pci.o pci-legacy.o pci-sysfs.o
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/arch/sw_64/pci/pci-legacy.c b/arch/sw_64/pci/pci-legacy.c
new file mode 100644
index 000000000000..2a44463db0a4
--- /dev/null
+++ b/arch/sw_64/pci/pci-legacy.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+unsigned long rc_linkup;
+
+/*
+ * The PCI controller list.
+ */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus); + +static int __init +pcibios_init(void) +{ + if (acpi_disabled) + sw64_init_pci(); + return 0; +} +subsys_initcall(pcibios_init); + +void __init pcibios_claim_one_bus(struct pci_bus *b) +{ + struct pci_dev *dev; + struct pci_bus *child_bus; + + list_for_each_entry(dev, &b->devices, bus_list) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (r->parent || !r->start || !r->flags) + continue; + if (r->flags & IORESOURCE_PCI_FIXED) { + if (pci_claim_resource(dev, i) == 0) + continue; + + pci_claim_bridge_resource(dev, i); + } + } + } + + list_for_each_entry(child_bus, &b->children, node) + pcibios_claim_one_bus(child_bus); +} + +static void __init +pcibios_claim_console_setup(void) +{ + struct pci_bus *b; + + list_for_each_entry(b, &pci_root_buses, node) + pcibios_claim_one_bus(b); +} + +int __weak chip_pcie_configure(struct pci_controller *hose) +{ + return 0; +} + +unsigned char last_bus = PCI0_BUS; +void __init common_init_pci(void) +{ + struct pci_controller *hose; + struct pci_host_bridge *bridge; + struct pci_bus *bus; + unsigned int init_busnr; + int need_domain_info = 0; + int ret; + unsigned long offset; + + /* Scan all of the recorded PCI controllers. */ + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) { + bridge = pci_alloc_host_bridge(0); + if (!bridge) + continue; + hose->busn_space->start = last_bus; + init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus); + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + offset = hose->mem_space->start - PCI_32BIT_MEMIO; + if (is_in_host()) + hose->first_busno = last_bus + 1; + else + hose->first_busno = last_bus; + pci_add_resource_offset(&bridge->windows, hose->mem_space, offset); + pci_add_resource_offset(&bridge->windows, hose->io_space, hose->io_space->start); + pci_add_resource_offset(&bridge->windows, hose->pre_mem_space, 0); + pci_add_resource_offset(&bridge->windows, hose->busn_space, 0); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = hose->busn_space->start; + bridge->ops = &sw64_pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = sw64_map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + continue; + } + + bus = hose->bus = bridge->bus; + hose->need_domain_info = need_domain_info; + + if (is_in_host()) + last_bus = chip_pcie_configure(hose); + else + while (pci_find_bus(pci_domain_nr(bus), last_bus)) + last_bus++; + + hose->last_busno = hose->busn_space->end = last_bus; + init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS); + init_busnr &= ~(0xff << 16); + init_busnr |= last_bus << 16; + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + pci_bus_update_busn_res_end(bus, last_bus); + last_bus++; + } + + pcibios_claim_console_setup(); + + if (is_in_host()) { + list_for_each_entry(bus, &pci_root_buses, node) + pcibios_reserve_legacy_regions(bus); + } + + pr_info("SW arch assign unassigned resources.\n"); + + pci_assign_unassigned_resources(); + + for (hose = hose_head; hose; hose = hose->next) { + bus = hose->bus; + if (bus) + pci_bus_add_devices(bus); + } +} + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + + *hose_tail = hose; + hose_tail = &hose->next; + + 
return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + + return res; +} + +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + resource_size_t offset; + struct resource *res; + + pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); + + /* Check for IO */ + if (!(hose->io_space->flags & IORESOURCE_IO)) + goto no_io; + offset = (unsigned long)hose->io_space->start; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy IO"; + res->flags = IORESOURCE_IO; + res->start = offset; + res->end = (offset + 0xfff) & 0xfffffffffffffffful; + pr_debug("Candidate legacy IO: %pR\n", res); + if (request_resource(hose->io_space, res)) { + pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } + +no_io: + return; +} + +/* PCIe RC operations */ +int sw6_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n ", *val); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret = PCIBIOS_DEVICE_NOT_FOUND; + + if (is_guest_or_emul()) + return pci_generic_config_read(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) { + ret = sw6_pcie_read_rc_cfg(bus, devfn, where, size, val); + } else { + if (test_bit(hose->node * 8 + hose->index, &rc_linkup)) + ret = pci_generic_config_read(bus, devfn, where, size, val); + else + return 
ret; + } + return ret; +} + +int sw6_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_guest_or_emul()) + return pci_generic_config_write(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) + return sw6_pcie_write_rc_cfg(bus, devfn, where, size, val); + else + return pci_generic_config_write(bus, devfn, where, size, val); +} + +/* + *sw6_pcie_valid_device - Check if a valid device is present on bus + *@bus: PCI Bus structure + *@devfn: device/function + * + *Return: 'true' on success and 'false' if invalid device is found + */ +static bool sw6_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/* + *sw6_pcie_map_bus - Get configuration base + *@bus: PCI Bus structure + *@devfn: Device/function + *@where: Offset from base + * + *Return: Base address of the configuration space needed to be + *accessed. + */ +static void __iomem *sw6_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase; + unsigned long relbus; + + if (!sw6_pcie_valid_device(bus, devfn)) + return NULL; + + relbus = (bus->number << 24) | (devfn << 16) | where; + + cfg_iobase = hose->ep_config_space_base + relbus; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("addr:%px bus %d, devfn %d, where %d\n", + cfg_iobase, bus->number, devfn, where); + return cfg_iobase; +} + +struct pci_ops sw64_pci_ops = { + .map_bus = sw6_pcie_map_bus, + .read = sw6_pcie_config_read, + .write = sw6_pcie_config_write, +}; + +int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return sw64_chip_init->pci_init.map_irq(dev, slot, pin); +} + +static void __init +sw64_init_host(unsigned long node, unsigned long index) +{ + struct pci_controller *hose; + int ret = 0; + + hose = alloc_pci_controller(); + if (!hose) { + pr_warn("alloc NODE %ld RC %ld hose failed\n", node, index); + return; + } + hose->iommu_enable = false; + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + hose->pre_mem_space = alloc_resource(); + hose->busn_space = alloc_resource(); + hose->index = index; + hose->node = node; + + sw64_chip_init->pci_init.hose_init(hose); + + if (sw64_chip_init->pci_init.set_rc_piu) + sw64_chip_init->pci_init.set_rc_piu(node, index); + + ret = sw64_chip_init->pci_init.check_pci_linkup(node, index); + if (ret == 0) { + /* Root Complex downstream port is link up */ + set_bit(node * 8 + index, &rc_linkup); //8-bit per node + } +} + +void __weak set_devint_wken(int node) {} +void __weak set_adr_int(int node) {} + +void __init sw64_init_arch(void) +{ + if (IS_ENABLED(CONFIG_PCI)) { + unsigned long node, cpu_num; + unsigned long rc_enable; + char id[8], msg[64]; + int i; + + cpu_num = sw64_chip->get_cpu_num(); + + for (node = 0; node < cpu_num; node++) { + if (is_in_host()) { + set_devint_wken(node); + set_adr_int(node); + } + } + + if (!acpi_disabled) + return; + + pr_info("SW arch PCI initialize!\n"); + for (node = 0; node < cpu_num; node++) { + rc_enable = sw64_chip_init->pci_init.get_rc_enable(node); + if (rc_enable == 0) { + pr_notice("PCIe is disabled on node %ld\n", node); + 
continue; + } + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_enable >> i) & 0x1) + sw64_init_host(node, i); + } + if ((rc_linkup >> node * 8) & 0xff) { + memset(msg, 0, 64); + sprintf(msg, "Node %ld: RC [ ", node); + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_linkup >> (i + node * 8)) & 1) { + memset(id, 0, 8); + sprintf(id, "%d ", i); + strcat(msg, id); + } + } + strcat(msg, "] link up"); + pr_info("%s\n", msg); + } else { + pr_info("Node %ld: no RC link up\n", node); + } + } + } +} + +void __weak set_pcieport_service_irq(int node, int index) {} + +static void __init sw64_init_intx(struct pci_controller *hose) +{ + unsigned long int_conf, node, val_node; + unsigned long index, irq; + int rcid; + + node = hose->node; + index = hose->index; + + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); + WARN_ON(irq < 0); + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; + rcid = cpu_to_rcid(0); + + pr_info_once("INTx are directed to node %d core %d.\n", + ((rcid >> 6) & 0x3), (rcid & 0x1f)); + int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ + if (sw64_chip_init->pci_init.set_intx) + sw64_chip_init->pci_init.set_intx(node, index, int_conf); + + set_pcieport_service_irq(node, index); +} + +void __init sw64_init_irq(void) +{ + struct pci_controller *hose; + + /* Scan all of the recorded PCI controllers. */ + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) + sw64_init_intx(hose); +} + +void __init +sw64_init_pci(void) +{ + pci_add_flags(PCI_REASSIGN_ALL_BUS); + common_init_pci(); + pci_clear_flags(PCI_REASSIGN_ALL_BUS); +} diff --git a/arch/sw_64/pci/pci-sysfs.c b/arch/sw_64/pci/pci-sysfs.c new file mode 100644 index 000000000000..5b52a534fa80 --- /dev/null +++ b/arch/sw_64/pci/pci-sysfs.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Sw_64 PCI resource files. + * + * Loosely based on generic HAVE_PCI_MMAP implementation in + * drivers/pci/pci-sysfs.c + */ + +#include + +static int hose_mmap_page_range(struct pci_controller *hose, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_type, int sparse) +{ + unsigned long base; + + if (mmap_type == pci_mmap_mem) + base = sparse ? hose->sparse_mem_base : hose->dense_mem_base; + else + base = sparse ? hose->sparse_io_base : hose->dense_io_base; + + vma->vm_pgoff |= base >> PAGE_SHIFT; + + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static int __pci_mmap_fits(struct pci_dev *pdev, int num, + struct vm_area_struct *vma, int sparse) +{ + unsigned long nr, start, size; + int shift = sparse ? 5 : 0; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", + current->comm, sparse ? 
" sparse" : "", start, start + nr, + pci_name(pdev), num, size); + return 0; +} + +/** + * pci_mmap_resource - map a PCI resource into user memory space + * @kobj: kobject for mapping + * @attr: struct bin_attribute for the file being mapped + * @vma: struct vm_area_struct passed into the mmap + * @sparse: address space type + * + * Use the bus mapping routines to map a PCI resource into userspace. + */ +static int pci_mmap_resource(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma, int sparse) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); + struct resource *res = attr->private; + enum pci_mmap_state mmap_type; + struct pci_bus_region bar; + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + if (res == &pdev->resource[i]) + break; + if (i >= PCI_ROM_RESOURCE) + return -ENODEV; + + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) + return -EINVAL; + + if (!__pci_mmap_fits(pdev, i, vma, sparse)) + return -EINVAL; + + pcibios_resource_to_bus(pdev->bus, &bar, res); + vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); + mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; + + return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); +} + +static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 1); +} + +static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 0); +} + +/** + * pci_remove_resource_files - cleanup resource files + * @dev: dev to cleanup + * + * If we created resource files for @dev, remove them from sysfs and + * free their resources. + */ +void pci_remove_resource_files(struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + struct bin_attribute *res_attr; + + res_attr = pdev->res_attr[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + + res_attr = pdev->res_attr_wc[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + } +} + +static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num) +{ + struct pci_bus_region bar; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + long dense_offset; + unsigned long sparse_size; + + pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]); + + /* + * All core logic chips have 4G sparse address space, except + * CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM + * definitions in asm/core_xxx.h files). This corresponds + * to 128M or 512M of the bus space. + */ + dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base); + sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000; + + return bar.end < sparse_size; +} + +static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name, + char *suffix, struct bin_attribute *res_attr, + unsigned long sparse) +{ + size_t size = pci_resource_len(pdev, num); + + sprintf(name, "resource%d%s", num, suffix); + res_attr->mmap = sparse ? pci_mmap_resource_sparse : + pci_mmap_resource_dense; + res_attr->attr.name = name; + res_attr->attr.mode = 0600; + res_attr->size = sparse ? 
size << 5 : size; + res_attr->private = &pdev->resource[num]; + return sysfs_create_bin_file(&pdev->dev.kobj, res_attr); +} + +static int pci_create_attr(struct pci_dev *pdev, int num) +{ + /* allocate attribute structure, piggyback attribute name */ + int retval, nlen1, nlen2 = 0, res_count = 1; + unsigned long sparse_base, dense_base; + struct bin_attribute *attr; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + char *suffix, *attr_name; + + suffix = ""; + nlen1 = 10; + + if (pdev->resource[num].flags & IORESOURCE_MEM) { + sparse_base = hose->sparse_mem_base; + dense_base = hose->dense_mem_base; + if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) { + sparse_base = 0; + suffix = "_dense"; + nlen1 = 16; /* resourceN_dense */ + } + } else { + sparse_base = hose->sparse_io_base; + dense_base = hose->dense_io_base; + } + + if (sparse_base) { + suffix = "_sparse"; + nlen1 = 17; + if (dense_base) { + nlen2 = 16; /* resourceN_dense */ + res_count = 2; + } + } + + attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC); + if (!attr) + return -ENOMEM; + + attr_name = (char *)(attr + res_count); + pdev->res_attr[num] = attr; + retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr, + sparse_base); + if (retval || res_count == 1) + return retval; + + /* Create dense file */ + attr_name += nlen1; + attr++; + pdev->res_attr_wc[num] = attr; + return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0); +} + +/** + * pci_create_resource_files - create resource files in sysfs for @dev + * @dev: dev in question + * + * Walk the resources in @dev creating files for each resource available. + */ +int pci_create_resource_files(struct pci_dev *pdev) +{ + int i; + int retval; + + /* Expose the PCI resources from this device as files */ + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + + /* skip empty resources */ + if (!pci_resource_len(pdev, i)) + continue; + + retval = pci_create_attr(pdev, i); + if (retval) { + pci_remove_resource_files(pdev); + return retval; + } + } + return 0; +} + +/* Legacy I/O bus mapping stuff. */ + +static int __legacy_mmap_fits(struct pci_controller *hose, + struct vm_area_struct *vma, + unsigned long res_size, int sparse) +{ + unsigned long nr, start, size; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((res_size - 1) >> PAGE_SHIFT) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %ld (size 0x%08lx)\n", + current->comm, sparse ? " sparse" : "", start, start + nr, + hose->index, size); + return 0; +} + +static inline int has_sparse(struct pci_controller *hose, + enum pci_mmap_state mmap_type) +{ + unsigned long base; + + base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base : + hose->sparse_io_base; + + return base != 0; +} + +int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_type) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int sparse = has_sparse(hose, mmap_type); + unsigned long res_size; + + res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size : + bus->legacy_io->size; + if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) + return -EINVAL; + + return hose_mmap_page_range(hose, vma, mmap_type, sparse); +} + +/** + * pci_adjust_legacy_attr - adjustment of legacy file attributes + * @b: bus to create files under + * @mmap_type: I/O port or memory + * + * Adjust file name and size for sparse mappings. 
+ */
+void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	if (!has_sparse(hose, mmap_type))
+		return;
+
+	if (mmap_type == pci_mmap_mem) {
+		bus->legacy_mem->attr.name = "legacy_mem_sparse";
+		bus->legacy_mem->size <<= 5;
+	} else {
+		bus->legacy_io->attr.name = "legacy_io_sparse";
+		bus->legacy_io->size <<= 5;
+	}
+}
+
+/* Legacy I/O bus read/write functions */
+int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	port += hose->io_space->start;
+
+	switch (size) {
+	case 1:
+		*((u8 *)val) = inb(port);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		*((u16 *)val) = inw(port);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		*((u32 *)val) = inl(port);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	port += hose->io_space->start;
+
+	switch (size) {
+	case 1:
+		outb(val, port);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		outw(val, port);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		outl(val, port);
+		return 4;
+	}
+	return -EINVAL;
+}
diff --git a/arch/sw_64/pci/pci.c b/arch/sw_64/pci/pci.c
new file mode 100644
index 000000000000..3db9816e19f1
--- /dev/null
+++ b/arch/sw_64/pci/pci.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+
+#include
+#include
+
+/*
+ * raw_pci_read/write - Platform-specific PCI config space access.
+ */
+int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+		 int reg, int len, u32 *val)
+{
+	struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
+
+	if (bus_tmp)
+		return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val);
+
+	return -EINVAL;
+}
+
+int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
+		  int reg, int len, u32 val)
+{
+	struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
+
+	if (bus_tmp)
+		return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val);
+
+	return -EINVAL;
+}
+
+resource_size_t pcibios_default_alignment(void)
+{
+	if (is_in_guest())
+		return PAGE_SIZE;
+	else
+		return 0;
+}
+
+/*
+ * Just declaring that the power-of-ten prefixes are actually the
+ * power-of-two ones doesn't make it true :)
+ */
+#define KB 1024
+#define MB (1024*KB)
+#define GB (1024*MB)
+
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				       resource_size_t size, resource_size_t align)
+{
+	struct pci_dev *dev = data;
+	struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus);
+	unsigned long alignto;
+	resource_size_t start = res->start;
+
+	if (res->flags & IORESOURCE_IO) {
+		/* Make sure we start at our min on all hoses */
+		if (start - hose->io_space->start < PCIBIOS_MIN_IO)
+			start = PCIBIOS_MIN_IO + hose->io_space->start;
+		/*
+		 * Put everything into 0x00-0xff region modulo 0x400
+		 */
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
+	} else if (res->flags & IORESOURCE_MEM) {
+		/* Make sure we start at our min on all hoses */
+		if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
+			start = PCIBIOS_MIN_MEM + hose->mem_space->start;
+		/*
+		 * The following holds at least for the Low Cost
+		 * SW64 implementation of the PCI interface:
+		 *
+		 * In sparse memory address space, the first
+		 * octant (16MB) of every 128MB segment is
+		 * aliased to the very first 16 MB of the
+		 *
address space (i.e., it aliases the ISA
+		 * memory address space). Thus, we try to
+		 * avoid allocating PCI devices in that range.
+		 * Allocation is possible in the 2nd-7th octants only.
+		 * Devices that need more than 112MB of
+		 * address space must be accessed through
+		 * dense memory space only!
+		 */
+
+		/* Align to multiple of size of minimum base. */
+		alignto = max_t(resource_size_t, 0x1000UL, align);
+		start = ALIGN(start, alignto);
+		if (hose->sparse_mem_base && size <= 7 * 16*MB) {
+			if (((start / (16*MB)) & 0x7) == 0) {
+				start &= ~(128*MB - 1);
+				start += 16*MB;
+				start = ALIGN(start, alignto);
+			}
+			if (start/(128*MB) != (start + size - 1)/(128*MB)) {
+				start &= ~(128*MB - 1);
+				start += (128 + 16)*MB;
+				start = ALIGN(start, alignto);
+			}
+		}
+	}
+
+	return start;
+}
+
+#undef KB
+#undef MB
+#undef GB
+
+char *pcibios_setup(char *str)
+{
+	return str;
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+	/* Propagate hose info into the subordinate devices. */
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+	struct pci_dev *dev = bus->self;
+
+	if (!dev || bus->number == hose->first_busno) {
+		bus->resource[0] = hose->io_space;
+		bus->resource[1] = hose->mem_space;
+		bus->resource[2] = hose->pre_mem_space;
+	}
+}
+
+/**
+ * Provide information on locations of various I/O regions in physical
+ * memory. Do this on a per-card basis so that we choose the right hose.
+ */
+asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
+{
+	struct pci_controller *hose;
+
+	hose = bus_num_to_pci_controller(bus);
+	if (hose == NULL)
+		return -ENODEV;
+
+	switch (which & ~IOBASE_FROM_HOSE) {
+	case IOBASE_HOSE:
+		return hose->index;
+	case IOBASE_SPARSE_MEM:
+		return hose->sparse_mem_base;
+	case IOBASE_DENSE_MEM:
+		return hose->dense_mem_base;
+	case IOBASE_SPARSE_IO:
+		return hose->sparse_io_base;
+	case IOBASE_DENSE_IO:
+		return hose->dense_io_base;
+	case IOBASE_ROOT_BUS:
+		return hose->bus->number;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+void __init reserve_mem_for_pci(void)
+{
+	int ret;
+	unsigned long base = PCI_32BIT_MEMIO;
+
+	ret = add_memmap_region(base, PCI_32BIT_MEMIO_SIZE, memmap_pci);
+	if (ret) {
+		pr_err("failed to reserve pages for pcie memory space\n");
+		return;
+	}
+
+	pr_info("reserved pages for pcie memory space %lx:%lx\n", base >> PAGE_SHIFT,
+			(base + PCI_32BIT_MEMIO_SIZE) >> PAGE_SHIFT);
+}
+
+const struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+/* Quirks */
+static void quirk_isa_bridge(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_ISA << 8;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);
+
+/*
+ * Early fix up the Root Complex settings
+ */
+static void fixup_root_complex(struct pci_dev *dev)
+{
+	int i;
+	struct pci_bus *bus = dev->bus;
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	hose->self_busno = hose->busn_space->start;
+
+	if (likely(bus->number == hose->self_busno)) {
+		if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) {
+			/* Check Root Complex port again */
+			dev->is_hotplug_bridge = 0;
+			dev->current_state = PCI_D0;
+		}
+
+		dev->class &= 0xff;
+		dev->class |= PCI_CLASS_BRIDGE_PCI << 8;
+		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+			dev->resource[i].start = 0;
+			dev->resource[i].end = 0;
+			dev->resource[i].flags = IORESOURCE_PCI_FIXED;
+		}
+	}
+	atomic_inc(&dev->enable_cnt);
+
+	dev->no_msi = 1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JN,
+			
PCI_DEVICE_ID_SW64_ROOT_BRIDGE, fixup_root_complex);
+
+static int setup_bus_dma_cb(struct pci_dev *pdev, void *data)
+{
+	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
+	return 0;
+}
+
+static void fix_bus_dma_limit(struct pci_dev *dev)
+{
+	pci_walk_bus(dev->subordinate, setup_bus_dma_cb, NULL);
+	pr_info("Set zx200 bus_dma_limit to 32-bit\n");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ZHAOXIN, 0x071f, fix_bus_dma_limit);
+
+#ifdef CONFIG_DCA
+static void enable_sw_dca(struct pci_dev *dev)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus);
+	unsigned long node, rc_index, dca_ctl, dca_conf;
+	int i;
+
+	if (dev->class >> 8 != PCI_CLASS_NETWORK_ETHERNET)
+		return;
+
+	node = hose->node;
+	rc_index = hose->index;
+
+	for (i = 0; i < 256; i++) {
+		dca_conf = read_piu_ior1(node, rc_index, DEVICEID0 + (i << 7));
+		if (dca_conf >> 63)
+			continue;
+		else {
+			dca_conf = (1UL << 63) | (dev->bus->number << 8) | dev->devfn;
+			pr_info("dca device index %d, dca_conf = %#lx\n", i, dca_conf);
+			write_piu_ior1(node, rc_index, DEVICEID0 + (i << 7), dca_conf);
+			break;
+		}
+	}
+
+	dca_ctl = read_piu_ior1(node, rc_index, DCACONTROL);
+	if (dca_ctl & 0x1) {
+		dca_ctl = 0x2;
+		write_piu_ior1(node, rc_index, DCACONTROL, dca_ctl);
+		pr_info("Node %ld RC %ld enable DCA 1.0\n", node, rc_index);
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, enable_sw_dca);
+#endif
+
+/**
+ * There are some special aspects to the Root Complex of Sunway:
+ * 1. The Root Complex config space base addr is different
+ *    from the EP config space base addr.
+ * 2. In the case of multiple Root Complexes, different
+ *    Root Complexes have different config space base addrs.
+ *
+ * This means that even if multiple Root Complexes share
+ * the same segment group number, their bus numbers can
+ * still overlap.
+ *
+ * But due to a Xorg-related issue, the bus numbers of
+ * multiple Root Complexes must not overlap. So, after
+ * scanning a Root Complex, "last_bus" records the bus
+ * number following the highest bus number used so far,
+ * and it is used as the start bus number of the next
+ * Root Complex to be scanned.
+ *
+ * An open question: when there are too many RCs, might
+ * 256 bus numbers be insufficient?
+ */
+static unsigned char last_bus;
+
+void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+	struct pci_controller *hose = NULL;
+	struct resource_entry *entry = NULL;
+	struct pci_bus *bus = bridge->bus;
+	unsigned long flags = 0;
+	unsigned int init_busnr = 0;
+
+	hose = pci_bus_to_pci_controller(bus);
+
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		flags = entry->res->flags;
+		if (flags & IORESOURCE_IO) {
+			entry->offset = entry->res->start;
+			hose->io_space = entry->res;
+		} else if (flags & IORESOURCE_BUS) {
+			entry->res->start = last_bus;
+			hose->busn_space = entry->res;
+		} else if (flags & IORESOURCE_MEM) {
+			if (!(flags & IORESOURCE_PREFETCH)) {
+				entry->offset = entry->res->start - PCI_32BIT_MEMIO;
+				hose->mem_space = entry->res;
+			} else
+				hose->pre_mem_space = entry->res;
+		}
+	}
+
+	/**
+	 * We scan the Root Complex and update bus numbers in the kernel,
+	 * not in the firmware. Firmware just passes 0x0-0xff via _CRS.
+	 *
+	 * So, the bus number of the pci host bridge needs to be updated here.
+	 */
+	bridge->busnr = last_bus;
+	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus), last_bus);
+
+	/**
+	 * At this point, the pci_bus has been created with the old
+	 * bridge->busnr, so bus->number needs to be updated here.
+	 */
+	bus->number = last_bus;
+
+	bridge->swizzle_irq = pci_common_swizzle;
+	bridge->map_irq = sw64_pci_map_irq;
+
+	init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus);
+	write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr);
+
+	hose->first_busno = last_bus + (is_in_host() ? 1 : 0);
+
+	pci_add_flags(PCI_REASSIGN_ALL_BUS);
+}
+
+static void sw64_pci_root_bridge_reserve_legacy_io(struct pci_host_bridge *bridge)
+{
+	struct pci_bus *bus = bridge->bus;
+	struct resource_entry *entry = NULL;
+	struct resource *res = NULL;
+
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		if (!(entry->res->flags & IORESOURCE_IO))
+			continue;
+
+		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+		if (res == NULL) {
+			pr_err("alloc resource for legacy io out of mem\n");
+			return;
+		}
+
+		res->name = "legacy io";
+		res->flags = IORESOURCE_IO;
+		res->start = entry->res->start;
+		res->end = res->start + 0xFFF;
+
+		pr_info("reserving legacy io %pR for domain %04x\n",
+				res, pci_domain_nr(bus));
+		if (request_resource(entry->res, res)) {
+			pr_err("pci %04x:%02x reserve legacy io %pR failed\n",
+					pci_domain_nr(bus), bus->number, res);
+			kfree(res);
+		}
+	}
+}
+
+void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge)
+{
+	struct pci_controller *hose = NULL;
+	struct pci_bus *bus = NULL;
+	unsigned int init_busnr = 0;
+
+	bus = bridge->bus;
+
+	hose = pci_bus_to_pci_controller(bus);
+	hose->bus = bus;
+
+	if (is_in_host())
+		last_bus = chip_pcie_configure(hose);
+	else {
+		while (pci_find_bus(pci_domain_nr(bus), last_bus))
+			last_bus++;
+	}
+
+	hose->last_busno = last_bus;
+	hose->busn_space->end = last_bus;
+
+	init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS);
+	init_busnr &= ~(0xff << 16);
+	init_busnr |= last_bus << 16;
+	write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr);
+
+	pci_bus_update_busn_res_end(bus, last_bus);
+	last_bus++;
+
+	pr_info("bus number updated to %u\n", last_bus);
+
+	if (is_in_host())
+		sw64_pci_root_bridge_reserve_legacy_io(bridge);
+
+	/**
+	 * The Root Complex of SW64 does not support ASPM, which leaves
+	 * the _OSC control field unable to be updated.
+	 *
+	 * Related logic can be found in "negotiate_os_control".
+	 */
+	bridge->native_aer = 1;
+	bridge->native_pme = 1;
+
+	/**
+	 * Since some buggy firmwares may configure invalid bridge bus numbers,
+	 * the kernel re-assigns all PCI bus numbers when scanning the Root
+	 * Complex.
+	 *
+	 * However, users may trigger a pci bus rescan from userspace with the
+	 * command below:
+	 *
+	 * > echo 1 > /sys/bus/pci/rescan
+	 *
+	 * Unexpected errors may occur on the endpoint devices due to the
+	 * re-assigned bus numbers of upstream bridges.
+	 *
+	 * To work around this problem, the flag PCI_REASSIGN_ALL_BUS is set
+	 * before scanning the Root Complex and cleared afterwards.
+	 */
+	pci_clear_flags(PCI_REASSIGN_ALL_BUS);
+}
-- 
Gitee


From 24f8968a4e5b650b6c45b6b1b214d27f2dafee7e Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:28 +0800
Subject: [PATCH 312/953] anolis: sw64: add MSI support

ANBZ: #4688

Add basic MSI support for SW64.
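
With this in place, drivers can use the generic PCI MSI API as usual.
For illustration only (hypothetical driver code, not part of this
patch):

	/* request 1-4 MSI vectors and wire up the first one */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;
	err = request_irq(pci_irq_vector(pdev, 0), demo_handler, 0,
			  "demo", demo_dev);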
Signed-off-by: Mao Minkai
Reviewed-by: He Sheng
Signed-off-by: Gu Zitao
Reviewed-by: Gu Mi
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2906
---
 arch/sw_64/include/asm/msi.h | 93 ++++++++++++++++++++++++++++++++++++
 arch/sw_64/pci/msi.c         | 21 ++++++++
 2 files changed, 114 insertions(+)
 create mode 100644 arch/sw_64/include/asm/msi.h
 create mode 100644 arch/sw_64/pci/msi.c

diff --git a/arch/sw_64/include/asm/msi.h b/arch/sw_64/include/asm/msi.h
new file mode 100644
index 000000000000..dbf6f81843be
--- /dev/null
+++ b/arch/sw_64/include/asm/msi.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_MSI_H
+#define _ASM_SW64_MSI_H
+
+#include
+
+#define NR_VECTORS			NR_IRQS
+#define NR_IRQ_VECTORS			NR_IRQS
+
+#define AUTO_ASSIGN			0
+
+#define LAST_DEVICE_VECTOR		31
+
+#define MSI_OFFSET			0x44
+
+#define NUM_MSI_IRQS			256
+
+#define PERCPU_MSI_IRQS			256
+
+#define VT_MSIX_MSG_ADDR		(0x8000fee00000UL)
+#define VT_MSIX_ADDR_DEST_ID_SHIFT	12
+#define VT_MSIX_ADDR_DEST_ID_MASK	(0xff << VT_MSIX_ADDR_DEST_ID_SHIFT)
+#define VT_MSIX_ADDR_DEST_ID(dest) \
+	(((dest) << VT_MSIX_ADDR_DEST_ID_SHIFT) & VT_MSIX_ADDR_DEST_ID_MASK)
+
+
+#ifdef CONFIG_PCI_MSI
+extern void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs);
+extern int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type);
+extern bool find_free_cpu_vector(const struct cpumask *search_mask,
+		int *found_cpu, int *found_vector);
+extern int msi_compose_msg(unsigned int irq, struct msi_msg *msg);
+extern void sw64_irq_noop(struct irq_data *d);
+extern struct irq_chip sw64_irq_chip;
+extern void handle_pci_msi_interrupt(unsigned long type,
+		unsigned long vector,
+		unsigned long pci_msi1_addr);
+
+#define MSI_ADDR_BASE_HI	0
+#define MSI_ADDR_BASE_LO	0x91abc0
+
+#define MSI_ADDR_SHIFT		20
+#define MSI_ADDR_DEST_ID_SHIFT	10
+
+struct sw64_msi_chip_data {
+	spinlock_t cdata_lock;
+	union {
+		unsigned long msi_config;
+		unsigned long msiaddr;
+	};
+	unsigned long rc_node;
+	unsigned long rc_index;
+	unsigned int msi_config_index;
+	unsigned int dst_cpu;
+	unsigned int vector;
+	unsigned int prev_cpu;
+	unsigned int prev_vector;
+	unsigned int multi_msi;
+	bool move_in_progress;
+};
+
+static inline int rcid_to_msicid(int rcid)
+{
+	int msicid = 0;
+
+	msicid |= (rcid_to_domain_id(rcid) << 7);
+	msicid |= (rcid_to_thread_id(rcid) << 6);
+	msicid |= (rcid_to_core_id(rcid) << 0);
+
+	return msicid;
+}
+
+extern void arch_init_msi_domain(struct irq_domain *domain);
+enum irq_alloc_type {
+	IRQ_ALLOC_TYPE_MSI,
+	IRQ_ALLOC_TYPE_MSIX,
+	IRQ_ALLOC_TYPE_INTX,
+};
+struct irq_alloc_info {
+	struct msi_desc *desc;
+	enum irq_alloc_type type;
+	struct pci_dev *msi_dev;
+	irq_hw_number_t hwirq;
+};
+typedef struct irq_alloc_info msi_alloc_info_t;
+#else /* !CONFIG_PCI_MSI */
+static inline void handle_pci_msi_interrupt(unsigned long type,
+		unsigned long vector, unsigned long pci_msi1_addr)
+{
+	pr_warn("SW arch: CONFIG_PCI_MSI is disabled\n");
+}
+#endif /* CONFIG_PCI_MSI */
+#endif /* _ASM_SW64_MSI_H */
diff --git a/arch/sw_64/pci/msi.c b/arch/sw_64/pci/msi.c
new file mode 100644
index 000000000000..fc2c122c37ef
--- /dev/null
+++ b/arch/sw_64/pci/msi.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+
+int msi_compose_msg(unsigned int irq, struct msi_msg *msg)
+{
+	msg->address_hi = (unsigned int)(MSIX_MSG_ADDR >> 32);
+	msg->address_lo = (unsigned int)(MSIX_MSG_ADDR & 0xffffffff);
+	msg->data = irq;
+	return irq;
+}
+
+void sw64_irq_noop(struct 
irq_data *d) +{ +} + +void arch_teardown_msi_irq(unsigned int irq) +{ +} -- Gitee From 14b93c44f2ed7e32b9842c12b15745985962555c Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:06 +0800 Subject: [PATCH 313/953] anolis: sw64: add device trees ANBZ: #4688 Add device trees for SW64 based chip3 platform and virtual machines (including an empty device tree for platforms that are under development). Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/boot/dts/chip3.dts | 240 ++++++++++++++++++++++++++++++++ arch/sw_64/boot/dts/chip_vt.dts | 55 ++++++++ arch/sw_64/boot/dts/empty.dts | 15 ++ 3 files changed, 310 insertions(+) create mode 100644 arch/sw_64/boot/dts/chip3.dts create mode 100644 arch/sw_64/boot/dts/chip_vt.dts create mode 100644 arch/sw_64/boot/dts/empty.dts diff --git a/arch/sw_64/boot/dts/chip3.dts b/arch/sw_64/boot/dts/chip3.dts new file mode 100644 index 000000000000..082506393ac9 --- /dev/null +++ b/arch/sw_64/boot/dts/chip3.dts @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks { + i2cclk: i2cclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + clock-output-names = "i2cclk_25mhz"; + }; + spiclk: spiclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + clock-output-names = "spiclk_25mhz"; + }; + + }; + + intc: interrupt-controller { + compatible = "sw64,sw6_irq_controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + lpc_intc: interrupt-controller@0x8037 { + compatible = "sw64,lpc_intc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&intc>; + interrupts = <2>; + }; + + uart: serial0@8033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x8033 0x0 0x0 0x1000>; + interrupt-parent=<&intc>; + interrupts = <3>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + serial1@9033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x9033 0x0 0x0 0x1000>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + + i2c0@0x8031 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "snps,designware-i2c"; + reg = <0x8031 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <5>; + status = "okay"; + }; + + i2c1@0x8034 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8034 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <6>; + status = "okay"; + }; + + i2c2@0x8035 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8035 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <7>; + status = "okay"; + + rtc: pcf8523@68 { + compatible = "nxp,pcf8523"; + reg = <0x68>; + }; + + lm75: at30tse752a@48 { + compatible = "microchip,tcn75"; + reg = <0x48>; + }; + }; + + pvt: pvt@0x8030 { + 
#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "sw64,pvt-vol";
+			reg = <0x8030 0x0 0x0 0x7c00>;
+			status = "okay";
+		};
+
+		spi: spi@0x8032 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "sunway,chip3-spi";
+			reg = <0x8032 0x0 0x0 0x8000>;
+			clocks = <&spiclk>;
+			interrupt-parent=<&intc>;
+			interrupts = <4>;
+			status = "okay";
+
+			flash@0 {
+				compatible = "winbond,w25q32dw", "jedec,spi-flash";
+				spi-max-frequency = <25000000>;
+				m25p,fast-read;
+				spi-cpha;
+				spi-cpol;
+				poll_mode = <1>;	/* poll_mode:1 interrupt mode: 0 */
+				reg-io-width = <2>;
+				reg = <0 0 0 0 >;	/* 0: flash chip selected bit */
+
+				partitions {
+					compatible = "fixed-partitions";
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					partition@0 {
+						label = "spares0";
+						reg = <0 0x400000>;
+					};
+				};
+			};
+
+			flash@1 {
+				compatible = "winbond,w25q32dw", "jedec,spi-flash";
+				spi-max-frequency = <25000000>;
+				m25p,fast-read;
+				spi-cpha;
+				spi-cpol;
+				poll_mode = <1>;	/* poll_mode:1 interrupt mode: 0 */
+				reg-io-width = <2>;
+				reg = <1 0 0 0 >;	/* 1: flash chip selected bit */
+
+				partitions {
+					compatible = "fixed-partitions";
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					partition@0 {
+						label = "spares1";
+						reg = <0 0x400000>;
+					};
+				};
+			};
+		};
+
+		lpc: lpc@0x8037 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "sunway,chip3_lpc";
+			reg = <0x8037 0x40000000 0x0 0x8000>;
+			status = "okay";
+
+		};
+
+		ipmi-kcs@0x8037 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			device_type = "ipmi";
+			compatible = "ipmi-kcs";
+			reg = <0x8037 0x10000ca2 0x0 0x10>;
+			reg-size = <1>;
+			reg-spacing = <1>;
+			reg-shift = <0>;
+			status = "disabled";
+		};
+
+		ipmi-bt@0x8037 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			device_type = "ipmi";
+			compatible = "ipmi-bt";
+			reg = <0x8037 0x100000e4 0x0 0x10>;
+			interrupt-parent=<&lpc_intc>;
+			interrupts = <10>;
+			reg-size = <1>;
+			reg-spacing = <1>;
+			reg-shift = <0>;
+			status = "disabled";
+		};
+
+		gpio: gpio@8036 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "snps,sw-gpio";
+			reg = <0x8036 0x0 0x0 0x8000>;
+			status = "okay";
+
+			porta: gpio-controller@0 {
+				compatible = "snps,dw-apb-gpio-port";
+				gpio-controller;
+				#gpio-cells = <2>;
+				snps,nr-gpios = <8>;
+				reg = <0 0 0 0>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupt-parent=<&intc>;
+				interrupts = <0>;
+			};
+		};
+
+	};
+};
diff --git a/arch/sw_64/boot/dts/chip_vt.dts b/arch/sw_64/boot/dts/chip_vt.dts
new file mode 100644
index 000000000000..f26285367f98
--- /dev/null
+++ b/arch/sw_64/boot/dts/chip_vt.dts
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Default device tree;
+ */
+
+/dts-v1/;
+/ {
+	compatible = "sunway,chip3";
+	model = "chip3";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		intc: interrupt-controller {
+			compatible = "sw64,sw6_irq_vt_controller";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+		};
+
+		uart: serial0@8801 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "ns16550a";
+			reg = <0x8801 0x3f8 0x0 0x10>;
+			interrupt-parent=<&intc>;
+			interrupts = <12>;
+			reg-shift = <0>;
+			reg-io-width = <1>;
+			clock-frequency = <24000000>;
+			status = "okay";
+		};
+		misc: misc0@8036 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "sw6,sunway-ged";
+			reg = <0x8036 0x0 0x0 0x20>;
+			interrupt-parent=<&intc>;
+			interrupts = <13>;
+			reg-shift = <0>;
+			reg-io-width = <8>;
+			clock-frequency = <24000000>;
+			status = "okay";
+		};
+		fw_cfg: 
fw_cfg@8049 { + dma-coherent; + reg = <0x8049 0x20000000 0x0 0x18>; + compatible = "qemu,fw-cfg-mmio"; + }; + }; +}; diff --git a/arch/sw_64/boot/dts/empty.dts b/arch/sw_64/boot/dts/empty.dts new file mode 100644 index 000000000000..f8fe34e29641 --- /dev/null +++ b/arch/sw_64/boot/dts/empty.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + }; +}; -- Gitee From 573d2370ba6c9421a5013ce55fe45b692f5e7c30 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:00 +0800 Subject: [PATCH 314/953] anolis: sw64: add ACPI support ANBZ: #4688 Add basic ACPI support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/acenv.h | 40 +++++ arch/sw_64/include/asm/acpi.h | 117 +++++++++++++ arch/sw_64/kernel/acpi.c | 304 +++++++++++++++++++++++++++++++++ arch/sw_64/pci/acpi.c | 245 ++++++++++++++++++++++++++ 4 files changed, 706 insertions(+) create mode 100644 arch/sw_64/include/asm/acenv.h create mode 100644 arch/sw_64/include/asm/acpi.h create mode 100644 arch/sw_64/kernel/acpi.c create mode 100644 arch/sw_64/pci/acpi.c diff --git a/arch/sw_64/include/asm/acenv.h b/arch/sw_64/include/asm/acenv.h new file mode 100644 index 000000000000..53b2898718fe --- /dev/null +++ b/arch/sw_64/include/asm/acenv.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_ACENV_H +#define _ASM_SW64_ACENV_H + +#define COMPILER_DEPENDENT_INT64 long +#define COMPILER_DEPENDENT_UINT64 unsigned long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ +#define ACPI_FLUSH_CPU_CACHE() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) +#endif /* _ASM_SW64_ACENV_H */ diff --git a/arch/sw_64/include/asm/acpi.h b/arch/sw_64/include/asm/acpi.h new file mode 100644 index 000000000000..ef46f481e1fd --- /dev/null +++ b/arch/sw_64/include/asm/acpi.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_ACPI_H +#define _ASM_SW64_ACPI_H + +#include +#include +#include +#include + +#ifdef CONFIG_ACPI +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_pci_disabled; + +/* _ASM_SW64_PDC_H */ +#define ACPI_PDC_P_FFH (0x0001) +#define ACPI_PDC_C_C1_HALT (0x0002) +#define ACPI_PDC_T_FFH (0x0004) +#define ACPI_PDC_SMP_C1PT (0x0008) +#define ACPI_PDC_SMP_C2C3 (0x0010) +#define ACPI_PDC_SMP_P_SWCOORD (0x0020) +#define ACPI_PDC_SMP_C_SWCOORD (0x0040) +#define ACPI_PDC_SMP_T_SWCOORD (0x0080) +#define ACPI_PDC_C_C1_FFH (0x0100) +#define ACPI_PDC_C_C2C3_FFH (0x0200) +#define 
ACPI_PDC_SMP_P_HWCOORD	(0x0800)
+
+#define ACPI_PDC_EST_CAPABILITY_SMP	(ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_EST_CAPABILITY_SWSMP	(ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_SMP_P_SWCOORD | \
+					 ACPI_PDC_SMP_P_HWCOORD | \
+					 ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_C_CAPABILITY_SMP	(ACPI_PDC_SMP_C2C3 | \
+					 ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_C_C1_FFH | \
+					 ACPI_PDC_C_C2C3_FFH)
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
+
+/**
+ * The number 64 is used just because it is the value most
+ * frequently used by other architectures. Actually, SW64
+ * does not have a fixmap area in its memory layout.
+ */
+#define NR_FIX_BTMAPS 64
+
+static inline void disable_acpi(void)
+{
+	acpi_disabled = 1;
+	acpi_pci_disabled = 1;
+	acpi_noirq = 1;
+}
+
+static inline void enable_acpi(void)
+{
+	acpi_disabled = 0;
+	acpi_pci_disabled = 0;
+	acpi_noirq = 0;
+}
+
+static inline void acpi_noirq_set(void)
+{
+	acpi_noirq = 1;
+}
+
+static inline void acpi_disable_pci(void)
+{
+	acpi_pci_disabled = 1;
+	acpi_noirq_set();
+}
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+	return true;
+}
+
+/* Low-level suspend routine. */
+extern int (*acpi_suspend_lowlevel)(void);
+extern unsigned long long arch_acpi_wakeup_start;
+
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address arch_acpi_wakeup_start
+
+/*
+ * Check if the CPU can handle C2 and deeper
+ */
+static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
+{
+	return max_cstate;
+}
+
+static inline bool arch_has_acpi_pdc(void)
+{
+	return false;
+}
+
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+}
+#else /* !CONFIG_ACPI */
+
+static inline void acpi_noirq_set(void) { }
+static inline void acpi_disable_pci(void) { }
+static inline void disable_acpi(void) { }
+
+#endif /* !CONFIG_ACPI */
+
+#define acpi_unlazy_tlb(x)
+#endif /* _ASM_SW64_ACPI_H */
diff --git a/arch/sw_64/kernel/acpi.c b/arch/sw_64/kernel/acpi.c
new file mode 100644
index 000000000000..9779d4bdea0d
--- /dev/null
+++ b/arch/sw_64/kernel/acpi.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#include
+#endif
+
+int acpi_disabled = 1;
+EXPORT_SYMBOL(acpi_disabled);
+
+int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
+int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
+EXPORT_SYMBOL(acpi_pci_disabled);
+
+static bool param_acpi_on __initdata;
+static bool param_acpi_off __initdata;
+
+int acpi_strict;
+u64 arch_acpi_wakeup_start;
+u64 acpi_saved_sp_s3;
+
+#define MAX_LOCAL_APIC 256
+
+#define PREFIX "ACPI: "
+/*
+ * The default interrupt routing model is PIC (8259). This gets
+ * overridden if IOAPICs are enumerated (below).
+ */
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
+void __iomem *__init __acpi_map_table(unsigned long phys, unsigned long size)
+{
+	if (!phys || !size)
+		return NULL;
+
+	return early_ioremap(phys, size);
+}
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+	if (!map || !size)
+		return;
+
+	early_iounmap(map, size);
+}
+/*
+ * The following __acpi_xx functions should be implemented for the specific cpu.
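+ * On SW64 they are the mostly trivial implementations of
+ * acpi_gsi_to_irq(), acpi_register_gsi() and friends below.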
+ */
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+{
+	if (irqp != NULL)
+		*irqp = acpi_register_gsi(NULL, gsi, -1, -1);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
+{
+	if (gsi)
+		*gsi = isa_irq;
+
+	return 0;
+}
+
+int (*acpi_suspend_lowlevel)(void);
+
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+static struct irq_domain *irq_default_domain;
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+	u32 irq;
+
+	irq = irq_find_mapping(irq_default_domain, gsi);
+
+	return irq;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+void acpi_unregister_gsi(u32 gsi)
+{
+
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+/*
+ * ACPI based hotplug support for CPU
+ */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+/* wrapper to silence section mismatch warning */
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+{
+	return 0;
+}
+EXPORT_SYMBOL(acpi_map_lsapic);
+
+int acpi_unmap_lsapic(int cpu)
+{
+	return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_lsapic);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+u8 acpi_checksum(u8 *table, u32 length)
+{
+	u8 ret = 0;
+
+	while (length--) {
+		ret += *table;
+		table++;
+	}
+	return -ret;
+}
+
+static int __init parse_acpi(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	/* disable both ACPI table parsing and interpreter */
+	if (strcmp(arg, "off") == 0)
+		param_acpi_off = true;
+	else if (strcmp(arg, "on") == 0)	/* prefer ACPI over device tree */
+		param_acpi_on = true;
+	else
+		return -EINVAL;	/* Core will printk when we return error. */
+
+	return 0;
+}
+early_param("acpi", parse_acpi);
+
+/*
+ * __acpi_acquire_global_lock always returns -1,
+ * indicating that the lock is owned.
+ *
+ * __acpi_release_global_lock always returns 0,
+ * indicating that no acquire request is pending.
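+ *
+ * In other words, the FACS global lock is always reported as
+ * acquired and never as contended; this trivial implementation
+ * is sufficient as long as no firmware agent contends for the lock.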
+ */
+int __acpi_acquire_global_lock(unsigned int *lock)
+{
+	return -1;
+}
+
+int __acpi_release_global_lock(unsigned int *lock)
+{
+	return 0;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+static int rcid_to_cpu(int physical_id)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS; ++i) {
+		if (__cpu_to_rcid[i] == physical_id)
+			return i;
+	}
+
+	/* physical id not found */
+	return -1;
+}
+
+/* Callback for Proximity Domain -> CPUID mapping */
+void __init
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+{
+	int pxm, node;
+	int cpu;	/* logical core id */
+
+	if (srat_disabled())
+		return;
+	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
+		bad_srat();
+		return;
+	}
+	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+		return;
+	pxm = pa->proximity_domain_lo;
+	if (acpi_srat_revision >= 2) {
+		pxm |= (pa->proximity_domain_hi[0] << 8);
+		pxm |= (pa->proximity_domain_hi[1] << 16);
+		pxm |= (pa->proximity_domain_hi[2] << 24);
+	}
+
+	node = acpi_map_pxm_to_node(pxm);
+	if (node < 0) {
+		pr_err("SRAT: Too many proximity domains %x\n", pxm);
+		bad_srat();
+		return;
+	}
+
+	if (pa->apic_id >= CONFIG_NR_CPUS) {
+		pr_err("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n", pxm, pa->apic_id, node);
+		return;
+	}
+
+	/* Record the mapping from logical core id to node id */
+	cpu = rcid_to_cpu(pa->apic_id);
+	if (cpu < 0) {
+		pr_err("SRAT: Cannot find the logical id for physical Core 0x%02x\n", pa->apic_id);
+		return;
+	}
+
+	early_map_cpu_to_node(cpu, node);
+
+	node_set(node, numa_nodes_parsed);
+	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static inline int save_add_info(void) { return 1; }
+#else
+static inline int save_add_info(void) { return 0; }
+#endif
+
+#endif
+
+void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+{
+#ifdef CONFIG_ACPI_NUMA
+	int nid;
+
+	nid = acpi_get_node(handle);
+	if (nid != NUMA_NO_NODE) {
+		set_cpuid_to_node(cpu, nid);
+		node_set(nid, numa_nodes_parsed);
+	}
+#endif
+	return 0;
+}
+
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
+		 int *pcpu)
+{
+	int cpu;
+	struct acpi_madt_local_apic *processor;
+
+	processor = kzalloc(sizeof(struct acpi_madt_local_apic), GFP_KERNEL);
+	if (!processor)
+		return -ENOMEM;
+	processor->id = physid;
+	processor->processor_id = acpi_id;
+	processor->lapic_flags = ACPI_MADT_ENABLED;
+
+	cpu = set_processor_mask(processor);
+	if (cpu < 0) {
+		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+		return cpu;
+	}
+
+	acpi_map_cpu2node(handle, cpu, physid);
+
+	*pcpu = cpu;
+	return 0;
+}
+EXPORT_SYMBOL(acpi_map_cpu);
+
+int acpi_unmap_cpu(int cpu)
+{
+#ifdef CONFIG_ACPI_NUMA
+	set_cpuid_to_node(cpu, NUMA_NO_NODE);
+#endif
+	set_cpu_present(cpu, false);
+	num_processors--;
+
+	pr_info("cpu%d hot removed\n", cpu);
+
+	return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_cpu);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+void __init acpi_boot_table_init(void)
+{
+	/**
+	 * ACPI is disabled by default. It is only enabled when the
+	 * firmware provides ACPI tables and the boot parameter
+	 * "acpi=on" is set.
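+	 *
+	 * "acpi=off" is also accepted by parse_acpi() above, and
+	 * simply keeps ACPI disabled, which is already the default.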
+	 */
+	if (param_acpi_on)
+		enable_acpi();
+
+	/*
+	 * If acpi_disabled, bail out
+	 */
+	if (!acpi_disabled) {
+		pr_warn("Currently, ACPI is an experimental feature!\n");
+		if (acpi_table_init()) {
+			pr_err("Failed to init ACPI tables\n");
+			disable_acpi();
+		} else
+			pr_info("Successfully parsed ACPI table\n");
+	}
+}
diff --git a/arch/sw_64/pci/acpi.c b/arch/sw_64/pci/acpi.c
new file mode 100644
index 000000000000..1353994320b3
--- /dev/null
+++ b/arch/sw_64/pci/acpi.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+#include
+
+struct pci_root_info {
+	struct acpi_pci_root_info info;
+	struct pci_config_window *cfg;
+};
+
+static void pci_acpi_release_root_info(struct acpi_pci_root_info *ci)
+{
+	struct pci_root_info *pci_ri;
+
+	pci_ri = container_of(ci, struct pci_root_info, info);
+	pci_ecam_free(pci_ri->cfg);
+	kfree(ci->ops);
+	kfree(pci_ri);
+}
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct acpi_device *adev = to_acpi_device(cfg->parent);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+
+	return root->segment;
+}
+
+/**
+ * Look up the MCFG table entry corresponding to the current
+ * PCI host controller, and set up the config space mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+	struct device *dev = &root->device->dev;
+	struct pci_config_window *cfg = NULL;
+	const struct pci_ecam_ops *ecam_ops = NULL;
+	struct resource *bus_res = &root->secondary;
+	struct resource cfg_res;
+	struct acpi_device *adev = NULL;
+	int ret = 0, bus_shift = 0;
+	u16 seg = root->segment;
+
+	ret = pci_mcfg_lookup(root, &cfg_res, &ecam_ops);
+	if (ret < 0) {
+		dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
+		return NULL;
+	}
+
+	/**
+	 * Do the bus-shift quirk here, since we cannot know the
+	 * ECAM address in the MCFG table when filling mcfg_quirks.
+	 */
+	bus_shift = ecam_ops->bus_shift;
+	cfg_res.start = root->mcfg_addr + (bus_res->start << bus_shift);
+	cfg_res.end = cfg_res.start + ((resource_size(bus_res)) << bus_shift) - 1;
+	cfg_res.flags = IORESOURCE_MEM;
+
+	/**
+	 * The ECAM area is considered a mem resource of the current
+	 * PCI host controller, so it is better recorded in the
+	 * ACPI namespace (_CRS).
+	 */
+	adev = acpi_resource_consumer(&cfg_res);
+	if (adev)
+		dev_info(dev, "ECAM area %pR reserved by %s\n", &cfg_res,
+				dev_name(&adev->dev));
+	else
+		dev_info(dev, "Note: ECAM area %pR not reserved in ACPI namespace\n",
+				&cfg_res);
+
+	cfg = pci_ecam_create(dev, &cfg_res, bus_res, ecam_ops);
+	if (IS_ERR(cfg)) {
+		dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+				PTR_ERR(cfg));
+		return NULL;
+	}
+
+	return cfg;
+}
+
+static int pci_acpi_prepare_root_resources(struct acpi_pci_root_info *ci)
+{
+	int status = 0;
+	acpi_status rc;
+	unsigned long long mem_space_base = 0;
+	struct resource_entry *entry = NULL, *tmp = NULL;
+	struct acpi_device *device = ci->bridge;
+
+	/**
+	 * Get host bridge resources via the _CRS method; the return
+	 * value is the number of resources parsed.
+	 */
+	status = acpi_pci_probe_root_resources(ci);
+	if (status > 0) {
+		/**
+		 * To distinguish between mem and pre_mem, firmware only passes
+		 * the lower 32 bits of mem via ACPI and uses the vendor-specific
+		 * "MEMH" object to record the upper 32 bits of mem.
+		 *
+		 * Get the upper 32 bits here.
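+		 *
+		 * For example (hypothetical values), a window of
+		 * 0x20000000-0x2fffffff with MEMH = 0x88 is patched
+		 * below to 0x8820000000-0x882fffffff.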
+		 */
+		rc = acpi_evaluate_integer(ci->bridge->handle,
+				"MEMH", NULL, &mem_space_base);
+		if (rc != AE_OK) {
+			dev_err(&device->dev, "unable to retrieve MEMH\n");
+			return -EEXIST;
+		}
+
+		resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+			if (entry->res->flags & IORESOURCE_MEM) {
+				if (!(entry->res->end & 0xFFFFFFFF00000000ULL)) {
+					/* Patch the mem resource with upper 32 bits */
+					entry->res->start |= (mem_space_base << 32);
+					entry->res->end |= (mem_space_base << 32);
+				} else {
+					/**
+					 * Add PREFETCH and MEM_64 flags for pre_mem,
+					 * so that we can distinguish between mem and
+					 * pre_mem.
+					 */
+					entry->res->flags |= IORESOURCE_PREFETCH;
+					entry->res->flags |= IORESOURCE_MEM_64;
+				}
+			}
+
+			dev_dbg(&device->dev,
+					"host bridge resource: 0x%llx-0x%llx flags [0x%lx]\n",
+					entry->res->start, entry->res->end, entry->res->flags);
+		}
+		return status;
+	}
+
+	/**
+	 * If the resources could not be parsed successfully, destroy
+	 * those which have already been parsed.
+	 */
+	resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+		dev_info(&device->dev,
+				"host bridge resource(ignored): 0x%llx-0x%llx flags [0x%lx]\n",
+				entry->res->start, entry->res->end, entry->res->flags);
+		resource_list_destroy_entry(entry);
+	}
+
+	return 0;
+}
+
+/**
+ * This function is called from ACPI code and used to set up the
+ * PCI host controller.
+ */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+	struct pci_bus *bus = NULL, *child = NULL;
+	struct pci_root_info *pci_ri = NULL;
+	struct acpi_pci_root_ops *root_ops = NULL;
+	int domain = root->segment;
+	int busnum = root->secondary.start;
+
+	pci_ri = kzalloc(sizeof(*pci_ri), GFP_KERNEL);
+	if (!pci_ri)
+		goto out_of_mem_0;
+
+	root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+	if (!root_ops)
+		goto out_of_mem_1;
+
+	pci_ri->cfg = pci_acpi_setup_ecam_mapping(root);
+	if (!pci_ri->cfg)
+		goto setup_ecam_err;
+
+	root_ops->release_info = pci_acpi_release_root_info;
+	root_ops->prepare_resources = pci_acpi_prepare_root_resources;
+	root_ops->pci_ops = (struct pci_ops *)&pci_ri->cfg->ops->pci_ops;
+
+	bus = pci_find_bus(domain, busnum);
+	if (bus) {
+		memcpy(bus->sysdata, pci_ri->cfg, sizeof(struct pci_config_window));
+		kfree(pci_ri->cfg);
+		kfree(pci_ri);
+		kfree(root_ops);
+	} else {
+		bus = acpi_pci_root_create(root, root_ops, &pci_ri->info, pci_ri->cfg);
+
+		/**
+		 * No need to call kfree here, because acpi_pci_root_create
+		 * will free the memory it allocated when it cannot create
+		 * the pci_bus.
+ */ + if (!bus) + return NULL; + + /* Some quirks for pci controller of Sunway after scanning Root Complex */ + sw64_pci_root_bridge_scan_finish_up(pci_find_host_bridge(bus)); + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + + return bus; + +setup_ecam_err: + kfree(root_ops); +out_of_mem_1: + kfree(pci_ri); +out_of_mem_0: + pr_warn("RC [%04x:%02x:] failed (out of memory or setup ecam error)!\n", + domain, busnum); + + return NULL; +} + +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + if (!acpi_disabled) { + struct pci_config_window *cfg = bridge->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct pci_controller *hose = cfg->priv; + struct device *bus_dev = &bridge->bus->dev; + + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, hose->node); + + /* Some quirks for pci controller of Sunway before scanning Root Complex */ + sw64_pci_root_bridge_prepare(bridge); + } + + return 0; +} + +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + acpi_pci_remove_bus(bus); +} -- Gitee From b2e15a7f88279d5432d732943d15b547ec3152b1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:06 +0800 Subject: [PATCH 315/953] anolis: sw64: add DMA support ANBZ: #4688 Add DMA support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/dma-direct.h | 15 ++ arch/sw_64/include/asm/dma-mapping.h | 14 ++ arch/sw_64/include/asm/dma.h | 350 +++++++++++++++++++++++++++ 3 files changed, 379 insertions(+) create mode 100644 arch/sw_64/include/asm/dma-direct.h create mode 100644 arch/sw_64/include/asm/dma-mapping.h create mode 100644 arch/sw_64/include/asm/dma.h diff --git a/arch/sw_64/include/asm/dma-direct.h b/arch/sw_64/include/asm/dma-direct.h new file mode 100644 index 000000000000..dee1680b8f6d --- /dev/null +++ b/arch/sw_64/include/asm/dma-direct.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DMA_DIRECT_H +#define _ASM_SW64_DMA_DIRECT_H + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr; +} + +#endif /* _ASM_SW64_DMA_DIRECT_H */ diff --git a/arch/sw_64/include/asm/dma-mapping.h b/arch/sw_64/include/asm/dma-mapping.h new file mode 100644 index 000000000000..65795f8e5792 --- /dev/null +++ b/arch/sw_64/include/asm/dma-mapping.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DMA_MAPPING_H +#define _ASM_SW64_DMA_MAPPING_H + + +extern const struct dma_map_ops *dma_ops; + +static inline const struct dma_map_ops *get_arch_dma_ops(void) +{ + return dma_ops; +} + + +#endif /* _ASM_SW64_DMA_MAPPING_H */ diff --git a/arch/sw_64/include/asm/dma.h b/arch/sw_64/include/asm/dma.h new file mode 100644 index 000000000000..cf6a9cf75233 --- /dev/null +++ b/arch/sw_64/include/asm/dma.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm-sw_64/dma.h + * + * This is essentially the same as the i386 DMA stuff, as the SW64PCs + * use ISA-compatible dma. The only extension is support for high-page + * registers that allow to set the top 8 bits of a 32-bit DMA address. 
+ * This register should be written last when setting up a DMA address + * as this will also enable DMA across 64 KB boundaries. + */ + +/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_SW64_DMA_H +#define _ASM_SW64_DMA_H + +#include +#include + +#define dma_outb outb +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +#define MAX_DMA_CHANNELS 8 + +/* + * ISA DMA limitations on sw64 platforms, + + * These may be due to SIO (PCI<->ISA bridge) chipset limitation, or + * just a wiring limit. + */ + +/* + * Maximum address for all the others is the complete 32-bit bus + * address space. + */ +#define MAX_ISA_DMA_ADDRESS 0x100000000UL + +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + +/* + * If we have the iommu, we don't have any address limitations on DMA. + * Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone + * like i386. 
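+ *
+ * Accordingly, MAX_DMA_ADDRESS below is ~0UL, i.e. no limit at all.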
+ */ +#define MAX_DMA_ADDRESS ~0UL + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ +#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ +#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0) +#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1) +#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2) +#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3) +#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4) +#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5) +#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6) +#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7) + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + +extern spinlock_t dma_spin_lock; + +static inline unsigned long claim_dma_lock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static inline void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static inline void enable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static inline void disable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. 
+ * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while interrupts are disabled! --- + */ +static inline void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static inline void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr <= 3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); +} + +/* set extended mode for a specific DMA channel */ +static inline void set_dma_ext_mode(unsigned int dmanr, char ext_mode) +{ + if (dmanr <= 3) + dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG); + else + dma_outb(ext_mode | (dmanr & 3), DMA2_EXT_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register. + */ +static inline void set_dma_page(unsigned int dmanr, unsigned int pagenr) +{ + switch (dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + dma_outb((pagenr >> 8), DMA_HIPAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + dma_outb((pagenr >> 8), DMA_HIPAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + dma_outb((pagenr >> 8), DMA_HIPAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + dma_outb((pagenr >> 8), DMA_HIPAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + dma_outb((pagenr >> 8), DMA_HIPAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + dma_outb((pagenr >> 8), DMA_HIPAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + dma_outb((pagenr >> 8), DMA_HIPAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static inline void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + if (dmanr <= 3) { + dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + } else { + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + } + set_dma_page(dmanr, a >> 16); /* set hipage last to enable 32-bit mode */ +} + + +/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static inline void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + } else { + dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. 
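+ *
+ * For example, right after set_dma_count(dmanr, 512) and before the
+ * transfer has started, this returns 512 on channels 0-3.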
+ */ +static inline int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr <= 3) ? + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE : + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr <= 3) ? count : (count << 1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ +#define KERNEL_HAVE_CHECK_DMA +extern int check_dma(unsigned int dmanr); + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + + +#endif /* _ASM_SW64_DMA_H */ -- Gitee From 6a8915f933d9946eb1ae331d5fb0c8e6101e8551 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:07 +0800 Subject: [PATCH 316/953] anolis: sw64: add EFI support ANBZ: #4688 Add basic EFI support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/dmi.h | 30 ++++++++++++++++++++++++++++++ arch/sw_64/include/asm/efi.h | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 arch/sw_64/include/asm/dmi.h create mode 100644 arch/sw_64/include/asm/efi.h diff --git a/arch/sw_64/include/asm/dmi.h b/arch/sw_64/include/asm/dmi.h new file mode 100644 index 000000000000..05e80c9a3a76 --- /dev/null +++ b/arch/sw_64/include/asm/dmi.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/dmi.h + * + * Copyright (C) 2019 Deepin Limited. + * Porting by: Deepin Kernel Team (kernel@deepin.com) + * + * based on arch/x864/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_SW64_DMI_H +#define _ASM_SW64_DMI_H + +#include +#include +#include +#include + +/* Use early IO mappings for DMI because it's initialized early */ +#define dmi_early_remap(x, l) early_ioremap(x, l) +#define dmi_early_unmap(x, l) early_iounmap(x, l) +#define dmi_remap(x, l) early_ioremap(x, l) +#define dmi_unmap(x) early_iounmap(x, 0) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif /* _ASM_SW64_DMI_H */ diff --git a/arch/sw_64/include/asm/efi.h b/arch/sw_64/include/asm/efi.h new file mode 100644 index 000000000000..34d5637e23c2 --- /dev/null +++ b/arch/sw_64/include/asm/efi.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EFI_H +#define _ASM_SW64_EFI_H + +#include +#include +#ifdef CONFIG_EFI +extern void efi_init(void); +extern unsigned long entSuspend; + +#define SLEEP_ENTRY_GUID EFI_GUID(0x59cb76bb, 0x9c3a, 0x4c8f, 0xbd, 0x5c, 0xc0, 0x0f, 0x20, 0x61, 0x18, 0x4b) + +#else +#define efi_init() +#define efi_idmap_init() +#endif + +#define arch_efi_call_virt_setup() +#define arch_efi_call_virt_teardown() + +#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000001 + +/* arch specific definitions used by the stub code */ + +/* + * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from + * start of kernel and may not cross a 2MiB boundary. We set alignment to + * 2MiB so we know it won't cross a 2MiB boundary. 
+ */ +#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */ +#define MAX_FDT_OFFSET SZ_512M + +#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) + +#endif /* _ASM_SW64_EFI_H */ -- Gitee From 63b9211d3ef1fdf631f0ce1f1bbef21fe85c834f Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:21 +0800 Subject: [PATCH 317/953] anolis: sw64: add KVM support ANBZ: #4688 Add KVM support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/hcall.h | 41 + arch/sw_64/include/asm/kvm_asm.h | 38 + arch/sw_64/include/asm/kvm_cma.h | 11 + arch/sw_64/include/asm/kvm_emulate.h | 46 + arch/sw_64/include/asm/kvm_host.h | 225 ++++ arch/sw_64/include/asm/kvm_mmio.h | 17 + arch/sw_64/include/asm/kvm_mmu.h | 131 +++ arch/sw_64/include/asm/kvm_para.h | 26 + arch/sw_64/include/asm/kvm_timer.h | 9 + arch/sw_64/include/asm/vcpu.h | 106 ++ arch/sw_64/include/uapi/asm/kvm.h | 131 +++ arch/sw_64/kvm/Kconfig | 49 + arch/sw_64/kvm/Makefile | 20 + arch/sw_64/kvm/emulate.c | 128 +++ arch/sw_64/kvm/entry.S | 263 +++++ arch/sw_64/kvm/handle_exit.c | 85 ++ arch/sw_64/kvm/irq.h | 12 + arch/sw_64/kvm/kvm_cma.c | 269 +++++ arch/sw_64/kvm/kvm_core3.c | 419 +++++++ arch/sw_64/kvm/kvm_core4.c | 132 +++ arch/sw_64/kvm/kvm_timer.c | 83 ++ arch/sw_64/kvm/mmio.c | 89 ++ arch/sw_64/kvm/mmu.c | 1561 ++++++++++++++++++++++++++ arch/sw_64/kvm/perf.c | 27 + arch/sw_64/kvm/sw64.c | 592 ++++++++++ arch/sw_64/kvm/trace.h | 62 + arch/sw_64/kvm/vmem.c | 183 +++ 27 files changed, 4755 insertions(+) create mode 100644 arch/sw_64/include/asm/hcall.h create mode 100644 arch/sw_64/include/asm/kvm_asm.h create mode 100644 arch/sw_64/include/asm/kvm_cma.h create mode 100644 arch/sw_64/include/asm/kvm_emulate.h create mode 100644 arch/sw_64/include/asm/kvm_host.h create mode 100644 arch/sw_64/include/asm/kvm_mmio.h create mode 100644 arch/sw_64/include/asm/kvm_mmu.h create mode 100644 arch/sw_64/include/asm/kvm_para.h create mode 100644 arch/sw_64/include/asm/kvm_timer.h create mode 100644 arch/sw_64/include/asm/vcpu.h create mode 100644 arch/sw_64/include/uapi/asm/kvm.h create mode 100644 arch/sw_64/kvm/Kconfig create mode 100644 arch/sw_64/kvm/Makefile create mode 100644 arch/sw_64/kvm/emulate.c create mode 100644 arch/sw_64/kvm/entry.S create mode 100644 arch/sw_64/kvm/handle_exit.c create mode 100644 arch/sw_64/kvm/irq.h create mode 100644 arch/sw_64/kvm/kvm_cma.c create mode 100644 arch/sw_64/kvm/kvm_core3.c create mode 100644 arch/sw_64/kvm/kvm_core4.c create mode 100644 arch/sw_64/kvm/kvm_timer.c create mode 100644 arch/sw_64/kvm/mmio.c create mode 100644 arch/sw_64/kvm/mmu.c create mode 100644 arch/sw_64/kvm/perf.c create mode 100644 arch/sw_64/kvm/sw64.c create mode 100644 arch/sw_64/kvm/trace.h create mode 100644 arch/sw_64/kvm/vmem.c diff --git a/arch/sw_64/include/asm/hcall.h b/arch/sw_64/include/asm/hcall.h new file mode 100644 index 000000000000..bded05779db7 --- /dev/null +++ b/arch/sw_64/include/asm/hcall.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HCALL_H +#define _ASM_SW64_HCALL_H + +#define HMC_hcall 0x32 +/* HCALL must > 0 */ +enum HCALL_TYPE { + HCALL_HALT = 10, + HCALL_NOTIFY = 11, + HCALL_SHUTDOWN = 12, + HCALL_SET_CLOCKEVENT = 13, + HCALL_IVI = 14, /* interrupt between virtual cpu */ + HCALL_TBI = 15, /* tlb flush for virtual cpu */ + HCALL_STOP = 16, /* indicate virtual cpu stopped */ + 
HCALL_RESTART = 17, /* indicate virtual cpu restarted */ + HCALL_MSI = 18, /* guest request msi intr */ + HCALL_MSIX = 19, /* guest request msix intr */ + HCALL_SWNET = 20, /* guest request swnet service */ + HCALL_SWNET_IRQ = 21, /* guest request swnet intr */ + HCALL_FATAL_ERROR = 22, /* guest fatal error, issued by hmcode */ + HCALL_MEMHOTPLUG = 23, /* guest memory hotplug event */ + NR_HCALL +}; + +static inline unsigned long hcall(unsigned long hcall, unsigned long arg0, + unsigned long arg1, unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = hcall; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5 " + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), "=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#endif /* _ASM_SW64_HCALL_H */ diff --git a/arch/sw_64/include/asm/kvm_asm.h b/arch/sw_64/include/asm/kvm_asm.h new file mode 100644 index 000000000000..fd1b25018fc8 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_asm.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_ASM_H +#define _ASM_SW64_KVM_ASM_H + +#define SW64_KVM_EXIT_HOST_INTR 0 +#define SW64_KVM_EXIT_IO 1 +#define SW64_KVM_MIGRATION_SET_DIRTY 2 +#define SW64_KVM_MIGRATION_SET_DIRTY_HM 3 +#define SW64_KVM_EXIT_HALT 10 +#define SW64_KVM_EXIT_SHUTDOWN 12 +#define SW64_KVM_EXIT_TIMER 13 +#define SW64_KVM_EXIT_IPI 14 +#define SW64_KVM_EXIT_STOP 16 +#define SW64_KVM_EXIT_RESTART 17 +#define SW64_KVM_EXIT_APT_FAULT 18 +#define SW64_KVM_EXIT_FATAL_ERROR 22 +#define SW64_KVM_EXIT_MEMHOTPLUG 23 +#define SW64_KVM_EXIT_DEBUG 24 + + +#define kvm_sw64_exception_type \ + {0, "HOST_INTR" }, \ + {1, "IO" }, \ + {10, "HALT" }, \ + {12, "SHUTDOWN" }, \ + {13, "TIMER" }, \ + {14, "IPI" }, \ + {16, "STOP" }, \ + {17, "RESTART" }, \ + {18, "APT_FAULT" }, \ + {22, "FATAL_ERROR" }, \ + {23, "MEMHOTPLUG" }, \ + {24, "DEBUG" } + + +#include + +#endif /* _ASM_SW64_KVM_ASM_H */ diff --git a/arch/sw_64/include/asm/kvm_cma.h b/arch/sw_64/include/asm/kvm_cma.h new file mode 100644 index 000000000000..d50ba599ceb7 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_cma.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_CMA_H +#define _ASM_SW64_KVM_CMA_H + +#include + +extern int __init kvm_cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + const char *name, struct cma **res_cma); +#endif /* _ASM_SW64_KVM_CMA_H */ diff --git a/arch/sw_64/include/asm/kvm_emulate.h b/arch/sw_64/include/asm/kvm_emulate.h new file mode 100644 index 000000000000..915aa6c0bce2 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_emulate.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_EMULATE_H +#define _ASM_SW64_KVM_EMULATE_H + +#include +#include + +#define R(x) ((size_t) &((struct kvm_regs *)0)->x) + +static int reg_offsets[32] = { + R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), + R(r9), R(r10), R(r11), R(r12), R(r13), R(r14), R(r15), + R(r16), R(r17), R(r18), + R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), + R(r27), R(r28), R(gp), + 0, 0, +}; + + +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, + unsigned long val) +{ + void *regs_ptr = (void 
*)&vcpu->arch.regs; + + regs_ptr += reg_offsets[reg_num]; + *(unsigned long *)regs_ptr = val; +} + +static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, u8 reg_num) +{ + void *regs_ptr = (void *)&vcpu->arch.regs; + + if (reg_num == 31) + return 0; + regs_ptr += reg_offsets[reg_num]; + return *(unsigned long *)regs_ptr; +} + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, + struct kvm_run *run); + +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more); +void clear_vcpu_irq(struct kvm_vcpu *vcpu); +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq); +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more); +#endif /* _ASM_SW64_KVM_EMULATE_H */ diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h new file mode 100644 index 000000000000..09a995218a2c --- /dev/null +++ b/arch/sw_64/include/asm/kvm_host.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_HOST_H +#define _ASM_SW64_KVM_HOST_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define last_vpn(cpu) (cpu_data[cpu].last_vpn) + +#ifdef CONFIG_SUBARCH_C3B +#define VPN_BITS 8 +#define GUEST_RESET_PC 0xffffffff80011100 +#endif + +#ifdef CONFIG_SUBARCH_C4 +#define VPN_BITS 10 +#define GUEST_RESET_PC 0xfff0000000011002 +#endif + +#define VPN_FIRST_VERSION (1UL << VPN_BITS) +#define VPN_MASK ((1UL << VPN_BITS) - 1) +#define VPN_SHIFT (64 - VPN_BITS) + +#define KVM_MAX_VCPUS 64 +#define KVM_INTERNAL_MEM_SLOTS (KVM_MEM_SLOTS_NUM - 512) + +#define KVM_HALT_POLL_NS_DEFAULT 0 +#define KVM_IRQCHIP_NUM_PINS 256 +/* KVM Hugepage definitions for sw64 */ +#define KVM_NR_PAGE_SIZES 3 +#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9) +#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) +#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) +#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) +#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) + +/* + * The architecture supports 48-bit GPA as input to the addtional stage translations. + */ +#define KVM_PHYS_SHIFT (48) +#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) + +struct kvm_arch_memory_slot { + unsigned long host_phys_addr; + bool valid; +}; + +struct kvm_arch { + unsigned long host_phys_addr; + unsigned long size; + + /* segment table */ + unsigned long *seg_pgd; + + struct swvm_mem mem; + /* Addtional stage page table*/ + pgd_t *pgd; +}; + +#define KVM_NR_MEM_OBJS 40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. 
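+ *
+ * A sketch of the intended use, with the static helpers that live in
+ * arch/sw_64/kvm/mmu.c (new_page is illustrative):
+ *
+ *	mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
+ *			       KVM_MMU_CACHE_MIN_PAGES, KVM_NR_MEM_OBJS);
+ *	...
+ *	new_page = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);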
+ */ +struct kvm_mmu_memory_cache { + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +}; + +struct kvm_vcpu_arch { + struct kvm_regs regs __aligned(32); + struct vcpucb vcb; + struct task_struct *tsk; + unsigned int pcpu_id; /* current running pcpu id */ + + /* Virtual clock device */ + struct hrtimer hrt; + unsigned long timer_next_event; + unsigned long vtimer_freq; + + int first_run; + int halted; + int stopped; + int restart; + + /* Pending virtual interrupts */ + DECLARE_BITMAP(irqs_pending, SWVM_IRQS); + unsigned long vpnc[NR_CPUS]; + + /* Detect first run of a vcpu */ + bool has_run_once; + + /* WAIT executed */ + int wait; + + /* vcpu power-off state */ + bool power_off; + + /* Don't run the guest (internal implementation need) */ + bool pause; + + struct kvm_decode mmio_decode; + + /* Cache some mmu pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* guest live migration */ + unsigned long migration_mark; + unsigned long shtclock; +}; + +struct vmem_info { + unsigned long start; + size_t size; + atomic_t refcnt; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 pid; + u64 exits; + u64 io_exits; + u64 mmio_exits; + u64 migration_set_dirty; + u64 shutdown_exits; + u64 restart_exits; + u64 stop_exits; + u64 ipi_exits; + u64 timer_exits; + u64 debug_exits; +#ifdef CONFIG_KVM_MEMHOTPLUG + u64 memhotplug_exits; +#endif + u64 fatal_error_exits; + u64 halt_exits; + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_poll_invalid; + u64 signal_exits; + u64 steal; + u64 st_max; + u64 utime; + u64 stime; + u64 gtime; +}; + +#ifdef CONFIG_KVM_MEMHOTPLUG +void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr); +#endif +#ifdef CONFIG_SUBARCH_C4 +#define KVM_ARCH_WANT_MMU_NOTIFIER +#endif +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +void update_vcpu_stat_time(struct kvm_vcpu_stat *vcpu_stat); +void check_vcpu_requests(struct kvm_vcpu *vcpu); +void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu); +int vmem_init(void); +void vmem_exit(void); +int __sw64_vcpu_run(unsigned long vcb_pa, struct kvm_regs *regs, + struct hcall_args *args); +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs); +void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type); +static inline void kvm_arch_hardware_disable(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); + +int kvm_sw64_perf_init(void); +int kvm_sw64_perf_teardown(void); +void kvm_flush_tlb_all(void); +void kvm_sw64_update_vpn(struct 
kvm_vcpu *vcpu, unsigned long vpn); +int kvm_sw64_init_vm(struct kvm *kvm); +void kvm_sw64_destroy_vm(struct kvm *kvm); +int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu); +long kvm_sw64_set_vcb(struct file *filp, unsigned long arg); +long kvm_sw64_get_vcb(struct file *filp, unsigned long arg); + +void update_aptp(unsigned long pgd); +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu); +#endif /* _ASM_SW64_KVM_HOST_H */ diff --git a/arch/sw_64/include/asm/kvm_mmio.h b/arch/sw_64/include/asm/kvm_mmio.h new file mode 100644 index 000000000000..c87b259e9395 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmio.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMIO_H +#define _ASM_SW64_KVM_MMIO_H + +#include +#include + +struct kvm_decode { + unsigned long rt; + bool sign_extend; +}; + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct hcall_args *hargs); + +#endif /* _ASM_SW64_KVM_MMIO_H */ diff --git a/arch/sw_64/include/asm/kvm_mmu.h b/arch/sw_64/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..f4493de934ba --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmu.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMU_H +#define _ASM_SW64_KVM_MMU_H + +#define AF_ACCESS_TYPE_SHIFT 55 +#define AF_INV_LEVEL_SHIFT 53 +#define AF_FAULT_STATUS_SHIFT 48 + +#define AF_ACCESS_TYPE_MASK 0x3 +#define AF_INV_LEVEL_MASK 0x3 +#define AF_FAULT_STATUS_MASK 0x1f +#define AF_ENTRY_ADDR_MASK ((0x1UL << AF_FAULT_STATUS_SHIFT) - 1) + +/* access type defination */ +#define AF_READ_ACCESS_TYPE 0x1 +#define AF_WRITE_ACCESS_TYPE 0x2 +#define AF_EXEC_ACCESS_TYPE 0x3 + +/* invalid page level */ +#define AF_INV_LEVEL_1 0 +#define AF_INV_LEVEL_2 1 +#define AF_INV_LEVEL_3 2 +#define AF_INV_LEVEL_4 3 + +/* fault status */ +#define AF_STATUS_MISCONFIG 0x1 +#define AF_STATUS_FOR 0x2 +#define AF_STATUS_FOW 0x4 +#define AF_STATUS_FOE 0x8 +#define AF_STATUS_INV 0x10 + +#define KVM_MMU_CACHE_MIN_PAGES 2 + +static inline void kvm_set_aptpte_readonly(pte_t *pte) +{ + pte_val(*pte) |= _PAGE_FOW; +} + +static inline bool kvm_aptpte_readonly(pte_t *pte) +{ + return (pte_val(*pte) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpmd_readonly(pmd_t *pmd) +{ + pmd_val(*pmd) |= _PAGE_FOW; +} + +static inline bool kvm_aptpmd_readonly(pmd_t *pmd) +{ + return (pmd_val(*pmd) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpud_readonly(pud_t *pud) +{ + pud_val(*pud) |= _PAGE_FOW; +} + +static inline bool kvm_aptpud_readonly(pud_t *pud) +{ + return (pud_val(*pud) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline pte_t kvm_pte_mkwrite(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t kvm_pte_mkexec(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOE; + return pte; +} + +static inline bool kvm_pte_exec(pte_t *pte) +{ + return !(pte_val(*pte) & _PAGE_FOE); +} + +static inline pmd_t kvm_pmd_mkwrite(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t kvm_pmd_mkexec(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOE; + return pmd; +} + +static inline bool kvm_pmd_exec(pmd_t *pmd) +{ + return !(pmd_val(*pmd) & _PAGE_FOE); +} + +static inline pud_t kvm_pud_mkwrite(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOW; + return pud; +} + +static inline pud_t kvm_pud_mkexec(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOE; + return pud; +} + +static inline bool kvm_pud_exec(pud_t *pud) +{ + return !(pud_val(*pud) & _PAGE_FOE); +} + +void 
kvm_core4_commit_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change); +void kvm_core4_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +void kvm_core4_flush_shadow_all(struct kvm *kvm); +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_handle_apt_fault(struct kvm_vcpu *vcpu); +int kvm_alloc_addtional_stage_pgd(struct kvm *kvm); +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); +void apt_unmap_vm(struct kvm *kvm); +#endif /* _ASM_SW64_KVM_MMU_H */ diff --git a/arch/sw_64/include/asm/kvm_para.h b/arch/sw_64/include/asm/kvm_para.h new file mode 100644 index 000000000000..442f1c7d9f83 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_para.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_PARA_H +#define _ASM_SW64_KVM_PARA_H + +#include + +#define HMC_hcall 0x32 + +static inline unsigned long kvm_hypercall3(unsigned long num, + unsigned long arg0, + unsigned long arg1, + unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = num; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5" + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), "=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} +#endif /* _ASM_SW64_KVM_PARA_H */ diff --git a/arch/sw_64/include/asm/kvm_timer.h b/arch/sw_64/include/asm/kvm_timer.h new file mode 100644 index 000000000000..8080873c684f --- /dev/null +++ b/arch/sw_64/include/asm/kvm_timer.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_TIMER_H +#define _ASM_SW64_KVM_TIMER_H + +void set_timer(struct kvm_vcpu *vcpu, unsigned long delta); +void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +enum hrtimer_restart clockdev_fn(struct hrtimer *timer); + +#endif /* _ASM_SW64_KVM_TIMER_H */ diff --git a/arch/sw_64/include/asm/vcpu.h b/arch/sw_64/include/asm/vcpu.h new file mode 100644 index 000000000000..c4e3caacbc70 --- /dev/null +++ b/arch/sw_64/include/asm/vcpu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_VCPU_H +#define _ASM_SW64_VCPU_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_SUBARCH_C3B + +struct vcpucb { + unsigned long go_flag; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr; + unsigned long soft_tid; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + unsigned long reset_entry; + unsigned long pvcpu; + unsigned 
long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_irq_vector; + unsigned long pri_base; + unsigned long stack_pc_dfault; + unsigned long guest_p20; + unsigned long guest_dfault_double; + unsigned long guest_irqs_pending; + unsigned long guest_hm_r30; + unsigned long migration_mark; + unsigned long guest_longtime; + unsigned long guest_longtime_offset; + unsigned long reserved[3]; +}; + +#else + +struct vcpucb { + unsigned long ktp; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long dtb_upcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr_usr; + unsigned long ptbr_sys; + unsigned long soft_tid; + unsigned long int_stat0; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + unsigned long reset_entry; + unsigned long pvcpu; + unsigned long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_pc_save; + unsigned long shtclock_offset; + unsigned long reserved[8]; +}; +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_VCPU_H */ diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..2253475deaa5 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_KVM_H +#define _UAPI_ASM_SW64_KVM_H + +/* + * KVM SW specific structures and definitions. 
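+ *
+ * Everything in this header is userspace ABI. A VMM reaches it through the
+ * generic KVM ioctls, e.g. (sketch, where vcpu_fd is the fd returned by
+ * KVM_CREATE_VCPU):
+ *
+ *	struct kvm_regs regs;
+ *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);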
+ */ +#define SWVM_IRQS 256 +#define IRQ_PENDING_INTX_SHIFT 16 +#define IRQ_PENDING_MSI_VECTORS_SHIFT 17 + +enum SW64_KVM_IRQ { + SW64_KVM_IRQ_IPI = 27, + SW64_KVM_IRQ_TIMER = 9, + SW64_KVM_IRQ_KBD = 29, + SW64_KVM_IRQ_MOUSE = 30, +}; + +#define SWVM_VM_TYPE_DEFAULT 0 +#define SWVM_VM_TYPE_PHYVCPU 1 +#define __KVM_HAVE_IRQ_LINE + +#define SWVM_NUM_NUMA_MEMBANKS 1 +#define KVM_NR_IRQCHIPS 1 +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + unsigned long r0; + unsigned long r1; + unsigned long r2; + unsigned long r3; + + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + + unsigned long r23; + unsigned long r24; + unsigned long r25; + unsigned long r26; + + unsigned long r27; + unsigned long r28; + unsigned long __padding0; + unsigned long fpcr; + + unsigned long fp[124]; + /* These are saved by HMcode: */ + unsigned long ps; + unsigned long pc; + unsigned long gp; + unsigned long r16; + unsigned long r17; + unsigned long r18; +}; + + +/* + * return stack for __sw64_vcpu_run + */ +struct vcpu_run_ret_stack { + unsigned long ra; + unsigned long r0; +}; + +struct host_int_args { + unsigned long r18; + unsigned long r17; + unsigned long r16; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { +}; + +struct hcall_args { + unsigned long arg0, arg1, arg2; +}; + +struct phyvcpu_hcall_args { + unsigned long call; + struct hcall_args args; +}; + +struct kvm_debug_exit_arch { + unsigned long epc; +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct swvm_mem_bank { + unsigned long guest_phys_addr; + unsigned long host_phys_addr; + unsigned long host_addr; + unsigned long size; +}; + +struct swvm_mem { + struct swvm_mem_bank membank[SWVM_NUM_NUMA_MEMBANKS]; +}; + +#endif /* _UAPI_ASM_SW64_KVM_H */ diff --git a/arch/sw_64/kvm/Kconfig b/arch/sw_64/kvm/Kconfig new file mode 100644 index 000000000000..b7e43d0bae51 --- /dev/null +++ b/arch/sw_64/kvm/Kconfig @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + select PREEMPT_NOTIFIERS + select CMA + depends on NET + select HAVE_KVM_EVENTFD + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_IRQFD + select HAVE_KVM_MSI + select KVM_VFIO + select MMU_NOTIFIER + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select TUN + select GENERIC_ALLOCATOR + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + help + Support for hosting Guest kernels. + We don't support KVM with 3-level page tables yet. + + If unsure, say N. + +config KVM_MEMHOTPLUG + bool "Memory hotplug support for guest" + depends on KVM && MEMORY_HOTPLUG && SUBARCH_C3B + help + Provides memory hotplug support for SW64 guest. 
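+
+	  If unsure, say N.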
+ + +source "drivers/vhost/Kconfig" + +endif # VIRTUALIZATION diff --git a/arch/sw_64/kvm/Makefile b/arch/sw_64/kvm/Makefile new file mode 100644 index 000000000000..8111014c5cca --- /dev/null +++ b/arch/sw_64/kvm/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Kernel-based Virtual Machine module +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += sw64.o +kvm-y += entry.o +kvm-y += emulate.o +kvm-y += mmio.o +kvm-y += kvm_timer.o +kvm-y += handle_exit.o +kvm-y += perf.o +kvm-$(CONFIG_SUBARCH_C3B) += kvm_core3.o kvm_cma.o +kvm-$(CONFIG_SUBARCH_C4) += kvm_core4.o mmu.o diff --git a/arch/sw_64/kvm/emulate.c b/arch/sw_64/kvm/emulate.c new file mode 100644 index 000000000000..fc37461b97a0 --- /dev/null +++ b/arch/sw_64/kvm/emulate.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) +{ + int opc, ra; + +#ifdef CONFIG_SUBARCH_C3B + opc = (insn >> 26) & 0x3f; + ra = (insn >> 21) & 0x1f; +#elif defined(CONFIG_SUBARCH_C4) + unsigned long ds_stat, exc_sum; + + ds_stat = read_csr(CSR_DS_STAT); + exc_sum = read_csr(CSR_EXC_SUM); + + opc = (ds_stat >> 4) & 0x3f; + ra = (exc_sum >> 8) & 0x1f; +#endif + + switch (opc) { + case 0x20: /* LDBU */ + run->mmio.is_write = 0; + run->mmio.len = 1; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x21: /* LDHU */ + run->mmio.is_write = 0; + run->mmio.len = 2; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x22: /* LDW */ + run->mmio.is_write = 0; + run->mmio.len = 4; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x23: /* LDL */ + case 0x24: /* LDL_U */ + run->mmio.is_write = 0; + run->mmio.len = 8; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x28: /* STB */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffUL; + run->mmio.len = 1; + break; + case 0x29: /* STH */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffUL; + run->mmio.len = 2; + break; + case 0x2a: /* STW */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffffffUL; + run->mmio.len = 4; + break; + case 0x2b: /* STL */ + case 0x2c: /* STL_U */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra); + run->mmio.len = 8; + break; + default: + pr_info("Miss done opc %d\n", opc); + break; + } +} + +/* + * Virtual Interrupts. + */ +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more) +{ + unsigned int irq; + DECLARE_BITMAP(blk, SWVM_IRQS); + + bitmap_copy(blk, vcpu->arch.irqs_pending, SWVM_IRQS); + + irq = find_last_bit(blk, SWVM_IRQS); + + return irq; +} + +void clear_vcpu_irq(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vcb.vcpu_irq = 0xffffffffffffffffUL; +} + +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + vcpu->arch.vcb.vcpu_irq = irq; +} + +/* + * This actually diverts the Guest to running an interrupt handler, once an + * interrupt has been identified by interrupt_pending(). + */ +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more) +{ + BUG_ON(irq >= SWVM_IRQS); + + /* Otherwise we check if they have interrupts disabled. 
*/ + if (vcpu->arch.vcb.vcpu_irq_disabled) { + clear_vcpu_irq(vcpu); + return; + } + + /* If they don't have a handler (yet?), we just ignore it */ + if (vcpu->arch.vcb.ent_int != 0) { + /* OK, mark it no longer pending and deliver it. */ + clear_bit(irq, (vcpu->arch.irqs_pending)); + /* + * set_guest_interrupt() takes the interrupt descriptor and a + * flag to say whether this interrupt pushes an error code onto + * the stack as well: virtual interrupts never do. + */ + inject_vcpu_irq(vcpu, irq); + } +} diff --git a/arch/sw_64/kvm/entry.S b/arch/sw_64/kvm/entry.S new file mode 100644 index 000000000000..a61ecc387d26 --- /dev/null +++ b/arch/sw_64/kvm/entry.S @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 + */ + .text +#include +#include +#include +#include + + .set noat + +/* + * r16: physical address of guest kvm_vcpu.arch.vcb + * r17: pointer to guest kvm_vcpu.arch.kvm_regs + * r18: pointer to hcall args + */ +ENTRY(__sw64_vcpu_run) + /* save host fpregs */ + rfpcr $f0 + fstd $f0, TASK_THREAD_FPCR($8) + vstd $f2, TASK_THREAD_F2($8) + vstd $f3, TASK_THREAD_F3($8) + vstd $f4, TASK_THREAD_F4($8) + vstd $f5, TASK_THREAD_F5($8) + vstd $f6, TASK_THREAD_F6($8) + vstd $f7, TASK_THREAD_F7($8) + vstd $f8, TASK_THREAD_F8($8) + vstd $f9, TASK_THREAD_F9($8) + + ldi sp, -VCPU_RET_SIZE(sp) + /* save host pt_regs to current kernel stack */ + ldi sp, -PT_REGS_SIZE(sp) + stl $9, PT_REGS_R9(sp) + stl $8, PT_REGS_R8(sp) + stl $10, PT_REGS_R10(sp) + stl $11, PT_REGS_R11(sp) + stl $12, PT_REGS_R12(sp) + stl $13, PT_REGS_R13(sp) + stl $14, PT_REGS_R14(sp) + stl $15, PT_REGS_R15(sp) + stl $26, PT_REGS_R26(sp) + + /* restore guest switch stack from guest kvm_regs struct */ + ldl $0, KVM_REGS_R0($17) + ldl $1, KVM_REGS_R1($17) + /* restore $2 later */ + ldl $3, KVM_REGS_R3($17) + ldl $4, KVM_REGS_R4($17) + ldl $5, KVM_REGS_R5($17) + ldl $6, KVM_REGS_R6($17) + ldl $7, KVM_REGS_R7($17) + ldl $8, KVM_REGS_R8($17) + ldl $9, KVM_REGS_R9($17) + ldl $10, KVM_REGS_R10($17) + ldl $11, KVM_REGS_R11($17) + ldl $12, KVM_REGS_R12($17) + ldl $13, KVM_REGS_R13($17) + ldl $14, KVM_REGS_R14($17) + ldl $15, KVM_REGS_R15($17) + ldl $19, KVM_REGS_R19($17) + ldl $20, KVM_REGS_R20($17) + ldl $21, KVM_REGS_R21($17) + ldl $22, KVM_REGS_R22($17) + ldl $23, KVM_REGS_R23($17) + ldl $24, KVM_REGS_R24($17) + ldl $25, KVM_REGS_R25($17) + ldl $26, KVM_REGS_R26($17) + ldl $27, KVM_REGS_R27($17) + ldl $28, KVM_REGS_R28($17) + + fldd $f0, KVM_REGS_FPCR($17) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $g_setfpec_0 + subl $2, 0x1, $2 + beq $2, $g_setfpec_1 + subl $2, 0x1, $2 + beq $2, $g_setfpec_2 + setfpec3 + br $g_setfpec_over +$g_setfpec_0: + setfpec0 + br $g_setfpec_over +$g_setfpec_1: + setfpec1 + br $g_setfpec_over +$g_setfpec_2: + setfpec2 +$g_setfpec_over: + ldl $2, KVM_REGS_R2($17) + vldd $f0, KVM_REGS_F0($17) + vldd $f1, KVM_REGS_F1($17) + vldd $f2, KVM_REGS_F2($17) + vldd $f3, KVM_REGS_F3($17) + vldd $f4, KVM_REGS_F4($17) + vldd $f5, KVM_REGS_F5($17) + vldd $f6, KVM_REGS_F6($17) + vldd $f7, KVM_REGS_F7($17) + vldd $f8, KVM_REGS_F8($17) + vldd $f9, KVM_REGS_F9($17) + vldd $f10, KVM_REGS_F10($17) + vldd $f11, KVM_REGS_F11($17) + vldd $f12, KVM_REGS_F12($17) + vldd $f13, KVM_REGS_F13($17) + vldd $f14, KVM_REGS_F14($17) + vldd $f15, KVM_REGS_F15($17) + vldd $f16, KVM_REGS_F16($17) + vldd $f17, KVM_REGS_F17($17) + vldd $f18, KVM_REGS_F18($17) + vldd $f19, KVM_REGS_F19($17) + vldd $f20, KVM_REGS_F20($17) + vldd $f21, KVM_REGS_F21($17) + vldd $f22, 
KVM_REGS_F22($17) + vldd $f23, KVM_REGS_F23($17) + vldd $f24, KVM_REGS_F24($17) + vldd $f25, KVM_REGS_F25($17) + vldd $f26, KVM_REGS_F26($17) + vldd $f27, KVM_REGS_F27($17) + vldd $f28, KVM_REGS_F28($17) + vldd $f29, KVM_REGS_F29($17) + vldd $f30, KVM_REGS_F30($17) + + ldi $17, KVM_REGS_PS($17) + + /* enter guest */ + /* r16 = guest vcpucb pointer */ + /* r17 = base of guest kvm_regs.ps, saved/restored by hmcode */ + + /* enter guest now */ + sys_call 0x31 + /* exit guest now */ + + ldi $17, -KVM_REGS_PS($17) /* r17: base of kvm_regs */ + + vstd $f0, KVM_REGS_F0($17) + vstd $f1, KVM_REGS_F1($17) + vstd $f2, KVM_REGS_F2($17) + vstd $f3, KVM_REGS_F3($17) + vstd $f4, KVM_REGS_F4($17) + vstd $f5, KVM_REGS_F5($17) + vstd $f6, KVM_REGS_F6($17) + vstd $f7, KVM_REGS_F7($17) + vstd $f8, KVM_REGS_F8($17) + vstd $f9, KVM_REGS_F9($17) + vstd $f10, KVM_REGS_F10($17) + vstd $f11, KVM_REGS_F11($17) + vstd $f12, KVM_REGS_F12($17) + vstd $f13, KVM_REGS_F13($17) + vstd $f14, KVM_REGS_F14($17) + vstd $f15, KVM_REGS_F15($17) + vstd $f16, KVM_REGS_F16($17) + vstd $f17, KVM_REGS_F17($17) + vstd $f18, KVM_REGS_F18($17) + vstd $f19, KVM_REGS_F19($17) + vstd $f20, KVM_REGS_F20($17) + vstd $f21, KVM_REGS_F21($17) + vstd $f22, KVM_REGS_F22($17) + vstd $f23, KVM_REGS_F23($17) + vstd $f24, KVM_REGS_F24($17) + vstd $f25, KVM_REGS_F25($17) + vstd $f26, KVM_REGS_F26($17) + vstd $f27, KVM_REGS_F27($17) + vstd $f28, KVM_REGS_F28($17) + vstd $f29, KVM_REGS_F29($17) + vstd $f30, KVM_REGS_F30($17) + + rfpcr $f0 + fstd $f0, KVM_REGS_FPCR($17) + + /* don't save r0 Hmcode have saved r0 for us */ + stl $1, KVM_REGS_R1($17) + stl $2, KVM_REGS_R2($17) + stl $3, KVM_REGS_R3($17) + stl $4, KVM_REGS_R4($17) + stl $5, KVM_REGS_R5($17) + stl $6, KVM_REGS_R6($17) + stl $7, KVM_REGS_R7($17) + stl $8, KVM_REGS_R8($17) + stl $9, KVM_REGS_R9($17) + stl $10, KVM_REGS_R10($17) + stl $11, KVM_REGS_R11($17) + stl $12, KVM_REGS_R12($17) + stl $13, KVM_REGS_R13($17) + stl $14, KVM_REGS_R14($17) + stl $15, KVM_REGS_R15($17) + stl $19, KVM_REGS_R19($17) + stl $20, KVM_REGS_R20($17) + stl $21, KVM_REGS_R21($17) + stl $22, KVM_REGS_R22($17) + stl $23, KVM_REGS_R23($17) + stl $24, KVM_REGS_R24($17) + stl $25, KVM_REGS_R25($17) + stl $26, KVM_REGS_R26($17) + stl $27, KVM_REGS_R27($17) + stl $28, KVM_REGS_R28($17) + + /* restore host regs from host sp */ + ldl $8, PT_REGS_R8(sp) + ldl $9, PT_REGS_R9(sp) + ldl $10, PT_REGS_R10(sp) + ldl $11, PT_REGS_R11(sp) + ldl $12, PT_REGS_R12(sp) + ldl $13, PT_REGS_R13(sp) + ldl $14, PT_REGS_R14(sp) + ldl $15, PT_REGS_R15(sp) + ldl $26, PT_REGS_R26(sp) + ldi sp, PT_REGS_SIZE(sp) + + /* restore host fpregs */ + fldd $f0, TASK_THREAD_FPCR($8) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $setfpec_0 + subl $2, 0x1, $2 + beq $2, $setfpec_1 + subl $2, 0x1, $2 + beq $2, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f2, TASK_THREAD_F2($8) + vldd $f3, TASK_THREAD_F3($8) + vldd $f4, TASK_THREAD_F4($8) + vldd $f5, TASK_THREAD_F5($8) + vldd $f6, TASK_THREAD_F6($8) + vldd $f7, TASK_THREAD_F7($8) + vldd $f8, TASK_THREAD_F8($8) + vldd $f9, TASK_THREAD_F9($8) + + /* if $0 > 0, handle hcall */ + bgt $0, $ret_to + + stl $26, VCPU_RET_RA(sp) + stl $0, VCPU_RET_R0(sp) + + /* Hmcode will setup in */ + /* restore $16 $17 $18, do interrupt trick */ + ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE)(sp) + ldl $16, HOST_INT_R16(sp) + ldl $17, HOST_INT_R17(sp) + ldl $18, HOST_INT_R18(sp) + ldi sp, (HOST_INT_SIZE + 
PT_REGS_SIZE)(sp) + + ldi $19, -PT_REGS_SIZE(sp) + call $26, do_entInt + ldl $26, VCPU_RET_RA(sp) + ldl $0, VCPU_RET_R0(sp) +$ret_to: + /* ret($0) indicate hcall number */ + ldi sp, VCPU_RET_SIZE(sp) /* pop stack */ + ret diff --git a/arch/sw_64/kvm/handle_exit.c b/arch/sw_64/kvm/handle_exit.c new file mode 100644 index 000000000000..69b97860db88 --- /dev/null +++ b/arch/sw_64/kvm/handle_exit.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include +#include +#include +#include + +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs) +{ + gfn_t gfn __maybe_unused; + + switch (exception_index) { + case SW64_KVM_EXIT_IO: + vcpu->stat.io_exits++; + return io_mem_abort(vcpu, run, hargs); + case SW64_KVM_MIGRATION_SET_DIRTY_HM: + case SW64_KVM_MIGRATION_SET_DIRTY: + vcpu->stat.migration_set_dirty++; + gfn = hargs->arg2 >> 24; + mutex_lock(&vcpu->kvm->slots_lock); + kvm_vcpu_mark_page_dirty(vcpu, gfn); + mutex_unlock(&vcpu->kvm->slots_lock); + return 1; + case SW64_KVM_EXIT_HALT: + vcpu->stat.halt_exits++; + vcpu->arch.halted = 1; + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_SHUTDOWN: + vcpu->stat.shutdown_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN; + return 0; + case SW64_KVM_EXIT_RESTART: + vcpu->stat.restart_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; + return 0; + case SW64_KVM_EXIT_STOP: + vcpu->stat.stop_exits++; + vcpu->arch.halted = 1; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_TIMER: + vcpu->stat.timer_exits++; + set_timer(vcpu, hargs->arg0); + return 1; + case SW64_KVM_EXIT_IPI: + vcpu->stat.ipi_exits++; + vcpu_send_ipi(vcpu, hargs->arg0, hargs->arg1); + return 1; + case SW64_KVM_EXIT_DEBUG: + vcpu->stat.debug_exits++; + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.epc = vcpu->arch.regs.pc; + return 0; +#ifdef CONFIG_KVM_MEMHOTPLUG + case SW64_KVM_EXIT_MEMHOTPLUG: + vcpu->stat.memhotplug_exits++; + vcpu_mem_hotplug(vcpu, hargs->arg0); + return 1; +#endif +#ifdef CONFIG_SUBARCH_C4 + case SW64_KVM_EXIT_APT_FAULT: + return kvm_handle_guest_abort(vcpu, run); +#endif + case SW64_KVM_EXIT_FATAL_ERROR: + vcpu->stat.fatal_error_exits++; + pr_err("Guest fatal error: Reason=[%lx], EXC_PC=[%lx], DVA=[%lx]", hargs->arg0, hargs->arg1, hargs->arg2); + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + vcpu->run->hw.hardware_exit_reason = hargs->arg0; + return 0; + } + + return 1; +} diff --git a/arch/sw_64/kvm/irq.h b/arch/sw_64/kvm/irq.h new file mode 100644 index 000000000000..9268ab6af492 --- /dev/null +++ b/arch/sw_64/kvm/irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * irq.h: in kernel interrupt controller related definitions + */ + +#ifndef _SW64_KVM_IRQ_H +#define _SW64_KVM_IRQ_H +static inline int irqchip_in_kernel(struct kvm *kvm) +{ + return 1; +} +#endif /* _SW64_KVM_IRQ_H */ diff --git a/arch/sw_64/kvm/kvm_cma.c b/arch/sw_64/kvm/kvm_cma.c new file mode 100644 index 000000000000..de04eb5d20d7 --- /dev/null +++ b/arch/sw_64/kvm/kvm_cma.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Contiguous Memory Allocator for KVM + * + * This program is modified on the basis of CMA, to achieve cross-node + * memory reservation, as well as 
reserved memory information statistics. + */ + +#define pr_fmt(fmt) "kvm_cma: " fmt + +#include +#include +#include +#include +#include +#include + +#include "../../../mm/cma.h" +#include "../../../mm/internal.h" + +struct cma kvm_cma_areas[MAX_CMA_AREAS]; +unsigned int kvm_cma_area_count; + +static void __init init_kvm_cma_reserved_pageblock(struct page *page) +{ + unsigned int i = pageblock_nr_pages; + struct page *p = page; + + do { + __ClearPageReserved(p); + set_page_count(p, 0); + } while (++p, --i); + + set_pageblock_migratetype(page, MIGRATE_ISOLATE); + + if (pageblock_order >= MAX_ORDER) { + i = pageblock_nr_pages; + p = page; + do { + set_page_refcounted(p); + __free_pages(p, MAX_ORDER - 1); + p += MAX_ORDER_NR_PAGES; + } while (i -= MAX_ORDER_NR_PAGES); + } else { + set_page_refcounted(page); + __free_pages(page, pageblock_order); + } + + adjust_managed_page_count(page, pageblock_nr_pages); +} + +static int __init kvm_cma_activate_area(struct cma *cma) +{ + int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long); + unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; + unsigned int i = cma->count >> pageblock_order; + + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + + if (!cma->bitmap) { + cma->count = 0; + return -ENOMEM; + } + + WARN_ON_ONCE(!pfn_valid(pfn)); + + do { + unsigned int j; + + base_pfn = pfn; + + for (j = pageblock_nr_pages; j; --j, pfn++) + WARN_ON_ONCE(!pfn_valid(pfn)); + + init_kvm_cma_reserved_pageblock(pfn_to_page(base_pfn)); + } while (--i); + + spin_lock_init(&cma->lock); + + return 0; +} + +static int __init kvm_cma_init_reserved_areas(void) +{ + int i; + + for (i = 0; i < kvm_cma_area_count; i++) { + int ret = kvm_cma_activate_area(&kvm_cma_areas[i]); + + if (ret) + return ret; + } + + return 0; +} +core_initcall(kvm_cma_init_reserved_areas); + +/** + * kvm_cma_init_reserved_mem() - create custom contiguous area + * from reserved memory + * @base: Base address of the reserved area + * @size: Size of the reserved area (in bytes), + * @order_per_bit: Order of pages represented by one bit on bitmap. + * @name: The name of the area. If this parameter is NULL, the name of + * the area will be set to "cmaN", where N is a running counter of + * used areas. + * @res_cma: Pointer to store the created cma region. + * + * This function creates custom contiguous area from already reserved memory. + */ +int __init kvm_cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, + unsigned int order_per_bit, const char *name, + struct cma **res_cma) +{ + struct cma *cma; + phys_addr_t alignment; + + /* Sanity checks */ + if (kvm_cma_area_count == ARRAY_SIZE(kvm_cma_areas)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + if (!size || !memblock_is_region_reserved(base, size)) + return -EINVAL; + + /* ensure minimal alignment required by mm core */ + alignment = PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order); + + /* alignment should be aligned with order_per_bit */ + if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size) + return -EINVAL; + + /* + * Each reserved area must be initialised later, when more kernel + * subsystems (like slab allocator) are available. 
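+	 * That second step happens in kvm_cma_init_reserved_areas() above,
+	 * which runs as a core_initcall() once the page allocator is up.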
+	 */
+	cma = &kvm_cma_areas[kvm_cma_area_count];
+
+	if (name)
+		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
+	else
+		snprintf(cma->name, CMA_MAX_NAME, "cma%d", kvm_cma_area_count);
+
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
+	*res_cma = cma;
+	kvm_cma_area_count++;
+	totalcma_pages += (size / PAGE_SIZE);
+
+	return 0;
+}
+
+/**
+ * kvm_cma_declare_contiguous() - reserve contiguous area for VM
+ * @base: Base address of the reserved area (optional),
+ * @size: Size of the reserved area (in bytes),
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @alignment: Alignment for the CMA area, should be power of 2 or zero
+ * @order_per_bit: Order of pages represented by one bit on bitmap.
+ * @name: The name of the area. See function cma_init_reserved_mem()
+ * @res_cma: Pointer to store the created cma region.
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas.
+ */
+int __init kvm_cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
+			phys_addr_t alignment, unsigned int order_per_bit,
+			const char *name, struct cma **res_cma)
+{
+	phys_addr_t memblock_end = memblock_end_of_DRAM();
+	phys_addr_t highmem_start;
+	int ret = 0;
+
+	/*
+	 * We can't use __pa(high_memory) directly, since high_memory
+	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
+	 * complain. Find the boundary by adding one to the last valid
+	 * address.
+	 */
+	highmem_start = __pa(high_memory - 1) + 1;
+
+	if (!size)
+		return -EINVAL;
+
+	if (alignment && !is_power_of_2(alignment))
+		return -EINVAL;
+
+	/*
+	 * Sanitise input arguments.
+	 * Pages at both ends of the CMA area could be merged into adjacent
+	 * unmovable migratetype pages by the page allocator's buddy
+	 * algorithm. In that case you couldn't get contiguous memory, which
+	 * is not what we want.
+	 */
+	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
+			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+	if (base & (alignment - 1)) {
+		ret = -EINVAL;
+		pr_err("Region at %pa must be aligned to %pa bytes\n",
+			&base, &alignment);
+		goto err;
+	}
+	base = ALIGN(base, alignment);
+	size = ALIGN(size, alignment);
+	limit &= ~(alignment - 1);
+
+	if (!base) {
+		ret = -EINVAL;
+		pr_err("Base address of region must be specified!\n");
+		goto err;
+	}
+
+	/* size should be aligned with order_per_bit */
+	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
+		return -EINVAL;
+
+	/*
+	 * The request region must not cross the low/high memory boundary.
+	 */
+	if (base < highmem_start && base + size > highmem_start) {
+		ret = -EINVAL;
+		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+			&base, &highmem_start);
+		goto err;
+	}
+
+	/*
+	 * If the limit is unspecified or above the memblock end, its effective
+	 * value will be the memblock end. Set it explicitly to simplify further
+	 * checks.
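+	 * (For example, a caller passing limit == 0 on a machine whose
+	 * memblock ends at 8GiB gets an effective limit of 0x200000000.)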
+ */ + if (limit == 0 || limit > memblock_end) + limit = memblock_end; + + if (base + size > limit) { + ret = -EINVAL; + pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", + &size, &base, &limit); + goto err; + } + + /* Reserve memory */ + if (memblock_is_region_reserved(base, size) || + memblock_reserve(base, size) < 0) { + ret = -EBUSY; + goto err; + } + ret = kvm_cma_init_reserved_mem(base, size, order_per_bit, + name, res_cma); + if (ret) + goto free_mem; + + pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M, + &base); + return 0; + +free_mem: + memblock_free((void *)base, size); +err: + pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); + return ret; +} diff --git a/arch/sw_64/kvm/kvm_core3.c b/arch/sw_64/kvm/kvm_core3.c new file mode 100644 index 000000000000..f7e9150d40e0 --- /dev/null +++ b/arch/sw_64/kvm/kvm_core3.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "trace.h" +#include "vmem.c" + +__read_mostly bool bind_vcpu_enabled; + +#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_NUMA) +static int __init bind_vcpu_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + debugfs_create_bool("bind_vcpu", 0644, + sw64_debugfs_dir, &bind_vcpu_enabled); + return 0; +} + +static void bind_vcpu_exit(void) +{ + bind_vcpu_enabled = false; +} +#else +static int __init bind_vcpu_init(void) +{ + return 0; +} + +static void bind_vcpu_exit(void) { } + +#endif + +static unsigned long longtime_offset; + +#ifdef CONFIG_KVM_MEMHOTPLUG +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + unsigned long base; + + base = virt_to_phys(vcpu->kvm->arch.seg_pgd); + return base | ((vpn & VPN_MASK) << 44); +} +#else +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + unsigned long base, size; + + base = vcpu->kvm->arch.host_phys_addr; + size = vcpu->kvm->arch.size; + return (base >> 23) | ((size >> 23) << 16) | ((vpn & VPN_MASK) << 44); +} +#endif + +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.vcb.vpcr == 0) { + vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0); +#ifndef CONFIG_KVM_MEMHOTPLUG + if (unlikely(bind_vcpu_enabled)) { + int nid; + unsigned long end; + + end = vcpu->kvm->arch.host_phys_addr + vcpu->kvm->arch.size; + nid = pfn_to_nid(PHYS_PFN(vcpu->kvm->arch.host_phys_addr)); + if (pfn_to_nid(PHYS_PFN(end)) == nid) + set_cpus_allowed_ptr(vcpu->arch.tsk, cpumask_of_node(nid)); + } +#endif + vcpu->arch.vcb.upcr = 0x7; + } +} + +void kvm_flush_tlb_all(void) +{ + tbia(); +} + +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn) +{ + vcpu->arch.vcb.vpcr = ((vcpu->arch.vcb.vpcr) & (~(VPN_MASK << 44))) | (vpn << 44); + vcpu->arch.vcb.dtb_vpcr = ((vcpu->arch.vcb.dtb_vpcr) & (~(VPN_MASK << VPN_SHIFT))) | (vpn << VPN_SHIFT); +} + +int kvm_sw64_init_vm(struct kvm *kvm) +{ +#ifdef CONFIG_KVM_MEMHOTPLUG + unsigned long *seg_pgd; + + if (kvm->arch.seg_pgd != NULL) { + kvm_err("kvm_arch already initialized?\n"); + return -EINVAL; + } + + seg_pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); + if (!seg_pgd) + return -ENOMEM; + + kvm->arch.seg_pgd = seg_pgd; + #endif + return 0; +} + +void kvm_sw64_destroy_vm(struct kvm *kvm) +{ + #ifdef CONFIG_KVM_MEMHOTPLUG + void *seg_pgd = NULL; + + if (kvm->arch.seg_pgd) { + seg_pgd = READ_ONCE(kvm->arch.seg_pgd); + 
kvm->arch.seg_pgd = NULL; + } + + if (seg_pgd) + free_pages_exact(seg_pgd, PAGE_SIZE); + #endif + kvm_destroy_vcpus(kvm); +} + +#ifdef CONFIG_KVM_MEMHOTPLUG +static void setup_segment_table(struct kvm *kvm, + struct kvm_memory_slot *memslot, unsigned long addr, size_t size) +{ + unsigned long *seg_pgd = kvm->arch.seg_pgd; + unsigned long num_of_entry; + unsigned long base_hpa = addr; + unsigned long i; + + num_of_entry = round_up(size, 1 << 30) >> 30; + + for (i = 0; i < num_of_entry; i++) { + *seg_pgd = base_hpa + (i << 30); + seg_pgd++; + } +} +#endif + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + unsigned long addr; + struct file *vm_file; + struct vm_area_struct *vma; + struct vmem_info *info; + struct kvm_userspace_memory_region new_mem; + struct kvm_userspace_memory_region *mem = &new_mem; + unsigned long ret; + size_t size; + + mem->flags = new->flags; + mem->guest_phys_addr = ((new->base_gfn) << PAGE_SHIFT); + mem->memory_size = ((new->npages) << PAGE_SHIFT); + mem->userspace_addr = new->userspace_addr; + + if (change == KVM_MR_FLAGS_ONLY || change == KVM_MR_DELETE) + return 0; + + if (test_bit(IO_MARK_BIT, (unsigned long *)(&(mem->guest_phys_addr)))) + return 0; + + if (test_bit(IO_MARK_BIT + 1, (unsigned long *)(&(mem->guest_phys_addr)))) + return 0; + +#ifndef CONFIG_KVM_MEMHOTPLUG + if (mem->guest_phys_addr) { + pr_info("%s, No KVM MEMHOTPLUG support!\n", __func__); + return 0; + } +#endif + if (!sw64_kvm_pool) + return -ENOMEM; + + pr_info("%s: %#llx %#llx, user addr: %#llx\n", __func__, + mem->guest_phys_addr, mem->memory_size, mem->userspace_addr); + + vma = find_vma(current->mm, mem->userspace_addr); + if (!vma) + return -ENOMEM; + vm_file = vma->vm_file; + + if (!vm_file) { + info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL); + + size = round_up(mem->memory_size, 8<<20); + addr = gen_pool_alloc(sw64_kvm_pool, size); + if (!addr) + return -ENOMEM; + vm_munmap(mem->userspace_addr, mem->memory_size); + ret = vm_mmap(vm_file, mem->userspace_addr, mem->memory_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, 0); + if ((long)ret < 0) + return ret; + + vma = find_vma(current->mm, mem->userspace_addr); + if (!vma) + return -ENOMEM; + + info->start = addr; + info->size = size; + vma->vm_private_data = (void *) info; + + vma->vm_ops = &vmem_vm_ops; + vma->vm_ops->open(vma); + + ret = vmem_vm_insert_page(vma); + if ((int)ret < 0) + return ret; + } else { + info = vm_file->private_data; + addr = info->start; + } + + pr_info("guest phys addr = %#lx, size = %#lx\n", + addr, vma->vm_end - vma->vm_start); + + kvm->arch.host_phys_addr = (u64)addr; + kvm->arch.size = round_up(mem->memory_size, 8<<20); + + memset(__va(addr), 0, 0x2000000); + + return 0; +} + +/* + * kvm_mark_migration write the mark on every vcpucbs of the kvm, which tells + * the system to do migration while the mark is on, and flush all vcpu's tlbs + * at the beginning of the migration. 
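+ *
+ * Typical use (sketch, from this file and kvm_arch_commit_memory_region()
+ * below): dirty logging being switched on for a memslot does
+ *	kvm_mark_migration(kvm, 1);
+ * and the mark is dropped again per vcpu when userspace restores the
+ * vcpucb on the destination side (see kvm_sw64_set_vcb()).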
+ */ +void kvm_mark_migration(struct kvm *kvm, int mark) +{ + struct kvm_vcpu *vcpu; + unsigned long cpu; + + kvm_for_each_vcpu(cpu, vcpu, kvm) + vcpu->arch.vcb.migration_mark = mark << 2; + + kvm_flush_remote_tlbs(kvm); +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + /* + * At this point memslot has been committed and there is an + * allocated dirty_bitmap[], dirty pages will be tracked while the + * memory slot is write protected. + */ + + + /* If it's the first time dirty logging, flush all vcpu tlbs. */ + if ((change == KVM_MR_FLAGS_ONLY) && (new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + kvm_mark_migration(kvm, 1); +} + +int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu) +{ + unsigned long addr = vcpu->kvm->arch.host_phys_addr; + + hrtimer_cancel(&vcpu->arch.hrt); + vcpu->arch.vcb.soft_cid = vcpu->vcpu_id; + vcpu->arch.vcb.vcpu_irq_disabled = 1; + vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ + vcpu->arch.power_off = 0; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + + if (vcpu->vcpu_id == 0) + memset(__va(addr), 0, 0x2000000); + + return 0; +} + +long kvm_sw64_get_vcb(struct file *filp, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + + if (vcpu->arch.vcb.migration_mark) { + unsigned long result = sw64_io_read(0, LONG_TIME) + + vcpu->arch.vcb.guest_longtime_offset; + vcpu->arch.vcb.guest_longtime = result; + vcpu->arch.vcb.guest_irqs_pending = vcpu->arch.irqs_pending[0]; + } + + if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb))) + return -EINVAL; + + return 0; +} + +long kvm_sw64_set_vcb(struct file *filp, unsigned long arg) +{ + unsigned long result; + struct kvm_vcpu *vcpu = filp->private_data; + struct vcpucb *kvm_vcb; + + kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb)); + memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb)); + + if (vcpu->arch.vcb.migration_mark) { + /* updated vpcr needed by destination vm */ + vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0); + /* synchronize the longtime of source and destination */ + if (vcpu->arch.vcb.soft_cid == 0) { + result = sw64_io_read(0, LONG_TIME); + vcpu->arch.vcb.guest_longtime_offset = vcpu->arch.vcb.guest_longtime - result; + longtime_offset = vcpu->arch.vcb.guest_longtime_offset; + } else + vcpu->arch.vcb.guest_longtime_offset = longtime_offset; + + set_timer(vcpu, 200000000); + vcpu->arch.vcb.migration_mark = 0; + } + + return 0; +} + +#ifdef CONFIG_KVM_MEMHOTPLUG +void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *slot; + unsigned long start_pfn = start_addr >> PAGE_SHIFT; + + kvm_for_each_memslot(slot, kvm_memslots(kvm)) { + if (start_pfn == slot->base_gfn) { + unsigned long *seg_pgd; + unsigned long num_of_entry = slot->npages >> 17; + unsigned long base_hpa = slot->arch.host_phys_addr; + unsigned long i; + + seg_pgd = kvm->arch.seg_pgd + (start_pfn >> 17); + for (i = 0; i < num_of_entry; i++) { + *seg_pgd = base_hpa + (i << 30); + seg_pgd++; + } + } + } +} +#endif + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn_offset, + unsigned long mask) +{ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} + +void update_aptp(unsigned 
long pgd) +{ +} + +static int __init kvm_core3_init(void) +{ + int i, ret; + + bind_vcpu_init(); + + ret = vmem_init(); + if (unlikely(ret)) + goto out; + + for (i = 0; i < NR_CPUS; i++) + last_vpn(i) = VPN_FIRST_VERSION; + + ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); + + if (likely(!ret)) + return 0; + + vmem_exit(); +out: + bind_vcpu_exit(); + return ret; +} + +static void __exit kvm_core3_exit(void) +{ + kvm_exit(); + vmem_exit(); + bind_vcpu_exit(); +} + +module_init(kvm_core3_init); +module_exit(kvm_core3_exit); diff --git a/arch/sw_64/kvm/kvm_core4.c b/arch/sw_64/kvm/kvm_core4.c new file mode 100644 index 000000000000..08d28a365a3b --- /dev/null +++ b/arch/sw_64/kvm/kvm_core4.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "trace.h" + +static unsigned long shtclock_offset; + +void update_aptp(unsigned long pgd) +{ + imemb(); + write_csr_imb(pgd, CSR_APTP); +} + +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn) +{ + vcpu->arch.vcb.vpcr = vpn << 44; + vcpu->arch.vcb.dtb_vpcr = vpn; +} + +void kvm_flush_tlb_all(void) +{ + tbivpn(-1, 0, 0); +} + +int kvm_sw64_init_vm(struct kvm *kvm) +{ + return kvm_alloc_addtional_stage_pgd(kvm); +} + +void kvm_sw64_destroy_vm(struct kvm *kvm) +{ + kvm_destroy_vcpus(kvm); +} + +int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.has_run_once) + apt_unmap_vm(vcpu->kvm); + + hrtimer_cancel(&vcpu->arch.hrt); + vcpu->arch.vcb.soft_cid = vcpu->vcpu_id; + vcpu->arch.vcb.vcpu_irq_disabled = 1; + vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ + vcpu->arch.power_off = 0; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + + return 0; +} + +long kvm_sw64_get_vcb(struct file *filp, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + + if (vcpu->arch.migration_mark) + vcpu->arch.shtclock = read_csr(CSR_SHTCLOCK) + + vcpu->arch.vcb.shtclock_offset; + if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb))) + return -EINVAL; + + return 0; +} + +long kvm_sw64_set_vcb(struct file *filp, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + struct vcpucb *kvm_vcb; + + kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb)); + memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb)); + + if (vcpu->arch.migration_mark) { + /* synchronize the longtime of source and destination */ + if (vcpu->arch.vcb.soft_cid == 0) + shtclock_offset = vcpu->arch.shtclock - read_csr(CSR_SHTCLOCK); + vcpu->arch.vcb.shtclock_offset = shtclock_offset; + set_timer(vcpu, 200000000); + vcpu->arch.migration_mark = 0; + } + return 0; +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + const struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) +{ + return 0; +} + +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu) +{ +} + +static int __init kvm_core4_init(void) +{ + int i, ret; + + for (i = 0; i < NR_CPUS; i++) + last_vpn(i) = VPN_FIRST_VERSION; + + ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); + + if (ret) + return ret; + + return 0; +} + +static void __exit kvm_core4_exit(void) +{ + kvm_exit(); +} + +module_init(kvm_core4_init); +module_exit(kvm_core4_exit); diff --git a/arch/sw_64/kvm/kvm_timer.c b/arch/sw_64/kvm/kvm_timer.c new file mode 100644 index 
000000000000..895be63cd8d1
--- /dev/null
+++ b/arch/sw_64/kvm/kvm_timer.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 - os kernal
+ * Author: fire3 yangzh
+ */
+#include
+#include
+#include
+#include
+
+/*
+ * The Guest Clock.
+ *
+ * There are two sources of virtual interrupts: the VMM injecting
+ * interrupts for virtual devices, and the Guest timer interrupt
+ * handled here.
+ *
+ * The Guest uses the HCALL_SET_CLOCKEVENT hypercall to tell us how long
+ * until its next timer interrupt (in ticks). We use the high-resolution
+ * timer infrastructure to set a callback at that time.
+ *
+ * 0 means "turn off the clock".
+ */
+
+void set_timer(struct kvm_vcpu *vcpu, unsigned long delta)
+{
+	ktime_t expires;
+
+	if (unlikely(delta == 0)) {
+		/* Clock event device is shutting down. */
+		hrtimer_cancel(&vcpu->arch.hrt);
+		return;
+	}
+
+	/* Convert clock event device ticks to nanoseconds */
+	delta = delta * NSEC_PER_SEC;
+	do_div(delta, vcpu->arch.vtimer_freq);
+
+	/*
+	 * We use wallclock time here, so the Guest might not be running for
+	 * all the time between now and the timer interrupt it asked for. This
+	 * is almost always the right thing to do.
+	 */
+
+	expires = ktime_add_ns(ktime_get_real(), delta);
+	vcpu->arch.timer_next_event = expires;
+	hrtimer_start(&vcpu->arch.hrt, expires, HRTIMER_MODE_ABS);
+}
+
+/* And this is the routine when we want to set an interrupt for the Guest. */
+void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+	/*
+	 * Next time the Guest runs, the core code will see if it can deliver
+	 * this interrupt.
+	 */
+	set_bit(irq, (vcpu->arch.irqs_pending));
+
+	/*
+	 * Make sure it sees it; it might be asleep (e.g. halted), or running
+	 * the Guest right now, in which case kick_process() will knock it out.
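+	 *
+	 * set_bit() is atomic, so no further locking is needed here; the
+	 * kick only has to make the vcpu re-check its pending bits.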
+ */ + kvm_vcpu_kick(vcpu); +} + +enum hrtimer_restart clockdev_fn(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu; + ktime_t now, delta; + + vcpu = container_of(timer, struct kvm_vcpu, arch.hrt); + + now = ktime_get_real(); + + if (now < vcpu->arch.timer_next_event) { + delta = vcpu->arch.timer_next_event - now; + hrtimer_forward_now(timer, delta); + return HRTIMER_RESTART; + } + + set_interrupt(vcpu, SW64_KVM_IRQ_TIMER); + return HRTIMER_NORESTART; +} diff --git a/arch/sw_64/kvm/mmio.c b/arch/sw_64/kvm/mmio.c new file mode 100644 index 000000000000..21ad89722f9a --- /dev/null +++ b/arch/sw_64/kvm/mmio.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include + +static unsigned long mmio_read_buf(char *buf, unsigned int len) +{ + unsigned long data = 0; + union { + u16 hword; + u32 word; + u64 dword; + } tmp; + + switch (len) { + case 1: + data = buf[0]; + break; + case 2: + memcpy(&tmp.hword, buf, len); + data = tmp.hword; + break; + case 4: + memcpy(&tmp.word, buf, len); + data = tmp.word; + break; + case 8: + memcpy(&tmp.dword, buf, len); + data = tmp.dword; + break; + } + + return data; +} + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long data; + unsigned int len; + + if (!run->mmio.is_write) { + len = run->mmio.len; + if (len > sizeof(unsigned long)) + return -EINVAL; + + data = mmio_read_buf(run->mmio.data, len); + vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); + } + + vcpu->stat.mmio_exits++; + vcpu->arch.regs.pc += 4; + + return 0; +} + +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct hcall_args *hargs) +{ + int ret; + +#ifdef CONFIG_SUBARCH_C3B + run->mmio.phys_addr = hargs->arg1 & 0xfffffffffffffUL; + sw64_decode(vcpu, hargs->arg2, run); +#elif defined(CONFIG_SUBARCH_C4) + run->mmio.phys_addr = read_csr(CSR_DVA) & 0xfffffffffffffUL; + sw64_decode(vcpu, 0, run); +#endif + if (run->mmio.is_write) + ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, + run->mmio.len, run->mmio.data); + else + ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, + run->mmio.len, run->mmio.data); + + if (!ret) { + /* We handled the access successfully in the kernel. */ + kvm_handle_mmio_return(vcpu, run); + return 1; + } + + run->exit_reason = KVM_EXIT_MMIO; + return 0; +} diff --git a/arch/sw_64/kvm/mmu.c b/arch/sw_64/kvm/mmu.c new file mode 100644 index 000000000000..b0b492a4fbff --- /dev/null +++ b/arch/sw_64/kvm/mmu.c @@ -0,0 +1,1561 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 - os kernal + * Author: lff + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
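
Before moving into mmu.c, one detail of mmio.c above is worth isolating: mmio_read_buf() widens a 1/2/4/8-byte MMIO payload through memcpy() into a correctly typed temporary, presumably to avoid unaligned loads. A self-contained sketch of the same idea (hypothetical names, plain C):

	#include <stdint.h>
	#include <string.h>

	/* Widen an MMIO payload of len bytes into a register-sized value.
	 * Unsupported widths yield 0, matching the original switch, which
	 * has no default arm and leaves data initialized to 0. */
	static uint64_t mmio_widen(const uint8_t *buf, unsigned int len)
	{
		uint16_t h;
		uint32_t w;
		uint64_t d;

		switch (len) {
		case 1: return buf[0];
		case 2: memcpy(&h, buf, 2); return h;
		case 4: memcpy(&w, buf, 4); return w;
		case 8: memcpy(&d, buf, 8); return d;
		default: return 0;
		}
	}
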
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#define KVM_APT_FLAG_LOGGING_ACTIVE (1UL << 1) + +static bool memslot_is_logging(struct kvm_memory_slot *memslot) +{ + return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); +} + +/* + * Return values of kvm_handle_mmio_page_fault and mmu.page_fault: + * RET_AF_RETRY: let CPU fault again on the address. + * RET_AF_EMULATE: mmio page fault, emulate the instruction directly. + * + * For kvm_handle_mmio_page_fault only: + * RET_AF_INVALID: the spte is invalid, let the real page fault path update it. + */ +enum { + RET_AF_RETRY = 0, + RET_AF_EMULATE = 1, + RET_AF_INVALID = 2, +}; + +/** + * apt_dissolve_pmd() - clear and flush huge PMD entry + * @kvm: pointer to kvm structure. + * @addr: IPA + * @pmd: pmd pointer for IPA + * + * Function clears a PMD entry, flushes TLBs. + */ +static void apt_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) +{ + int i; + + if (!pmd_trans_huge(*pmd)) + return; + + if (pmd_trans_cont(*pmd)) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pmd)); +} + +/** + * apt_dissolve_pud() - clear and flush huge PUD entry + * @kvm: pointer to kvm structure. + * @addr: IPA + * @pud: pud pointer for IPA + * + * Function clears a PUD entry, flushes TLBs. + */ +static void apt_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) +{ + if (!pud_huge(*pudp)) + return; + + pud_clear(pudp); + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pudp)); +} + +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, + int min, int max) +{ + void *page; + + BUG_ON(max > KVM_NR_MEM_OBJS); + if (cache->nobjs >= min) + return 0; + while (cache->nobjs < max) { + page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -ENOMEM; + cache->objects[cache->nobjs++] = page; + } + return 0; +} + +static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) +{ + while (mc->nobjs) + free_page((unsigned long)mc->objects[--mc->nobjs]); +} + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) +{ + mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); +} + +static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) +{ + void *p; + + BUG_ON(!mc || !mc->nobjs); + p = mc->objects[--mc->nobjs]; + return p; +} + +static void unmap_apt_ptes(struct kvm *kvm, pmd_t *pmd, + phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte, *start_pte; + struct page *ptr_page; + + start_pte = pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + /* Do we need WRITE_ONCE(pte, 0)? 
*/ + set_pte(pte, __pte(0)); + put_page(virt_to_page(pte)); + } + } while (pte++, addr += PAGE_SIZE, addr != end); + + ptr_page = virt_to_page(start_pte); + if (page_count(ptr_page) == 1) { + pte_t *pte_table = pte_offset_kernel(pmd, 0); + + pmd_clear(pmd); + free_page((unsigned long)pte_table); + put_page(virt_to_page(pmd)); + } +} + +static void unmap_apt_pmds(struct kvm *kvm, pud_t *pud, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t next; + pmd_t *pmd, *start_pmd; + struct page *ptr_page; + int i; + + start_pmd = pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (pmd_trans_huge(*pmd)) { + if (pmd_trans_cont(*pmd)) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + /* Do we need flush tlb???? edited by lff */ + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pmd)); + } else { + unmap_apt_ptes(kvm, pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); + + ptr_page = virt_to_page(start_pmd); + if (page_count(ptr_page) == 1) { + pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0UL); + + pud_clear(pud); + free_page((unsigned long)pmd_table); + put_page(virt_to_page(pud)); + } +} + +static void unmap_apt_puds(struct kvm *kvm, p4d_t *p4d, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t next; + pud_t *pud, *start_pud; + struct page *ptr_page; + + start_pud = pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + pud_clear(pud); + /* Do we need flush tlb???? edited by lff */ + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pud)); + } else { + unmap_apt_pmds(kvm, pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); + + ptr_page = virt_to_page(start_pud); + if (page_count(ptr_page) == 1) { + pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL); + + p4d_clear(p4d); + kvm_flush_remote_tlbs(kvm); + free_page((unsigned long)pud_table); + put_page(virt_to_page(p4d)); + } +} + +/** + * unmap_apt_range -- Clear addtional page table entries to unmap a range + * @kvm: The VM pointer + * @start: The intermediate physical base address of the range to unmap + * @size: The size of the area to unmap + * + * Clear a range of apt mappings, lowering the various ref-counts. Must + * be called while holding mmu_lock (unless for freeing the apt pgd before + * destroying the VM), otherwise another faulting VCPU may come in and mess + * with things behind our backs. + */ +static void unmap_apt_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ + pgd_t *pgd; + p4d_t *p4d; + phys_addr_t addr = start, end = start + size; + phys_addr_t next; + + assert_spin_locked(&kvm->mmu_lock); + WARN_ON(size & ~PAGE_MASK); + + pgd = kvm->arch.pgd + pgd_index(addr); + p4d = p4d_offset(pgd, addr); + do { + /* + * Make sure the page table is still active, as another thread + * could have possibly freed the page table, while we released + * the lock. + */ + if (!READ_ONCE(kvm->arch.pgd)) + break; + next = p4d_addr_end(addr, end); + if (!p4d_none(*p4d)) + unmap_apt_puds(kvm, p4d, addr, next); + /* + * If the range is too large, release the kvm->mmu_lock + * to prevent starvation and lockup detector warnings. 
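
The unmap walkers above all advance with pmd_addr_end()/pud_addr_end()/p4d_addr_end(), which compute "the next table boundary after addr, clamped to end". The underlying arithmetic, shown standalone (the span below is purely illustrative, not SW64's real table geometry):

	/* Next SZ-aligned boundary after addr, clamped to end; this is the
	 * generic pXd_addr_end() formula. The "- 1" on both sides keeps the
	 * comparison correct when the boundary wraps to 0 at the very top
	 * of the address space. */
	#define SZ (1UL << 29)	/* illustrative span covered by one entry */

	static unsigned long table_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long boundary = (addr + SZ) & ~(SZ - 1);

		return boundary - 1 < end - 1 ? boundary : end;
	}
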
+ */ + if (next != end) + cond_resched_lock(&kvm->mmu_lock); + } while (pgd++, addr = next, addr != end); +} + +static void apt_unmap_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + hva_t hva = memslot->userspace_addr; + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t size = PAGE_SIZE * memslot->npages; + hva_t reg_end = hva + size; + + /* + * A memory region could potentially cover multiple VMAs, and any holes + * between them, so iterate over all of them to find out if we should + * unmap any of them. + * + * +--------------------------------------------+ + * +---------------+----------------+ +----------------+ + * | : VMA 1 | VMA 2 | | VMA 3 : | + * +---------------+----------------+ +----------------+ + * | memory region | + * +--------------------------------------------+ + */ + do { + struct vm_area_struct *vma = find_vma(current->mm, hva); + hva_t vm_start, vm_end; + + if (!vma || vma->vm_start >= reg_end) + break; + + /* + * Take the intersection of this VMA with the memory region + */ + vm_start = max(hva, vma->vm_start); + vm_end = min(reg_end, vma->vm_end); + + if (!(vma->vm_flags & VM_PFNMAP)) { + gpa_t gpa = addr + (vm_start - memslot->userspace_addr); + + unmap_apt_range(kvm, gpa, vm_end - vm_start); + } + hva = vm_end; + } while (hva < reg_end); +} + +/** + * apt_unmap_vm - Unmap Additional Stage RAM mappings + * @kvm: The struct kvm pointer + * + * Go through the memregions and unmap any reguler RAM + * backing memory already mapped to the VM. + */ +void apt_unmap_vm(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + down_read(¤t->mm->mmap_lock); + spin_lock(&kvm->mmu_lock); + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) + apt_unmap_memslot(kvm, memslot); + spin_unlock(&kvm->mmu_lock); + up_read(¤t->mm->mmap_lock); + srcu_read_unlock(&kvm->srcu, idx); +} + +static pud_t *apt_get_pud(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr) +{ + p4d_t *p4d; + pud_t *pud; + + pgd += pgd_index(addr); + if (pgd_none(*pgd)) { + /* Not used on SW64 yet */ + VM_BUG_ON(pgd); + return NULL; + } + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + if (!cache) + return NULL; + pud = mmu_memory_cache_alloc(cache); + p4d_populate(NULL, p4d, pud); + get_page(virt_to_page(p4d)); + } + return pud_offset(p4d, addr); +} + +static pmd_t *apt_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, unsigned long sz) +{ + pud_t *pud; + pmd_t *pmd; + + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud || pud_huge(*pud)) + return NULL; + + if (pud_none(*pud)) { + if (!cache) + return NULL; + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + return pmd_offset(pud, addr); +} + +static bool kvm_is_write_fault(unsigned long access_type) +{ + if (access_type == AF_WRITE_ACCESS_TYPE) + return true; + + return false; +} + +static bool kvm_is_exec_fault(unsigned long access_type) +{ + if (access_type == AF_EXEC_ACCESS_TYPE) + return true; + + return false; +} +/** + * apt_wp_ptes - write protect PMD range + * @pmd: pointer to pmd entry + * @addr: range start address + * @end: range end address + */ +static void apt_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + if (!kvm_aptpte_readonly(pte)) + 
kvm_set_aptpte_readonly(pte); + } + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +/** + * apt_wp_pmds - write protect PUD range + * @pud: pointer to pud entry + * @addr: range start address + * @end: range end address + */ +static void apt_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) +{ + pmd_t *pmd; + phys_addr_t next; + + pmd = pmd_offset(pud, addr); + + do { + next = pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (pmd_trans_huge(*pmd)) { + if (!kvm_aptpmd_readonly(pmd)) + kvm_set_aptpmd_readonly(pmd); + } else { + apt_wp_ptes(pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); +} + +/** + * apt_wp_puds - write protect PGD range + * @pgd: pointer to pgd entry + * @addr: range start address + * @end: range end address + * + * Process PUD entries, for a huge PUD we cause a panic. + */ +static void apt_wp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end) +{ + pud_t *pud; + phys_addr_t next; + + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + if (!kvm_aptpud_readonly(pud)) + kvm_set_aptpud_readonly(pud); + } else { + /* TODO:PUD not supported, revisit later if supported */ +// BUG_ON(pud_trans_huge(*pud)); + apt_wp_pmds(pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); +} + +/** + * apt_wp_range() - write protect apt memory region range + * @kvm: The KVM pointer + * @addr: Start address of range + * @end: End address of range + */ +static void apt_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) +{ + pgd_t *pgd; + p4d_t *p4d; + phys_addr_t next; + + pgd = kvm->arch.pgd + pgd_index(addr); + p4d = p4d_offset(pgd, addr); + + do { + /* + * Release kvm_mmu_lock periodically if the memory region is + * large. Otherwise, we may see kernel panics with + * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, + * CONFIG_LOCKDEP. Additionally, holding the lock too long + * will also starve other vCPUs. We have to also make sure + * that the page tables are not freed while we released + * the lock. + */ + cond_resched_lock(&kvm->mmu_lock); + if (!READ_ONCE(kvm->arch.pgd)) + break; + next = p4d_addr_end(addr, end); + if (p4d_present(*p4d)) + apt_wp_puds(p4d, addr, next); + } while (p4d++, addr = next, addr != end); +} + +/** + * kvm_mmu_wp_memory_region() - write protect apt entries for memory slot + * @kvm: The KVM pointer + * @slot: The memory slot to write protect + * + * Called to start logging dirty pages after memory region + * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns + * all present PMD and PTEs are write protected in the memory region. + * Afterwards read of dirty page log can be called. + * + * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, + * serializing operations for VM memory regions. 
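
The apt_wp_*() cascade above ultimately clears one permission bit per leaf entry. As a generic sketch of what kvm_set_aptpte_readonly()/kvm_aptpte_readonly() boil down to (the bit position below is an assumption for illustration only; SW64's real PTE layout is defined elsewhere in the port):

	#define APT_PTE_WRITE (1UL << 5)	/* assumed write-permission bit */

	static inline unsigned long apt_pte_wrprotect(unsigned long pteval)
	{
		return pteval & ~APT_PTE_WRITE;
	}

	static inline int apt_pte_readonly(unsigned long pteval)
	{
		return !(pteval & APT_PTE_WRITE);
	}

Subsequent guest writes then fault, which is exactly what dirty logging wants: the fault handler remaps the page writable and records it as dirty.
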
+ */
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
+	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	apt_wp_range(kvm, start, end);
+	spin_unlock(&kvm->mmu_lock);
+	/* Other vCPUs must be told to flush their TLBs, via the request mechanism */
+	kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_mark_migration(struct kvm *kvm, int mark)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long cpu;
+
+	kvm_for_each_vcpu(cpu, vcpu, kvm)
+		vcpu->arch.migration_mark = mark;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				   struct kvm_memory_slot *old,
+				   const struct kvm_memory_slot *new,
+				   enum kvm_mr_change change)
+{
+	/*
+	 * At this point memslot has been committed and there is an
+	 * allocated dirty_bitmap[], dirty pages will be tracked while the
+	 * memory slot is write protected.
+	 */
+	if (change == KVM_MR_FLAGS_ONLY && (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+				new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+		kvm_mark_migration(kvm, 1);
+		kvm_mmu_wp_memory_region(kvm, new->id);
+	}
+	/* If dirty logging has been stopped, do nothing for now. */
+	if ((change != KVM_MR_DELETE)
+	    && (old->flags & KVM_MEM_LOG_DIRTY_PAGES)
+	    && (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))) {
+		kvm_mark_migration(kvm, 0);
+		return;
+	}
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				   struct kvm_memory_slot *slot)
+{
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+//	flush_apt_tlbs(kvm);
+	unmap_apt_range(kvm, gpa, size);
+	spin_unlock(&kvm->mmu_lock);
+}
+
+/**
+ * kvm_alloc_addtional_stage_pgd - allocate level-1 table for additional stage translation.
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Allocates only the additional stage HW PGD level table(s) (can support full
+ * 48-bit input addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_addtional_stage_pgd(struct kvm *kvm)
+{
+	pgd_t *pgd;
+
+	if (kvm->arch.pgd != NULL) {
+		kvm_err("kvm_arch already initialized?\n");
+		return -EINVAL;
+	}
+
+	/* Allocate the HW PGD, making sure that each page gets its own refcount */
+	pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
+		return -ENOMEM;
+
+	kvm->arch.pgd = pgd;
+	return 0;
+}
+
+/**
+ * kvm_free_apt_pgd - free all apt tables
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
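
kvm_arch_commit_memory_region() above keys off two memslot flag transitions; written out as predicates they are easier to see (sketch only; KVM_MEM_LOG_DIRTY_PAGES is the real KVM memslot flag, the helpers are hypothetical):

	/* Dirty logging was just switched on for this slot. */
	static int logging_started(unsigned int old_flags, unsigned int new_flags)
	{
		return !(old_flags & KVM_MEM_LOG_DIRTY_PAGES) &&
			(new_flags & KVM_MEM_LOG_DIRTY_PAGES);
	}

	/* Dirty logging was just switched off for this slot. */
	static int logging_stopped(unsigned int old_flags, unsigned int new_flags)
	{
		return (old_flags & KVM_MEM_LOG_DIRTY_PAGES) &&
			!(new_flags & KVM_MEM_LOG_DIRTY_PAGES);
	}

On "started", the whole slot is write-protected up front and the vCPUs are marked as migrating; on "stopped", only the migration mark is cleared, and any entries that are still read-only are left to fault back to writable lazily.
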
+ */ +void kvm_free_apt_pgd(struct kvm *kvm) +{ + void *pgd = NULL; + + spin_lock(&kvm->mmu_lock); + if (kvm->arch.pgd) { + unmap_apt_range(kvm, 0, KVM_PHYS_SIZE); + pgd = READ_ONCE(kvm->arch.pgd); + kvm->arch.pgd = NULL; + } + spin_unlock(&kvm->mmu_lock); + + /* Free the HW pgd, one page at a time */ + if (pgd) + free_pages_exact(pgd, PAGE_SIZE); +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_free_apt_pgd(kvm); +} + +static void kvm_send_hwpoison_signal(unsigned long address, + struct vm_area_struct *vma) +{ + kernel_siginfo_t info; + + clear_siginfo(&info); + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_MCEERR_AR; + info.si_addr = (void __user *)address; + + if (is_vm_hugetlb_page(vma)) + info.si_addr_lsb = huge_page_shift(hstate_vma(vma)); + else + info.si_addr_lsb = PAGE_SHIFT; + + send_sig_info(SIGBUS, &info, current); +} + +static bool fault_supports_apt_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, + unsigned long map_size) +{ + gpa_t gpa_start; + hva_t uaddr_start, uaddr_end; + size_t size; + + /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ + if (map_size == PAGE_SIZE) + return true; + + size = memslot->npages * PAGE_SIZE; + + gpa_start = memslot->base_gfn << PAGE_SHIFT; + + uaddr_start = memslot->userspace_addr; + uaddr_end = uaddr_start + size; + + /* + * Pages belonging to memslots that don't have the same alignment + * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 + * PMD/PUD entries, because we'll end up mapping the wrong pages. + * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SHIFT: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this incorrect + * mapping: + * d -> f + * e -> g + * f -> h + */ + if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + return false; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). + * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva & ~(map_size - 1)) >= uaddr_start && + (hva & ~(map_size - 1)) + map_size <= uaddr_end; +} + +/* + * apt_get_leaf_entry - walk the stage2 VM page tables and return + * true if a valid and present leaf-entry is found. A pointer to the + * leaf-entry is returned in the appropriate level variable - pudpp, + * pmdpp, ptepp. 
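
The alignment test in fault_supports_apt_huge_mapping() above compares offsets within one block. A worked example (hypothetical addresses, 2 MiB block size for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	/* True if gpa and hva share the same offset inside a map_size
	 * block, a precondition for mapping them with one block entry. */
	static bool same_block_offset(uint64_t gpa_start, uint64_t uaddr_start,
				      uint64_t map_size)
	{
		return (gpa_start & (map_size - 1)) ==
		       (uaddr_start & (map_size - 1));
	}

	/*
	 * same_block_offset(0x40200000, 0x7f5340100000, 1 << 21) is false:
	 * the guest-physical offset within the block is 0x0, the userspace
	 * offset is 0x100000, so a block mapping would translate to the
	 * wrong host pages.
	 */
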
+ */ +static bool apt_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + *pudpp = NULL; + *pmdpp = NULL; + *ptepp = NULL; + + pudp = apt_get_pud(kvm->arch.pgd, NULL, addr); + if (!pudp || pud_none(*pudp) || !pud_present(*pudp)) + return false; + + if (pud_huge(*pudp)) { + *pudpp = pudp; + return true; + } + + pmdp = pmd_offset(pudp, addr); + if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) + return false; + + if (pmd_trans_huge(*pmdp)) { + *pmdpp = pmdp; + return true; + } + + ptep = pte_offset_kernel(pmdp, addr); + if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) + return false; + + *ptepp = ptep; + return true; +} + +static bool apt_is_exec(struct kvm *kvm, phys_addr_t addr) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + bool found; + + found = apt_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); + if (!found) + return false; + + if (pudp) + return kvm_pud_exec(pudp); + else if (pmdp) + return kvm_pmd_exec(pmdp); + else + return kvm_pte_exec(ptep); +} + +static int apt_set_pte_fast(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + int inv_level = ((read_csr(CSR_AS_INFO)) >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK; + unsigned long inv_hpa = read_csr(CSR_AS_INFO) & AF_ENTRY_ADDR_MASK; + + VM_BUG_ON(logging_active && !cache); + + if (inv_level == 1) { + pud = (pud_t *)(inv_hpa | PAGE_OFFSET); + goto find_pud; + } else if (inv_level == 2) { + pmd = (pmd_t *)(inv_hpa | PAGE_OFFSET); + goto find_pmd; + } else if (inv_level == 3) { + pte = (pte_t *)(inv_hpa | PAGE_OFFSET); + goto find_pte; + } + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + +find_pud: + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + +find_pmd: + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + +find_pte: + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? */ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? 
*/ + set_pte(pte, *new_pte); + return 0; +} + +static int apt_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + + VM_BUG_ON(logging_active && !cache); + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? */ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? */ + set_pte(pte, *new_pte); + return 0; +} + + + +static int apt_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + *cache, phys_addr_t addr, const pmd_t *new_pmd, unsigned long sz) +{ + pmd_t *pmd, old_pmd, *ori_pmd; + int i; +retry: + pmd = apt_get_pmd(kvm, cache, addr, sz); + VM_BUG_ON(!pmd); + ori_pmd = pmd; + old_pmd = *pmd; + if (pmd_present(old_pmd)) { + /* + * If we already have PTE level mapping for this block, + * we must unmap it to avoid inconsistent TLB state and + * leaking the table page. We could end up in this situation + * if the memory slot was marked for dirty logging and was + * reverted, leaving PTE level mappings for the pages accessed + * during the period. So, unmap the PTE level mapping for this + * block and retry, as we could have released the upper level + * table in the process. + * + * Normal THP split/merge follows mmu_notifier callbacks and do + * get handled accordingly. + */ + if (!pmd_trans_huge(old_pmd)) { + unmap_apt_range(kvm, addr & PMD_MASK, PMD_SIZE); + goto retry; + } + /* + * Multiple vcpus faulting on the same PMD entry, can + * lead to them sequentially updating the PMD with the + * same value. Following the break-before-make + * (pmd_clear() followed by tlb_flush()) process can + * hinder forward progress due to refaults generated + * on missing translations. + * + * Skip updating the page table if the entry is + * unchanged. + */ + if (pmd_val(old_pmd) == pmd_val(*new_pmd)) + return 0; + + /* + * Mapping in huge pages should only happen through a + * fault. 
If a page is merged into a transparent huge + * page, the individual subpages of that huge page + * should be unmapped through MMU notifiers before we + * get here. + * + * Merging of CompoundPages is not supported; they + * should become splitting first, unmapped, merged, + * and mapped back in on-demand. + */ + VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); + + if (sz == CONT_PMD_SIZE) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pmd)); + } + + /* Do we need WRITE_ONCE(pmd, new_pmd)? */ + if (sz == CONT_PMD_SIZE) { + for (i = 0; i < CONT_PMDS; i++, ori_pmd++) + set_pmd(ori_pmd, *new_pmd); + } else + set_pmd(pmd, *new_pmd); + return 0; +} + +static int apt_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pud_t *new_pudp) +{ + pud_t *pudp, old_pud; + +retry: + pudp = apt_get_pud(kvm->arch.pgd, cache, addr); + VM_BUG_ON(!pudp); + + old_pud = *pudp; + + /* + * A large number of vcpus faulting on the same stage 2 entry, + * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). + * Skip updating the page tables if there is no change. + */ + if (pud_val(old_pud) == pud_val(*new_pudp)) + return 0; + + if (pud_present(old_pud)) { + /* + * If we already have table level mapping for this block, unmap + * the range for this block and retry. + */ + if (!pud_huge(old_pud)) { + unmap_apt_range(kvm, addr & PUD_MASK, PUD_SIZE); + goto retry; + } + +// WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); + pud_clear(pudp); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pudp)); + } + + set_pud(pudp, *new_pudp); + return 0; +} + +static unsigned long +transparent_hugepage_adjust(struct kvm_memory_slot *memslot, + unsigned long hva, kvm_pfn_t *pfnp, + phys_addr_t *gpap) +{ + kvm_pfn_t pfn = *pfnp; + struct page *page = pfn_to_page(pfn); + + /* + * Make sure the adjustment is done only for THP pages. Also make + * sure that the HVA and IPA are sufficiently aligned and that the + * block map is contained within the memslot. + */ + if (!PageHuge(page) && PageTransCompoundMap(page) && + fault_supports_apt_huge_mapping(memslot, hva, PMD_SIZE)) { + /* + * The address we faulted on is backed by a transparent huge + * page. However, because we map the compound huge page and + * not the individual tail page, we need to transfer the + * refcount to the head page. We have to be careful that the + * THP doesn't start to split while we are adjusting the + * refcounts. + * + * We are sure this doesn't happen, because mmu_notifier_retry + * was successful and we are holding the mmu_lock, so if this + * THP is trying to split, it will be blocked in the mmu + * notifier before touching any of the pages, specifically + * before being able to call __split_huge_page_refcount(). + * + * We can therefore safely transfer the refcount from PG_tail + * to PG_head and switch the pfn from a tail page to the head + * page accordingly. 
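
The code that follows transfers the reference from a THP tail page to its head by rounding both the guest physical address and the pfn down to a PMD-sized boundary. The pfn half of that, standalone (the PTRS_PER_PMD value is illustrative only):

	/* Round a pfn inside a huge page down to the compound head pfn. */
	static unsigned long head_pfn(unsigned long pfn, unsigned long ptrs_per_pmd)
	{
		return pfn & ~(ptrs_per_pmd - 1);
	}

	/* e.g. with ptrs_per_pmd == 512: head_pfn(0x12345, 512) == 0x12200 */
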
+ */ + *gpap &= PMD_MASK; + kvm_release_pfn_clean(pfn); + pfn &= ~(PTRS_PER_PMD - 1); + kvm_get_pfn(pfn); + *pfnp = pfn; + return PMD_SIZE; + } + + return PAGE_SIZE; +} + +static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_gpa, + struct kvm_memory_slot *memslot, unsigned long hva, + unsigned long fault_status) +{ + int ret; + bool write_fault, exec_fault, writable, force_pte = false; + unsigned long mmu_seq; + gfn_t gfn = fault_gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + struct vm_area_struct *vma; + kvm_pfn_t pfn; + pgprot_t mem_type = PAGE_READONLY; + bool logging_active = memslot_is_logging(memslot); + unsigned long vma_pagesize, flags = 0; + unsigned long as_info, access_type; + unsigned int vma_shift; + + as_info = read_csr(CSR_AS_INFO); + access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK; + write_fault = kvm_is_write_fault(access_type); + exec_fault = kvm_is_exec_fault(access_type); + VM_BUG_ON(write_fault && exec_fault); + + if (fault_status == AF_STATUS_FOR) { + kvm_err("Unexpected APT read permission error\n"); + return -EFAULT; + } + + /* Let's check if we will get back a huge page backed by hugetlbfs */ + down_read(¤t->mm->mmap_lock); + vma = find_vma_intersection(current->mm, hva, hva + 1); + if (unlikely(!vma)) { + kvm_err("Failed to find VMA for hva 0x%lx\n", hva); + up_read(¤t->mm->mmap_lock); + return -EFAULT; + } + + if (is_vm_hugetlb_page(vma)) + vma_shift = huge_page_shift(hstate_vma(vma)); + else + vma_shift = PAGE_SHIFT; + + vma_pagesize = 1ULL << vma_shift; + if (logging_active || (vma->vm_flags & VM_PFNMAP) || + !fault_supports_apt_huge_mapping(memslot, hva, vma_pagesize)) { + force_pte = true; + vma_pagesize = PAGE_SIZE; + } + + if (vma_pagesize == PMD_SIZE || vma_pagesize == CONT_PMD_SIZE || vma_pagesize == PUD_SIZE) + gfn = (fault_gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; + up_read(¤t->mm->mmap_lock); + /* We need minimum second+third level pages */ + ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, + KVM_NR_MEM_OBJS); + if (ret) + return ret; + + mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* + * Ensure the read of mmu_notifier_seq happens before we call + * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk + * the page we just got a reference to gets unmapped before we have a + * chance to grab the mmu_lock, which ensure that if the page gets + * unmapped afterwards, the call to kvm_unmap_hva will take it away + * from us again properly. This smp_rmb() interacts with the smp_wmb() + * in kvm_mmu_notifier_invalidate_. + */ + smp_rmb(); + + pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); + if (pfn == KVM_PFN_ERR_HWPOISON) { + kvm_send_hwpoison_signal(hva, vma); + return 0; + } + if (is_error_noslot_pfn(pfn)) + return -EFAULT; + + if (logging_active) { + /* + * Faults on pages in a memslot with logging enabled + * should not be mapped with huge pages (it introduces churn + * and performance degradation), so force a pte mapping. + */ + flags |= KVM_APT_FLAG_LOGGING_ACTIVE; + + /* + * Only actually map the page as writable if this was a write + * fault. + */ + if (!write_fault) + writable = false; + } + + spin_lock(&kvm->mmu_lock); + if (mmu_notifier_retry(kvm, mmu_seq)) + goto out_unlock; + + /* + * If we are not forced to use page mapping, check if we are + * backed by a THP and thus use block mapping if possible. 
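
The mmu_notifier_seq handshake earlier in user_mem_abort() is a generation check: sample the counter before the sleeping page lookup, then revalidate it under mmu_lock before installing the mapping. Reduced to its bare shape (standalone C11 sketch, not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic unsigned long notifier_seq;	/* bumped by invalidations */

	/* Sample before the sleepy gfn->pfn lookup... */
	static unsigned long sample_seq(void)
	{
		return atomic_load(&notifier_seq);
	}

	/* ...and revalidate under the lock; on mismatch the caller drops
	 * its page reference and lets the guest refault. The real
	 * mmu_notifier_retry() also fails while an invalidation is merely
	 * in progress, not only after one has completed. */
	static bool seq_still_valid(unsigned long sampled)
	{
		return atomic_load(&notifier_seq) == sampled;
	}
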
+ */ + if (vma_pagesize == PAGE_SIZE && !force_pte) { + vma_pagesize = transparent_hugepage_adjust(memslot, hva, + &pfn, &fault_gpa); + } + + if (vma_pagesize == PUD_SIZE) { + pud_t new_pud = pfn_pud(pfn, mem_type); + + new_pud = pud_mkhuge(new_pud); + + if (writable) { + new_pud = kvm_pud_mkwrite(new_pud); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pud = kvm_pud_mkexec(new_pud); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pud = kvm_pud_mkexec(new_pud); + } + + ret = apt_set_pud_huge(kvm, memcache, fault_gpa, &new_pud); + } else if (vma_pagesize == CONT_PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + new_pmd = pmd_mkcont(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else if (vma_pagesize == PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else { + pte_t new_pte = pfn_pte(pfn, mem_type); + + if (writable) { + new_pte = kvm_pte_mkwrite(new_pte); + kvm_set_pfn_dirty(pfn); + mark_page_dirty(kvm, gfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pte = kvm_pte_mkexec(new_pte); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pte = kvm_pte_mkexec(new_pte); + } + + ret = apt_set_pte_fast(kvm, memcache, fault_gpa, &new_pte, flags); + if (!ret) + goto out_unlock; + } + +out_unlock: + spin_unlock(&kvm->mmu_lock); + kvm_set_pfn_accessed(pfn); + kvm_release_pfn_clean(pfn); + return ret; +} + +/** + * kvm_handle_guest_abort - handles all 2nd stage aborts + * @vcpu: the VCPU pointer + * @run: the kvm_run structure + * + * Any abort that gets to the host is almost guaranteed to be caused by a + * missing second stage translation table entry, which can mean that either the + * guest simply needs more memory and we must allocate an appropriate page or it + * can mean that the guest tried to access I/O memory, which is emulated by user + * space. The distinction is based on the IPA causing the fault and whether this + * memory region has been registered as standard RAM by user space. 
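
kvm_handle_guest_abort() below begins by cracking CSR_AS_INFO into fields with shifts and masks. The generic form of that decode (the actual field layout is carried by the port's AF_*_SHIFT/AF_*_MASK constants; the helper here is only illustrative):

	static inline unsigned long csr_field(unsigned long reg,
					      unsigned int shift,
					      unsigned long mask)
	{
		return (reg >> shift) & mask;
	}

	/*
	 * e.g.  access_type = csr_field(as_info, AF_ACCESS_TYPE_SHIFT,
	 *                               AF_ACCESS_TYPE_MASK);
	 */
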
+ */ +#ifdef CONFIG_SUBARCH_C4 +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long as_info; /* the value of CSR: AS_INFO */ + unsigned int access_type, inv_level; + unsigned int fault_status; + unsigned long fault_entry_addr; + phys_addr_t fault_gpa; + struct kvm_memory_slot *memslot; + unsigned long hva; + bool write_fault, writable; + gfn_t gfn; + + int ret, idx; + + as_info = read_csr(CSR_AS_INFO); + access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK; + inv_level = (as_info >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK; + fault_status = (as_info >> AF_FAULT_STATUS_SHIFT) & AF_FAULT_STATUS_MASK; + fault_entry_addr = (as_info & AF_ENTRY_ADDR_MASK) >> 3; + + fault_gpa = read_csr(CSR_EXC_GPA); + idx = srcu_read_lock(&vcpu->kvm->srcu); + + gfn = fault_gpa >> PAGE_SHIFT; + memslot = gfn_to_memslot(vcpu->kvm, gfn); + hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); + + write_fault = kvm_is_write_fault(access_type); + + /* The memory slot for IO doesn't register in memory region + * with kvm, if hva == KVM_HVA_ERR_BAD, the gpa used for MMIO + * needs emulation. + */ + + if (hva == KVM_HVA_ERR_BAD) { + ret = io_mem_abort(vcpu, run, NULL); + goto out_unlock; + } + /* Userspace should not be able to register out-of-bounds IPAs */ + VM_BUG_ON(fault_gpa >= KVM_PHYS_SIZE); + + ret = user_mem_abort(vcpu, fault_gpa, memslot, hva, fault_status); + if (ret == 0) + ret = 1; +out_unlock: + srcu_read_unlock(&vcpu->kvm->srcu, idx); + return ret; +} +#endif +static int handle_hva_to_gpa(struct kvm *kvm, unsigned long start, unsigned long end, + int (*handler)(struct kvm *kvm, gpa_t gpa, u64 size, void *data), + void *data) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int ret = 0; + + slots = kvm_memslots(kvm); + + /* we only care about the pages that the guest sees */ + kvm_for_each_memslot(memslot, slots) { + unsigned long hva_start, hva_end; + gfn_t gpa; + + hva_start = max(start, memslot->userspace_addr); + hva_end = min(end, memslot->userspace_addr + + (memslot->npages << PAGE_SHIFT)); + if (hva_start >= hva_end) + continue; + + gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; + ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); + } + + return ret; +} + +static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + unmap_apt_range(kvm, gpa, size); + return 0; +} + +int kvm_unmap_hva_range(struct kvm *kvm, + unsigned long start, unsigned long end, bool blockable) +{ + if (!kvm->arch.pgd) + return 0; + + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + return 1; +} + +static int apt_ptep_test_and_clear_young(pte_t *pte) +{ + if (pte_young(*pte)) { + *pte = pte_mkold(*pte); + return 1; + } + return 0; +} + +static int apt_pmdp_test_and_clear_young(pmd_t *pmd) +{ + return apt_ptep_test_and_clear_young((pte_t *)pmd); +} + +static int apt_pudp_test_and_clear_young(pud_t *pud) +{ + return apt_ptep_test_and_clear_young((pte_t *)pud); +} + +static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); + if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte)) + return 0; + + if (pud) + return apt_pudp_test_and_clear_young(pud); + else if (pmd) + return apt_pmdp_test_and_clear_young(pmd); + else + return apt_ptep_test_and_clear_young(pte); +} + +static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + 
pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); + if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte)) + return 0; + + if (pud) + return apt_pudp_test_and_clear_young(pud); + else if (pmd) + return apt_pmdp_test_and_clear_young(pmd); + else + return apt_ptep_test_and_clear_young(pte); +} + +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +{ + if (!kvm->arch.pgd) + return 0; + + return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + if (!kvm->arch.pgd) + return 0; + return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); +} + +static int kvm_set_apte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + pte_t *pte = (pte_t *)data; + + WARN_ON(size != PAGE_SIZE); + + apt_set_pte(kvm, NULL, gpa, pte, 0); + return 0; +} + +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ + unsigned long end = hva + PAGE_SIZE; + pte_t apt_pte; + + if (!kvm->arch.pgd) + return 0; + + apt_pte = pte_wrprotect(pte); + handle_hva_to_gpa(kvm, hva, end, &kvm_set_apte_handler, &apt_pte); + return 0; +} + +/** + * kvm_mmu_write_protect_pt_masked() - write protect dirty pages + * @kvm: The KVM pointer + * @slot: The memory slot associated with mask + * @gfn_offset: The gfn offset in memory slot + * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory + * slot to be write protected + * + * Walks bits set in mask write protects the associated pte's. Caller must + * acquire kvm_mmu_lock. + */ +static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) +{ + phys_addr_t base_gfn = slot->base_gfn + gfn_offset; + phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; + phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; + + apt_wp_range(kvm, start, end); +} + +/* + * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected + * dirty pages. + * + * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to + * enable dirty logging for them. + */ +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) +{ + kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); +} diff --git a/arch/sw_64/kvm/perf.c b/arch/sw_64/kvm/perf.c new file mode 100644 index 000000000000..730dd1feeccf --- /dev/null +++ b/arch/sw_64/kvm/perf.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for KVM. 
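
One step back: kvm_mmu_write_protect_pt_masked() above collapses a 64-page dirty mask into a single [first, last] page range via the lowest and highest set bits. Equivalent arithmetic with compiler builtins (sketch; mask is nonzero by contract, so ctz/clz are well defined):

	#include <stdint.h>

	static void mask_to_gfn_range(uint64_t base_gfn, uint64_t gfn_offset,
				      uint64_t mask,
				      uint64_t *first, uint64_t *last)
	{
		*first = base_gfn + gfn_offset + __builtin_ctzll(mask); /* __ffs */
		*last  = base_gfn + gfn_offset + 63 - __builtin_clzll(mask); /* __fls */
	}

Everything in between is write-protected too, even pages whose mask bit is clear; those are typically already read-only, so the extra work is harmless.
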
+ */ + +#include +#include + +#include + + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.regs.ps & 8) != 0; +} + +unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.regs.pc; +} + + + +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; +} diff --git a/arch/sw_64/kvm/sw64.c b/arch/sw_64/kvm/sw64.c new file mode 100644 index 000000000000..f6bfb2452938 --- /dev/null +++ b/arch/sw_64/kvm/sw64.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" +#include "irq.h" + +bool set_msi_flag; + + + +static unsigned long get_new_vpn_context(struct kvm_vcpu *vcpu, long cpu) +{ + unsigned long vpn = last_vpn(cpu); + unsigned long next = vpn + 1; + + if ((vpn & VPN_MASK) >= VPN_MASK) { + kvm_flush_tlb_all(); + next = (vpn & ~VPN_MASK) + VPN_FIRST_VERSION + 1; /* bypass 0 */ + } + last_vpn(cpu) = next; + return next; +} + +int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) +{ + set_bit(number, (vcpu->arch.irqs_pending)); + kvm_vcpu_kick(vcpu); + return 0; +} + +int kvm_arch_check_processor_compat(void *opaque) +{ + return 0; +} + +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + unsigned int vcid; + unsigned int vcpu_idx; + struct kvm_vcpu *vcpu = NULL; + int irq = e->msi.data & 0xff; + + vcid = (e->msi.address_lo & VT_MSIX_ADDR_DEST_ID_MASK) >> VT_MSIX_ADDR_DEST_ID_SHIFT; + vcpu_idx = vcid & 0x1f; + vcpu = kvm_get_vcpu(kvm, vcpu_idx); + + if (!vcpu) + return -EINVAL; + + return vcpu_interrupt_line(vcpu, irq, true); +} + +void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu) +{ + unsigned long vpn; + unsigned long vpnc; + long cpu = smp_processor_id(); + + vpn = last_vpn(cpu); + vpnc = vcpu->arch.vpnc[cpu]; + + if ((vpnc ^ vpn) & ~VPN_MASK) { + /* vpnc and cpu vpn not in the same version, get new vpnc and vpn */ + vpnc = get_new_vpn_context(vcpu, cpu); + vcpu->arch.vpnc[cpu] = vpnc; + } + + vpn = vpnc & VPN_MASK; + + /* Always update vpn */ + /* Just setup vcb, hardware CSR will be changed later in HMcode */ + kvm_sw64_update_vpn(vcpu, vpn); + + /* + * If vcpu migrate to a new physical cpu, the new physical cpu may keep + * old tlb entries for this vcpu's vpn, upn in the old tlb entries and + * current vcpu's upn may not in the same version. + * For now, we don't know the vcpu's upn version and the current version. + * If we keep track of the vcpu's upn version, the TLB-flush could be less. + * To be safe and correct, flush all tlb entries of current vpn for now. 
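
get_new_vpn_context() above implements a standard ASID-generation scheme: the low bits are the VPN the hardware sees, the high bits count rollovers. Standalone sketch (the 10-bit width is an assumption for illustration; the port defines the real VPN_MASK/VPN_FIRST_VERSION):

	#define SK_VPN_BITS		10	/* assumed width, illustration only */
	#define SK_VPN_MASK		((1UL << SK_VPN_BITS) - 1)
	#define SK_VPN_FIRST_VERSION	(1UL << SK_VPN_BITS)

	static unsigned long vpn_next(unsigned long last)
	{
		unsigned long next = last + 1;

		if ((last & SK_VPN_MASK) >= SK_VPN_MASK) {
			/* VPN space exhausted: the caller flushes all TLB
			 * entries, then numbering restarts in a fresh
			 * generation, skipping VPN 0. */
			next = (last & ~SK_VPN_MASK) + SK_VPN_FIRST_VERSION + 1;
		}
		return next;
	}

A vCPU whose cached vpnc carries an old generation (that is, (vpnc ^ vpn) & ~VPN_MASK is nonzero) simply allocates a fresh one, which is exactly the test in sw64_kvm_switch_vpn().
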
+ */ + + if (vcpu->arch.pcpu_id != cpu) { + tbivpn(0, 0, vpn); + vcpu->arch.pcpu_id = cpu; + vcpu->cpu = cpu; + } +} + +void check_vcpu_requests(struct kvm_vcpu *vcpu) +{ + unsigned long vpn; + long cpu = smp_processor_id(); + + if (kvm_request_pending(vcpu)) { + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + vpn = vcpu->arch.vpnc[cpu] & VPN_MASK; + tbivpn(0, 0, vpn); + } + } +} + + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return ((!bitmap_empty(vcpu->arch.irqs_pending, SWVM_IRQS) || !vcpu->arch.halted) + && !vcpu->arch.power_off); +} + +int kvm_arch_hardware_enable(void) +{ + return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ +} + +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r = 0; + + switch (ext) { + case KVM_CAP_IRQCHIP: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_SYNC_MMU: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + default: + r = 0; + } + + return r; +} + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(SW64_KVM_IRQ_TIMER, vcpu->arch.irqs_pending); +} + +int kvm_arch_hardware_setup(void *opaque) +{ + return 0; +} + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + if (type) + return -EINVAL; + + return kvm_sw64_init_vm(kvm); +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + return kvm_sw64_destroy_vm(kvm); +} + +long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} + +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) +{ + return 0; +} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ + kvm_mmu_free_memory_caches(vcpu); + hrtimer_cancel(&vcpu->arch.hrt); +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_arch_vcpu_free(vcpu); +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + /* Set up the timer for Guest */ + pr_info("vcpu: [%d], regs addr = %#lx, vcpucb = %#lx\n", vcpu->vcpu_id, + (unsigned long)&vcpu->arch.regs, (unsigned long)&vcpu->arch.vcb); + vcpu->arch.vtimer_freq = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; + hrtimer_init(&vcpu->arch.hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); + vcpu->arch.hrt.function = clockdev_fn; + vcpu->arch.tsk = current; + + vcpu->arch.vcb.soft_cid = vcpu->vcpu_id; + vcpu->arch.vcb.vcpu_irq_disabled = 1; + vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ + + return 0; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + int r = -EINVAL; + + switch (ue->type) { + case KVM_IRQ_ROUTING_MSI: + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + e->msi.flags = ue->flags; + e->msi.devid = ue->u.msi.devid; + set_msi_flag = true; + break; + default: + goto out; + } + r = 0; +out: + return r; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; /* not implemented yet */ +} + +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 
+{ + return 0; +} + + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + vcpu->cpu = cpu; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + /* + * The arch-generic KVM code expects the cpu field of a vcpu to be -1 + * if the vcpu is no longer assigned to a cpu. This is used for the + * optimized make_all_cpus_request path. + */ + vcpu->cpu = -1; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(&(vcpu->arch.regs), regs, sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(regs, &(vcpu->arch.regs), sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return 0; +} + + +/* + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. + */ +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + struct vcpucb *vcb = &(vcpu->arch.vcb); + struct hcall_args hargs; + int irq, ret; + bool more; + sigset_t sigsaved; + + /* Set guest vcb */ + /* vpn will update later when vcpu is running */ + vcpu_set_numa_affinity(vcpu); +#ifdef CONFIG_PERF_EVENTS + vcpu_load(vcpu); +#endif + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + + if (run->exit_reason == KVM_EXIT_MMIO) + kvm_handle_mmio_return(vcpu, run); + + run->exit_reason = KVM_EXIT_UNKNOWN; + ret = 1; + while (ret > 0) { + /* Check conditions before entering the guest */ + cond_resched(); + + preempt_disable(); + local_irq_disable(); + + if (signal_pending(current)) { + ret = -EINTR; + run->exit_reason = KVM_EXIT_INTR; + } + + if (ret <= 0) { + local_irq_enable(); + preempt_enable(); + continue; + } + + memset(&hargs, 0, sizeof(hargs)); + + clear_vcpu_irq(vcpu); + + if (vcpu->arch.restart == 1) { + /* handle reset vCPU */ + vcpu->arch.regs.pc = GUEST_RESET_PC; + vcpu->arch.restart = 0; + } + + irq = interrupt_pending(vcpu, &more); + if (irq < SWVM_IRQS) + try_deliver_interrupt(vcpu, irq, more); + + vcpu->arch.halted = 0; + + sw64_kvm_switch_vpn(vcpu); + check_vcpu_requests(vcpu); + guest_enter_irqoff(); + + /* update aptp before the guest runs */ + update_aptp((unsigned long)vcpu->kvm->arch.pgd); + + /* Enter the guest */ + trace_kvm_sw64_entry(vcpu->vcpu_id, vcpu->arch.regs.pc); + vcpu->mode = IN_GUEST_MODE; + + ret = __sw64_vcpu_run(__pa(vcb), &(vcpu->arch.regs), &hargs); + + /* Back from guest */ + vcpu->mode = OUTSIDE_GUEST_MODE; + + local_irq_enable(); + guest_exit_irqoff(); + + trace_kvm_sw64_exit(ret, vcpu->arch.regs.pc); + + preempt_enable(); + + /* ret = 0 indicate interrupt in guest mode, ret > 0 indicate hcall */ + ret = handle_exit(vcpu, run, ret, &hargs); + } + + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &sigsaved, NULL); + +#ifdef CONFIG_PERF_EVENTS + vcpu_put(vcpu); +#endif + + return ret; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + int r; + + switch (ioctl) { + case KVM_SW64_VCPU_INIT: + r = kvm_sw64_vcpu_reset(vcpu); + break; + case KVM_SW64_GET_VCB: + r = kvm_sw64_get_vcb(filp, arg); + break; + case KVM_SW64_SET_VCB: + r = 
kvm_sw64_set_vcb(filp, arg);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm __maybe_unused = filp->private_data;
+	long r;
+
+	switch (ioctl) {
+	case KVM_CREATE_IRQCHIP: {
+		struct kvm_irq_routing_entry routing;
+
+		memset(&routing, 0, sizeof(routing));
+		r = kvm_set_irq_routing(kvm, &routing, 0, 0);
+		break;
+	}
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+	return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
+{
+	/* Let implementation handle TLB/GVA invalidation */
+	kvm_arch_flush_shadow_memslot(kvm, memslot);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+	int r;
+
+	switch (ext) {
+	case KVM_CAP_IOEVENTFD:
+		r = 1;
+		break;
+	case KVM_CAP_NR_VCPUS:
+	case KVM_CAP_MAX_VCPUS:
+		r = KVM_MAX_VCPUS;
+		break;
+	default:
+		r = 0;
+	}
+
+	return r;
+}
+
+void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type)
+{
+	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(vcpu->kvm, target_vcpuid);
+
+	if (target_vcpu == NULL)
+		return;
+
+	if (type == II_RESET)
+		target_vcpu->arch.restart = 1;
+
+	vcpu_interrupt_line(target_vcpu, 1, 1);
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+			  bool line_status)
+{
+	u32 irq = irq_level->irq;
+	unsigned int irq_num;
+	struct kvm_vcpu *vcpu = NULL;
+	bool level = irq_level->level;
+
+	irq_num = irq;
+	/* target core for Intx is core0 */
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (!vcpu)
+		return -EINVAL;
+
+	return vcpu_interrupt_line(vcpu, irq_num, level);
+}
+
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+	KVM_GENERIC_VM_STATS()
+};
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+	KVM_GENERIC_VCPU_STATS(),
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vcpu_stats_desc),
+};
+
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+	return irqchip_in_kernel(kvm);
+}
diff --git a/arch/sw_64/kvm/trace.h b/arch/sw_64/kvm/trace.h
new file mode 100644
index 000000000000..2611df3d3fa5
--- /dev/null
+++ b/arch/sw_64/kvm/trace.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if 
!defined(_SW64_KVM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _SW64_KVM_TRACE_H + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoint for guest mode entry. + */ +TRACE_EVENT(kvm_sw64_entry, + TP_PROTO(unsigned int vcpu_id, unsigned int vcpu_pc), + TP_ARGS(vcpu_id, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned int, vcpu_pc) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("VCPU %u: PC: 0x%08x", __entry->vcpu_id, __entry->vcpu_pc) +); + +/* + * Tracepoint for guest mode exit. + */ + +TRACE_EVENT(kvm_sw64_exit, + TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc), + TP_ARGS(exit_reason, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, exit_reason) + __field(unsigned long, vcpu_pc) + ), + + TP_fast_assign( + __entry->exit_reason = exit_reason; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("exit_reason: 0x%04x (%11s), PC: 0x%08lx", + __entry->exit_reason, + __print_symbolic(__entry->exit_reason, kvm_sw64_exception_type), + __entry->vcpu_pc) +); + +#endif /* _SW64_KVM_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include diff --git a/arch/sw_64/kvm/vmem.c b/arch/sw_64/kvm/vmem.c new file mode 100644 index 000000000000..688449b65fa5 --- /dev/null +++ b/arch/sw_64/kvm/vmem.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +static bool addr_in_pool(struct gen_pool *pool, + unsigned long start, size_t size) +{ + bool found = false; + unsigned long end = start + size - 1; + struct gen_pool_chunk *chunk; + + rcu_read_lock(); + list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { + if (start >= chunk->start_addr && start <= chunk->end_addr) { + if (end <= chunk->end_addr) { + found = true; + break; + } + } + } + rcu_read_unlock(); + return found; +} + +static int vmem_vm_insert_page(struct vm_area_struct *vma) +{ + unsigned long addr, uaddr; + struct page *vmem_page; + struct vmem_info *info; + size_t size; + int ret; + + info = vma->vm_private_data; + addr = info->start; + size = info->size; + uaddr = vma->vm_start; + + vm_flags_init(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); + vmem_page = pfn_to_page(addr >> PAGE_SHIFT); + do { + ret = vm_insert_page(vma, uaddr, vmem_page); + if (ret < 0) { + pr_info("vm_insert_page failed: %d\n", ret); + return ret; + } + vmem_page++; + uaddr += PAGE_SIZE; + size -= PAGE_SIZE; + } while (size > 0); + + return 0; +} + +static void vmem_vm_open(struct vm_area_struct *vma) +{ + struct vmem_info *info = vma->vm_private_data; + + atomic_inc(&info->refcnt); +} + +static void vmem_vm_close(struct vm_area_struct *vma) +{ + unsigned long addr; + size_t size; + struct vmem_info *info; + + info = vma->vm_private_data; + addr = info->start; + size = round_up(info->size, 8 << 20); + + if (atomic_dec_and_test(&info->refcnt)) { + if (sw64_kvm_pool && addr_in_pool(sw64_kvm_pool, addr, size)) { + pr_info("gen pool free addr: %#lx, size: %#lx\n", + addr, size); + gen_pool_free(sw64_kvm_pool, addr, size); + } + kfree(info); + } +} + +const struct vm_operations_struct vmem_vm_ops = { + .open = vmem_vm_open, + .close = vmem_vm_close, +}; +EXPORT_SYMBOL_GPL(vmem_vm_ops); + +static int vmem_open(struct inode *inode, struct file *flip) +{ + flip->private_data = NULL; + return 0; +} + +static loff_t 
vmem_llseek(struct file *filp, loff_t offset, int whence)
+{
+	/* the device reports a fixed 256 GiB size, regardless of offset/whence */
+	loff_t newpos = 256UL << 30;
+	return newpos;
+}
+
+static int vmem_release(struct inode *inode, struct file *flip)
+{
+	return 0;
+}
+
+static int vmem_mmap(struct file *flip, struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	struct vmem_info *info;
+	size_t size = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if (!(vma->vm_flags & VM_SHARED)) {
+		pr_err("%s: mapping must be shared\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!sw64_kvm_pool)
+		return -ENOMEM;
+
+	if (flip->private_data == NULL) {
+		addr = gen_pool_alloc(sw64_kvm_pool, round_up(size, 8 << 20));
+		if (!addr)
+			return -ENOMEM;
+
+		info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL);
+		if (!info) {
+			gen_pool_free(sw64_kvm_pool, addr, round_up(size, 8 << 20));
+			return -ENOMEM;
+		}
+		pr_info("guest phys addr=%#lx, size=%#lx\n", addr, size);
+		info->start = addr;
+		info->size = size;
+		flip->private_data = (void *)info;
+	} else {
+		info = flip->private_data;
+		addr = info->start;
+	}
+
+	vma->vm_private_data = (void *)info;
+	vma->vm_ops = &vmem_vm_ops;
+	vma->vm_ops->open(vma);
+
+	/* TODO: handle the case where size is bigger than vm_mem_size */
+	pr_info("sw64_vmem: vm_start=%#lx, size=%#lx\n", vma->vm_start, size);
+
+	ret = vmem_vm_insert_page(vma);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct file_operations vmem_fops = {
+	.owner = THIS_MODULE,
+	.open = vmem_open,
+	.llseek = vmem_llseek,
+	.release = vmem_release,
+	.mmap = vmem_mmap,
+};
+
+static struct miscdevice vmem_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sw64_vmem",
+	.fops = &vmem_fops,
+};
+
+int __init vmem_init(void)
+{
+	int err;
+
+	err = misc_register(&vmem_dev);
+	if (err != 0) {
+		pr_err("Could not register sw64_vmem device\n");
+		return err;
+	}
+	return 0;
+}
+
+void vmem_exit(void)
+{
+	misc_deregister(&vmem_dev);
+}
--
Gitee

From 4b2ad1431cad4fb031838d1e4f3cdc11a866b90c Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:35 +0800
Subject: [PATCH 318/953] anolis: sw64: add stacktrace support

ANBZ: #4688

Add stacktrace support for SW64.
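
The unwinder assumes the frame layout documented in stacktrace.c (ra
saved at 0(fp), the previous fp at 8(fp)) and hands each recovered PC
to a caller-supplied callback, which returns nonzero to stop the walk.
A minimal sketch of a consumer (the callback below is illustrative,
not part of this patch):

	static int count_frames(unsigned long pc, void *data)
	{
		int *cnt = data;

		(*cnt)++;
		return 0;	/* keep unwinding */
	}

	/* count the frames of the current task */
	int cnt = 0;
	walk_stackframe(NULL, NULL, count_frames, &cnt);
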
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/stacktrace.h | 72 ++++++++ arch/sw_64/kernel/stacktrace.c | 247 ++++++++++++++++++++++++++++ 2 files changed, 319 insertions(+) create mode 100644 arch/sw_64/include/asm/stacktrace.h create mode 100644 arch/sw_64/kernel/stacktrace.c diff --git a/arch/sw_64/include/asm/stacktrace.h b/arch/sw_64/include/asm/stacktrace.h new file mode 100644 index 000000000000..958c9892fd6d --- /dev/null +++ b/arch/sw_64/include/asm/stacktrace.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_STACKTRACE_H +#define _ASM_SW64_STACKTRACE_H + +#include +#include +#include +#include +#include + +struct stackframe { + unsigned long pc; + unsigned long fp; +}; + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, +}; + +struct stack_info { + unsigned long low; + unsigned long high; + enum stack_type type; +}; + +/* The form of the top of the frame on the stack */ +struct stack_frame { + unsigned long return_address; + struct stack_frame *next_frame; +}; + +extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); +extern void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data); + +static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, + struct stack_info *info) +{ + unsigned long low = (unsigned long)task_stack_page(tsk); + unsigned long high = low + THREAD_SIZE; + + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_TASK; + } + + return true; +} + +/* + * We can only safely access per-cpu stacks from current in a non-preemptible + * context. + */ +static inline bool on_accessible_stack(struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) +{ + if (on_task_stack(tsk, sp, info)) + return true; + if (tsk != current || preemptible()) + return false; + + return false; +} + +#endif /* _ASM_SW64_STACKTRACE_H */ diff --git a/arch/sw_64/kernel/stacktrace.c b/arch/sw_64/kernel/stacktrace.c new file mode 100644 index 000000000000..ff00506d5b82 --- /dev/null +++ b/arch/sw_64/kernel/stacktrace.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stack trace management functions + * + * Copyright (C) 2018 snyh + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * sw_64 PCS assigns the frame pointer to r15. + * + * A simple function prologue looks like this: + * ldi sp,-xx(sp) + * stl ra,0(sp) + * stl fp,8(sp) + * mov sp,fp + * + * A simple function epilogue looks like this: + * mov fp,sp + * ldl ra,0(sp) + * ldl fp,8(sp) + * ldi sp,+xx(sp) + */ + +#ifdef CONFIG_FRAME_POINTER + +int unwind_frame(struct task_struct *tsk, struct stackframe *frame) +{ + unsigned long fp = frame->fp; + + if (fp & 0x7) + return -EINVAL; + + if (!tsk) + tsk = current; + + if (!on_accessible_stack(tsk, fp, NULL)) + return -EINVAL; + + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + + /* + * Frames created upon entry from user have NULL FP and PC values, so + * don't bother reporting these. Frames created by __noreturn functions + * might have a valid FP even if PC is bogus, so only terminate where + * both are NULL. 
+ */ + if (!frame->fp && !frame->pc) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(unwind_frame); + +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long pc, fp; + + struct stackframe frame; + + if (regs) { + unsigned long offset; + + pc = regs->pc; + fp = regs->regs[15]; + if (kallsyms_lookup_size_offset(pc, NULL, &offset) + && offset < 16) { + /* call stack has not been setup + * store pc first then loop from ra + */ + if (fn(pc, data)) + return; + pc = regs->regs[26]; + } + } else if (tsk == current || tsk == NULL) { + fp = (unsigned long)__builtin_frame_address(0); + pc = (unsigned long)walk_stackframe; + } else { + fp = tsk->thread.s[6]; + pc = tsk->thread.ra; + } + + if (!__kernel_text_address(pc) || fn(pc, data)) + return; + + frame.pc = pc; + frame.fp = fp; + while (1) { + int ret; + + ret = unwind_frame(tsk, &frame); + if (ret < 0) + break; + + if (fn(frame.pc, data)) + break; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#else /* !CONFIG_FRAME_POINTER */ +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long *ksp; + unsigned long sp, pc; + + if (regs) { + sp = (unsigned long)(regs+1); + pc = regs->pc; + } else if (tsk == current || tsk == NULL) { + register unsigned long current_sp __asm__ ("$30"); + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + sp = tsk->thread.sp; + pc = tsk->thread.ra; + } + + ksp = (unsigned long *)sp; + + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && fn(pc, data)) + break; + pc = *ksp++; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#endif/* CONFIG_FRAME_POINTER */ + +static int print_address_trace(unsigned long pc, void *data) +{ + print_ip_sym((const char *)data, pc); + return 0; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_info("Trace:\n"); + walk_stackframe(task, NULL, print_address_trace, (void *)loglvl); +} + +#ifdef CONFIG_STACKTRACE +/* + * Save stack-backtrace addresses into a stack_trace buffer. 
+ */ +struct stack_trace_data { + struct stack_trace *trace; + unsigned int nosched; +}; + +int save_trace(unsigned long pc, void *d) +{ + struct stack_trace_data *data = d; + struct stack_trace *trace = data->trace; + + if (data->nosched && in_sched_functions(pc)) + return 0; + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + trace->entries[trace->nr_entries++] = pc; + return (trace->nr_entries >= trace->max_entries); +} + +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + struct stack_trace_data data; + + data.trace = trace; + data.nosched = 0; + + walk_stackframe(current, regs, save_trace, &data); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +static void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) +{ + struct stack_trace_data data; + + data.trace = trace; + data.nosched = nosched; + + walk_stackframe(tsk, NULL, save_trace, &data); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + __save_stack_trace(current, trace, 0); +} +EXPORT_SYMBOL_GPL(save_stack_trace); +#endif + +static int save_pc(unsigned long pc, void *data) +{ + unsigned long *p = data; + *p = 0; + + if (!in_sched_functions(pc)) + *p = pc; + + return *p; +} + +unsigned long __get_wchan(struct task_struct *tsk) +{ + unsigned long pc; + + if (!tsk || tsk == current || task_is_running(tsk)) + return 0; + walk_stackframe(tsk, NULL, save_pc, &pc); + + return pc; +} + +#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE +int save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace) +{ + return 0; +} +#endif -- Gitee From 93daf8a6cf827e461b07a2d529fa65b093182b60 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:31 +0800 Subject: [PATCH 319/953] anolis: sw64: add qspinlock support ANBZ: #4688 Add qspinlock support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/spinlock.h | 24 ++++++++++++++++++++++++ arch/sw_64/include/asm/spinlock_types.h | 8 ++++++++ 2 files changed, 32 insertions(+) create mode 100644 arch/sw_64/include/asm/spinlock.h create mode 100644 arch/sw_64/include/asm/spinlock_types.h diff --git a/arch/sw_64/include/asm/spinlock.h b/arch/sw_64/include/asm/spinlock.h new file mode 100644 index 000000000000..64358f32cd9a --- /dev/null +++ b/arch/sw_64/include/asm/spinlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef _ASM_SW64_SPINLOCK_H +#define _ASM_SW64_SPINLOCK_H + +#include +#include + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#endif /* _ASM_SW64_SPINLOCK_H */ diff --git a/arch/sw_64/include/asm/spinlock_types.h b/arch/sw_64/include/asm/spinlock_types.h new file mode 100644 index 000000000000..62e554e4f48c --- /dev/null +++ b/arch/sw_64/include/asm/spinlock_types.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SPINLOCK_TYPES_H +#define _ASM_SW64_SPINLOCK_TYPES_H + +#include +#include + +#endif /* _ASM_SW64_SPINLOCK_TYPES_H */ -- Gitee From 95fc120e050827376bc96d309b2060d7b7a991a7 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:30 +0800 Subject: [PATCH 320/953] anolis: sw64: add perf events support ANBZ: #4688 Add basic perf events support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/perf_event.h | 16 + arch/sw_64/include/asm/pmc.h | 55 ++ arch/sw_64/include/uapi/asm/perf_regs.h | 41 ++ arch/sw_64/kernel/perf_event.c | 787 ++++++++++++++++++++++++ arch/sw_64/kernel/perf_regs.c | 33 + 5 files changed, 932 insertions(+) create mode 100644 arch/sw_64/include/asm/perf_event.h create mode 100644 arch/sw_64/include/asm/pmc.h create mode 100644 arch/sw_64/include/uapi/asm/perf_regs.h create mode 100644 arch/sw_64/kernel/perf_event.c create mode 100644 arch/sw_64/kernel/perf_regs.c diff --git a/arch/sw_64/include/asm/perf_event.h b/arch/sw_64/include/asm/perf_event.h new file mode 100644 index 000000000000..dc55a361babd --- /dev/null +++ b/arch/sw_64/include/asm/perf_event.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PERF_EVENT_H +#define _ASM_SW64_PERF_EVENT_H + +#include +#include + +#ifdef CONFIG_PERF_EVENTS +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) +#define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs +#endif + +#endif /* _ASM_SW64_PERF_EVENT_H */ diff --git a/arch/sw_64/include/asm/pmc.h b/arch/sw_64/include/asm/pmc.h new file mode 100644 index 000000000000..d5672dd940a7 --- /dev/null +++ b/arch/sw_64/include/asm/pmc.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for use with the sw64 PMC interface. 
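+ *
+ * The PMC_CMD_* values below are command selectors passed to wrperfmon();
+ * see arch/sw_64/kernel/perf_event.c in this series for how they are used
+ * to program, enable and read the two hardware counters.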
+ */ + +#ifndef _ASM_SW64_PMC_H +#define _ASM_SW64_PMC_H + +#define PMC_PC0 0 +#define PMC_PC1 1 + +/* Following commands are implemented on all CPUs */ +#define PMC_CMD_DISABLE 0 +#define PMC_CMD_ENABLE 1 +#define PMC_CMD_EVENT_BASE 2 +#define PMC_CMD_PM 4 +#define PMC_CMD_READ 5 +#define PMC_CMD_READ_CLEAR 6 +#define PMC_CMD_WRITE_BASE 7 + +#define PMC_DISABLE_BASE 1 + +#define PMC_ENABLE_BASE 1 + +#define PC0_RAW_BASE 0x0 +#define PC1_RAW_BASE 0x100 +#define PC0_MAX 0xF +#define PC1_MAX 0x3D + +#define SW64_PERFCTRL_KM 2 +#define SW64_PERFCTRL_UM 3 +#define SW64_PERFCTRL_AM 4 + +/* pc0 events */ +#define PC0_INSTRUCTIONS 0x0 +#define PC0_BRANCH_INSTRUCTIONS 0x3 +#define PC0_CPU_CYCLES 0x8 +#define PC0_ITB_READ 0x9 +#define PC0_DTB_READ 0xA +#define PC0_ICACHE_READ 0xB +#define PC0_DCACHE_READ 0xC +#define PC0_SCACHE_REFERENCES 0xD + +/* pc1 events */ +#define PC1_BRANCH_MISSES 0xB +#define PC1_SCACHE_MISSES 0x10 +#define PC1_ICACHE_READ_MISSES 0x16 +#define PC1_ITB_MISSES 0x17 +#define PC1_DTB_SINGLE_MISSES 0x30 +#define PC1_DCACHE_MISSES 0x32 + +#define MAX_HWEVENTS 2 +#define PMC_COUNT_MASK ((1UL << 58) - 1) + +#endif /* _ASM_SW64_PMC_H */ diff --git a/arch/sw_64/include/uapi/asm/perf_regs.h b/arch/sw_64/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..871ad4663d1d --- /dev/null +++ b/arch/sw_64/include/uapi/asm/perf_regs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _UAPI_ASM_SW64_PERF_REGS_H +#define _UAPI_ASM_SW64_PERF_REGS_H + +enum perf_event_sw64_regs { + PERF_REG_SW64_R0, + PERF_REG_SW64_R1, + PERF_REG_SW64_R2, + PERF_REG_SW64_R3, + PERF_REG_SW64_R4, + PERF_REG_SW64_R5, + PERF_REG_SW64_R6, + PERF_REG_SW64_R7, + PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, + PERF_REG_SW64_R19, + PERF_REG_SW64_R20, + PERF_REG_SW64_R21, + PERF_REG_SW64_R22, + PERF_REG_SW64_R23, + PERF_REG_SW64_R24, + PERF_REG_SW64_R25, + PERF_REG_SW64_R26, + PERF_REG_SW64_R27, + PERF_REG_SW64_R28, + PERF_REG_SW64_GP, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, + PERF_REG_SW64_MAX, +}; +#endif /* _UAPI_ASM_SW64_PERF_REGS_H */ diff --git a/arch/sw_64/kernel/perf_event.c b/arch/sw_64/kernel/perf_event.c new file mode 100644 index 000000000000..83bb051be9de --- /dev/null +++ b/arch/sw_64/kernel/perf_event.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for SW64 platforms. + * + * This code is based upon riscv and sparc perf event code. + */ + +#include +#include + +/* For tracking PMCs and the hw events they monitor on each CPU. */ +struct cpu_hw_events { + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + /* Array of events current scheduled on this cpu. */ + struct perf_event *event[MAX_HWEVENTS]; +}; + +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +struct sw64_perf_event { + /* pmu index */ + int counter; + /* events selector */ + int event; +}; + +/* + * A structure to hold the description of the PMCs available on a particular + * type of SW64 CPU. 
+ */ +struct sw64_pmu_t { + /* generic hw/cache events table */ + const struct sw64_perf_event *hw_events; + const struct sw64_perf_event (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + + /* method used to map hw/cache events */ + const struct sw64_perf_event *(*map_hw_event)(u64 config); + const struct sw64_perf_event *(*map_cache_event)(u64 config); + + /* The number of entries in the hw_event_map */ + int max_events; + + /* The number of counters on this pmu */ + int num_pmcs; + + /* + * All PMC counters reside in the IBOX register PCTR. This is the + * LSB of the counter. + */ + int pmc_count_shift[MAX_HWEVENTS]; + + /* + * The mask that isolates the PMC bits when the LSB of the counter + * is shifted to bit 0. + */ + unsigned long pmc_count_mask; + + /* The maximum period the PMC can count. */ + unsigned long pmc_max_period; + + /* + * The maximum value that may be written to the counter due to + * hardware restrictions is pmc_max_period - pmc_left. + */ + long pmc_left; + + /* Subroutine for checking validity of a raw event for this PMU. */ + bool (*raw_event_valid)(u64 config); +}; + +/* + * The SW64 PMU description currently in operation. This is set during + * the boot process to the specific CPU of the machine. + */ +static const struct sw64_pmu_t *sw64_pmu; + +/* + * SW64 PMC event types + * + * There is no one-to-one mapping of the possible hw event types to the + * actual codes that are used to program the PMCs hence we introduce our + * own hw event type identifiers. + */ +#define SW64_OP_UNSUP {-1, -1} + +/* Mapping of the hw event types to the perf tool interface */ +static const struct sw64_perf_event core3_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = {PMC_PC0, PC0_CPU_CYCLES}, + [PERF_COUNT_HW_INSTRUCTIONS] = {PMC_PC0, PC0_INSTRUCTIONS}, + [PERF_COUNT_HW_CACHE_REFERENCES] = {PMC_PC0, PC0_SCACHE_REFERENCES}, + [PERF_COUNT_HW_CACHE_MISSES] = {PMC_PC1, PC1_SCACHE_MISSES}, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {PMC_PC0, PC0_BRANCH_INSTRUCTIONS}, + [PERF_COUNT_HW_BRANCH_MISSES] = {PMC_PC1, PC1_BRANCH_MISSES}, +}; + +/* Mapping of the hw cache event types to the perf tool interface */ +#define C(x) PERF_COUNT_HW_CACHE_##x +static const struct sw64_perf_event core3_cache_event_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DCACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DCACHE_MISSES} + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ICACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ICACHE_READ_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DTB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DTB_SINGLE_MISSES}, + 
}, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ITB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ITB_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + +}; + +static const struct sw64_perf_event *core3_map_hw_event(u64 config) +{ + return &sw64_pmu->hw_events[config]; +} + +static const struct sw64_perf_event *core3_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct sw64_perf_event *perf_event; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + perf_event = &((*sw64_pmu->cache_events)[cache_type][cache_op][cache_result]); + if (perf_event->counter == -1) /* SW64_OP_UNSUP */ + return ERR_PTR(-ENOENT); + + return perf_event; +} + +/* + * r0xx for counter0, r1yy for counter1. 
+ * According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 3D + */ +static bool core3_raw_event_valid(u64 config) +{ + if ((config >= PC0_RAW_BASE && config <= (PC0_RAW_BASE + PC0_MAX)) || + (config >= PC1_RAW_BASE && config <= (PC1_RAW_BASE + PC1_MAX))) + return true; + + pr_info("sw64 pmu: invalid raw event config %#llx\n", config); + return false; +} + +static const struct sw64_pmu_t core3_pmu = { + .max_events = ARRAY_SIZE(core3_hw_event_map), + .hw_events = core3_hw_event_map, + .map_hw_event = core3_map_hw_event, + .cache_events = &core3_cache_event_map, + .map_cache_event = core3_map_cache_event, + .num_pmcs = MAX_HWEVENTS, + .pmc_count_mask = PMC_COUNT_MASK, + .pmc_max_period = PMC_COUNT_MASK, + .pmc_left = 4, + .raw_event_valid = core3_raw_event_valid, +}; + +/* + * Low-level functions: reading/writing counters + */ +static void sw64_write_pmc(int idx, unsigned long val) +{ + wrperfmon(PMC_CMD_WRITE_BASE + idx, val); +} + +static unsigned long sw64_read_pmc(int idx) +{ + return wrperfmon(PMC_CMD_READ, idx); +} + +/* Set a new period to sample over */ +static int sw64_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + long left = local64_read(&hwc->period_left); + long period = hwc->sample_period; + int overflow = 0; + unsigned long value; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (left > (long)sw64_pmu->pmc_max_period) + left = sw64_pmu->pmc_max_period; + + value = sw64_pmu->pmc_max_period - left; + local64_set(&hwc->prev_count, value); + sw64_write_pmc(idx, value); + + perf_event_update_userpage(event); + + return overflow; +} + +/* + * Calculates the count (the 'delta') since the last time the PMC was read. + * + * As the PMCs' full period can easily be exceeded within the perf system + * sampling period we cannot use any high order bits as a guard bit in the + * PMCs to detect overflow as is done by other architectures. The code here + * calculates the delta on the basis that there is no overflow when ovf is + * zero. The value passed via ovf by the interrupt handler corrects for + * overflow. + * + * This can be racey on rare occasions -- a call to this routine can occur + * with an overflowed counter just before the PMI service routine is called. + * The check for delta negative hopefully always rectifies this situation. + */ +static unsigned long sw64_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx, long ovf) +{ + long prev_raw_count, new_raw_count; + long delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = sw64_read_pmc(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - (prev_raw_count & sw64_pmu->pmc_count_mask)) + ovf; + + /* It is possible on very rare occasions that the PMC has overflowed + * but the interrupt is yet to come. Detect and fix this situation. + */ + if (unlikely(delta < 0)) + delta += sw64_pmu->pmc_max_period + 1; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +/* + * State transition functions: + * + * add()/del() & start()/stop() + * + */ + +/* + * pmu->start: start the event. 
+ */ +static void sw64_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + sw64_perf_event_set_period(event, hwc, hwc->idx); + } + + hwc->state = 0; + + /* counting in selected modes, for both counters */ + wrperfmon(PMC_CMD_PM, hwc->config_base); + wrperfmon(PMC_CMD_EVENT_BASE + hwc->idx, hwc->event_base); + wrperfmon(PMC_CMD_ENABLE, PMC_ENABLE_BASE + hwc->idx); +} + +/* + * pmu->stop: stop the counter + */ +static void sw64_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + wrperfmon(PMC_CMD_DISABLE, PMC_DISABLE_BASE + hwc->idx); + hwc->state |= PERF_HES_STOPPED; + barrier(); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + sw64_perf_event_update(event, hwc, hwc->idx, 0); + hwc->state |= PERF_HES_UPTODATE; + } +} + +/* + * pmu->add: add the event to PMU. + */ +static int sw64_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int err = 0; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + if (__test_and_set_bit(hwc->idx, cpuc->used_mask)) { + err = -ENOSPC; + goto out; + } + + cpuc->event[hwc->idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + sw64_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + local_irq_restore(irq_flags); + + return err; +} + +/* + * pmu->del: delete the event from PMU. + */ +static void sw64_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + sw64_pmu_stop(event, PERF_EF_UPDATE); + cpuc->event[hwc->idx] = NULL; + __clear_bit(event->hw.idx, cpuc->used_mask); + + /* Absorb the final count and turn off the event. */ + perf_event_update_userpage(event); + + local_irq_restore(irq_flags); +} + +/* + * pmu->read: read and update the counter + */ +static void sw64_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + sw64_perf_event_update(event, hwc, hwc->idx, 0); +} + +static bool supported_cpu(void) +{ + return true; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Nothing to be done! */ +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + const struct sw64_perf_event *event_type; + + + /* + * SW64 does not have per-counter usr/os/guest/host bits, + * we can distinguish exclude_user and exclude_kernel by + * sample mode. 
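+	 * (SW64_PERFCTRL_KM counts kernel mode only, SW64_PERFCTRL_UM counts
+	 * user mode only, and SW64_PERFCTRL_AM counts all modes; the
+	 * config_base selection below picks one of these.)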
+ */ + if (event->attr.exclude_hv || event->attr.exclude_idle || + event->attr.exclude_host || event->attr.exclude_guest) + return -EINVAL; + + /* + * SW64 does not support precise ip feature, and system hang when + * detecting precise_ip by perf_event_attr__set_max_precise_ip + * in userspace + */ + if (attr->precise_ip != 0) + return -EOPNOTSUPP; + + /* SW64 has fixed counter for given event type */ + if (attr->type == PERF_TYPE_HARDWARE) { + if (attr->config >= sw64_pmu->max_events) + return -EINVAL; + event_type = sw64_pmu->map_hw_event(attr->config); + hwc->idx = event_type->counter; + hwc->event_base = event_type->event; + } else if (attr->type == PERF_TYPE_HW_CACHE) { + event_type = sw64_pmu->map_cache_event(attr->config); + if (IS_ERR(event_type)) /* */ + return PTR_ERR(event_type); + hwc->idx = event_type->counter; + hwc->event_base = event_type->event; + } else { /* PERF_TYPE_RAW */ + if (!sw64_pmu->raw_event_valid(attr->config)) + return -EINVAL; + hwc->idx = attr->config >> 8; /* counter selector */ + hwc->event_base = attr->config & 0xff; /* event selector */ + } + + hwc->config_base = SW64_PERFCTRL_AM; + + if (attr->exclude_user) + hwc->config_base = SW64_PERFCTRL_KM; + if (attr->exclude_kernel) + hwc->config_base = SW64_PERFCTRL_UM; + + hwc->config = attr->config; + + if (!is_sampling_event(event)) + pr_debug("not sampling event\n"); + + event->destroy = hw_perf_event_destroy; + + if (!hwc->sample_period) { + hwc->sample_period = sw64_pmu->pmc_max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + return 0; +} + +/* + * Main entry point to initialise a HW performance event. + */ +static int sw64_pmu_event_init(struct perf_event *event) +{ + int err; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + default: + return -ENOENT; + } + + if (!sw64_pmu) + return -ENODEV; + + /* Do the real initialisation work. */ + err = __hw_perf_event_init(event); + + return err; +} + +static struct pmu pmu = { + .name = "core3-base", + .capabilities = PERF_PMU_CAP_NO_NMI, + .event_init = sw64_pmu_event_init, + .add = sw64_pmu_add, + .del = sw64_pmu_del, + .start = sw64_pmu_start, + .stop = sw64_pmu_stop, + .read = sw64_pmu_read, +}; + +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned long pcr0, pcr1; + int cpu; + + if (!supported_cpu()) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr0 = wrperfmon(PMC_CMD_READ, PMC_PC0); + pcr1 = wrperfmon(PMC_CMD_READ, PMC_PC1); + + pr_info("CPU#%d: PCTR0[%lx] PCTR1[%lx]\n", cpu, pcr0, pcr1); + + local_irq_restore(flags); +} + +static void sw64_perf_event_irq_handler(unsigned long idx, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc; + struct perf_sample_data data; + struct perf_event *event; + struct hw_perf_event *hwc; + + __this_cpu_inc(irq_pmi_count); + cpuc = this_cpu_ptr(&cpu_hw_events); + + event = cpuc->event[idx]; + + if (unlikely(!event)) { + irq_err_count++; + return; + } + + hwc = &event->hw; + sw64_perf_event_update(event, hwc, idx, sw64_pmu->pmc_max_period + 1); + perf_sample_data_init(&data, 0, hwc->last_period); + + if (sw64_perf_event_set_period(event, hwc, idx)) { + if (perf_event_overflow(event, &data, regs)) { + /* Interrupts coming too quickly; "throttle" the + * counter, i.e., disable it for a little while. 
+ */ + sw64_pmu_stop(event, 0); + } + } +} + +bool valid_utext_addr(unsigned long addr) +{ + return addr >= current->mm->start_code && addr <= current->mm->end_code; +} + +bool valid_dy_addr(unsigned long addr) +{ + bool ret = false; + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + + if (addr > TASK_SIZE || addr < TASK_UNMAPPED_BASE) + return ret; + vma = find_vma(mm, addr); + if (vma && vma->vm_start <= addr && (vma->vm_flags & VM_EXEC)) + ret = true; + return ret; +} + +#ifdef CONFIG_FRAME_POINTER +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + + struct stack_frame frame; + unsigned long __user *fp; + int err; + + perf_callchain_store(entry, regs->pc); + + fp = (unsigned long __user *)regs->regs[15]; + + while (entry->nr < entry->max_stack && (unsigned long)fp < current->mm->start_stack) { + if (!access_ok(fp, sizeof(frame))) + break; + + pagefault_disable(); + err = __copy_from_user_inatomic(&frame, fp, sizeof(frame)); + pagefault_enable(); + + if (err) + break; + + if (valid_utext_addr(frame.return_address) || valid_dy_addr(frame.return_address)) + perf_callchain_store(entry, frame.return_address); + fp = (void __user *)frame.next_frame; + } +} +#else /* !CONFIG_FRAME_POINTER */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long usp = rdusp(); + unsigned long user_addr; + int err; + + perf_callchain_store(entry, regs->pc); + + while (entry->nr < entry->max_stack && usp < current->mm->start_stack) { + if (!access_ok((const void __user *)usp, 8)) + break; + + pagefault_disable(); + err = __get_user(user_addr, (unsigned long *)usp); + pagefault_enable(); + + if (err) + break; + + if (valid_utext_addr(user_addr) || valid_dy_addr(user_addr)) + perf_callchain_store(entry, user_addr); + usp = usp + 8; + } +} +#endif/* CONFIG_FRAME_POINTER */ + +/* + * Gets called by walk_stackframe() for every stackframe. This will be called + * whist unwinding the stackframe and is like a subroutine return so we use + * the PC. + */ +static int callchain_trace(unsigned long pc, void *data) +{ + struct perf_callchain_entry_ctx *entry = data; + + perf_callchain_store(entry, pc); + return 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + walk_stackframe(NULL, regs, callchain_trace, entry); +} + +/* + * Gets the perf_instruction_pointer and perf_misc_flags for guest os. + */ + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_state()) + return perf_guest_get_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + unsigned int guest_state = perf_guest_state(); + int misc = 0; + + if (guest_state) { + if (guest_state & PERF_GUEST_USER) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} + +/* + * Init call to initialise performance events at kernel startup. 
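+ * The PMU is registered under the name "cpu" with type PERF_TYPE_RAW,
+ * which is what the perf tool expects for raw hardware events.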
+ */ +int __init init_hw_perf_events(void) +{ + if (!supported_cpu()) { + pr_info("Performance events: Unsupported CPU type!\n"); + return 0; + } + + pr_info("Performance events: Supported CPU type!\n"); + + /* Override performance counter IRQ vector */ + + perf_irq = sw64_perf_event_irq_handler; + + /* And set up PMU specification */ + sw64_pmu = &core3_pmu; + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); diff --git a/arch/sw_64/kernel/perf_regs.c b/arch/sw_64/kernel/perf_regs.c new file mode 100644 index 000000000000..b036f213936b --- /dev/null +++ b/arch/sw_64/kernel/perf_regs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_SW64_MAX)) + return 0; + + return ((unsigned long *)regs)[idx]; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_SW64_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_64; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} -- Gitee From bf77105f4117baee17799f134ddc5bc3425ee086 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:17 +0800 Subject: [PATCH 321/953] anolis: sw64: add kexec support ANBZ: #4688 Add kexec support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/kexec.h | 82 ++++++++++++ arch/sw_64/kernel/machine_kexec.c | 209 ++++++++++++++++++++++++++++++ 2 files changed, 291 insertions(+) create mode 100644 arch/sw_64/include/asm/kexec.h create mode 100644 arch/sw_64/kernel/machine_kexec.c diff --git a/arch/sw_64/include/asm/kexec.h b/arch/sw_64/include/asm/kexec.h new file mode 100644 index 000000000000..25e0d8da84f8 --- /dev/null +++ b/arch/sw_64/include/asm/kexec.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KEXEC_H +#define _ASM_SW64_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 8192 + +#define KEXEC_ARCH KEXEC_ARCH_SW64 + +#define KEXEC_SW64_ATAGS_OFFSET 0x1000 +#define KEXEC_SW64_ZIMAGE_OFFSET 0x8000 + +#ifndef __ASSEMBLY__ + +/** + * crash_setup_regs() - save registers for the panic kernel + * @newregs: registers are saved here + * @oldregs: registers to be saved (may be %NULL) + * + * Function copies machine registers from @oldregs to @newregs. If @oldregs is + * %NULL then current registers are stored there. 
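+ *
+ * This runs on the crash path, typically with interrupts disabled and
+ * the other CPUs about to be stopped, so it must not sleep or take locks.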
+ */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) { + memcpy(newregs, oldregs, sizeof(*newregs)); + } else { + __asm__ __volatile__ ("stl $0, %0" : "=m" (newregs->regs[0])); + __asm__ __volatile__ ("stl $1, %0" : "=m" (newregs->regs[1])); + __asm__ __volatile__ ("stl $2, %0" : "=m" (newregs->regs[2])); + __asm__ __volatile__ ("stl $3, %0" : "=m" (newregs->regs[3])); + __asm__ __volatile__ ("stl $4, %0" : "=m" (newregs->regs[4])); + __asm__ __volatile__ ("stl $5, %0" : "=m" (newregs->regs[5])); + __asm__ __volatile__ ("stl $6, %0" : "=m" (newregs->regs[6])); + __asm__ __volatile__ ("stl $7, %0" : "=m" (newregs->regs[7])); + __asm__ __volatile__ ("stl $8, %0" : "=m" (newregs->regs[8])); + __asm__ __volatile__ ("stl $9, %0" : "=m" (newregs->regs[9])); + __asm__ __volatile__ ("stl $10, %0" : "=m" (newregs->regs[10])); + __asm__ __volatile__ ("stl $11, %0" : "=m" (newregs->regs[11])); + __asm__ __volatile__ ("stl $12, %0" : "=m" (newregs->regs[12])); + __asm__ __volatile__ ("stl $13, %0" : "=m" (newregs->regs[13])); + __asm__ __volatile__ ("stl $14, %0" : "=m" (newregs->regs[14])); + __asm__ __volatile__ ("stl $15, %0" : "=m" (newregs->regs[15])); + __asm__ __volatile__ ("stl $16, %0" : "=m" (newregs->regs[16])); + __asm__ __volatile__ ("stl $17, %0" : "=m" (newregs->regs[17])); + __asm__ __volatile__ ("stl $18, %0" : "=m" (newregs->regs[18])); + __asm__ __volatile__ ("stl $19, %0" : "=m" (newregs->regs[19])); + __asm__ __volatile__ ("stl $20, %0" : "=m" (newregs->regs[20])); + __asm__ __volatile__ ("stl $21, %0" : "=m" (newregs->regs[21])); + __asm__ __volatile__ ("stl $22, %0" : "=m" (newregs->regs[22])); + __asm__ __volatile__ ("stl $23, %0" : "=m" (newregs->regs[23])); + __asm__ __volatile__ ("stl $24, %0" : "=m" (newregs->regs[24])); + __asm__ __volatile__ ("stl $25, %0" : "=m" (newregs->regs[25])); + __asm__ __volatile__ ("stl $26, %0" : "=m" (newregs->regs[26])); + __asm__ __volatile__ ("stl $27, %0" : "=m" (newregs->regs[27])); + __asm__ __volatile__ ("stl $28, %0" : "=m" (newregs->regs[28])); + __asm__ __volatile__ ("stl $29, %0" : "=m" (newregs->regs[29])); + __asm__ __volatile__ ("stl $30, %0" : "=m" (newregs->regs[30])); + newregs->pc = (unsigned long)current_text_addr(); + } +} + +/* Function pointer to optional machine-specific reinitialization */ +extern void (*kexec_reinit)(void); + +#endif /* __ASSEMBLY__ */ + +struct kimage; +extern unsigned long kexec_args[4]; + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_SW64_KEXEC_H */ diff --git a/arch/sw_64/kernel/machine_kexec.c b/arch/sw_64/kernel/machine_kexec.c new file mode 100644 index 000000000000..950998476cda --- /dev/null +++ b/arch/sw_64/kernel/machine_kexec.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * machine_kexec.c for kexec + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ +#include +#include +#include +#include +#include + +#include + +extern void *kexec_control_page; +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +extern unsigned long kexec_start_address; +extern unsigned long kexec_indirection_page; + +static atomic_t waiting_for_crash_ipi; + +#ifdef CONFIG_SMP +extern struct smp_rcb_struct *smp_rcb; + +/* + * Wait for relocation code is prepared and send + * secondary CPUs to spin until kernel is relocated. 
+ */ +static void kexec_smp_down(void *ignored) +{ + int cpu = smp_processor_id(); + + local_irq_disable(); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + set_cpu_online(cpu, false); + reset_cpu(cpu); +} +#endif + +int machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_shutdown(void) +{ +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + smp_call_function(kexec_smp_down, NULL, 0); + smp_wmb(); + while (num_online_cpus() > 1) { + cpu_relax(); + mdelay(1); + } +#endif +} + +#ifdef CONFIG_SMP +static void machine_crash_nonpanic_core(void *unused) +{ + int cpu; + struct pt_regs regs; + + cpu = smp_processor_id(); + + local_irq_disable(); + crash_setup_regs(®s, NULL); + pr_debug("CPU %u will stop doing anything useful since another CPU has crashed\n", cpu); + crash_save_cpu(®s, cpu); + flush_cache_all(); + + set_cpu_online(cpu, false); + atomic_dec(&waiting_for_crash_ipi); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + if (cpu != 0) + reset_cpu(cpu); + else + machine_kexec(kexec_crash_image); +} +#else +static inline void machine_crash_nonpanic_core(void *unused) { } +#endif + +static void machine_kexec_mask_interrupts(void) +{ + unsigned int i; + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_chip *chip; + + chip = irq_desc_get_chip(desc); + if (!chip) + continue; + + if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) + chip->irq_eoi(&desc->irq_data); + + if (chip->irq_mask) + chip->irq_mask(&desc->irq_data); + + if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) + chip->irq_disable(&desc->irq_data); + } +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ + int cpu; + unsigned long msecs; + + cpu = smp_processor_id(); + local_irq_disable(); + kernel_restart_prepare(NULL); + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + smp_call_function(machine_crash_nonpanic_core, NULL, false); + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + if (atomic_read(&waiting_for_crash_ipi) > 0) + pr_warn("Non-crashing CPUs did not react to IPI\n"); + + crash_save_cpu(regs, cpu); + machine_kexec_mask_interrupts(); + pr_info("Loading crashdump kernel...\n"); +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + if (cpu != 0) + reset_cpu(cpu); +#endif +} + +#define phys_to_ktext(pa) (__START_KERNEL_map + (pa)) + +typedef void (*noretfun_t)(void) __noreturn; + +void machine_kexec(struct kimage *image) +{ + void *reboot_code_buffer; + unsigned long entry; + unsigned long *ptr; + struct boot_params *params = sunway_boot_params; + + + reboot_code_buffer = kexec_control_page; + pr_info("reboot_code_buffer = %px\n", reboot_code_buffer); + kexec_start_address = phys_to_ktext(image->start); + pr_info("kexec_start_address = %#lx\n", kexec_start_address); + if (image->type == KEXEC_TYPE_DEFAULT) + kexec_indirection_page = + (unsigned long) phys_to_virt(image->head & PAGE_MASK); + else + kexec_indirection_page = (unsigned long)&image->head; + + pr_info("kexec_indirection_page = %#lx, image->head=%#lx\n", + kexec_indirection_page, image->head); + + params->cmdline = kexec_start_address - COMMAND_LINE_OFF; + params->initrd_start = *(__u64 *)(kexec_start_address - INITRD_START_OFF); + params->initrd_size = *(__u64 *)(kexec_start_address - INITRD_SIZE_OFF); + + pr_info("initrd_start = %#llx, initrd_size = %#llx\n" + "dtb_start = %#llx, efi_systab = 
%#llx\n"
+		"efi_memmap = %#llx, efi_memmap_size = %#llx\n"
+		"efi_memdesc_size = %#llx, efi_memdesc_version = %#llx\n"
+		"cmdline = %#llx\n",
+		params->initrd_start, params->initrd_size,
+		params->dtb_start, params->efi_systab,
+		params->efi_memmap, params->efi_memmap_size,
+		params->efi_memdesc_size, params->efi_memdesc_version,
+		params->cmdline);
+
+	memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
+
+	/*
+	 * The generic kexec code builds a page list with physical
+	 * addresses. They are directly accessible through the kernel
+	 * direct mapping, hence the phys_to_virt() call.
+	 */
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+	     ptr = (entry & IND_INDIRECTION) ?
+	     phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+		    *ptr & IND_DESTINATION)
+			*ptr = (unsigned long) phys_to_virt(*ptr);
+	}
+
+	/*
+	 * We must not be interrupted from here on.
+	 */
+	local_irq_disable();
+
+	pr_info("Will call new kernel at %08lx\n", image->start);
+	pr_info("Bye ...\n");
+	smp_wmb();
+	((noretfun_t) reboot_code_buffer)();
+}
--
Gitee

From 3dc6eff8775157e9bf0c3fe291dfdb47461213a7 Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:16 +0800
Subject: [PATCH 322/953] anolis: sw64: add kdump support

ANBZ: #4688

Add kdump support for SW64.

Signed-off-by: Mao Minkai
Reviewed-by: He Sheng
Signed-off-by: Gu Zitao
Reviewed-by: Gu Mi
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2906
---
 arch/sw_64/kernel/crash_dump.c | 56 ++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 arch/sw_64/kernel/crash_dump.c

diff --git a/arch/sw_64/kernel/crash_dump.c b/arch/sw_64/kernel/crash_dump.c
new file mode 100644
index 000000000000..4484673823b8
--- /dev/null
+++ b/arch/sw_64/kernel/crash_dump.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/sw_64/kernel/crash_dump.c
+ *
+ * Copyright (C) 2019 JN
+ * Author: He Sheng
+ *
+ * This code is taken from arch/x86/kernel/crash_dump_64.c
+ *   Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ *   Copyright (C) IBM Corporation, 2004. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+
+/**
+ * copy_oldmem_page() - copy one page from old kernel memory
+ * @pfn: page frame number to be copied
+ * @buf: buffer where the copied page is placed
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page
+ * @userbuf: if set, @buf is in the user address space
+ *
+ * This function copies one page from old kernel memory into the buffer
+ * pointed to by @buf. If @buf is in userspace, set @userbuf to %1. Returns
+ * number of bytes copied or a negative error in case of failure.
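+ *
+ * Called from the vmcore code (fs/proc/vmcore.c) when the capture kernel
+ * exposes the old kernel's memory image to userspace.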
+ */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user(buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } + } else { + memcpy(buf, vaddr + offset, csize); + } + + iounmap(vaddr); + return csize; +} -- Gitee From abe4ec1c3756fef52bb4128272b897a3bdbf1bb1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:03 +0800 Subject: [PATCH 323/953] anolis: sw64: add eBPF JIT support ANBZ: #4688 Add eBPF JIT support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/uapi/asm/bpf_perf_event.h | 9 + arch/sw_64/net/Makefile | 5 + arch/sw_64/net/bpf_jit.h | 368 +++++ arch/sw_64/net/bpf_jit_comp.c | 1455 ++++++++++++++++++ 4 files changed, 1837 insertions(+) create mode 100644 arch/sw_64/include/uapi/asm/bpf_perf_event.h create mode 100644 arch/sw_64/net/Makefile create mode 100644 arch/sw_64/net/bpf_jit.h create mode 100644 arch/sw_64/net/bpf_jit_comp.c diff --git a/arch/sw_64/include/uapi/asm/bpf_perf_event.h b/arch/sw_64/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..52f6f1e555f1 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BPF_PERF_EVENT_H +#define _UAPI_ASM_SW64_BPF_PERF_EVENT_H + +#include + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI_ASM_SW64_BPF_PERF_EVENT_H */ diff --git a/arch/sw_64/net/Makefile b/arch/sw_64/net/Makefile new file mode 100644 index 000000000000..d4663b4bf509 --- /dev/null +++ b/arch/sw_64/net/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o diff --git a/arch/sw_64/net/bpf_jit.h b/arch/sw_64/net/bpf_jit.h new file mode 100644 index 000000000000..929036d8ea6b --- /dev/null +++ b/arch/sw_64/net/bpf_jit.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BPF JIT compiler for SW64 + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _SW64_NET_BPF_JIT_H +#define _SW64_NET_BPF_JIT_H + +/* SW64 instruction field shift */ +#define SW64_BPF_OPCODE_OFFSET 26 +#define SW64_BPF_RA_OFFSET 21 +#define SW64_BPF_RB_OFFSET 16 +#define SW64_BPF_SIMPLE_ALU_IMM_OFFSET 13 +#define SW64_BPF_SIMPLE_ALU_FUNC_OFFSET 5 +#define SW64_BPF_SIMPLE_ALU_RC_OFFSET 0 +#define SW64_BPF_LS_FUNC_OFFSET 12 + +/* SW64 instruction opcodes */ +#define SW64_BPF_OPCODE_CALL 0x01 +#define SW64_BPF_OPCODE_RET 0x02 +#define SW64_BPF_OPCODE_JMP 0x03 +#define SW64_BPF_OPCODE_BR 0x04 +#define SW64_BPF_OPCODE_BSR 0x05 +#define SW64_BPF_OPCODE_MISC 0x06 +#define SW64_BPF_OPCODE_LOCK 0x08 +#define SW64_BPF_OPCODE_ALU_REG 0x10 +#define SW64_BPF_OPCODE_ALU_IMM 0x12 +#define SW64_BPF_OPCODE_LDBU 0x20 +#define SW64_BPF_OPCODE_LDHU 0x21 +#define SW64_BPF_OPCODE_LDW 0x22 +#define SW64_BPF_OPCODE_LDL 0x23 +#define SW64_BPF_OPCODE_STB 0x28 +#define SW64_BPF_OPCODE_STH 0x29 +#define SW64_BPF_OPCODE_STW 0x2A +#define SW64_BPF_OPCODE_STL 0x2B +#define SW64_BPF_OPCODE_BEQ 0x30 +#define SW64_BPF_OPCODE_BNE 0x31 +#define SW64_BPF_OPCODE_BLT 0x32 +#define SW64_BPF_OPCODE_BLE 0x33 +#define SW64_BPF_OPCODE_BGT 0x34 +#define SW64_BPF_OPCODE_BGE 0x35 +#define SW64_BPF_OPCODE_BLBC 0x36 +#define SW64_BPF_OPCODE_BLBS 0x37 +#define SW64_BPF_OPCODE_LDI 0x3E +#define SW64_BPF_OPCODE_LDIH 0x3F + +/* SW64 MISC instructions function codes */ +#define SW64_BPF_FUNC_MISC_RD_F 0x1000 +#define SW64_BPF_FUNC_MISC_WR_F 0x1020 + +/* SW64 LOCK instructions function codes */ +#define SW64_BPF_FUNC_LOCK_LLDW 0x0 +#define SW64_BPF_FUNC_LOCK_LLDL 0x1 +#define SW64_BPF_FUNC_LOCK_LSTW 0x8 +#define SW64_BPF_FUNC_LOCK_LSTL 0x9 + +/* SW64 ALU instructions function codes */ +#define SW64_BPF_FUNC_ALU_ADDW 0x00 +#define SW64_BPF_FUNC_ALU_SUBW 0x01 +#define SW64_BPF_FUNC_ALU_ADDL 0x08 +#define SW64_BPF_FUNC_ALU_SUBL 0x09 +#define SW64_BPF_FUNC_ALU_MULW 0x10 +#define SW64_BPF_FUNC_ALU_MULL 0x18 +#define SW64_BPF_FUNC_ALU_CMPEQ 0x28 +#define SW64_BPF_FUNC_ALU_CMPLT 0x29 +#define SW64_BPF_FUNC_ALU_CMPLE 0x2A +#define SW64_BPF_FUNC_ALU_CMPULT 0x2B +#define SW64_BPF_FUNC_ALU_CMPULE 0x2C +#define SW64_BPF_FUNC_ALU_AND 0x38 +#define SW64_BPF_FUNC_ALU_BIC 0x39 +#define SW64_BPF_FUNC_ALU_BIS 0x3A +#define SW64_BPF_FUNC_ALU_ORNOT 0x3B +#define SW64_BPF_FUNC_ALU_XOR 0x3C +#define SW64_BPF_FUNC_ALU_EQV 0x3D +#define SW64_BPF_FUNC_ALU_SLL 0x48 +#define SW64_BPF_FUNC_ALU_SRL 0x49 +#define SW64_BPF_FUNC_ALU_SRA 0x4A +#define SW64_BPF_FUNC_ALU_ZAP 0x68 +#define SW64_BPF_FUNC_ALU_ZAPNOT 0x69 +#define SW64_BPF_FUNC_ALU_SEXTB 0x6A +#define SW64_BPF_FUNC_ALU_SEXTH 0x6B + +/* special instuction used in jit_fill_hole() */ +#define SW64_BPF_ILLEGAL_INSN (0x1ff00000) /* pri_ret/b $31 */ + +enum sw64_bpf_registers { + SW64_BPF_REG_V0 = 0, /* keep return value */ + SW64_BPF_REG_T0 = 1, + SW64_BPF_REG_T1 = 2, + SW64_BPF_REG_T2 = 3, + SW64_BPF_REG_T3 = 4, + SW64_BPF_REG_T4 = 5, + SW64_BPF_REG_T5 = 6, + SW64_BPF_REG_T6 = 7, + SW64_BPF_REG_T7 = 8, + SW64_BPF_REG_S0 = 9, /* callee saved */ + SW64_BPF_REG_S1 = 10, /* callee saved */ + SW64_BPF_REG_S2 = 11, /* callee saved */ + SW64_BPF_REG_S3 = 12, /* callee saved */ + SW64_BPF_REG_S4 = 13, /* callee saved */ + SW64_BPF_REG_S5 = 14, /* callee saved */ + SW64_BPF_REG_S6 = 15, /* callee saved */ + SW64_BPF_REG_FP = 15, /* frame pointer if necessary */ + SW64_BPF_REG_A0 = 16, /* argument 0 */ + SW64_BPF_REG_A1 = 17, /* argument 1 */ + SW64_BPF_REG_A2 = 18, /* argument 2 */ + SW64_BPF_REG_A3 = 19, /* argument 3 */ + SW64_BPF_REG_A4 = 20, /* argument 4 */ + SW64_BPF_REG_A5 = 21, /* 
+	SW64_BPF_REG_T8 = 22,
+	SW64_BPF_REG_T9 = 23,
+	SW64_BPF_REG_T10 = 24,
+	SW64_BPF_REG_T11 = 25,
+	SW64_BPF_REG_RA = 26,	/* callee saved, keep return address */
+	SW64_BPF_REG_T12 = 27,
+	SW64_BPF_REG_PV = 27,
+	SW64_BPF_REG_AT = 28,	/* reserved by assembler */
+	SW64_BPF_REG_GP = 29,	/* global pointer */
+	SW64_BPF_REG_SP = 30,	/* callee saved, stack pointer */
+	SW64_BPF_REG_ZR = 31	/* read 0 */
+};
+
+/* SW64 load and store instructions */
+#define SW64_BPF_LDBU(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDBU, dst, rb, offset16)
+#define SW64_BPF_LDHU(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDHU, dst, rb, offset16)
+#define SW64_BPF_LDW(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDW, dst, rb, offset16)
+#define SW64_BPF_LDL(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDL, dst, rb, offset16)
+#define SW64_BPF_STB(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STB, src, rb, offset16)
+#define SW64_BPF_STH(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STH, src, rb, offset16)
+#define SW64_BPF_STW(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STW, src, rb, offset16)
+#define SW64_BPF_STL(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STL, src, rb, offset16)
+#define SW64_BPF_LDI(dst, rb, imm16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDI, dst, rb, imm16)
+#define SW64_BPF_LDIH(dst, rb, imm16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDIH, dst, rb, imm16)
+
+/* SW64 lock instructions */
+#define SW64_BPF_LLDW(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDW)
+#define SW64_BPF_LLDL(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDL)
+#define SW64_BPF_LSTW(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTW)
+#define SW64_BPF_LSTL(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTL)
+#define SW64_BPF_RD_F(ra) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \
+			ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_RD_F)
+#define SW64_BPF_WR_F(ra) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \
+			ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_WR_F)
+
+/* SW64 ALU instructions REG format */
+#define SW64_BPF_ADDW_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+			ra, rb, dst, SW64_BPF_FUNC_ALU_ADDW)
+#define SW64_BPF_ADDL_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+			ra, rb, dst, SW64_BPF_FUNC_ALU_ADDL)
+#define SW64_BPF_SUBW_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+			ra, rb, dst, SW64_BPF_FUNC_ALU_SUBW)
+#define SW64_BPF_SUBL_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+			ra, rb, dst, SW64_BPF_FUNC_ALU_SUBL)
+#define SW64_BPF_MULW_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+			ra, rb, dst, SW64_BPF_FUNC_ALU_MULW)
+#define SW64_BPF_MULL_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+			ra, rb, dst, SW64_BPF_FUNC_ALU_MULL)
+#define SW64_BPF_ZAP_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+			ra, rb, dst, SW64_BPF_FUNC_ALU_ZAP)
+#define SW64_BPF_ZAPNOT_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+
ra, rb, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 ALU instructions IMM format */ +#define SW64_BPF_ADDW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDW) +#define SW64_BPF_ADDL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDL) +#define SW64_BPF_SUBW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBW) +#define SW64_BPF_SUBL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBL) +#define SW64_BPF_MULW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULW) +#define SW64_BPF_MULL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULL) +#define SW64_BPF_ZAP_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAP) +#define SW64_BPF_ZAPNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 bit shift instructions REG format */ +#define SW64_BPF_SLL_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 bit shift instructions IMM format */ +#define SW64_BPF_SLL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 control instructions */ +#define SW64_BPF_CALL(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_CALL, ra, rb, 0) +#define SW64_BPF_RET(rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_RET, SW64_BPF_REG_ZR, rb, 0) +#define SW64_BPF_JMP(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_JMP, ra, rb, 0) +#define SW64_BPF_BR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BR, ra, offset) +#define SW64_BPF_BSR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BSR, ra, offset) +#define SW64_BPF_BEQ(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BEQ, ra, offset) +#define SW64_BPF_BNE(ra, offset) \ + 
sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BNE, ra, offset) +#define SW64_BPF_BLT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLT, ra, offset) +#define SW64_BPF_BLE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLE, ra, offset) +#define SW64_BPF_BGT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGT, ra, offset) +#define SW64_BPF_BGE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGE, ra, offset) +#define SW64_BPF_BLBC(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBC, ra, offset) +#define SW64_BPF_BLBS(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBS, ra, offset) + +/* SW64 bit logic instructions REG format */ +#define SW64_BPF_AND_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ORNOT) +#define SW64_BPF_XOR_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_XOR) +#define SW64_BPF_EQV_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_EQV) + +/* SW64 bit logic instructions IMM format */ +#define SW64_BPF_AND_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ORNOT) +#define SW64_BPF_XOR_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_XOR) +#define SW64_BPF_EQV_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_EQV) + +/* SW64 compare instructions REG format */ +#define SW64_BPF_CMPEQ_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPEQ) +#define SW64_BPF_CMPLT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLT) +#define SW64_BPF_CMPLE_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLE) +#define SW64_BPF_CMPULT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULT) +#define SW64_BPF_CMPULE_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULE) + +/* SW64 compare instructions imm format */ +#define SW64_BPF_CMPEQ_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPEQ) +#define SW64_BPF_CMPLT_IMM(ra, imm8, dst) \ + 
sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLT) +#define SW64_BPF_CMPLE_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLE) +#define SW64_BPF_CMPULT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULT) +#define SW64_BPF_CMPULE_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULE) + +#endif /* _SW64_NET_BPF_JIT_H */ diff --git a/arch/sw_64/net/bpf_jit_comp.c b/arch/sw_64/net/bpf_jit_comp.c new file mode 100644 index 000000000000..31202dd0f9cf --- /dev/null +++ b/arch/sw_64/net/bpf_jit_comp.c @@ -0,0 +1,1455 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BPF JIT compiler for SW64 + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * This file is taken from arch/arm64/net/bpf_jit_comp.c + * Copyright (C) 2014-2016 Zi Shen Lim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include + +#include "bpf_jit.h" + +#define TCALL_CNT (MAX_BPF_JIT_REG + 0) + +static const int bpf2sw64[] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = SW64_BPF_REG_V0, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = SW64_BPF_REG_A0, + [BPF_REG_2] = SW64_BPF_REG_A1, + [BPF_REG_3] = SW64_BPF_REG_A2, + [BPF_REG_4] = SW64_BPF_REG_A3, + [BPF_REG_5] = SW64_BPF_REG_A4, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = SW64_BPF_REG_S0, + [BPF_REG_7] = SW64_BPF_REG_S1, + [BPF_REG_8] = SW64_BPF_REG_S2, + [BPF_REG_9] = SW64_BPF_REG_S3, + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = SW64_BPF_REG_FP, + /* tail_call_cnt */ + [TCALL_CNT] = SW64_BPF_REG_S4, + /* temporary register for blinding constants */ + [BPF_REG_AX] = SW64_BPF_REG_T11, +}; + +struct jit_ctx { + const struct bpf_prog *prog; + int idx; // JITed instruction index + int current_tmp_reg; + int epilogue_offset; + int *insn_offset; // [bpf_insn_idx] = jited_insn_idx + int exentry_idx; + u32 *image; // JITed instruction + u32 stack_size; +}; + +struct sw64_jit_data { + struct bpf_binary_header *header; + u8 *image; // bpf instruction + struct jit_ctx ctx; +}; + +static inline u32 sw64_bpf_gen_format_br(int opcode, enum sw64_bpf_registers ra, u32 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + return opcode | ra | (disp & 0x1fffff); +} + +static inline u32 sw64_bpf_gen_format_ls(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + return opcode | ra | rb | (disp & 0xffff); +} + +static inline u32 sw64_bpf_gen_format_ls_func(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp, int function) +{ + opcode = opcode << 
SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + function = function << SW64_BPF_LS_FUNC_OFFSET; + return opcode | ra | rb | function | (disp & 0xfff); +} + +static inline u32 sw64_bpf_gen_format_simple_alu_reg(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | rb | function | rc; +} + +static inline u32 sw64_bpf_gen_format_simple_alu_imm(int opcode, enum sw64_bpf_registers ra, + u32 imm, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + imm = (imm & 0xff) << SW64_BPF_SIMPLE_ALU_IMM_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | imm | function | rc; +} + +static inline void emit(const u32 insn, struct jit_ctx *ctx) +{ + if (ctx->image != NULL) + ctx->image[ctx->idx] = insn; + + ctx->idx++; +} + +static inline int get_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg++; + /* Do not use 22-25. Should be more than enough. */ + if (unlikely(ctx->current_tmp_reg == 8)) { + pr_err("eBPF JIT %s[%d]: not enough temporary registers!\n", + current->comm, current->pid); + return -1; + } + return ctx->current_tmp_reg; +} + +static inline void put_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg--; + if (ctx->current_tmp_reg == 21) + ctx->current_tmp_reg = 7; +} + +static void emit_sw64_ldu32(const int dst, const u32 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= U32_MAX - S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 30) & 3; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 30, dst), ctx); + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_lds32(const int dst, const s32 imm, struct jit_ctx *ctx) +{ + s16 hi = imm >> 16; + s16 lo = imm & 0xffff; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + emit(SW64_BPF_LDIH(dst, SW64_BPF_REG_ZR, hi), ctx); + if (lo & 0x8000) { // sign bit is 1 + lo = lo & 0x7fff; + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } else { // sign bit is 0 + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } + + put_tmp_reg(ctx); 
+} + +static void emit_sw64_ldu64(const int dst, const u64 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= U32_MAX) { + put_tmp_reg(ctx); + return emit_sw64_ldu32(dst, (u32)imm, ctx); + } + + if (imm >= (U64_MAX - S16_MAX) || imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 60) & 0xf; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 60, dst), ctx); + + imm_tmp = (imm >> 45) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 45, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 30) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 30, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +/* Do not change!!! See arch/sw_64/lib/divide.S for more detail */ +#define REG(x) "$"str(x) +#define str(x) #x +#define DIV_RET_ADDR 23 +#define DIVIDEND 24 +#define DIVISOR 25 +#define RESULT 27 + +#include +static void emit_sw64_divmod(const int dst, const int src, struct jit_ctx *ctx, u8 code) +{ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, dst, DIVIDEND), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, DIVISOR), ctx); + switch (BPF_CLASS(code)) { + case BPF_ALU: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divwu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remwu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_ZAP_IMM(RESULT, 0xf0, dst), ctx); + break; + case BPF_ALU64: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divlu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remlu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, RESULT, dst), ctx); + break; + } +} + +#undef REG +#undef str +#undef DIVIDEND +#undef DIVISOR +#undef RESULT + +/* STX XADD: lock *(u32 *)(dst + off) += src */ +static void emit_sw64_xadd32(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDW(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDW_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTW(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + 
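+/*
+ * Both XADD helpers use the same SW64 lock-flag sequence: lldw/lldl
+ * load the word locked, wr_f arms the lock flag, lstw/lstl store
+ * conditionally, and rd_f reads back whether the store succeeded; on
+ * failure the trailing beq branches back to atomic_start and retries.
+ * The conditional nop keeps lstw/lstl on an 8-byte boundary, matching
+ * the extra alignment bytes reserved in bpf_int_jit_compile().
+ */
+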
+/* STX XADD: lock *(u64 *)(dst + off) += src */ +static void emit_sw64_xadd64(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDL(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDL_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTL(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe16(const int dst, struct jit_ctx *ctx) +{ + u8 tmp = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp), ctx); + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SRL_IMM(tmp, 8, tmp), ctx); + emit(SW64_BPF_SLL_IMM(dst, 8, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp, dst), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe32(const int dst, struct jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x8, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x4, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 24, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe64(const int dst, struct jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x80, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 56, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x40, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x20, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x10, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x08, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x04, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x02, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x01, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 56, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + unsigned long c = SW64_BPF_ILLEGAL_INSN; + + c |= c << 32; + __constant_c_memset(area, c, size); +} + +static int offset_to_epilogue(const struct jit_ctx *ctx); +static int 
bpf2sw64_offset(int bpf_idx, s32 off, const struct jit_ctx *ctx) +{ + int from = ctx->insn_offset[bpf_idx + 1]; + int to = ctx->insn_offset[bpf_idx + 1 + off]; + + if (ctx->image == NULL) + return 0; + + return to - from; +} + +static int offset_to_epilogue(const struct jit_ctx *ctx) +{ + if (ctx->image == NULL) + return 0; + + return ctx->epilogue_offset - ctx->idx; +} + +/* For tail call, jump to set up function call stack */ +#define PROLOGUE_OFFSET 11 + +static void build_prologue(struct jit_ctx *ctx, bool was_classic) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Save callee-saved registers */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -64), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_STL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_STL(r6, SW64_BPF_REG_SP, 16), ctx); + emit(SW64_BPF_STL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_STL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_STL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_STL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + + /* Set up BPF prog stack base register */ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_SP, fp), ctx); + if (!was_classic) + /* Initialize tail_call_cnt */ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, tcc), ctx); + + /* Set up function call stack */ + ctx->stack_size = (ctx->prog->aux->stack_depth + 15) & (~15); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -ctx->stack_size), ctx); +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Destroy function call stack */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + + /* Restore callee-saved registers */ + emit(SW64_BPF_LDL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_LDL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_LDL(r6, SW64_BPF_REG_SP, 16), ctx); + emit(SW64_BPF_LDL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_LDL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_LDL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_LDL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_LDL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, 64), ctx); + + /* Return */ + emit(SW64_BPF_RET(SW64_BPF_REG_RA), ctx); +} + +static int emit_bpf_tail_call(struct jit_ctx *ctx) +{ + /* bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) */ + const u8 r2 = bpf2sw64[BPF_REG_2]; /* struct bpf_array *array */ + const u8 r3 = bpf2sw64[BPF_REG_3]; /* u32 index */ + + const u8 tmp = get_tmp_reg(ctx); + const u8 prg = get_tmp_reg(ctx); + const u8 tcc = bpf2sw64[TCALL_CNT]; + u64 offset; + static int out_idx; +#define out_offset (ctx->image ? 
(out_idx - ctx->idx - 1) : 0) + + /* if (index >= array->map.max_entries) + * goto out; + */ + offset = offsetof(struct bpf_array, map.max_entries); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &map.max_entries */ + emit(SW64_BPF_LDW(tmp, tmp, 0), ctx); /* tmp = *tmp = map.max_entries */ + emit(SW64_BPF_ZAP_IMM(tmp, 0xf0, tmp), ctx); /* map.max_entries is u32 */ + emit(SW64_BPF_ZAP_IMM(r3, 0xf0, r3), ctx); /* index is u32 */ + emit(SW64_BPF_CMPULE_REG(tmp, r3, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + * tail_call_cnt++; + */ + emit_sw64_ldu64(tmp, MAX_TAIL_CALL_CNT, ctx); + emit(SW64_BPF_CMPULT_REG(tmp, tcc, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + emit(SW64_BPF_ADDL_IMM(tcc, 1, tcc), ctx); + + /* prog = array->ptrs[index]; + * if (prog == NULL) + * goto out; + */ + offset = offsetof(struct bpf_array, ptrs); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &ptrs[0] */ + emit(SW64_BPF_SLL_IMM(r3, 3, prg), ctx); /* prg = r3 * 8, each entry is a pointer */ + emit(SW64_BPF_ADDL_REG(tmp, prg, prg), ctx); /* prg = tmp + prg = &ptrs[index] */ + emit(SW64_BPF_LDL(prg, prg, 0), ctx); /* prg = *prg = ptrs[index] = prog */ + emit(SW64_BPF_BEQ(prg, out_offset), ctx); + + /* goto *(prog->bpf_func + prologue_offset); */ + offset = offsetof(struct bpf_prog, bpf_func); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(prg, tmp, tmp), ctx); /* tmp = prg + tmp = &bpf_func */ + emit(SW64_BPF_LDL(tmp, tmp, 0), ctx); /* tmp = *tmp = bpf_func */ + emit(SW64_BPF_BEQ(tmp, out_offset), ctx); + emit(SW64_BPF_LDI(tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + emit(SW64_BPF_JMP(SW64_BPF_REG_ZR, tmp), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + + /* out */ + if (ctx->image == NULL) + out_idx = ctx->idx; + if (ctx->image != NULL && out_idx <= 0) + return -1; +#undef out_offset + return 0; +} + +/* For accesses to BTF pointers, add an entry to the exception table */ +static int add_exception_handler(const struct bpf_insn *insn, + struct jit_ctx *ctx, + int dst_reg) +{ + off_t offset; + unsigned long pc; + struct exception_table_entry *ex; + + if (!ctx->image) + /* First pass */ + return 0; + + if (!ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM) + return 0; + + if (WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries)) + return -EINVAL; + + ex = &ctx->prog->aux->extable[ctx->exentry_idx]; + pc = (unsigned long)&ctx->image[ctx->idx - 1]; + + offset = (long)&ex->insn - pc; + ex->insn = offset; + + ex->fixup.bits.nextinsn = sizeof(u32); + ex->fixup.bits.valreg = dst_reg; + ex->fixup.bits.errreg = SW64_BPF_REG_ZR; + + ctx->exentry_idx++; + return 0; +} + +/* JITs an eBPF instruction. + * Returns: + * 0 - successfully JITed an 8-byte eBPF instruction. + * >0 - successfully JITed a 16-byte eBPF instruction. + * <0 - failed to JIT. 
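+ *
+ * The only case that returns >0 today is BPF_LD | BPF_IMM | BPF_DW,
+ * which consumes the next eBPF instruction as the upper 32 bits of
+ * its 64-bit immediate.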
+ */ +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; + u8 dst = bpf2sw64[insn->dst_reg]; + u8 src = bpf2sw64[insn->src_reg]; + const u8 tmp1 __maybe_unused = get_tmp_reg(ctx); + const u8 tmp2 __maybe_unused = get_tmp_reg(ctx); + const s16 off = insn->off; + const s32 imm = insn->imm; + const int bpf_idx = insn - ctx->prog->insnsi; + s32 jmp_offset; + u64 func; + struct bpf_insn insn1; + u64 imm64; + int ret; + + switch (code) { + case BPF_ALU | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + break; + case BPF_ALU | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + emit(SW64_BPF_MULW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(SW64_BPF_MULL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + fallthrough; + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(SW64_BPF_SRL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_NEG: + emit(SW64_BPF_SUBW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_NEG: + emit(SW64_BPF_SUBL_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + break; + case BPF_ALU | BPF_END | BPF_TO_LE: + switch (imm) { + case 16: + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x3, dst), ctx); + break; + case 32: + emit(SW64_BPF_ZAPNOT_IMM(dst, 
0xf, dst), ctx); + break; + case 64: + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_LE unknown size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + case BPF_ALU | BPF_END | BPF_TO_BE: + switch (imm) { + case 16: + emit_sw64_htobe16(dst, ctx); + break; + case 32: + emit_sw64_htobe32(dst, ctx); + break; + case 64: + emit_sw64_htobe64(dst, ctx); + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_BE unknown size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + + case BPF_ALU | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_ldu32(dst, imm, ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_lds32(dst, imm, ctx); + break; + case BPF_ALU | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_ADDW_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_ADDL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_DIV | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_RSH | BPF_K: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + 
} else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU64 | BPF_RSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_ARSH | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + break; + + case BPF_JMP | BPF_JA: + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, src, tmp1), ctx); + src = tmp1; + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | 
BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(src, dst, tmp1), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, src, tmp1), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(src, dst, tmp1), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, src, tmp1), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + emit(SW64_BPF_XOR_IMM(tmp1, 1, tmp1), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, src, tmp1), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, src, tmp1), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, src, tmp1), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp1, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + emit(SW64_BPF_XOR_IMM(tmp2, 1, tmp2), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, tmp1, tmp2), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp2, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP | BPF_CALL: + func = (u64)__bpf_call_base + imm; + if ((func & ~(KERNEL_IMAGE_SIZE - 1)) != __START_KERNEL_map) + /* 
calling bpf program, switch to vmalloc addr */ + func = (func & U32_MAX) | VMALLOC_START; + emit_sw64_ldu64(SW64_BPF_REG_PV, func, ctx); + emit(SW64_BPF_CALL(SW64_BPF_REG_RA, SW64_BPF_REG_PV), ctx); + break; + + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(ctx)) + return -EFAULT; + break; + + case BPF_JMP | BPF_EXIT: + // if this is the last bpf instruction, skip to epilogue + if (bpf_idx == ctx->prog->len - 1) + break; + jmp_offset = offset_to_epilogue(ctx) - 1; + // epilogue is always at the end, must jump forward + if (jmp_offset >= -1 && jmp_offset <= 0xfffff) { + if (ctx->image && !jmp_offset) + // if this is the last jited instruction, generate nop + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + else + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_EXIT out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_LD | BPF_IMM | BPF_DW: + insn1 = insn[1]; + imm64 = ((u64)insn1.imm << 32) | (u32)imm; + emit_sw64_ldu64(dst, imm64, ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 1; + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_LDW(dst, src, off), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_H: + emit(SW64_BPF_LDHU(dst, src, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_LDBU(dst, src, off), ctx); + break; + case BPF_DW: + emit(SW64_BPF_LDL(dst, src, off), ctx); + break; + } + + ret = add_exception_handler(insn, ctx, dst); + if (ret) + return ret; + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + /* Load imm to a register then store it */ + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_STW(tmp1, dst, off), ctx); + break; + case BPF_H: + emit(SW64_BPF_STH(tmp1, dst, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_STB(tmp1, dst, off), ctx); + break; + case BPF_DW: + emit(SW64_BPF_STL(tmp1, dst, off), ctx); + break; + } + break; + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + emit(SW64_BPF_STW(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_H: + emit(SW64_BPF_STH(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_B: + emit(SW64_BPF_STB(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_DW: + emit(SW64_BPF_STL(src, dst, off), ctx); + break; + + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + emit_sw64_xadd32(src, dst, off, ctx); + break; + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + emit_sw64_xadd64(src, dst, off, ctx); + break; + + default: + pr_err("eBPF JIT %s[%d]: unknown opcode 0x%02x\n", + current->comm, current->pid, code); + return -EINVAL; + } + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 0; +} + +static int build_body(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + if (ctx->image == NULL) + 
ctx->insn_offset[i] = ctx->idx; + ret = build_insn(insn, ctx); + if (ret < 0) + return ret; + while (ret > 0) { + i++; + if (ctx->image == NULL) + ctx->insn_offset[i] = ctx->insn_offset[i - 1]; + ret--; + } + } + + return 0; +} + +static int validate_code(struct jit_ctx *ctx) +{ + int i; + + for (i = 0; i < ctx->idx; i++) { + if (ctx->image[i] == SW64_BPF_ILLEGAL_INSN) + return -1; + } + + if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries)) + return -1; + + return 0; +} + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + struct bpf_prog *tmp, *orig_prog = prog; + struct bpf_binary_header *header; + struct sw64_jit_data *jit_data; + bool was_classic = bpf_prog_was_classic(prog); + bool tmp_blinded = false; + bool extra_pass = false; + struct jit_ctx ctx; + int image_size, prog_size, extable_size; + u8 *image_ptr; + + if (!prog->jit_requested) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + /* If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. + */ + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + if (jit_data->ctx.insn_offset) { + ctx = jit_data->ctx; + image_ptr = jit_data->image; + header = jit_data->header; + extra_pass = true; + prog_size = sizeof(u32) * ctx.idx; + goto skip_init_ctx; + } + memset(&ctx, 0, sizeof(ctx)); + ctx.prog = prog; + + ctx.insn_offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL); + if (ctx.insn_offset == NULL) { + prog = orig_prog; + goto out_off; + } + + /* 1. Initial fake pass to compute ctx->idx. */ + + /* Fake pass to fill in ctx->offset. */ + build_prologue(&ctx, was_classic); + + if (build_body(&ctx)) { + prog = orig_prog; + goto out_off; + } + + ctx.insn_offset[prog->len] = ctx.epilogue_offset = ctx.idx; + build_epilogue(&ctx); + + extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); + + /* Now we know the actual image size. */ + /* And we need extra 8 bytes for lock instructions alignment */ + prog_size = sizeof(u32) * ctx.idx + 8; + image_size = prog_size + extable_size; + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + if (header == NULL) { + prog = orig_prog; + goto out_off; + } + + /* 2. Now, the actual pass. */ + + /* lock instructions need 8-byte alignment */ + ctx.image = (u32 *)(((unsigned long)image_ptr + 7) & (~7)); + if (extable_size) + prog->aux->extable = (void *)image_ptr + prog_size; +skip_init_ctx: + ctx.idx = 0; + ctx.exentry_idx = 0; + + build_prologue(&ctx, was_classic); + + if (build_body(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + build_epilogue(&ctx); + + /* 3. Extra pass to validate JITed code. */ + if (validate_code(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + /* And we're done. 
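+	 * Flush the icache over the fresh image and, unless a function
+	 * prog still has an extra pass pending, lock the binary read-only.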
+	 */
+	if (bpf_jit_enable > 1)
+		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
+
+	bpf_flush_icache(header, ctx.image + ctx.idx);
+
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
+	prog->bpf_func = (void *)ctx.image;
+	prog->jited = 1;
+	prog->jited_len = prog_size;
+	if (ctx.current_tmp_reg) {
+		pr_err("eBPF JIT %s[%d]: unreleased temporary registers %d\n",
+				current->comm, current->pid, ctx.current_tmp_reg);
+	}
+
+	if (!prog->is_func || extra_pass) {
+out_off:
+		kfree(ctx.insn_offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
+}
--
Gitee

From 85ca4c491b2c37c30fb3a2a0b2352c70f4364cda Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:36 +0800
Subject: [PATCH 324/953] anolis: sw64: add suspend support

ANBZ: #4688

Add suspend support for SW64.

Signed-off-by: Mao Minkai
Reviewed-by: He Sheng
Signed-off-by: Gu Zitao
Reviewed-by: Gu Mi
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2906
---
 arch/sw_64/include/asm/suspend.h | 50 ++++++++++++++++
 arch/sw_64/kernel/suspend.c      | 57 ++++++++++++++++++
 arch/sw_64/kernel/suspend_asm.S  | 99 ++++++++++++++++++++++++++++++++
 3 files changed, 206 insertions(+)
 create mode 100644 arch/sw_64/include/asm/suspend.h
 create mode 100644 arch/sw_64/kernel/suspend.c
 create mode 100644 arch/sw_64/kernel/suspend_asm.S

diff --git a/arch/sw_64/include/asm/suspend.h b/arch/sw_64/include/asm/suspend.h
new file mode 100644
index 000000000000..833e27f9d5e1
--- /dev/null
+++ b/arch/sw_64/include/asm/suspend.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SUSPEND_H
+#define _ASM_SW64_SUSPEND_H
+
+#include
+#include
+#include
+#define SOFTINF_SLEEP_MAGIC	0x0123456789ABCDEFUL
+
+#ifdef CONFIG_HIBERNATION
+#include
+#include
+#endif
+
+struct callee_saved_regs {
+	unsigned long r9;
+	unsigned long r10;
+	unsigned long r11;
+	unsigned long r12;
+	unsigned long r13;
+	unsigned long r14;
+	unsigned long r15;
+	unsigned long ra;
+};
+
+struct callee_saved_fpregs {
+	unsigned long f2[4];
+	unsigned long f3[4];
+	unsigned long f4[4];
+	unsigned long f5[4];
+	unsigned long f6[4];
+	unsigned long f7[4];
+	unsigned long f8[4];
+	unsigned long f9[4];
+} __aligned(32);	/* 256 bits aligned for simd */
+
+struct processor_state {
+	struct callee_saved_regs regs;
+	struct callee_saved_fpregs fpregs;
+	unsigned long fpcr;
+	unsigned long ktp;
+#ifdef CONFIG_HIBERNATION
+	unsigned long sp;
+	struct vcpucb vcb;
+#endif
+};
+
+extern void sw64_suspend_deep_sleep(struct processor_state *state);
+extern const struct platform_suspend_ops native_suspend_ops;
+#endif /* _ASM_SW64_SUSPEND_H */
diff --git a/arch/sw_64/kernel/suspend.c b/arch/sw_64/kernel/suspend.c
new file mode 100644
index 000000000000..27a240e66149
--- /dev/null
+++ b/arch/sw_64/kernel/suspend.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+
+#include
+#include
+
+struct processor_state suspend_state;
+
+static int native_suspend_state_valid(suspend_state_t pm_state)
+{
+	switch (pm_state) {
+	case PM_SUSPEND_ON:
+	case PM_SUSPEND_STANDBY:
+	case PM_SUSPEND_MEM:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+void disable_local_timer(void)
+{
+	wrtimer(0);
+}
+
+extern struct pci_controller *hose_head;
+
+/*
+ * Boot Core will enter suspend state here.
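+ * The boot core saves its thread pointer, drops into deep sleep via
+ * sw64_suspend_deep_sleep(), and resumes here after wakeup to restore
+ * the thread pointer and disable the local timer again.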
+ */ +void sw64_suspend_enter(void) +{ + /* boot processor will go to deep sleep mode from here + * After wake up boot processor, pc will go here + */ + disable_local_timer(); + current_thread_info()->pcb.tp = rtid(); + + sw64_suspend_deep_sleep(&suspend_state); + wrtp(current_thread_info()->pcb.tp); + + disable_local_timer(); +} + +static int native_suspend_enter(suspend_state_t state) +{ + if (is_in_guest()) + return 0; + /* processor specific suspend */ + sw64_suspend_enter(); + return 0; +} + +const struct platform_suspend_ops native_suspend_ops = { + .valid = native_suspend_state_valid, + .enter = native_suspend_enter, +}; diff --git a/arch/sw_64/kernel/suspend_asm.S b/arch/sw_64/kernel/suspend_asm.S new file mode 100644 index 000000000000..34ee349515a7 --- /dev/null +++ b/arch/sw_64/kernel/suspend_asm.S @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(sw64_suspend_deep_sleep) + /* a0 $16 will be the address of suspend_state */ + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + stl $8, PSTATE_KTP($16) + + /* save the address of suspend_state to $18 */ + mov $16, $18 + + /* + * Now will Go to Deep Sleep + * HMcode should save pc, gp, ps, r16, r17, r18 + */ + + sys_call HMC_sleepen + sys_call HMC_whami + bis $0, $0, $16 + ldi $17, 0x2($31) + sys_call HMC_sendii + + /* wait for a while to receive interrupt */ + ldi $16, 0x1($31) + sll $16, 24, $16 +$subloop: + subl $16, 1, $16 + bis $16, $16, $16 + bis $16, $16, $16 + bne $16, $subloop + + + ldl $8, PSTATE_KTP($18) + ldi $1, PSTATE_REGS($18) + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($18) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $suspend_setfpec_0 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_1 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_2 + setfpec3 + br $suspend_setfpec_over +$suspend_setfpec_0: + setfpec0 + br $suspend_setfpec_over +$suspend_setfpec_1: + setfpec1 + br $suspend_setfpec_over +$suspend_setfpec_2: + setfpec2 +$suspend_setfpec_over: + ldi $1, PSTATE_FPREGS($18) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + ret +END(sw64_suspend_deep_sleep) -- Gitee From 9c8b6c79cbe0ddb46c3c8797923c80087e0d3981 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:13 +0800 Subject: [PATCH 325/953] anolis: sw64: add hibernation support ANBZ: #4688 Add hibernation support for SW64. 
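swsusp_arch_suspend() saves the callee-saved and SIMD-FP registers
along with FPCR and the kernel thread pointer;
save_processor_state() and restore_processor_state() handle the
vcpucb fields (ksp, usp, soft_tid, ptbr); restore_image() walks
restore_pblist to copy every saved page back to its original
location before that state is reloaded.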
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/hibernate.c | 79 +++++++++++++++++++ arch/sw_64/kernel/hibernate_asm.S | 122 ++++++++++++++++++++++++++++++ arch/sw_64/kernel/pm.c | 18 +++++ 3 files changed, 219 insertions(+) create mode 100644 arch/sw_64/kernel/hibernate.c create mode 100644 arch/sw_64/kernel/hibernate_asm.S create mode 100644 arch/sw_64/kernel/pm.c diff --git a/arch/sw_64/kernel/hibernate.c b/arch/sw_64/kernel/hibernate.c new file mode 100644 index 000000000000..644ea8504313 --- /dev/null +++ b/arch/sw_64/kernel/hibernate.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +struct processor_state hibernate_state; +/* Defined in hibernate_asm.S */ +extern int restore_image(void); + +void save_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + vcb->ksp = rdksp(); + vcb->usp = rdusp(); + vcb->soft_tid = rtid(); + vcb->ptbr = rdptbr(); +} + +void restore_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + wrksp(vcb->ksp); + wrusp(vcb->usp); + wrtp(vcb->soft_tid); + wrptbr(vcb->ptbr); + sflush(); + tbiv(); +} + +int swsusp_arch_resume(void) +{ + restore_image(); + return 0; +} +/* References to section boundaries */ +extern const void __nosave_begin, __nosave_end; +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); + + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} + +struct restore_data_record { + unsigned long magic; +}; + +#define RESTORE_MAGIC 0x0123456789ABCDEFUL + +/** + * arch_hibernation_header_save - populate the architecture specific part + * of a hibernation image header + * @addr: address to save the data at + */ +int arch_hibernation_header_save(void *addr, unsigned int max_size) +{ + struct restore_data_record *rdr = addr; + + if (max_size < sizeof(struct restore_data_record)) + return -EOVERFLOW; + rdr->magic = RESTORE_MAGIC; + return 0; +} + +/** + * arch_hibernation_header_restore - read the architecture specific data + * from the hibernation image header + * @addr: address to read the data from + */ +int arch_hibernation_header_restore(void *addr) +{ + struct restore_data_record *rdr = addr; + + return (rdr->magic == RESTORE_MAGIC) ? 
0 : -EINVAL; +} diff --git a/arch/sw_64/kernel/hibernate_asm.S b/arch/sw_64/kernel/hibernate_asm.S new file mode 100644 index 000000000000..ff997cd76c5a --- /dev/null +++ b/arch/sw_64/kernel/hibernate_asm.S @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(swsusp_arch_suspend) + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + + stl $8, PSTATE_KTP($16) + stl sp, PSTATE_SP($16) + call swsusp_save + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + ldl $26, CALLEE_RA($1) + + /* save current_thread_info()->pcbb */ + ret +END(swsusp_arch_suspend) + +ENTRY(restore_image) + /* prepare to copy image data to their original locations */ + ldi t0, restore_pblist + ldl t0, 0(t0) +$loop: + beq t0, $done + + /* get addresses from the pbe and copy the page */ + ldl t1, PBE_ADDR(t0) /* source */ + ldl t2, PBE_ORIG_ADDR(t0) /* destination */ + ldi t3, PAGE_SIZE + addl t1, t3, t3 +$cpyloop: + ldl t8, 0(t1) + stl t8, 0(t2) + addl t1, 8, t1 + addl t2, 8, t2 + cmpeq t1, t3, t4 + beq t4, $cpyloop + + /* progress to the next pbe */ + ldl t0, PBE_NEXT(t0) + bne t0, $loop +$done: + + /* tell the hibernation core that we've just restored the memory */ + ldi $0, in_suspend + stl $31, 0($0) + + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($16) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $hibernate_setfpec_0 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_1 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_2 + setfpec3 + br $hibernate_setfpec_over +$hibernate_setfpec_0: + setfpec0 + br $hibernate_setfpec_over +$hibernate_setfpec_1: + setfpec1 + br $hibernate_setfpec_over +$hibernate_setfpec_2: + setfpec2 +$hibernate_setfpec_over: + ldi $1, PSTATE_FPREGS($16) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + + ldl sp, PSTATE_SP($16) + ldl $8, PSTATE_KTP($16) + sys_call HMC_wrktp + + ldi $0, 0($31) + + ret +END(restore_image) diff --git a/arch/sw_64/kernel/pm.c b/arch/sw_64/kernel/pm.c new file mode 100644 index 000000000000..f0a35e5d0486 --- /dev/null +++ b/arch/sw_64/kernel/pm.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +struct syscore_ops io_syscore_ops; + +static int __init sw64_pm_init(void) +{ +#ifdef CONFIG_SUSPEND + suspend_set_ops(&native_suspend_ops); +#endif + register_syscore_ops(&io_syscore_ops); + + return 0; +} +device_initcall(sw64_pm_init); -- Gitee From 9692fe64de95949e11a522c900c4daf5cb9b821e Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:09 +0800 Subject: [PATCH 326/953] anolis: sw64: add ftrace support ANBZ: #4688 Add ftrace support for 
SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/ftrace.h | 44 ++++ arch/sw_64/include/asm/livepatch.h | 22 ++ arch/sw_64/kernel/entry-ftrace.S | 326 +++++++++++++++++++++++++++++ arch/sw_64/kernel/ftrace.c | 176 ++++++++++++++++ 4 files changed, 568 insertions(+) create mode 100644 arch/sw_64/include/asm/ftrace.h create mode 100644 arch/sw_64/include/asm/livepatch.h create mode 100644 arch/sw_64/kernel/entry-ftrace.S create mode 100644 arch/sw_64/kernel/ftrace.c diff --git a/arch/sw_64/include/asm/ftrace.h b/arch/sw_64/include/asm/ftrace.h new file mode 100644 index 000000000000..7ed6e3c06a33 --- /dev/null +++ b/arch/sw_64/include/asm/ftrace.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/ftrace.h + * + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_SW64_FTRACE_H +#define _ASM_SW64_FTRACE_H + +#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_INSN_SIZE 20 /* 5 * SW64_INSN_SIZE */ +#define MCOUNT_LDGP_SIZE 8 /* 2 * SW64_INSN_SIZE */ + +#define ARCH_SUPPORTS_FTRACE_OPS 1 + +#ifndef __ASSEMBLY__ +#include +#include + + +extern void _mcount(unsigned long); + +struct dyn_arch_ftrace { + /* No extra data needed for sw64 */ +}; + +extern unsigned long ftrace_graph_call; + + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * addr is the address of the mcount call instruction. + * recordmcount does the necessary offset calculation. + */ + return addr; +} + +#endif /* ifndef __ASSEMBLY__ */ +#endif /* _ASM_SW64_FTRACE_H */ diff --git a/arch/sw_64/include/asm/livepatch.h b/arch/sw_64/include/asm/livepatch.h new file mode 100644 index 000000000000..1feec0f6be76 --- /dev/null +++ b/arch/sw_64/include/asm/livepatch.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * livepatch.h - sw64-specific Kernel Live Patching Core + */ + +#ifndef _ASM_SW64_LIVEPATCH_H +#define _ASM_SW64_LIVEPATCH_H + +#include + +static inline int klp_check_compiler_support(void) +{ + return 0; +} + +static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->regs[27] = ip; + regs->regs[28] = ip; +} + +#endif /* _ASM_SW64_LIVEPATCH_H */ diff --git a/arch/sw_64/kernel/entry-ftrace.S b/arch/sw_64/kernel/entry-ftrace.S new file mode 100644 index 000000000000..73e8e043fc9d --- /dev/null +++ b/arch/sw_64/kernel/entry-ftrace.S @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/kernel/entry-ftrace.S + * + * Author: linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ *
+ */
+#include
+#include
+#include
+
+	.text
+	.set noat
+	.align 4
+
+#define FTRACE_SP_OFF	0x50
+	.macro mcount_enter
+	subl	$sp, FTRACE_SP_OFF, $sp
+	stl	$16, 0($sp)
+	stl	$17, 0x8($sp)
+	stl	$18, 0x10($sp)
+	stl	$26, 0x18($sp)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	stl	$9, 0x20($sp)
+#endif
+	stl	$28, 0x28($sp)
+	stl	$29, 0x30($sp)
+	stl	$19, 0x38($sp)
+	stl	$20, 0x40($sp)
+	stl	$21, 0x48($sp)
+	.endm
+
+	.macro mcount_end
+	ldl	$16, 0($sp)
+	ldl	$17, 0x8($sp)
+	ldl	$18, 0x10($sp)
+	ldl	$26, 0x18($sp)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ldl	$9, 0x20($sp)
+#endif
+	ldl	$28, 0x28($sp)
+	ldl	$29, 0x30($sp)
+	ldl	$19, 0x38($sp)
+	ldl	$20, 0x40($sp)
+	ldl	$21, 0x48($sp)
+	addl	$sp, FTRACE_SP_OFF, $sp
+	.endm
+
+	.macro RESTORE_GRAPH_ARGS
+	ldi	$16, 0x18($sp)		/* &ra */
+	bis	$31, $9, $17		/* pc */
+	#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	bis	$31, $15, $18		/* fp */
+	#endif
+	.endm
+
+	.macro SAVE_PT_REGS
+	ldi	$sp, -PT_REGS_SIZE($sp)
+	stl	$0, PT_REGS_R0($sp)
+	stl	$1, PT_REGS_R1($sp)
+	stl	$2, PT_REGS_R2($sp)
+	stl	$3, PT_REGS_R3($sp)
+	stl	$4, PT_REGS_R4($sp)
+	stl	$5, PT_REGS_R5($sp)
+	stl	$6, PT_REGS_R6($sp)
+	stl	$7, PT_REGS_R7($sp)
+	stl	$8, PT_REGS_R8($sp)
+	stl	$9, PT_REGS_R9($sp)
+	stl	$10, PT_REGS_R10($sp)
+	stl	$11, PT_REGS_R11($sp)
+	stl	$12, PT_REGS_R12($sp)
+	stl	$13, PT_REGS_R13($sp)
+	stl	$14, PT_REGS_R14($sp)
+	stl	$15, PT_REGS_R15($sp)
+	stl	$16, PT_REGS_R16($sp)
+	stl	$17, PT_REGS_R17($sp)
+	stl	$18, PT_REGS_R18($sp)
+	stl	$19, PT_REGS_R19($sp)
+	stl	$20, PT_REGS_R20($sp)
+	stl	$21, PT_REGS_R21($sp)
+	stl	$22, PT_REGS_R22($sp)
+	stl	$23, PT_REGS_R23($sp)
+	stl	$24, PT_REGS_R24($sp)
+	stl	$25, PT_REGS_R25($sp)
+	stl	$26, PT_REGS_R26($sp)
+	stl	$27, PT_REGS_R27($sp)
+	stl	$28, PT_REGS_R28($sp)
+	stl	$29, PT_REGS_GP($sp)
+	ldi	$0, PT_REGS_SIZE($sp)
+	stl	$0, PT_REGS_SP($sp)
+	.endm
+
+	.macro RESTORE_PT_REGS
+	ldl	$0, PT_REGS_R0($sp)
+	ldl	$1, PT_REGS_R1($sp)
+	ldl	$2, PT_REGS_R2($sp)
+	ldl	$3, PT_REGS_R3($sp)
+	ldl	$4, PT_REGS_R4($sp)
+	ldl	$5, PT_REGS_R5($sp)
+	ldl	$6, PT_REGS_R6($sp)
+	ldl	$7, PT_REGS_R7($sp)
+	ldl	$8, PT_REGS_R8($sp)
+	ldl	$9, PT_REGS_R9($sp)
+	ldl	$10, PT_REGS_R10($sp)
+	ldl	$11, PT_REGS_R11($sp)
+	ldl	$12, PT_REGS_R12($sp)
+	ldl	$13, PT_REGS_R13($sp)
+	ldl	$14, PT_REGS_R14($sp)
+	ldl	$15, PT_REGS_R15($sp)
+	ldl	$16, PT_REGS_R16($sp)
+	ldl	$17, PT_REGS_R17($sp)
+	ldl	$18, PT_REGS_R18($sp)
+	ldl	$19, PT_REGS_R19($sp)
+	ldl	$20, PT_REGS_R20($sp)
+	ldl	$21, PT_REGS_R21($sp)
+	ldl	$22, PT_REGS_R22($sp)
+	ldl	$23, PT_REGS_R23($sp)
+	ldl	$24, PT_REGS_R24($sp)
+	ldl	$25, PT_REGS_R25($sp)
+	ldl	$26, PT_REGS_R26($sp)
+	ldl	$27, PT_REGS_R27($sp)
+	ldl	$28, PT_REGS_R28($sp)
+	ldl	$29, PT_REGS_GP($sp)
+	ldi	$sp, PT_REGS_SIZE($sp)
+	.endm
+
+	.macro RESTORE_GRAPH_REG_ARGS
+	ldi	$16, PT_REGS_R26($sp)
+	bis	$31, $9, $17
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	bis	$31, $15, $18
+#endif
+	.endm
+
+	/* save return value regs */
+	.macro save_return_regs
+	subl	$sp, 0x8, $sp
+	stl	$0, 0x0($sp)
+	.endm
+
+	/* restore return value regs */
+	.macro restore_return_regs
+	ldl	$0, 0x0($sp)
+	addl	$sp, 0x8, $sp
+	.endm
+
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * void ftrace_graph_caller(void)
+ *
+ * Called from ftrace_caller() or ftrace_regs_caller() when the
+ * function_graph tracer is selected.
+ * It calls prepare_ftrace_return(), which fakes ra's value on the call
+ * stack in order to intercept the instrumented function's return path
+ * and run return_to_handler() later on its exit.
+ */
+
+ENTRY(ftrace_graph_caller)
+	ldgp	$29, 0($27)
+	ldi	$sp, -16($sp)
+	stl	$26, 0($sp)
+	stl	$15, 8($sp)
+	bis	$31, $sp, $15
+
+	ldi	$27, prepare_ftrace_return
+ftrace_graph_call:
+	.global ftrace_graph_call
+	/*
+	 * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite
+	 * the nop below.
+	 */
+	nop	/* nop, or call prepare_ftrace_return() */
+
+	ldl	$26, 0($sp)
+	ldl	$15, 8($sp)
+	ldi	$sp, 16($sp)
+	ret	$31, ($26), 1
+ENDPROC(ftrace_graph_caller)
+
+/*
+ * void return_to_handler(void)
+ *
+ * Run ftrace_return_to_handler() before going back to parent.
+ * @fp is checked against the value passed by ftrace_graph_caller()
+ * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
+ *
+ * It is run by the "ret" instruction, which does not modify $27, so it
+ * has to recalculate $27 before ldgp.
+ */
+ENTRY(return_to_handler)
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+	save_return_regs
+	bis	$31, $15, $16		/* parent's fp */
+	ldi	$27, ftrace_return_to_handler
+	call	$26, ($27)
+	bis	$31, $0, $26
+	restore_return_regs
+	ret	$31, ($26), 1
+END(return_to_handler)
+
+#endif
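Editor's note: in C terms, the two entry points above implement the usual ftrace return-hook scheme. The sketch below restates the runtime behaviour; it is illustrative only (example_return_path is hypothetical), and ftrace_return_to_handler() is the generic kernel helper the assembly calls:

	/* On function entry, prepare_ftrace_return() swapped the traced
	 * function's saved ra with return_to_handler; on exit, the real
	 * return address is popped from the graph tracer's per-task
	 * return stack. */
	unsigned long example_return_path(unsigned long fp)
	{
		return ftrace_return_to_handler(fp);	/* real return address */
	}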
+#ifdef CONFIG_DYNAMIC_FTRACE
+	.global _mcount
+	.ent _mcount
+_mcount:
+	ret	$31, ($28), 1
+	.end _mcount
+
+
+	.global ftrace_caller
+	.ent ftrace_caller
+ftrace_caller:
+	mcount_enter
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+
+	subl	$28, MCOUNT_INSN_SIZE, $16
+	bis	$26, $31, $17
+	ldl	$18, function_trace_op
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/*
+	 * the graph tracer (specifically, prepare_ftrace_return) needs these
+	 * arguments but for now the function tracer occupies the regs, so we
+	 * save them in callee-saved regs to recover later.
+	 */
+	bis	$31, $16, $9
+#endif
+	ldi	$4, current_tracer
+	ldl	$27, 0($4)
+
+	.global ftrace_call
+ftrace_call:	/* tracer(pc, ra); */
+	nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	RESTORE_GRAPH_ARGS
+	call	ftrace_graph_caller
+#endif
+	mcount_end
+	ret	$31, ($28), 1
+	.end ftrace_caller
+#else /* !CONFIG_DYNAMIC_FTRACE */
+
+	.global _mcount
+	.ent _mcount
+_mcount:
+	mcount_enter
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+
+	ldl	$27, ftrace_trace_function	// if (ftrace_trace_function
+	ldi	$5, ftrace_stub			//     != ftrace_stub)
+	cmpeq	$27, $5, $6			//
+	bne	$6, skip_ftrace
+
+	subl	$28, MCOUNT_INSN_SIZE, $16	// function's pc
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bis	$31, $16, $9
+#endif
+	bis	$26, $31, $17			// function's ra (parent's pc)
+	call	$26, ($27)			// (*ftrace_trace_function)(pc, ra);
+
+skip_ftrace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ldl	$4, ftrace_graph_return		// if ((ftrace_graph_return
+	cmpeq	$4, $5, $6			//      != ftrace_stub)
+	beq	$6, 2f
+	ldl	$4, ftrace_graph_entry		//     || (ftrace_graph_entry
+	ldi	$5, ftrace_graph_entry_stub	//         != ftrace_graph_entry_stub))
+	cmpeq	$4, $5, $6
+	bne	$6, 3f
+2:	RESTORE_GRAPH_ARGS
+	call	ftrace_graph_caller		// ftrace_graph_caller();
+#endif
+3:	mcount_end
+	ret	$31, ($28), 1
+	.end _mcount
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	.global ftrace_regs_caller
+	.ent ftrace_regs_caller
+ftrace_regs_caller:
+	SAVE_PT_REGS
+	br	$27, 1f
+1:	ldgp	$29, 0($27)
+
+	subl	$28, MCOUNT_INSN_SIZE, $16
+	bis	$26, $31, $17
+	ldi	$4, function_trace_op
+	ldl	$18, 0($4)
+	mov	$sp, $19
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bis	$31, $16, $9
+#endif
+	ldi	$4, current_tracer
+	ldl	$27, 0($4)
+
+	.global ftrace_regs_call
+ftrace_regs_call:
+	nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	RESTORE_GRAPH_REG_ARGS
+	call	ftrace_graph_caller
+#endif
+	RESTORE_PT_REGS
+	ret	$31, ($28), 1
+	.end ftrace_regs_caller
+#endif /* 
CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + + .global ftrace_stub + .ent ftrace_stub +ftrace_stub: + ret $31, ($26), 1 + .end ftrace_stub diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c new file mode 100644 index 000000000000..fb25ffe3dbda --- /dev/null +++ b/arch/sw_64/kernel/ftrace.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/ftrace.c + * + * Copyright (C) 2019 os kernel team + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(_mcount); +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE + +#define TI_FTRACE_ADDR (offsetof(struct thread_info, dyn_ftrace_addr)) +#define TI_FTRACE_REGS_ADDR \ + (offsetof(struct thread_info, dyn_ftrace_regs_addr)) + +unsigned long current_tracer = (unsigned long)ftrace_stub; + +/* + * Replace a single instruction, which may be a branch or NOP. + */ +static int ftrace_modify_code(unsigned long pc, u32 new) +{ + if (sw64_insn_write((void *)pc, new)) + return -EPERM; + return 0; +} + +/* + * Replace tracer function in ftrace_caller() + */ +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long pc; + u32 new; + int ret; + + current_tracer = (unsigned long)func; + pc = (unsigned long)&ftrace_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + + if (!ret) { + pc = (unsigned long)&ftrace_regs_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + } + + return ret; +} + +/* + * Turn on the call to ftrace_caller() in instrumented function + */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int insn[3]; + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned long offset; + + if (addr == FTRACE_ADDR) + offset = TI_FTRACE_ADDR; + else + offset = TI_FTRACE_REGS_ADDR; + + insn[0] = SW64_NOP; + /* ldl r28,(ftrace_addr_offset)(r8) */ + insn[1] = (0x23U << 26) | (28U << 21) | (8U << 16) | offset; + insn[2] = SW64_CALL(R28, R28, 0); + + /* replace the 3 mcount instructions at once */ + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +/* + * Turn off the call to ftrace_caller() in instrumented function + */ +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned int insn[3] = {SW64_NOP, SW64_NOP, SW64_NOP}; + + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +void arch_ftrace_update_code(int command) +{ + ftrace_modify_all_code(command); +} + +int __init ftrace_dyn_arch_init(void) +{ + struct thread_info *ti = task_thread_info(&init_task); + + ti->dyn_ftrace_addr = FTRACE_ADDR; + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + ti->dyn_ftrace_regs_addr = FTRACE_REGS_ADDR; +#endif + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return 0; +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * function_graph tracer expects ftrace_return_to_handler() to be called + * on the way back to parent. For this purpose, this function is called + * in _mcount() or ftrace_caller() to replace return address (*parent) on + * the call stack to return_to_handler. + * + * Note that @frame_pointer is used only for sanity check later. 
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+	unsigned long old;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	/*
+	 * Note:
+	 * No protection against faulting at *parent, which may be seen
+	 * on other archs. It's unlikely on SW64.
+	 */
+	old = *parent;
+
+	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+		*parent = return_hooker;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+	unsigned long pc = (unsigned long)&ftrace_graph_call;
+	u32 new = SW64_NOP;
+
+	if (enable)
+		new = SW64_CALL(R26, R27, 0);
+	return ftrace_modify_code(pc, new);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
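Editor's note: to recap the patching scheme of this ftrace port, each compiled-in call site is the two-word ldgp sequence (MCOUNT_LDGP_SIZE) followed by three patchable words (together MCOUNT_INSN_SIZE). The arrays below restate the two states that ftrace_make_nop() and ftrace_make_call() toggle between; this is an editor's illustration built only from constants in the patch, not additional kernel code:

	/* disarmed site: three NOPs after the ldgp pair */
	u32 site_off[3] = { SW64_NOP, SW64_NOP, SW64_NOP };

	/* armed site: load the handler address from thread_info, then call it */
	u32 site_on[3] = {
		SW64_NOP,
		(0x23U << 26) | (28U << 21) | (8U << 16) | TI_FTRACE_ADDR,	/* ldl $28, off($8) */
		SW64_CALL(R28, R28, 0),						/* call ($28) */
	};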
-- 
Gitee

From 0858b4d4f715e12271864bb40a76839c18e35437 Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:32 +0800
Subject: [PATCH 327/953] anolis: sw64: add kernel relocation support

ANBZ: #4688

Add kernel relocation support for SW64.

Signed-off-by: Mao Minkai
Reviewed-by: He Sheng
Signed-off-by: Gu Zitao
Reviewed-by: Gu Mi
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2906
---
 arch/sw_64/kernel/relocate.c        | 284 +++++++++++++
 arch/sw_64/kernel/relocate_kernel.S | 176 ++++++++
 arch/sw_64/tools/.gitignore         |   2 +
 arch/sw_64/tools/Makefile           |   8 +
 arch/sw_64/tools/relocs.c           | 635 ++++++++++++++++++++++++++++
 arch/sw_64/tools/relocs.h           |  72 ++++
 arch/sw_64/tools/relocs_main.c      |  86 ++++
 7 files changed, 1263 insertions(+)
 create mode 100644 arch/sw_64/kernel/relocate.c
 create mode 100644 arch/sw_64/kernel/relocate_kernel.S
 create mode 100644 arch/sw_64/tools/.gitignore
 create mode 100644 arch/sw_64/tools/Makefile
 create mode 100644 arch/sw_64/tools/relocs.c
 create mode 100644 arch/sw_64/tools/relocs.h
 create mode 100644 arch/sw_64/tools/relocs_main.c

diff --git a/arch/sw_64/kernel/relocate.c b/arch/sw_64/kernel/relocate.c
new file mode 100644
index 000000000000..ebdf7d894805
--- /dev/null
+++ b/arch/sw_64/kernel/relocate.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for kernel relocation at boot time.
+ *
+ * Based on arch/mips/kernel/relocate.c
+ *
+ * Copyright (C) 2019 He Sheng
+ * Authors: He Sheng (hesheng05@gmail.com)
+ */
+#include
+#include
+#include
+
+#include
+
+#define KTEXT_MAX	0xffffffffa0000000UL
+#define RELOCATED(x)	((void *)((unsigned long)x + offset))
+
+extern unsigned long _got_start[];
+extern unsigned long _got_end[];
+extern char pre_start_kernel[];
+
+extern unsigned int _relocation_start[];	/* End kernel image / start relocation table */
+extern unsigned int _relocation_end[];		/* End relocation table */
+
+extern unsigned long __start___ex_table;	/* Start exception table */
+extern unsigned long __stop___ex_table;		/* End exception table */
+extern union thread_union init_thread_union;
+
+/*
+ * This function may be defined for a platform to perform any post-relocation
+ * fixup necessary.
+ * Return non-zero to abort relocation.
+ */
+int __weak plat_post_relocation(long offset)
+{
+	return 0;
+}
+
+static int __init apply_r_sw64_refquad(unsigned long *loc_orig, unsigned long *loc_new, unsigned int offset)
+{
+	*(unsigned long *)loc_new += offset;
+
+	return 0;
+}
+
+static int (*reloc_handlers_rel[]) (unsigned long *, unsigned long *, unsigned int) __initdata = {
+	[R_SW64_REFQUAD] = apply_r_sw64_refquad,
+};
+
+int __init do_relocations(void *kbase_old, void *kbase_new, unsigned int offset)
+{
+	unsigned int *r;
+	unsigned long *loc_orig;
+	unsigned long *loc_new;
+	int type;
+	int res;
+
+	for (r = _relocation_start; r < _relocation_end; r++) {
+		/* Sentinel for last relocation */
+		if (*r == 0)
+			break;
+
+		type = (*r >> 24) & 0xff;
+		loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
+		loc_new = RELOCATED(loc_orig);
+
+		if (reloc_handlers_rel[type] == NULL) {
+			/* Unsupported relocation */
+			pr_err("Unhandled relocation type %d at 0x%pK\n",
+			       type, loc_orig);
+			return -ENOEXEC;
+		}
+
+		res = reloc_handlers_rel[type](loc_orig, loc_new, offset);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
+static int __init relocate_got(unsigned int offset)
+{
+	unsigned long *got_start, *got_end, *e;
+
+	got_start = RELOCATED(&_got_start);
+	got_end = RELOCATED(&_got_end);
+
+	for (e = got_start; e < got_end; e++)
+		*e += offset;
+
+	return 0;
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+
+static inline __init unsigned long rotate_xor(unsigned long hash,
+					      const void *area, size_t size)
+{
+	size_t i;
+	unsigned long start, *ptr;
+
+	/* Make sure start is 8 byte aligned */
+	start = ALIGN((unsigned long)area, 8);
+	size -= (start - (unsigned long)area);
+	ptr = (unsigned long *) start;
+
+	for (i = 0; i < size / sizeof(hash); i++) {
+		/* Rotate by odd number of bits and XOR. */
+		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+		hash ^= ptr[i];
+	}
+
+	return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+	unsigned long entropy = random_get_entropy();
+	unsigned long hash = 0;
+
+	/* Attempt to create a simple but unpredictable starting entropy. */
+	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+	/* Add in any runtime entropy we can get */
+	hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+	return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+	char *str;
+
+	str = strstr(COMMAND_LINE, "nokaslr");
+	if (str == COMMAND_LINE || (str > COMMAND_LINE && *(str - 1) == ' '))
+		return true;
+
+	return false;
+}
+
+static unsigned long __init determine_relocation_offset(void)
+{
+	/* Choose a new address for the kernel */
+	unsigned long kernel_length;
+	unsigned long offset;
+
+	if (kaslr_disabled())
+		return 0;
+
+	kernel_length = (unsigned long)_end - (unsigned long)(&_text);
+
+	/* TODO: the offset is 64KB-aligned; maybe 8KB alignment would be okay. */
+	offset = get_random_boot() << 16;
+	offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+	if (offset < kernel_length)
+		offset += ALIGN(kernel_length, 0x10000);
+
+	/*
+	 * TODO: the new location should not overlap initrd, dtb, acpi
+	 * tables, etc.
+	 */
+
+	if ((KTEXT_MAX - (unsigned long)_end) < offset)
+		offset = 0;
+
+	return offset;
+}
+
+#else
+
+static inline unsigned long __init determine_relocation_offset(void)
+{
+	/*
+	 * Choose a new address for the kernel.
+	 * For now we'll hard code the destination offset.
+	 */
+	return 0;
+}
+
+#endif
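Editor's note: a worked illustration of the KASLR offset calculation above, using a hypothetical hash value and assuming CONFIG_RANDOMIZE_BASE_MAX_OFFSET is 0x10000000 (256 MiB); example_offset is not part of the patch:

	/* e.g. hash = 0x5a3c: offset = (0x5a3c << 16) & 0x0fffffff
	 *                            = 0x0a3c0000 (64 KiB granularity);
	 * if that lands inside the image, push it past the 64 KiB-aligned
	 * end of the kernel. */
	unsigned long example_offset(unsigned long hash, unsigned long kernel_len)
	{
		unsigned long off = (hash << 16) & (0x10000000UL - 1);

		if (off < kernel_len)
			off += ALIGN(kernel_len, 0x10000);
		return off;
	}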
+static inline int __init relocation_offset_valid(unsigned long offset)
+{
+	unsigned long loc_new = (unsigned long)_text + offset;
+
+	if (loc_new & 0x0000ffff) {
+		/* Inappropriately aligned new location */
+		return 0;
+	}
+	if (loc_new < (unsigned long)&_end) {
+		/* New location overlaps original kernel */
+		return 0;
+	}
+	return 1;
+}
+
+unsigned int __init relocate_kernel(void)
+{
+	void *loc_new;
+	unsigned long kernel_length;
+	unsigned long bss_length;
+	unsigned int offset = 0;
+	int res = 1;
+
+	kernel_length = (unsigned long)(&_relocation_start) - (long)(&_text);
+	bss_length = (unsigned long)&__bss_stop - (long)&__bss_start;
+
+	offset = determine_relocation_offset();
+	/* Reset the command line now so we don't end up with a duplicate */
+
+	/* Sanity check relocation address */
+	if (offset && relocation_offset_valid(offset)) {
+
+		loc_new = RELOCATED(&_text);
+		/* Copy the kernel to its new location */
+		memcpy(loc_new, &_text, kernel_length);
+
+		/* Perform relocations on the new kernel */
+		res = do_relocations(&_text, loc_new, offset);
+		if (res < 0)
+			goto out;
+
+		res = relocate_got(offset);
+		if (res < 0)
+			goto out;
+
+		/*
+		 * The original .bss has already been cleared, and
+		 * some variables such as command line parameters
+		 * stored to it so make a copy in the new location.
+		 */
+		memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
+
+		/*
+		 * Last chance for the platform to abort relocation.
+		 * This may also be used by the platform to perform any
+		 * initialisation required now that the new kernel is
+		 * resident in memory and ready to be executed.
+		 */
+		if (plat_post_relocation(offset))
+			goto out;
+
+		/* Return the new kernel's offset */
+		return offset;
+	}
+out:
+	return 0;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+void show_kernel_relocation(const char *level)
+{
+	unsigned long offset;
+
+	offset = __pa_symbol(_text) - __pa_symbol(_TEXT_START);
+
+	if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+		printk(level);
+		pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+		pr_cont(" .text @ 0x%pK\n", _text);
+		pr_cont(" .data @ 0x%pK\n", _sdata);
+		pr_cont(" .bss @ 0x%pK\n", __bss_start);
+	}
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+				       unsigned long v, void *p)
+{
+	show_kernel_relocation(KERN_EMERG);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+	.notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &kernel_location_notifier);
+	return 0;
+}
+device_initcall(register_kernel_offset_dumper);
diff --git a/arch/sw_64/kernel/relocate_kernel.S b/arch/sw_64/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..f1a160636212
--- /dev/null
+++ b/arch/sw_64/kernel/relocate_kernel.S
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * relocate_kernel.S for kexec
+ * Created by Jul 2 2019
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details. 
+ */
+
+#include
+#include
+
+	.align 3
+	.globl relocate_new_kernel
+	.ent relocate_new_kernel
+
+relocate_new_kernel:
+	.prologue 0
+	ldl	a0, arg0
+	ldl	a1, arg1
+	ldl	a2, arg2
+	ldl	a3, arg3
+
+	ldl	s0, kexec_indirection_page
+	ldl	s1, kexec_start_address
+
+process_entry:
+	ldl	s2, 0(s0)
+	addl	s0, 8, s0
+
+	/*
+	 * In case of a kdump/crash kernel, the indirection page is not
+	 * populated as the kernel is directly copied to a reserved location
+	 */
+	beq	s2, done
+
+	/* destination page */
+	and	s2, 0x1, s3
+	beq	s3, 1f
+	bic	s2, 0x1, s4	/* store destination addr in s4 */
+	br	$31, process_entry
+
+1:
+	/* indirection page, update s0 */
+	and	s2, 0x2, s3
+	beq	s3, 1f
+	bic	s2, 0x2, s0
+	br	$31, process_entry
+
+1:
+	/* done page */
+	and	s2, 0x4, s3
+	beq	s3, 1f
+	br	$31, done
+1:
+	/* source page */
+	and	s2, 0x8, s3
+	beq	s3, process_entry
+	bic	s2, 0x8, s2
+	ldi	s6, 0x1
+	sll	s6, (PAGE_SHIFT - 3), s6
+
+copy_word:
+	/* copy page word by word */
+	ldl	s5, 0(s2)
+	stl	s5, 0(s4)
+	addl	s4, 8, s4
+	addl	s2, 8, s2
+	subl	s6, 1, s6
+	beq	s6, process_entry
+	br	$31, copy_word
+	br	$31, process_entry
+
+done:
+#ifdef CONFIG_CRASH_SMP	/* unsupported now!!!! */
+	/* kexec_flag reset is a signal to other CPUs that the kernel
+	 * was moved to its location. Note - we need the relocated address
+	 * of kexec_flag.
+	 */
+
+	br	ra, 1f
+1:	mov	ra, t1
+	ldi	t2, 1b
+	ldi	t0, kexec_flag
+	subl	t0, t2, t0
+	addl	t1, t0, t0
+	stl	zero, 0(t0)
+#endif
+	memb
+	jmp	ra, (s1)
+	.end relocate_new_kernel
+	.size relocate_new_kernel, .-relocate_new_kernel
+
+#ifdef CONFIG_CRASH_SMP
+	/*
+	 * Other CPUs should wait until code is relocated and
+	 * then start at entry (?) point.
+	 */
+	.align 3
+	.globl kexec_smp_wait
+	.ent kexec_smp_wait
+kexec_smp_wait:
+	ldl	a0, s_arg0
+	ldl	a1, s_arg1
+	ldl	a2, s_arg2
+	ldl	a3, s_arg3
+	ldl	s1, kexec_start_address
+
+	/* Non-relocated address works for args and kexec_start_address (old
+	 * kernel is not overwritten). But we need the relocated address of
+	 * kexec_flag.
+	 */
+
+	bsr	ra, 1f
+1:	mov	ra, t1
+	ldi	t2, 1b
+	ldi	t0, kexec_flag
+	subl	t0, t2, t0
+	addl	t1, t0, t0
+
+1:	stl	s0, 0(t0)
+	bne	s0, 1b
+	memb
+	jmp	ra, (s1)
+	.end kexec_smp_wait
+	.size kexec_smp_wait, .-kexec_smp_wait
+#endif
+
+	.align 3
+
+	/* All parameters to the new kernel are passed in registers a0-a3.
+	 * kexec_args[0..3] are used to prepare the register values.
+	 */
+
+kexec_args:
+	.globl kexec_args
+arg0:	.quad	0x0
+arg1:	.quad	0x0
+arg2:	.quad	0x0
+arg3:	.quad	0x0
+	.size	kexec_args, 8*4
+
+#ifdef CONFIG_CRASH_SMP
+	/*
+	 * Secondary CPUs may have different kernel parameters in
+	 * their registers a0-a3. secondary_kexec_args[0..3] are used
+	 * to prepare register values. 
+ */ +secondary_kexec_args: + .globl secondary_kexec_args +s_arg0: .quad 0x0 +s_arg1: .quad 0x0 +s_arg2: .quad 0x0 +s_arg3: .quad 0x0 + .size secondary_kexec_args, 8*4 + +kexec_flag: + .quad 0x1 +#endif + +kexec_start_address: + .globl kexec_start_address + .quad 0x0 + .size kexec_start_address, 8 + +kexec_indirection_page: + .globl kexec_indirection_page + .quad 0 + .size kexec_indirection_page, 8 + +relocate_new_kernel_end: + +relocate_new_kernel_size: + .global relocate_new_kernel_size + .quad relocate_new_kernel_end - relocate_new_kernel + .size relocate_new_kernel_size, 8 diff --git a/arch/sw_64/tools/.gitignore b/arch/sw_64/tools/.gitignore new file mode 100644 index 000000000000..f73e86272b76 --- /dev/null +++ b/arch/sw_64/tools/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +relocs diff --git a/arch/sw_64/tools/Makefile b/arch/sw_64/tools/Makefile new file mode 100644 index 000000000000..66f55b035e22 --- /dev/null +++ b/arch/sw_64/tools/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 + +hostprogs += relocs +relocs-objs += relocs.o +relocs-objs += relocs_main.o +PHONY += relocs +relocs: $(obj)/relocs + @: diff --git a/arch/sw_64/tools/relocs.c b/arch/sw_64/tools/relocs.c new file mode 100644 index 000000000000..ec0ed422a836 --- /dev/null +++ b/arch/sw_64/tools/relocs.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "relocs.h" + +#define ELF_BITS 64 + +#define ELF_MACHINE EM_SW64 +#define ELF_MACHINE_NAME "SW64" +#define SHT_REL_TYPE SHT_RELA +#define Elf_Rel Elf64_Rela + +#define ELF_CLASS ELFCLASS64 +#define ELF_R_SYM(val) ELF64_R_SYM(val) +#define ELF_R_TYPE(val) ELF64_R_TYPE(val) +#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) +#define ELF_ST_BIND(o) ELF64_ST_BIND(o) +#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o) + +#define ElfW(type) _ElfW(ELF_BITS, type) +#define _ElfW(bits, type) __ElfW(bits, type) +#define __ElfW(bits, type) Elf##bits##_##type + +#define Elf_Addr ElfW(Addr) +#define Elf_Ehdr ElfW(Ehdr) +#define Elf_Phdr ElfW(Phdr) +#define Elf_Shdr ElfW(Shdr) +#define Elf_Sym ElfW(Sym) + +static Elf_Ehdr ehdr; + +struct relocs { + uint32_t *offset; + unsigned long count; + unsigned long size; +}; + +static struct relocs relocs; + +struct section { + Elf_Shdr shdr; + struct section *link; + Elf_Sym *symtab; + Elf_Rel *reltab; + char *strtab; + long shdr_offset; +}; +static struct section *secs; + +static const char * const regex_sym_kernel = { +/* Symbols matching these regex's should never be relocated */ + "^(__crc_)", +}; + +static regex_t sym_regex_c; + +static int regex_skip_reloc(const char *sym_name) +{ + return !regexec(&sym_regex_c, sym_name, 0, NULL, 0); +} + +static void regex_init(void) +{ + char errbuf[128]; + int err; + + err = regcomp(&sym_regex_c, regex_sym_kernel, + REG_EXTENDED|REG_NOSUB); + + if (err) { + regerror(err, &sym_regex_c, errbuf, sizeof(errbuf)); + die("%s", errbuf); + } +} + +static const char *rel_type(unsigned int type) +{ + static const char * const type_name[] = { +#define REL_TYPE(X)[X] = #X + REL_TYPE(R_SW64_NONE), + REL_TYPE(R_SW64_REFQUAD), + REL_TYPE(R_SW64_LITERAL), + REL_TYPE(R_SW64_LITUSE), + REL_TYPE(R_SW64_GPDISP), + REL_TYPE(R_SW64_BRADDR), + REL_TYPE(R_SW64_HINT), + REL_TYPE(R_SW64_SREL32), + REL_TYPE(R_SW64_GPRELHIGH), + REL_TYPE(R_SW64_GPRELLOW), +#undef REL_TYPE + }; + const char *name = "unknown type rel type name"; + + if (type < ARRAY_SIZE(type_name) && type_name[type]) + name = type_name[type]; + return name; +} + +static const char *sec_name(unsigned int shndx) +{ + const char 
*sec_strtab; + const char *name; + + sec_strtab = secs[ehdr.e_shstrndx].strtab; + if (shndx < ehdr.e_shnum) + name = sec_strtab + secs[shndx].shdr.sh_name; + else if (shndx == SHN_ABS) + name = "ABSOLUTE"; + else if (shndx == SHN_COMMON) + name = "COMMON"; + else + name = ""; + return name; +} + +static struct section *sec_lookup(const char *secname) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) + if (strcmp(secname, sec_name(i)) == 0) + return &secs[i]; + + return NULL; +} + +static const char *sym_name(const char *sym_strtab, Elf_Sym *sym) +{ + const char *name; + + if (sym->st_name) + name = sym_strtab + sym->st_name; + else + name = sec_name(sym->st_shndx); + return name; +} + +#define le16_to_cpu(val) (val) +#define le32_to_cpu(val) (val) +#define le64_to_cpu(val) (val) + +#define cpu_to_le16(val) (val) +#define cpu_to_le32(val) (val) +#define cpu_to_le64(val) (val) + +static uint16_t elf16_to_cpu(uint16_t val) +{ + return le16_to_cpu(val); +} + +static uint32_t elf32_to_cpu(uint32_t val) +{ + return le32_to_cpu(val); +} + +static uint32_t cpu_to_elf32(uint32_t val) +{ + return cpu_to_le32(val); +} + +#define elf_half_to_cpu(x) elf16_to_cpu(x) +#define elf_word_to_cpu(x) elf32_to_cpu(x) + +#if ELF_BITS == 64 +static uint64_t elf64_to_cpu(uint64_t val) +{ + return le64_to_cpu(val); +} +#define elf_addr_to_cpu(x) elf64_to_cpu(x) +#define elf_off_to_cpu(x) elf64_to_cpu(x) +#define elf_xword_to_cpu(x) elf64_to_cpu(x) +#else +#define elf_addr_to_cpu(x) elf32_to_cpu(x) +#define elf_off_to_cpu(x) elf32_to_cpu(x) +#define elf_xword_to_cpu(x) elf32_to_cpu(x) +#endif + +static void read_ehdr(FILE *fp) +{ + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + die("Cannot read ELF header: %s\n", strerror(errno)); + + if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) + die("No ELF magic\n"); + + if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) + die("Not a %d bit executable\n", ELF_BITS); + + if ((ehdr.e_ident[EI_DATA] != ELFDATA2LSB) && + (ehdr.e_ident[EI_DATA] != ELFDATA2MSB)) + die("Unknown ELF Endianness\n"); + + if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) + die("Unknown ELF version\n"); + + /* Convert the fields to native endian */ + ehdr.e_type = elf_half_to_cpu(ehdr.e_type); + ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine); + ehdr.e_version = elf_word_to_cpu(ehdr.e_version); + ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry); + ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff); + ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff); + ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags); + ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize); + ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize); + ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum); + ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize); + ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum); + ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx); + + if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) + die("Unsupported ELF header type\n"); + + if (ehdr.e_machine != ELF_MACHINE) + die("Not for %s\n", ELF_MACHINE_NAME); + + if (ehdr.e_version != EV_CURRENT) + die("Unknown ELF version\n"); + + if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) + die("Bad Elf header size\n"); + + if (ehdr.e_phentsize != sizeof(Elf_Phdr)) + die("Bad program header entry\n"); + + if (ehdr.e_shentsize != sizeof(Elf_Shdr)) + die("Bad section header entry\n"); + + if (ehdr.e_shstrndx >= ehdr.e_shnum) + die("String table index out of bounds\n"); +} + +static void read_shdrs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + secs = calloc(ehdr.e_shnum, sizeof(struct section)); + if (!secs) + die("Unable to allocate %d 
section headers\n", ehdr.e_shnum); + + if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno)); + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + sec->shdr_offset = ftell(fp); + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name); + sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type); + sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags); + sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr); + sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset); + sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size); + sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link); + sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info); + sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign); + sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize); + if (sec->shdr.sh_link < ehdr.e_shnum) + sec->link = &secs[sec->shdr.sh_link]; + } +} + +static void read_strtabs(FILE *fp) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_STRTAB) + continue; + + sec->strtab = malloc(sec->shdr.sh_size); + if (!sec->strtab) + die("malloc of %d bytes for strtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + } +} + +static void read_symtabs(FILE *fp) +{ + int i, j; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_SYMTAB) + continue; + + sec->symtab = malloc(sec->shdr.sh_size); + if (!sec->symtab) + die("malloc of %d bytes for symtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) { + Elf_Sym *sym = &sec->symtab[j]; + + sym->st_name = elf_word_to_cpu(sym->st_name); + sym->st_value = elf_addr_to_cpu(sym->st_value); + sym->st_size = elf_xword_to_cpu(sym->st_size); + sym->st_shndx = elf_half_to_cpu(sym->st_shndx); + } + } +} + +static void read_relocs(FILE *fp) +{ + static unsigned long base; + int i, j; + + if (!base) { + struct section *sec = sec_lookup(".text"); + + if (!sec) + die("Could not find .text section\n"); + + base = sec->shdr.sh_addr; + } + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + sec->reltab = malloc(sec->shdr.sh_size); + if (!sec->reltab) + die("malloc of %d bytes for relocs failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + + rel->r_offset = elf_addr_to_cpu(rel->r_offset); + /* Set offset into kernel image */ + rel->r_offset -= base; + /* Convert SW64 RELA format - only the symbol + * index needs converting to 
native endianness + */ + rel->r_info = elf_xword_to_cpu(rel->r_info); +#if (SHT_REL_TYPE == SHT_RELA) + rel->r_addend = elf_xword_to_cpu(rel->r_addend); +#endif + } + } +} + +static void remove_relocs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + + /* Set relocation section size to 0, effectively removing it. + * This is necessary due to lack of support for relocations + * in objcopy when creating 32bit elf from 64bit elf. + */ + shdr.sh_size = 0; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fwrite(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot write ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + } +} + +static void add_reloc(struct relocs *r, uint32_t offset, unsigned int type) +{ + /* Relocation representation in binary table: + * |76543210|76543210|76543210|76543210| + * | Type | offset from _text >> 2 | + */ + offset >>= 2; + if (offset > 0x00FFFFFF) + die("Kernel image exceeds maximum size for relocation!\n"); + + offset = (offset & 0x00FFFFFF) | ((type & 0xFF) << 24); + + if (r->count == r->size) { + unsigned long newsize = r->size + 50000; + void *mem = realloc(r->offset, newsize * sizeof(r->offset[0])); + + if (!mem) + die("realloc failed\n"); + + r->offset = mem; + r->size = newsize; + } + r->offset[r->count++] = offset; +} + +static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, + Elf_Sym *sym, const char *symname)) +{ + int i; + + /* Walk through the relocations */ + for (i = 0; i < ehdr.e_shnum; i++) { + char *sym_strtab; + Elf_Sym *sh_symtab; + struct section *sec_applies, *sec_symtab; + int j; + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + sec_symtab = sec->link; + sec_applies = &secs[sec->shdr.sh_info]; + if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) + continue; + + sh_symtab = sec_symtab->symtab; + sym_strtab = sec_symtab->link->strtab; + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)]; + const char *symname = sym_name(sym_strtab, sym); + + process(sec, rel, sym, symname); + } + } +} + +static int do_reloc(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname) +{ + unsigned int r_type = ELF_R_TYPE(rel->r_info); + unsigned int bind = ELF_ST_BIND(sym->st_info); + + if ((bind == STB_WEAK) && (sym->st_value == 0)) { + /* Don't relocate weak symbols without a target */ + return 0; + } + + if (regex_skip_reloc(symname)) + return 0; + + switch (r_type) { + case R_SW64_NONE: + case R_SW64_LITERAL: /* relocated by GOT */ + case R_SW64_LITUSE: + case R_SW64_GPDISP: + case R_SW64_BRADDR: + case R_SW64_HINT: + case R_SW64_SREL32: + case R_SW64_GPRELHIGH: + case R_SW64_GPRELLOW: + case R_SW64_LITERAL_GOT: + /* + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. 
+		 */
+		break;
+
+	case R_SW64_REFQUAD:
+		add_reloc(&relocs, rel->r_offset, r_type);
+		break;
+
+	default:
+		die("Unsupported relocation type: %s (%d)\n",
+		    rel_type(r_type), r_type);
+		break;
+	}
+
+	return 0;
+}
+
+static int write_reloc_as_bin(uint32_t v, FILE *f)
+{
+	unsigned char buf[4];
+
+	v = cpu_to_elf32(v);
+
+	memcpy(buf, &v, sizeof(uint32_t));
+	return fwrite(buf, 1, 4, f);
+}
+
+static int write_reloc_as_text(uint32_t v, FILE *f)
+{
+	int res;
+
+	res = fprintf(f, "\t.long 0x%08"PRIx32"\n", v);
+	if (res < 0)
+		return res;
+	else
+		return sizeof(uint32_t);
+}
+
+static void emit_relocs(int as_text, int as_bin, FILE *outf)
+{
+	int i;
+	int (*write_reloc)(uint32_t, FILE *) = write_reloc_as_bin;
+	int size = 0;
+	int size_reserved;
+	struct section *sec_reloc;
+
+	sec_reloc = sec_lookup(".data.reloc");
+	if (!sec_reloc)
+		die("Could not find relocation section\n");
+
+	size_reserved = sec_reloc->shdr.sh_size;
+
+	/* Collect up the relocations */
+	walk_relocs(do_reloc);
+
+	/* Print the relocations */
+	if (as_text) {
+		/* Print the relocations in a form that gas will accept. */
+		printf(".section \".data.reloc\",\"a\"\n");
+		printf(".balign 8\n");
+		/* Output text to stdout */
+		write_reloc = write_reloc_as_text;
+		outf = stdout;
+	} else if (as_bin) {
+		/* Output raw binary to stdout */
+		outf = stdout;
+	} else {
+		/*
+		 * Seek to offset of the relocation section.
+		 * Each relocation is then written into the
+		 * vmlinux kernel image.
+		 */
+		if (fseek(outf, sec_reloc->shdr.sh_offset, SEEK_SET) < 0) {
+			die("Seek to %d failed: %s\n",
+			    sec_reloc->shdr.sh_offset, strerror(errno));
+		}
+	}
+
+	for (i = 0; i < relocs.count; i++)
+		size += write_reloc(relocs.offset[i], outf);
+
+	/* Print a stop, but only if we've actually written some relocs */
+	if (size)
+		size += write_reloc(0, outf);
+
+	if (size > size_reserved)
+		/*
+		 * Die, but suggest a value for CONFIG_RELOCATION_TABLE_SIZE
+		 * which will fix this problem and allow a bit of headroom
+		 * if more kernel features are enabled
+		 */
+		die("Relocations overflow available space!\n"
+		    "Please adjust CONFIG_RELOCATION_TABLE_SIZE "
+		    "to at least 0x%08x\n", (size + 0x1000) & ~0xFFF);
+}
+
+/*
+ * As an aid to debugging problems with different linkers
+ * print summary information about the relocs.
+ * Since different linkers tend to emit the sections in
+ * different orders, we use the section names in the output. 
+ */ +static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) * sym, + const char *symname) +{ + printf("%16s 0x%x %16s %40s %16s\n", + sec_name(sec->shdr.sh_info), + (unsigned int)rel->r_offset, + rel_type(ELF_R_TYPE(rel->r_info)), + symname, + sec_name(sym->st_shndx)); + return 0; +} + +static void print_reloc_info(void) +{ + printf("%16s %10s %16s %40s %16s\n", + "reloc section", + "offset", + "reloc type", + "symbol", + "symbol section"); + walk_relocs(do_reloc_info); +} + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs) +{ + regex_init(); + read_ehdr(fp); + read_shdrs(fp); + read_strtabs(fp); + read_symtabs(fp); + read_relocs(fp); + if (show_reloc_info) { + print_reloc_info(); + return; + } + emit_relocs(as_text, as_bin, fp); + if (!keep_relocs) + remove_relocs(fp); +} diff --git a/arch/sw_64/tools/relocs.h b/arch/sw_64/tools/relocs.h new file mode 100644 index 000000000000..17c7e31113a0 --- /dev/null +++ b/arch/sw_64/tools/relocs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_TOOLS_RELOCS_H +#define _SW64_TOOLS_RELOCS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define USE_BSD +#include +#include + +#define EM_SW64 0x9916 +/* + * SW64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +void die(char *fmt, ...); + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +enum symtype { + S_ABS, + S_REL, + S_SEG, + S_LIN, + S_NSYMTYPES +}; + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs); +#endif /* _SW64_TOOLS_RELOCS_H */ diff --git a/arch/sw_64/tools/relocs_main.c b/arch/sw_64/tools/relocs_main.c new file mode 100644 index 000000000000..30a830a070db --- /dev/null +++ b/arch/sw_64/tools/relocs_main.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "relocs.h" + +void die(char *fmt, ...) 
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	exit(1);
+}
+
+static void usage(void)
+{
+	die("relocs [--reloc-info|--text|--bin|--keep] vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+	int show_reloc_info, as_text, as_bin, keep_relocs;
+	const char *fname;
+	FILE *fp;
+	int i;
+	unsigned char e_ident[EI_NIDENT];
+
+	show_reloc_info = 0;
+	as_text = 0;
+	as_bin = 0;
+	keep_relocs = 0;
+	fname = NULL;
+	for (i = 1; i < argc; i++) {
+		char *arg = argv[i];
+
+		if (*arg == '-') {
+			if (strcmp(arg, "--reloc-info") == 0) {
+				show_reloc_info = 1;
+				continue;
+			}
+			if (strcmp(arg, "--text") == 0) {
+				as_text = 1;
+				continue;
+			}
+			if (strcmp(arg, "--bin") == 0) {
+				as_bin = 1;
+				continue;
+			}
+			if (strcmp(arg, "--keep") == 0) {
+				keep_relocs = 1;
+				continue;
+			}
+		} else if (!fname) {
+			fname = arg;
+			continue;
+		}
+		usage();
+	}
+	if (!fname)
+		usage();
+
+	fp = fopen(fname, "r+");
+	if (!fp)
+		die("Cannot open %s: %s\n", fname, strerror(errno));
+
+	if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT)
+		die("Cannot read %s: %s", fname, strerror(errno));
+
+	rewind(fp);
+	if (e_ident[EI_CLASS] == ELFCLASS64)
+		process(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+	else
+		die("Unsupported ELF class on SW64: %s", fname);
+	//process_32(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+	fclose(fp);
+	return 0;
+}
-- 
Gitee

From 0bdd507da81099822e174e058a2e10612b478394 Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:18 +0800
Subject: [PATCH 328/953] anolis: sw64: add kprobe support

ANBZ: #4688

Add kprobe support for SW64.

Signed-off-by: Mao Minkai
Reviewed-by: He Sheng
Signed-off-by: Gu Zitao
Reviewed-by: Gu Mi
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2906
---
 arch/sw_64/include/asm/kprobes.h           |  76 +++++
 arch/sw_64/kernel/insn.c                   | 110 ++++++++
 arch/sw_64/kernel/kprobes/Makefile         |   3 +
 arch/sw_64/kernel/kprobes/common.h         |   9 +
 arch/sw_64/kernel/kprobes/decode-insn.c    | 101 +++++++
 arch/sw_64/kernel/kprobes/kprobes-ftrace.c |  48 ++++
 arch/sw_64/kernel/kprobes/kprobes.c        | 309 +++++++++++++++++++
 7 files changed, 656 insertions(+)
 create mode 100644 arch/sw_64/include/asm/kprobes.h
 create mode 100644 arch/sw_64/kernel/insn.c
 create mode 100644 arch/sw_64/kernel/kprobes/Makefile
 create mode 100644 arch/sw_64/kernel/kprobes/common.h
 create mode 100644 arch/sw_64/kernel/kprobes/decode-insn.c
 create mode 100644 arch/sw_64/kernel/kprobes/kprobes-ftrace.c
 create mode 100644 arch/sw_64/kernel/kprobes/kprobes.c

diff --git a/arch/sw_64/include/asm/kprobes.h b/arch/sw_64/include/asm/kprobes.h
new file mode 100644
index 000000000000..0c7be8109ed2
--- /dev/null
+++ b/arch/sw_64/include/asm/kprobes.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Probes (KProbes)
+ * Based on arch/mips/include/asm/kprobes.h
+ */
+
+#ifndef _ASM_SW64_KPROBES_H
+#define _ASM_SW64_KPROBES_H
+
+#include
+
+#define BREAK_KPROBE	0x40ffffff
+#define BREAK_KPROBE_SS	0x40fffeff
+
+#ifdef CONFIG_KPROBES
+#include
+#include
+
+#include
+#include
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct kprobe;
+struct pt_regs;
+
+typedef u32 kprobe_opcode_t;
+
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p)						\
+do {									\
+	if (p->addr)							\
+		flush_icache_range((unsigned long)p->addr,		\
+				   (unsigned long)p->addr +		\
+				   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
+} while (0)
+
+
+#define kretprobe_blacklist_size 0
+
+void arch_remove_kprobe(struct kprobe *p);
+
+/* Architecture-specific copy of the original instruction */
+struct arch_specific_insn {
+	/* copy of the original instruction */
+	kprobe_opcode_t *insn;
+	/*
+	 * Set in kprobes code, initially to 0. If the instruction can be
+	 * emulated, this is set to 1, if not, to -1.
+	 */
+	int boostable;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+};
+
+#define SKIP_DELAYSLOT 0x0001
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	/* Per-thread fields, used while emulating branches */
+	unsigned long flags;
+	unsigned long target_pc;
+	struct prev_kprobe prev_kprobe;
+};
+
+extern int kprobe_handler(struct pt_regs *regs);
+extern int post_kprobe_handler(struct pt_regs *regs);
+extern int kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr);
+
+
+#endif /* CONFIG_KPROBES */
+#endif /* _ASM_SW64_KPROBES_H */
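Editor's note: for readers new to kprobes, a minimal sketch of how the machinery declared above is typically consumed through the generic kprobes API; kernel_clone is just an example target symbol and the example_* names are hypothetical:

	#include <linux/kprobes.h>

	static int example_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("kprobe hit at %p\n", p->addr);
		return 0;	/* let execution continue */
	}

	static struct kprobe example_kp = {
		.symbol_name = "kernel_clone",
		.pre_handler = example_pre,
	};

	/* register_kprobe(&example_kp) plants BREAK_KPROBE at the probe
	 * point; single-stepping then runs a copy of the displaced
	 * instruction from an insn slot. */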
diff --git a/arch/sw_64/kernel/insn.c b/arch/sw_64/kernel/insn.c
new file mode 100644
index 000000000000..281578e1bfc0
--- /dev/null
+++ b/arch/sw_64/kernel/insn.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, serveros, linyue
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#include
+#include
+
+//static DEFINE_RAW_SPINLOCK(patch_lock);
+
+int __kprobes sw64_insn_read(void *addr, u32 *insnp)
+{
+	int ret;
+	__le32 val;
+
+	ret = copy_from_kernel_nofault(&val, addr, SW64_INSN_SIZE);
+	if (!ret)
+		*insnp = le32_to_cpu(val);
+
+	return ret;
+}
+
+static int __kprobes __sw64_insn_write(void *addr, __le32 insn)
+{
+	void *waddr = addr;
+	int ret;
+
+	//raw_spin_lock_irqsave(&patch_lock, flags);
+
+	ret = copy_to_kernel_nofault(waddr, &insn, SW64_INSN_SIZE);
+
+	//raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+	return ret;
+}
+
+static int __kprobes __sw64_insn_double_write(void *addr, __le64 insn)
+{
+	void *waddr = addr;
+	//unsigned long flags = 0;
+	int ret;
+
+	//raw_spin_lock_irqsave(&patch_lock, flags);
+
+	ret = copy_to_kernel_nofault(waddr, &insn, 2 * SW64_INSN_SIZE);
+
+	//raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+	return ret;
+}
+
+int __kprobes sw64_insn_write(void *addr, u32 insn)
+{
+	u32 *tp = addr;
+
+	/* SW64 instructions must be word aligned */
+	if ((uintptr_t)tp & 0x3)
+		return -EINVAL;
+
+	return __sw64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes sw64_insn_double_write(void *addr, u64 insn)
+{
+	u32 *tp = addr;
+
+	/* SW64 instructions must be word aligned */
+	if ((uintptr_t)tp & 0x3)
+		return -EINVAL;
+
+	return __sw64_insn_double_write(addr, cpu_to_le64(insn));
+}
+
+unsigned int __kprobes sw64_insn_nop(void)
+{
+	return SW64_BIS(R31, R31, R31);
+}
+
+unsigned int __kprobes sw64_insn_call(unsigned int ra, unsigned int rb)
+{
+	return SW64_CALL(ra, rb, 0);
+}
+
+unsigned int __kprobes sw64_insn_sys_call(unsigned int num)
+{
+	return SW64_SYS_CALL(num);
+}
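Editor's note: these patching helpers are the primitives that the kprobes code further down builds on; a minimal sketch of the intended usage (illustrative only, the example_* names are hypothetical):

	/* Arm a probe point: plant the kprobe break instruction. */
	static int example_arm(kprobe_opcode_t *addr)
	{
		return sw64_insn_write(addr, BREAK_KPROBE);
	}

	/* Disarm: write the saved original instruction back. */
	static int example_disarm(kprobe_opcode_t *addr, u32 saved_insn)
	{
		return sw64_insn_write(addr, saved_insn);
	}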
*/ +unsigned int __kprobes sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc) +{ + int offset = new_pc - pc; + unsigned int disp, minus = 0x1fffff; + + if (!(offset <= BR_MAX_DISP && offset >= -BR_MAX_DISP)) + return -1; + if (offset > 0) + disp = (offset - 4) / 4; + else + disp = ~(-offset / 4) & minus; + + return SW64_BR(ra, disp); + +} diff --git a/arch/sw_64/kernel/kprobes/Makefile b/arch/sw_64/kernel/kprobes/Makefile new file mode 100644 index 000000000000..110ba2bf7752 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o diff --git a/arch/sw_64/kernel/kprobes/common.h b/arch/sw_64/kernel/kprobes/common.h new file mode 100644 index 000000000000..de10058f0376 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/common.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_KERNEL_KPROBES_COMMON_H +#define _SW64_KERNEL_KPROBES_COMMON_H + + +extern bool sw64_insn_can_kprobe(kprobe_opcode_t *addr); + + +#endif /* _SW64_KERNEL_KPROBES_COMMON_H */ diff --git a/arch/sw_64/kernel/kprobes/decode-insn.c b/arch/sw_64/kernel/kprobes/decode-insn.c new file mode 100644 index 000000000000..91c31111f2b7 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/decode-insn.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/probes/decode-insn.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include + +#include "common.h" + +static bool __kprobes sw64_insn_is_steppable(u32 insn) +{ + /* + * Branch instructions will write a new value into the PC which is + * likely to be relative to the XOL address and therefore invalid. + * Deliberate generation of an exception during stepping is also not + * currently safe. Lastly, MSR instructions can do any number of nasty + * things we can't handle during single-stepping. 
+ */ + if (sw64_insn_is_sys_call_b(insn) || + sw64_insn_is_sys_call(insn) || + sw64_insn_is_call(insn) || + sw64_insn_is_ret(insn) || + sw64_insn_is_jmp(insn) || + sw64_insn_is_br(insn) || + sw64_insn_is_bsr(insn) || + sw64_insn_is_memb(insn) || + sw64_insn_is_imemb(insn) || + sw64_insn_is_rtc(insn) || + sw64_insn_is_lldl(insn) || + sw64_insn_is_lldw(insn) || + sw64_insn_is_beq(insn) || + sw64_insn_is_bne(insn) || + sw64_insn_is_blt(insn) || + sw64_insn_is_ble(insn) || + sw64_insn_is_bgt(insn) || + sw64_insn_is_bge(insn) || + sw64_insn_is_blbc(insn) || + sw64_insn_is_blbs(insn) || + sw64_insn_is_fbeq(insn) || + sw64_insn_is_fbne(insn) || + sw64_insn_is_fblt(insn) || + sw64_insn_is_fble(insn) || + sw64_insn_is_fbgt(insn) || + sw64_insn_is_fbge(insn)) + return false; + + return true; +} + + +#ifdef CONFIG_KPROBES +// lldl rd_f +static bool __kprobes is_probed_between_atomic(kprobe_opcode_t *addr) +{ + int count = 0; + unsigned long size = 0, offset = 0; + kprobe_opcode_t *scan_start = NULL; + + if (kallsyms_lookup_size_offset((unsigned long)addr, &size, &offset)) + scan_start = addr - (offset / sizeof(kprobe_opcode_t)); + + while (scan_start < addr) { + if (sw64_insn_is_lldl(le32_to_cpu(*scan_start)) || + sw64_insn_is_lldw(le32_to_cpu(*scan_start))) + count++; + if (sw64_insn_is_rd_f(le32_to_cpu(*scan_start))) + count--; + scan_start++; + } + if (count) + return false; + + return true; +} + +bool __kprobes sw64_insn_can_kprobe(kprobe_opcode_t *addr) +{ + u32 insn = le32_to_cpu(*addr); + + if (!sw64_insn_is_steppable(insn)) { + pr_warn("addr is not steppable\n"); + return false; + } +#ifdef CONFIG_SUBARCH_C3B + if (!is_probed_between_atomic(addr)) { + pr_warn("addr between atomic can't probe\n"); + return false; + } +#endif + return true; +} +#endif diff --git a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c new file mode 100644 index 000000000000..89d7dba9dc25 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Dynamic Ftrace based Kprobes Optimization + */ + +#include +#include +#include +#include +#include + +/* Ftrace callback handler for kprobes */ +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + return; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + regs->regs[28] -= MCOUNT_INSN_SIZE; + + __this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) { + regs->regs[28] += MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + } + __this_cpu_write(current_kprobe, NULL); + } +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + p->ainsn.boostable = -1; + return 0; +} diff --git a/arch/sw_64/kernel/kprobes/kprobes.c b/arch/sw_64/kernel/kprobes/kprobes.c new file mode 100644 index 000000000000..024ce7d99e61 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kernel Probes (KProbes) + * arch/sw_64/kernel/kprobes.c + */ + +#include +#include +#include + +#include "common.h" + +static u32 breakpoint_insn = BREAK_KPROBE; +static u32 
breakpoint2_insn = BREAK_KPROBE_SS;
+
+int post_kprobe_handler(struct pt_regs *regs);
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	int ret = 0;
+	extern char __start_rodata[];
+	extern char __end_rodata[];
+	unsigned long probe_addr = (unsigned long)p->addr;
+
+	if (probe_addr & 0x3)
+		return -EINVAL;
+
+	if (!sw64_insn_can_kprobe(p->addr))
+		return -EINVAL;
+	/* copy instruction */
+	p->opcode = le32_to_cpu(*p->addr);
+
+
+	if (probe_addr >= (unsigned long) __start_rodata &&
+			probe_addr <= (unsigned long) __end_rodata)
+		return -EINVAL;
+
+
+	/* insn: must be on special executable page on sw64. */
+	p->ainsn.insn = get_insn_slot();
+	if (!p->ainsn.insn) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	/*
+	 * In the kprobe->ainsn.insn[] array we store the original
+	 * instruction at index zero and a break trap instruction at
+	 * index one.
+	 */
+	p->ainsn.insn[0] = p->opcode;
+	p->ainsn.insn[1] = breakpoint2_insn;
+out:
+	return ret;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	sw64_insn_write(p->addr, breakpoint_insn);
+	flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	sw64_insn_write(p->addr, p->opcode);
+	flush_insn_slot(p);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
+}
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+	__this_cpu_write(current_kprobe, p);
+}
+
+
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+			struct kprobe_ctlblk *kcb, int reenter)
+{
+	if (reenter) {
+		save_previous_kprobe(kcb);
+		set_current_kprobe(p);
+		kcb->kprobe_status = KPROBE_REENTER;
+	} else {
+		kcb->kprobe_status = KPROBE_HIT_SS;
+	}
+
+	/* insn simulation */
+	kcb->target_pc = regs->pc;
+	regs->pc = (unsigned long)&p->ainsn.insn[0];
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+			struct pt_regs *regs,
+			struct kprobe_ctlblk *kcb)
+{
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SSDONE:
+	case KPROBE_HIT_ACTIVE:
+		kprobes_inc_nmissed_count(p);
+		setup_singlestep(p, regs, kcb, 1);
+		break;
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		pr_warn("Unrecoverable kprobe detected.\n");
+		dump_kprobe(p);
+		BUG();
+		break;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+	return 1;
+}
+
+int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	unsigned long addr = instruction_pointer(regs);
+
+	if (user_mode(regs))
+		return 0;
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+	p = get_kprobe((kprobe_opcode_t *)(addr - 4));
+
+	if (p) {
+		if (kprobe_running()) {
+			if (reenter_kprobe(p, regs, kcb))
+				return 1;
+		} else {
+			set_current_kprobe(p);
+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing.
If we have a + * pre-handler and it returned non-zero, that means + * user handler setup registers to exit to another + * instruction, we must skip the single stepping. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + return 1; + } + } + return 0; + +} +int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + // resume_execution(cur, regs, kcb); + regs->pc = kcb->target_pc; + + + /* Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + return 1; +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (kcb->kprobe_status & KPROBE_HIT_SS) { + regs->pc = kcb->target_pc; + + reset_current_kprobe(); + preempt_enable_no_resched(); + } + return 0; +} + +/* + * Wrapper routine for handling exceptions. + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BREAK: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_SSTEPBP: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + default: + break; + } + return ret; +} +/* + * Function return probe trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe causes the + * handlers to fire + */ +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile( + /* Keep the assembler from reordering and placing JR here. 
*/ + ".set noreorder\n\t" + "nop\n\t" + ".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n\t" + "nop\n\t" + : : : "memory"); +} + +void __kretprobe_trampoline(void); + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->regs[26]; + ri->fp = NULL; + + /* Replace the return addr with trampoline addr */ + regs->regs[26] = (unsigned long)__kretprobe_trampoline; +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + unsigned long orig_ret_address; + + orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); + instruction_pointer(regs) = orig_ret_address; + regs->regs[26] = orig_ret_address; + + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline) + return 1; + + return 0; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *)__kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} -- Gitee From 7dd8e04d060a56165027cc51841d20dccfc2182a Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:38 +0800 Subject: [PATCH 329/953] anolis: sw64: add uprobe support ANBZ: #4688 Add uprobe support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/uprobes.h | 45 ++++++++ arch/sw_64/kernel/uprobes.c | 182 +++++++++++++++++++++++++++++++ 2 files changed, 227 insertions(+) create mode 100644 arch/sw_64/include/asm/uprobes.h create mode 100644 arch/sw_64/kernel/uprobes.c diff --git a/arch/sw_64/include/asm/uprobes.h b/arch/sw_64/include/asm/uprobes.h new file mode 100644 index 000000000000..fcd2026c3622 --- /dev/null +++ b/arch/sw_64/include/asm/uprobes.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_SW64_UPROBES_H +#define _ASM_SW64_UPROBES_H + +#include +#include +#include + +/* + * We want this to be defined as union sw64_instruction but that makes the + * generic code blow up. 
+ */ +typedef u32 uprobe_opcode_t; + +#define MAX_UINSN_BYTES SW64_INSN_SIZE +#define UPROBE_XOL_SLOT_BYTES SW64_INSN_SIZE + +#define UPROBE_BRK_UPROBE 0x000d000d /* break 13 */ +#define UPROBE_BRK_UPROBE_XOL 0x000e000d /* break 14 */ + +#define UPROBE_SWBP_INSN UPROBE_BRK_UPROBE +#define UPROBE_SWBP_INSN_SIZE MAX_UINSN_BYTES + +struct arch_uprobe { + u32 insn; + u32 ixol[2]; +}; + +struct arch_uprobe_task { + unsigned long saved_trap_nr; +}; + +#ifdef CONFIG_UPROBES +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc); +#else +static inline void +sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) {} +#endif + +#endif /* _ASM_SW64_UPROBES_H */ diff --git a/arch/sw_64/kernel/uprobes.c b/arch/sw_64/kernel/uprobes.c new file mode 100644 index 000000000000..928312d62cfd --- /dev/null +++ b/arch/sw_64/kernel/uprobes.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +/** + * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. + * @mm: the probed address space. + * @arch_uprobe: the probepoint information. + * @addr: virtual address at which to install the probepoint + * Return 0 on success or a -ve number on error. + */ +int arch_uprobe_analyze_insn(struct arch_uprobe *aup, + struct mm_struct *mm, unsigned long addr) +{ + u32 inst; + + if (addr & 0x03) + return -EINVAL; + + inst = aup->insn; + + aup->ixol[0] = aup->insn; + aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */ + + return 0; +} + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + unsigned long kaddr, kstart; + + /* Initialize the slot */ + kaddr = (unsigned long)kmap_local_page(page); + kstart = kaddr + (vaddr & ~PAGE_MASK); + memcpy((void *)kstart, src, len); + flush_icache_range(kstart, kstart + len); + kunmap_local((void *)kaddr); +} + +/* + * arch_uprobe_pre_xol - prepare to execute out of line. + * @auprobe: the probepoint information. + * @regs: reflects the saved user state of current task. + */ +int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Instruction points to execute ol */ + instruction_pointer_set(regs, utask->xol_vaddr); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Instruction points to execute next to breakpoint address */ + instruction_pointer_set(regs, utask->vaddr + 4); + + return 0; +} + +/* + * If xol insn itself traps and generates a signal(Say, + * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped + * instruction jumps back to its own address. It is assumed that anything + * like do_page_fault/do_trap/etc sets thread.trap_nr != -1. + * + * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, + * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to + * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol(). 
+ */ +bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) +{ + return false; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = data; + struct pt_regs *regs = args->regs; + + /* regs == NULL is a kernel bug */ + if (WARN_ON(!regs)) + return NOTIFY_DONE; + + /* We are only interested in userspace traps */ + if (!user_mode(regs)) + return NOTIFY_DONE; + + switch (val) { + case DIE_UPROBE: + if (uprobe_pre_sstep_notifier(regs)) + return NOTIFY_STOP; + break; + case DIE_UPROBE_XOL: + if (uprobe_post_sstep_notifier(regs)) + return NOTIFY_STOP; + default: + break; + } + + return 0; +} + +/* + * This function gets called when XOL instruction either gets trapped or + * the thread has a fatal signal. Reset the instruction pointer to its + * probed address for the potential restart or for post mortem analysis. + */ +void arch_uprobe_abort_xol(struct arch_uprobe *aup, + struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + instruction_pointer_set(regs, utask->vaddr); +} + +unsigned long arch_uretprobe_hijack_return_addr( + unsigned long trampoline_vaddr, struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->regs[26]; + + /* Replace the return address with the trampoline address */ + regs->regs[26] = trampoline_vaddr; + + return ra; +} + +/* + * See if the instruction can be emulated. + * Returns true if instruction was emulated, false otherwise. + * + * For now we always emulate so this function just returns 0. + */ +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + return 0; +} + +/* + * struct xol_area and get_trampoline_vaddr() are copied from + * kernel/events/uprobes.c to avoid modifying arch-independent + * code. + */ +struct xol_area { + wait_queue_head_t wq; + atomic_t slot_count; + unsigned long *bitmap; + struct vm_special_mapping xol_mapping; + struct page *pages[2]; + unsigned long vaddr; +}; + +static unsigned long get_trampoline_vaddr(void) +{ + struct xol_area *area; + unsigned long trampoline_vaddr = -1; + + area = READ_ONCE(current->mm->uprobes_state.xol_area); + if (area) + trampoline_vaddr = area->vaddr; + + return trampoline_vaddr; +} + +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) +{ + /* + * regs->pc has been changed to orig_ret_vaddr in handle_trampoline(). + */ + if (exc_pc == get_trampoline_vaddr()) + regs->regs[26] = regs->pc; +} -- Gitee From a7f263dd5dcd557abac475793a6dfef0c82f7904 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:16 +0800 Subject: [PATCH 330/953] anolis: sw64: add jump_label support ANBZ: #4688 Add jump_label support for SW64. 
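
For reference, a consumer of this support compiles to a single nop (or br)
at the call site via the generic static-branch API; a minimal sketch
(illustrative only, the key name is made up):

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	static DEFINE_STATIC_KEY_FALSE(sw64_demo_key);

	void hot_path(void)
	{
		/* arch_static_branch() emits a nop here; enabling the key
		 * rewrites it to a br via arch_jump_label_transform().
		 */
		if (static_branch_unlikely(&sw64_demo_key))
			pr_info("slow path enabled\n");
	}

Flipping the branch at runtime is done with
static_branch_enable(&sw64_demo_key).
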
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/jump_label.h | 50 +++++++++++++++++++++++++++++ arch/sw_64/kernel/jump_label.c | 32 ++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 arch/sw_64/include/asm/jump_label.h create mode 100644 arch/sw_64/kernel/jump_label.c diff --git a/arch/sw_64/include/asm/jump_label.h b/arch/sw_64/include/asm/jump_label.h new file mode 100644 index 000000000000..32fbf7573b20 --- /dev/null +++ b/arch/sw_64/include/asm/jump_label.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_JUMP_LABEL_H +#define _ASM_SW64_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define JUMP_LABEL_NOP_SIZE SW64_INSN_SIZE + +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: br %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +typedef u64 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_JUMP_LABEL_H */ diff --git a/arch/sw_64/kernel/jump_label.c b/arch/sw_64/kernel/jump_label.c new file mode 100644 index 000000000000..f3bc40370e4d --- /dev/null +++ b/arch/sw_64/kernel/jump_label.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + u32 *insnp = (u32 *)entry->code; + u32 insn; + + if (type == JUMP_LABEL_JMP) { + insn = sw64_insn_br(R31, (entry->code), entry->target); + BUG_ON(insn == -1); + } else { + insn = sw64_insn_nop(); + } + + *insnp = insn; + + flush_icache_range(entry->code, entry->code + SW64_INSN_SIZE); +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + /* + * no need to rewrite NOP + */ +} -- Gitee From 31d99dc01de6d81b45357a268ad3ce7293b3fd70 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:17 +0800 Subject: [PATCH 331/953] anolis: sw64: add kgdb support ANBZ: #4688 Add kgdb support for SW64. 
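
Once built in, the stub can be entered explicitly for a smoke test; a
hedged sketch (kgdb_breakpoint() is the generic entry point, which lands
in arch_kgdb_breakpoint() below; the test module itself is illustrative):

	#include <linux/kgdb.h>
	#include <linux/module.h>

	static int __init kgdb_smoke_init(void)
	{
		kgdb_breakpoint();	/* traps into the attached debugger */
		return 0;
	}
	module_init(kgdb_smoke_init);
	MODULE_LICENSE("GPL");
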
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/kgdb.h | 68 ++++++++++ arch/sw_64/kernel/kgdb.c | 233 ++++++++++++++++++++++++++++++++++ 2 files changed, 301 insertions(+) create mode 100644 arch/sw_64/include/asm/kgdb.h create mode 100644 arch/sw_64/kernel/kgdb.c diff --git a/arch/sw_64/include/asm/kgdb.h b/arch/sw_64/include/asm/kgdb.h new file mode 100644 index 000000000000..a00a45ce767c --- /dev/null +++ b/arch/sw_64/include/asm/kgdb.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sw64 KGDB support + * + * Based on arch/arm64/include/kgdb.h + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_KGDB_H +#define _ASM_SW64_KGDB_H + +#include +#include + +#ifndef __ASSEMBLY__ + + +#define GDB_ADJUSTS_BREAK_OFFSET +#define BREAK_INSTR_SIZE 4 +#define CACHE_FLUSH_IS_SAFE 0 + +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__("sys_call %0" : : "i"(HMC_bpt)); +} + +void sw64_task_to_gdb_regs(struct task_struct *task, unsigned long *regs); + +extern void kgdb_handle_bus_error(void); +extern int kgdb_fault_expected; +extern unsigned long get_reg(struct task_struct *task, unsigned long regno); + +#endif /* !__ASSEMBLY__ */ + +/* + * general purpose registers size in bytes. + */ +#define DBG_MAX_REG_NUM (67) + +/* + * Size of I/O buffer for gdb packet. + * considering to hold all register contents, size is set + */ + +#define BUFMAX 4096 + +/* + * Number of bytes required for gdb_regs buffer. + * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each + * GDB fails to connect for size beyond this with error + * "'g' packet reply is too long" + */ +#define NUMREGBYTES (DBG_MAX_REG_NUM * 8) + +#endif /* _ASM_SW64_KGDB_H */ diff --git a/arch/sw_64/kernel/kgdb.c b/arch/sw_64/kernel/kgdb.c new file mode 100644 index 000000000000..833f72a1577c --- /dev/null +++ b/arch/sw_64/kernel/kgdb.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 KGDB support + * + * Based on arch/arm64/kernel/kgdb.c + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { + { "r0", 8, offsetof(struct pt_regs, regs[0])}, + { "r1", 8, offsetof(struct pt_regs, regs[1])}, + { "r2", 8, offsetof(struct pt_regs, regs[2])}, + { "r3", 8, offsetof(struct pt_regs, regs[3])}, + { "r4", 8, offsetof(struct pt_regs, regs[4])}, + { "r5", 8, offsetof(struct pt_regs, regs[5])}, + { "r6", 8, offsetof(struct pt_regs, regs[6])}, + { "r7", 8, offsetof(struct pt_regs, regs[7])}, + { "r8", 8, offsetof(struct pt_regs, regs[8])}, + + { "r9", 8, offsetof(struct pt_regs, regs[9])}, + { "r10", 8, offsetof(struct pt_regs, regs[10])}, + { "r11", 8, offsetof(struct pt_regs, regs[11])}, + { "r12", 8, offsetof(struct pt_regs, regs[12])}, + { "r13", 8, offsetof(struct pt_regs, regs[13])}, + { "r14", 8, offsetof(struct pt_regs, regs[14])}, + { "r15", 8, offsetof(struct pt_regs, regs[15])}, + + { "r16", 8, offsetof(struct pt_regs, regs[16])}, + { "r17", 8, offsetof(struct pt_regs, regs[17])}, + { "r18", 8, offsetof(struct pt_regs, regs[18])}, + + { "r19", 8, offsetof(struct pt_regs, regs[19])}, + { "r20", 8, offsetof(struct pt_regs, regs[20])}, + { "r21", 8, offsetof(struct pt_regs, regs[21])}, + { "r22", 8, offsetof(struct pt_regs, regs[22])}, + { "r23", 8, offsetof(struct pt_regs, regs[23])}, + { "r24", 8, offsetof(struct pt_regs, regs[24])}, + { "r25", 8, offsetof(struct pt_regs, regs[25])}, + { "r26", 8, offsetof(struct pt_regs, regs[26])}, + { "r27", 8, offsetof(struct pt_regs, regs[27])}, + { "at", 8, offsetof(struct pt_regs, regs[28])}, + { "gp", 8, offsetof(struct pt_regs, regs[29])}, + { "sp", 8, offsetof(struct pt_regs, regs[30])}, + { "zero", 8, -1 }, + + { "f0", 8, -1 }, + { "f1", 8, -1 }, + { "f2", 8, -1 }, + { "f3", 8, -1 }, + { "f4", 8, -1 }, + { "f5", 8, -1 }, + { "f6", 8, -1 }, + { "f7", 8, -1 }, + { "f8", 8, -1 }, + { "f9", 8, -1 }, + { "f10", 8, -1 }, + { "f11", 8, -1 }, + { "f12", 8, -1 }, + { "f13", 8, -1 }, + { "f14", 8, -1 }, + { "f15", 8, -1 }, + { "f16", 8, -1 }, + { "f17", 8, -1 }, + { "f18", 8, -1 }, + { "f19", 8, -1 }, + { "f20", 8, -1 }, + { "f21", 8, -1 }, + { "f22", 8, -1 }, + { "f23", 8, -1 }, + { "f24", 8, -1 }, + { "f25", 8, -1 }, + { "f26", 8, -1 }, + { "f27", 8, -1 }, + { "f28", 8, -1 }, + { "f29", 8, -1 }, + { "f30", 8, -1 }, + { "fpcr", 8, -1 }, + + { "pc", 8, offsetof(struct pt_regs, pc)}, + { "", 8, -1 }, + { "tp", 8, -1}, +}; + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; +} + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; +} + +void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) +{ + int i; + /* Initialize to zero */ + memset((char *)gdb_regs, 0, NUMREGBYTES); + for (i = 0; i < DBG_MAX_REG_NUM; i++) + gdb_regs[i] = get_reg(task, i); +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + pr_info("BEFORE SET PC WITH %lx\n", pc); + instruction_pointer(regs) = pc; + pr_info("AFTER SET PC IS %lx\n", instruction_pointer(regs)); +} + +void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), NULL); +} + 
+void kgdb_roundup_cpus(void)
+{
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+			       int err_code, char *remcom_in_buffer,
+			       char *remcom_out_buffer,
+			       struct pt_regs *linux_regs)
+{
+	char *ptr;
+	unsigned long address = -1;
+
+	switch (remcom_in_buffer[0]) {
+	case 'c':
+		ptr = &remcom_in_buffer[1];
+		if (kgdb_hex2long(&ptr, &address))
+			kgdb_arch_set_pc(linux_regs, address);
+		return 0;
+	}
+	return -1;
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+	struct pt_regs *regs = args->regs;
+
+	/* Userspace events, ignore. */
+	if (user_mode(regs))
+		return NOTIFY_DONE;
+
+	if (kgdb_handle_exception(1, args->signr, cmd, regs))
+		return NOTIFY_DONE;
+
+	return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+	ret = __kgdb_notify(ptr, cmd);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call	= kgdb_notify,
+};
+
+/*
+ * kgdb_arch_init - Perform any architecture specific initialization.
+ * This function will handle the initialization of any architecture
+ * specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+	int ret = register_die_notifier(&kgdb_notifier);
+
+	if (ret != 0)
+		return ret;
+	return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ * This function will handle the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * sw64 instructions are always in LE.
+ * Break instruction is encoded in LE format
+ */
+const struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr	= {0x80, 00, 00, 00}
+};
-- 
Gitee

From 0406f5c8efbb3227e4e58ffbd2854d6dc18c2501 Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Mon, 8 Jan 2024 16:20:30 +0800
Subject: [PATCH 332/953] anolis: sw64: add dynamic frequency scaling support

ANBZ: #4688

Add dynamic frequency scaling support for SW64 based xuelang platform.
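
The per-step frequencies are derived from the external clock at boot; a
standalone sketch of the table math performed in sw64_cpufreq_init()
below (kHz units; freq_off is 60000 for a 240 MHz external clock, 50000
otherwise; the helper name is made up):

	/* Mirrors the freq_table fill-in below; illustrative only. */
	static unsigned long sw64_step_khz(unsigned long freq_off, int i)
	{
		if (i == 2)
			return freq_off * 36;
		return freq_off * 38 + (i - 3) * freq_off;	/* i >= 3 */
	}
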
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/cpufreq.h | 66 ++++++++++++ arch/sw_64/platform/Makefile | 2 + arch/sw_64/platform/cpufreq_xuelang.c | 140 ++++++++++++++++++++++++++ 3 files changed, 208 insertions(+) create mode 100644 arch/sw_64/include/asm/cpufreq.h create mode 100644 arch/sw_64/platform/Makefile create mode 100644 arch/sw_64/platform/cpufreq_xuelang.c diff --git a/arch/sw_64/include/asm/cpufreq.h b/arch/sw_64/include/asm/cpufreq.h new file mode 100644 index 000000000000..cf47f1fc6866 --- /dev/null +++ b/arch/sw_64/include/asm/cpufreq.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_CPUFREQ_H +#define _ASM_SW64_CPUFREQ_H + +#include +#include +#include +#include +#include + +struct clk; + +extern char curruent_policy[CPUFREQ_NAME_LEN]; + +struct clk_ops { + void (*init)(struct clk *clk); + void (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + void (*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +struct clk { + struct list_head node; + const char *name; + int id; + struct module *owner; + + struct clk *parent; + const struct clk_ops *ops; + + struct kref kref; + + unsigned long rate; + unsigned long flags; +}; + +#define CLK_ALWAYS_ENABLED (1 << 0) +#define CLK_RATE_PROPAGATES (1 << 1) + +#define CLK_PRT 0x1UL +#define CORE_CLK0_V (0x1UL << 1) +#define CORE_CLK0_R (0x1UL << 2) +#define CORE_CLK2_V (0x1UL << 15) +#define CORE_CLK2_R (0x1UL << 16) + +#define CLK_LV1_SEL_PRT 0x1UL +#define CLK_LV1_SEL_MUXA (0x1UL << 2) +#define CLK_LV1_SEL_MUXB (0x1UL << 3) + +#define CORE_PLL0_CFG_SHIFT 4 +#define CORE_PLL2_CFG_SHIFT 18 + +extern struct cpufreq_frequency_table freq_table[]; + +int clk_init(void); +void sw64_set_rate(unsigned int index); + +struct clk *sw64_clk_get(struct device *dev, const char *id); + +void sw64_update_clockevents(unsigned long cpu, u32 freq); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy); +#endif /* _ASM_SW64_CPUFREQ_H */ diff --git a/arch/sw_64/platform/Makefile b/arch/sw_64/platform/Makefile new file mode 100644 index 000000000000..4c0edceb4a2c --- /dev/null +++ b/arch/sw_64/platform/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += cpufreq_xuelang.o diff --git a/arch/sw_64/platform/cpufreq_xuelang.c b/arch/sw_64/platform/cpufreq_xuelang.c new file mode 100644 index 000000000000..1259e58dc874 --- /dev/null +++ b/arch/sw_64/platform/cpufreq_xuelang.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include +#include + +/* Minimum CLK support */ +enum { + DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, + DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_RESV +}; + +struct cpufreq_frequency_table freq_table[] = { + {0, 200, CPUFREQ_ENTRY_INVALID}, + {0, DC_1, CPUFREQ_ENTRY_INVALID}, + {0, DC_2, 0}, + {0, DC_3, 0}, + {0, DC_4, 0}, + {0, DC_5, 0}, + {0, DC_6, 0}, + {0, DC_7, 0}, + {0, DC_8, 0}, + {0, DC_9, 0}, + {0, DC_10, 0}, + {0, DC_11, 0}, + {0, DC_12, 0}, + {0, DC_13, 0}, + {0, DC_14, 0}, + {0, DC_15, 0}, + {-1, DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static struct platform_device sw64_cpufreq_device = { + .name = "sw64_cpufreq", + .id = -1, +}; + +static int __init sw64_cpufreq_init(void) +{ + int i; + unsigned char external_clk; + unsigned long 
max_rate, freq_off; + + max_rate = get_cpu_freq() / 1000; + + external_clk = *((unsigned char *)__va(MB_EXTCLK)); + + if (external_clk == 240) + freq_off = 60000; + else + freq_off = 50000; + + /* clock table init */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (i == 1) + freq_table[i].driver_data = freq_off * 24; + if (i == 2) + freq_table[i].frequency = freq_off * 36; + if (i > 2) + freq_table[i].frequency = freq_off * 38 + ((i - 3) * freq_off); + + if (freq_table[i].frequency == max_rate) + freq_table[i + 1].frequency = CPUFREQ_TABLE_END; + } + + return platform_device_register(&sw64_cpufreq_device); +} +arch_initcall(sw64_cpufreq_init); + +char curruent_policy[CPUFREQ_NAME_LEN]; + +static struct clk cpu_clk = { + .name = "cpu_clk", + .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, + .rate = 2400000000, +}; + +struct clk *sw64_clk_get(struct device *dev, const char *id) +{ + return &cpu_clk; +} +EXPORT_SYMBOL(sw64_clk_get); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy) +{ + int i; + u64 val; + struct cpufreq_frequency_table *ft = policy->freq_table; + + val = sw64_io_read(0, CLK_CTL) >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; ft[i].frequency != CPUFREQ_TABLE_END; i++) { + if (val == i) + return ft[i].frequency; + } + return 0; +} +EXPORT_SYMBOL(__sw64_cpufreq_get); + +void sw64_set_rate(unsigned int index) +{ + unsigned int i, val; + int cpu_num; + + cpu_num = sw64_chip->get_cpu_num(); + + for (i = 0; i < cpu_num; i++) { + sw64_io_write(i, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + val = sw64_io_read(i, CLK_CTL); + + sw64_io_write(i, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + val = sw64_io_read(i, CLK_CTL); + + /* LV1 select PLL1/PLL2 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + + /* Set CLK_CTL PLL0 */ + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + /* LV1 select PLL0/PLL1 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + } +} +EXPORT_SYMBOL_GPL(sw64_set_rate); -- Gitee From b83285a88070740278172ca4888922460318b928 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:06 +0800 Subject: [PATCH 333/953] anolis: sw64: add dynamic turning on/off cores support ANBZ: #4688 Add dynamic turning on/off cores support for SW64 based xuelang platform. 
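
The tunables land in sysfs under the cpuautoplug attribute group created
below (enabled, verbose, maxcpus, mincpus, sampling_rate,
available_value); a hedged user-space sketch of flipping the enable
switch (the path assumes the default cpu_subsys location):

	#include <stdio.h>

	static int set_autoplug(int on)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpuautoplug/enabled", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", on);	/* 0 or 1, as parsed by enabled_store() */
		return fclose(f);
	}
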
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/cputime.h | 9 + arch/sw_64/kernel/cpuautoplug.c | 485 +++++++++++++++++++++++++++++++ 2 files changed, 494 insertions(+) create mode 100644 arch/sw_64/include/asm/cputime.h create mode 100644 arch/sw_64/kernel/cpuautoplug.c diff --git a/arch/sw_64/include/asm/cputime.h b/arch/sw_64/include/asm/cputime.h new file mode 100644 index 000000000000..cdd46b05e228 --- /dev/null +++ b/arch/sw_64/include/asm/cputime.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPUTIME_H +#define _ASM_SW64_CPUTIME_H + +typedef u64 __nocast cputime64_t; + +#define jiffies64_to_cputime64(__jif) ((__force cputime64_t)(__jif)) + +#endif /* _ASM_SW64_CPUTIME_H */ diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c new file mode 100644 index 000000000000..a7571a77a72c --- /dev/null +++ b/arch/sw_64/kernel/cpuautoplug.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +int autoplug_enabled; +int autoplug_verbose; +int autoplug_adjusting; + +DEFINE_PER_CPU(int, cpu_adjusting); + +struct cpu_autoplug_info { + cputime64_t prev_idle; + cputime64_t prev_wall; + struct delayed_work work; + unsigned int sampling_rate; + int maxcpus; /* max cpus for autoplug */ + int mincpus; /* min cpus for autoplug */ + int dec_reqs; /* continuous core-decreasing requests */ + int inc_reqs; /* continuous core-increasing requests */ +}; + +struct cpu_autoplug_info ap_info; + +static ssize_t enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", autoplug_enabled); +} + + +static ssize_t enabled_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > 1 || n < 0) + return -EINVAL; + + autoplug_enabled = n; + + return count; +} + +static ssize_t verbose_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", autoplug_verbose); +} + +static ssize_t verbose_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > 1 || n < 0) + return -EINVAL; + + autoplug_verbose = n; + + return count; +} + +static ssize_t maxcpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.maxcpus); +} + +static ssize_t maxcpus_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > num_possible_cpus() || n < ap_info.mincpus) + return -EINVAL; + + ap_info.maxcpus = n; + + return count; +} + +static ssize_t mincpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.mincpus); +} + +static ssize_t mincpus_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > ap_info.maxcpus || n < 1) + return -EINVAL; + + ap_info.mincpus = n; + + return count; +} + +static ssize_t sampling_rate_show(struct device *dev, + 
struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.sampling_rate); +} + +#define SAMPLING_RATE_MAX 1000 +#define SAMPLING_RATE_MIN 600 + +static ssize_t sampling_rate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[6]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > SAMPLING_RATE_MAX || n < SAMPLING_RATE_MIN) + return -EINVAL; + + ap_info.sampling_rate = n; + + return count; +} + +static ssize_t available_value_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "enabled: 0-1\nverbose: 0-1\nmaxcpus:" + "1-%d\nmincpus: 1-%d\nsampling_rate: %d-%d\n", + num_possible_cpus(), num_possible_cpus(), + SAMPLING_RATE_MIN, SAMPLING_RATE_MAX); +} + +static DEVICE_ATTR_RW(enabled); +static DEVICE_ATTR_RW(verbose); +static DEVICE_ATTR_RW(maxcpus); +static DEVICE_ATTR_RW(mincpus); +static DEVICE_ATTR_RW(sampling_rate); +static DEVICE_ATTR_RO(available_value); + +static struct attribute *cpuclass_default_attrs[] = { + &dev_attr_enabled.attr, + &dev_attr_verbose.attr, + &dev_attr_maxcpus.attr, + &dev_attr_mincpus.attr, + &dev_attr_sampling_rate.attr, + &dev_attr_available_value.attr, + NULL +}; + +static struct attribute_group cpuclass_attr_group = { + .attrs = cpuclass_default_attrs, + .name = "cpuautoplug", +}; + +static int __init setup_autoplug(char *str) +{ + if (!strcmp(str, "off")) + autoplug_enabled = 0; + else if (!strcmp(str, "on")) + autoplug_enabled = 1; + else + return 0; + return 1; +} + +__setup("autoplug=", setup_autoplug); + +static cputime64_t calc_busy_time(unsigned int cpu) +{ + cputime64_t busy_time; + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + busy_time += 1; + + return busy_time; +} + +static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall) +{ + unsigned int cpu; + cputime64_t idle_time = 0; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + for_each_online_cpu(cpu) { + busy_time = calc_busy_time(cpu); + + idle_time += cur_wall_time - busy_time; + } + + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_idle_time(cputime64_t *wall) +{ + unsigned int cpu; + u64 idle_time = 0; + + for_each_online_cpu(cpu) { + idle_time += get_cpu_idle_time_us(cpu, wall); + if (idle_time == -1ULL) + return get_idle_time_jiffy(wall); + } + + return idle_time; +} + +static cputime64_t get_min_busy_time(cputime64_t arr[], int size) +{ + int i, min_cpu_idx; + cputime64_t min_time = arr[0]; + + for (i = 0; i < size; i++) { + if (arr[i] > 0 && arr[i] < min_time) { + min_time = arr[i]; + min_cpu_idx = i; + } + } + + return min_cpu_idx; +} + +static int find_min_busy_cpu(void) +{ + int nr_all_cpus = num_possible_cpus(); + unsigned int cpus, target_cpu; + cputime64_t busy_time; + cputime64_t b_time[NR_CPUS]; + + memset(b_time, 0, sizeof(b_time)); + for_each_online_cpu(cpus) { + busy_time = calc_busy_time(cpus); + b_time[cpus] = busy_time; + } + target_cpu = get_min_busy_time(b_time, nr_all_cpus); + return target_cpu; +} + +static void increase_cores(int cur_cpus) +{ + struct device 
*dev; + + if (cur_cpus == ap_info.maxcpus) + return; + + cur_cpus = cpumask_next_zero(0, cpu_online_mask); + + dev = get_cpu_device(cur_cpus); + + per_cpu(cpu_adjusting, dev->id) = 1; + lock_device_hotplug(); + cpu_device_up(dev); + pr_info("The target_cpu is %d, After cpu_up, the cpu_num is %d\n", + dev->id, num_online_cpus()); + get_cpu_device(dev->id)->offline = false; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; +} + +static void decrease_cores(int cur_cpus) +{ + struct device *dev; + + if (cur_cpus == ap_info.mincpus) + return; + + cur_cpus = find_min_busy_cpu(); + + dev = get_cpu_device(cur_cpus); + + if (dev->id > 0) { + per_cpu(cpu_adjusting, dev->id) = -1; + lock_device_hotplug(); + cpu_device_down(dev); + pr_info("The target_cpu is %d. After cpu_down, the cpu_num is %d\n", + cur_cpus, num_online_cpus()); + get_cpu_device(dev->id)->offline = true; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; + } +} + +#define INC_THRESHOLD 80 +#define DEC_THRESHOLD 40 + +static void do_autoplug_timer(struct work_struct *work) +{ + cputime64_t cur_wall_time = 0, cur_idle_time; + unsigned long idle_time, wall_time; + int delay, load; + int nr_cur_cpus = num_online_cpus(); + int nr_all_cpus = num_possible_cpus(); + int inc_req = 1, dec_req = 2; + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(smp_processor_id()); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", 0); + return; + } + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = ap_info.maxcpus / 4; + + if (strcmp(policy->governor->name, "performance") == 0) { + ap_info.mincpus = ap_info.maxcpus; + } else if (strcmp(policy->governor->name, "powersave") == 0) { + ap_info.maxcpus = ap_info.mincpus; + } else if (strcmp(policy->governor->name, "ondemand") == 0) { + ap_info.sampling_rate = 500; + inc_req = 0; + dec_req = 2; + } else if (strcmp(policy->governor->name, "conservative") == 0) { + inc_req = 1; + dec_req = 3; + ap_info.sampling_rate = 1000; /* 1s */ + } + + BUG_ON(smp_processor_id() != 0); + delay = msecs_to_jiffies(ap_info.sampling_rate); + if (!autoplug_enabled || system_state != SYSTEM_RUNNING) + goto out; + + autoplug_adjusting = 1; + + if (nr_cur_cpus > ap_info.maxcpus) { + decrease_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + if (nr_cur_cpus < ap_info.mincpus) { + increase_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + + cur_idle_time = get_idle_time(&cur_wall_time); + if (cur_wall_time == 0) + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + wall_time = (unsigned int)(cur_wall_time - ap_info.prev_wall); + ap_info.prev_wall = cur_wall_time; + + idle_time = (unsigned int)(cur_idle_time - ap_info.prev_idle); + idle_time += wall_time * (nr_all_cpus - nr_cur_cpus); + ap_info.prev_wall = cur_idle_time; + + if (unlikely(!wall_time || wall_time * nr_all_cpus < idle_time)) { + autoplug_adjusting = 0; + goto out; + } + + load = 100 * (wall_time * nr_all_cpus - idle_time) / wall_time; + + if (load < (nr_cur_cpus - 1) * 100 - DEC_THRESHOLD) { + ap_info.inc_reqs = 0; + if (ap_info.dec_reqs < dec_req) + ap_info.dec_reqs++; + else { + ap_info.dec_reqs = 0; + decrease_cores(nr_cur_cpus); + } + } else { + ap_info.dec_reqs = 0; + if (load > (nr_cur_cpus - 1) * 100 + INC_THRESHOLD) { + if (ap_info.inc_reqs < inc_req) + ap_info.inc_reqs++; + else { + ap_info.inc_reqs = 0; + increase_cores(nr_cur_cpus); + } + } + } + + 
autoplug_adjusting = 0; +out: + schedule_delayed_work_on(0, &ap_info.work, delay); +} + +static struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpuautoplug", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpuautoplug", + .owner = THIS_MODULE, + }, + .id_table = platform_device_ids, +}; + +static int __init cpuautoplug_init(void) +{ + int i, ret, delay; + + ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpuclass_attr_group); + if (ret) + return ret; + + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("cpuautoplug: SW64 CPU autoplug driver.\n"); + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = ap_info.maxcpus / 4; + ap_info.dec_reqs = 0; + ap_info.inc_reqs = 0; + ap_info.sampling_rate = 720; /* 720ms */ + if (setup_max_cpus == 0) { /* boot with npsmp */ + ap_info.maxcpus = 1; + autoplug_enabled = 0; + } + if (setup_max_cpus > num_possible_cpus()) + ap_info.maxcpus = num_possible_cpus(); + + pr_info("mincpu = %d, maxcpu = %d, autoplug_enabled = %d, rate = %d\n", + ap_info.mincpus, ap_info.maxcpus, autoplug_enabled, + ap_info.sampling_rate); + + for_each_possible_cpu(i) + per_cpu(cpu_adjusting, i) = 0; + delay = msecs_to_jiffies(ap_info.sampling_rate * 24); + INIT_DEFERRABLE_WORK(&ap_info.work, do_autoplug_timer); + schedule_delayed_work_on(0, &ap_info.work, delay); + + if (!autoplug_enabled) + cancel_delayed_work_sync(&ap_info.work); + + return ret; +} + +late_initcall(cpuautoplug_init); -- Gitee From bd1394791023cfe2508a505c9a0c37601c71c12b Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:38:32 +0800 Subject: [PATCH 334/953] anolis: sw64: fix build support ANBZ: #4688 Modify scripts for SW64 build support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- scripts/package/buildtar | 3 +++ scripts/package/mkdebian | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/package/buildtar b/scripts/package/buildtar index 65b4ea502962..93158943a4f7 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -64,6 +64,9 @@ case "${ARCH}" in alpha) [ -f "${objtree}/arch/alpha/boot/vmlinux.gz" ] && cp -v -- "${objtree}/arch/alpha/boot/vmlinux.gz" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}" ;; + sw_64) + [ -f "${objtree}/arch/sw_64/boot/vmlinux.bin" ] && cp -v -- "${objtree}/arch/sw_64/boot/vmlinux.bin" "${tmpdir}/boot/vmlinux-bin-${KERNELRELEASE}" + ;; parisc*) [ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}" [ -f "${objtree}/lifimage" ] && cp -v -- "${objtree}/lifimage" "${tmpdir}/boot/lifimage-${KERNELRELEASE}" diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian index 5044224cf671..2586bcd5f43a 100755 --- a/scripts/package/mkdebian +++ b/scripts/package/mkdebian @@ -26,7 +26,7 @@ set_debarch() { # Attempt to find the correct Debian architecture case "$UTS_MACHINE" in - i386|ia64|alpha|m68k|riscv*) + i386|ia64|alpha|m68k|riscv*|sw_64) debarch="$UTS_MACHINE" ;; x86_64) debarch=amd64 ;; -- Gitee From b15274c5f722a2dbf9ab392201d2cf0c7e4c04e1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:36:01 +0800 Subject: [PATCH 335/953] anolis: sw64: fix ELF support ANBZ: #4688 Modify generic headers for SW64 ELF support. 
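
A loader or inspection tool can then recognize SW64 objects by machine
number; a minimal sketch (EM_SW64 is the constant added below; the
fallback define is only for building against pre-patch headers):

	#include <elf.h>

	#ifndef EM_SW64
	#define EM_SW64 0x9916
	#endif

	static int is_sw64_elf(const Elf64_Ehdr *eh)
	{
		return eh->e_machine == EM_SW64;
	}
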
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/uapi/linux/elf-em.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index ef38c2bc5ab7..32458706a403 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -59,6 +59,7 @@ * up with a final number. */ #define EM_ALPHA 0x9026 +#define EM_SW64 0x9916 /* Bogus old m32r magic number, used by old tools. */ #define EM_CYGNUS_M32R 0x9041 -- Gitee From ac1b63940c6655a75ec7903150b63a680116ee8e Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:37:09 +0800 Subject: [PATCH 336/953] anolis: sw64: fix rrk support ANBZ: #4688 Modify generic routines for SW64 rrk support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- kernel/printk/printk.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 35d32d66fb11..d142b6fe404f 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2202,10 +2202,17 @@ int vprintk_store(int facility, int level, u16 text_len; int ret = 0; u64 ts_nsec; +#ifdef CONFIG_SW64_RRK + extern int sw64_printk(const char *fmt, va_list args); +#endif if (!printk_enter_irqsave(recursion_ptr, irqflags)) return 0; +#ifdef CONFIG_SW64_RRK + sw64_printk(fmt, args); +#endif + /* * Since the duration of printk() can vary depending on the message * and state of the ringbuffer, grab the timestamp now so that it is -- Gitee From 248bf93e83cf98cac74138af4a3a5c33ffac698c Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:30:40 +0800 Subject: [PATCH 337/953] anolis: sw64: fix ACPI support ANBZ: #4688 Modify generic headers for SW64 ACPI support. 
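
The capability words in the new header are plain ORs of the _PDC flag
bits; for example, ACPI_PDC_EST_CAPABILITY_SMP works out to:

	/* ACPI_PDC_SMP_C1PT | ACPI_PDC_C_C1_HALT | ACPI_PDC_P_FFH
	 *     = 0x0008     |       0x0002        |     0x0001
	 *     = 0x000b
	 */
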
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/acpi/pdc_sw64.h | 34 ++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 2 +- 2 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 include/acpi/pdc_sw64.h diff --git a/include/acpi/pdc_sw64.h b/include/acpi/pdc_sw64.h new file mode 100644 index 000000000000..4724f10e8c6a --- /dev/null +++ b/include/acpi/pdc_sw64.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_PDC_SW64_H +#define _ASM_PDC_SW64_H + +#define ACPI_PDC_P_FFH (0x0001) +#define ACPI_PDC_C_C1_HALT (0x0002) +#define ACPI_PDC_T_FFH (0x0004) +#define ACPI_PDC_SMP_C1PT (0x0008) +#define ACPI_PDC_SMP_C2C3 (0x0010) +#define ACPI_PDC_SMP_P_SWCOORD (0x0020) +#define ACPI_PDC_SMP_C_SWCOORD (0x0040) +#define ACPI_PDC_SMP_T_SWCOORD (0x0080) +#define ACPI_PDC_C_C1_FFH (0x0100) +#define ACPI_PDC_C_C2C3_FFH (0x0200) +#define ACPI_PDC_SMP_P_HWCOORD (0x0800) + +#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_SMP_P_SWCOORD | \ + ACPI_PDC_SMP_P_HWCOORD | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ + ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_C_C1_FFH | \ + ACPI_PDC_C_C2C3_FFH) + +#endif /* _ASM_PDC_SW64_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index afd94c9b8b8a..ba2f96c9a574 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -259,7 +259,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void -- Gitee From af09ac4d3f6578c1287fa95e57b509221dca2292 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:33:32 +0800 Subject: [PATCH 338/953] anolis: sw64: fix module support ANBZ: #4688 Modify generic headers for SW64 kernel module support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/linux/moduleparam.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 962cd41a2cb5..c401b4e975cc 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -276,7 +276,7 @@ struct kparam_array read-only sections (which is part of respective UNIX ABI on these platforms). So 'const' makes no sense and even causes compile failures with some compilers. */ -#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) || defined(CONFIG_SW64) #define __moduleparam_const #else #define __moduleparam_const const -- Gitee From 4aefe6c23e25e95bfb735b1d0217faa2dd9062be Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:32:24 +0800 Subject: [PATCH 339/953] anolis: sw64: fix KVM support ANBZ: #4688 Modify generic headers and routines for SW64 KVM support. 
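
User space drives the new vcpu ioctls in the usual KVM open/create
sequence; a hedged sketch (the SW64-specific ioctl numbers are the ones
added to kvm.h below; error handling elided):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

		/* SW64-specific vcpu setup added by this patch; the vcpu
		 * control block moves via KVM_SW64_GET_VCB and
		 * KVM_SW64_SET_VCB around KVM_RUN.
		 */
		ioctl(vcpu, KVM_SW64_VCPU_INIT, 0);
		return 0;
	}
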
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/linux/kvm_host.h | 18 ++++++ include/uapi/linux/kvm.h | 5 ++ virt/kvm/kvm_main.c | 126 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fb6c6109fdca..d027f8fd23bf 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1766,6 +1766,9 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, +#ifdef CONFIG_SW64 + KVM_STAT_DFX_SW64, /* Detail For vcpu stat EXtension */ +#endif }; struct kvm_stat_data { @@ -1895,6 +1898,21 @@ struct _kvm_stats_desc { HALT_POLL_HIST_COUNT), \ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) +#ifdef CONFIG_SW64 +enum dfx_sw64_stat_kind { + DFX_SW64_STAT_U64, + DFX_SW64_STAT_CPUTIME, +}; + +/* Detail For vcpu stat EXtension debugfs item */ +struct dfx_sw64_kvm_stats_debugfs_item { + const char *name; + int offset; + enum dfx_sw64_stat_kind dfx_kind; + struct dentry *dentry; +}; +extern struct dfx_sw64_kvm_stats_debugfs_item dfx_sw64_debugfs_entries[]; +#endif extern struct dentry *kvm_debugfs_dir; ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 863f84619a15..2eea9dd73c64 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1572,6 +1572,11 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +/* ioctls for SW64 vcpu init */ +#define KVM_SW64_VCPU_INIT _IO(KVMIO, 0xba) +#define KVM_SW64_GET_VCB _IO(KVMIO, 0xbc) +#define KVM_SW64_SET_VCB _IO(KVMIO, 0xbd) + /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 486800a7024b..8bf0c5b84d3a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -154,6 +154,11 @@ static unsigned long long kvm_active_vms; static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); +#ifdef CONFIG_SW64 +#define DFX_SW64_MAX_VCPU 1024 +#define DFX_SW64_MAX_VCPU_STAT_SIZE 1024 +#endif + __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) { } @@ -4157,6 +4162,9 @@ static long kvm_vcpu_ioctl(struct file *filp, if (oldpid) synchronize_rcu(); put_pid(oldpid); +#ifdef CONFIG_SW64 + vcpu->stat.pid = current->pid; +#endif } r = kvm_arch_vcpu_ioctl_run(vcpu); trace_kvm_userspace_exit(vcpu->run->exit_reason, r); @@ -5748,6 +5756,10 @@ static int kvm_stat_data_get(void *data, u64 *val) r = kvm_get_stat_per_vcpu(stat_data->kvm, stat_data->desc->desc.offset, val); break; +#ifdef CONFIG_SW64 + case KVM_STAT_DFX_SW64: + break; +#endif } return r; @@ -5770,6 +5782,10 @@ static int kvm_stat_data_clear(void *data, u64 val) r = kvm_clear_stat_per_vcpu(stat_data->kvm, stat_data->desc->desc.offset); break; +#ifdef CONFIG_SW64 + case KVM_STAT_DFX_SW64: + break; +#endif } return r; @@ -5864,6 +5880,116 @@ DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); +#ifdef CONFIG_SW64 +void __weak kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat) +{ +} + +/* + * Copy of the kernel's seq_buf_alloc(), which is not exported. + */ +static void *dfx_sw64_seq_buf_alloc(unsigned long size) +{ + return
kvmalloc(size, GFP_KERNEL_ACCOUNT); +} + +static void dfx_sw64_seq_buf_free(const void *buf) +{ + kvfree(buf); +} + +static int dfx_sw64_seq_buf_alloc_vcpu(struct seq_file *p, int vcpu_nr) +{ + char *buf; + size_t size; + + size = (vcpu_nr + 1) * DFX_SW64_MAX_VCPU_STAT_SIZE; + buf = dfx_sw64_seq_buf_alloc(size); + if (!buf) + return -ENOMEM; + if (p->buf) + dfx_sw64_seq_buf_free(p->buf); + p->buf = buf; + p->size = size; + return 0; +} + +static int __dfx_sw64_vcpu_stats_get(struct seq_file *p, void *v) +{ + struct kvm *kvm; + struct kvm_vcpu *vcpu; + struct kvm_vcpu_stat *vcpu_stats; + struct dfx_sw64_kvm_stats_debugfs_item *dp; + int vcpu_nr = 0; + int index = 0; + unsigned long i; + + mutex_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) + kvm_for_each_vcpu(i, vcpu, kvm) { + vcpu_nr++; + } + mutex_unlock(&kvm_lock); + vcpu_nr = min(vcpu_nr, DFX_SW64_MAX_VCPU); + if (!vcpu_nr) { + seq_putc(p, '\n'); + return 0; + } + + if (dfx_sw64_seq_buf_alloc_vcpu(p, vcpu_nr)) + return -ENOMEM; + + vcpu_stats = vmalloc(vcpu_nr * sizeof(struct kvm_vcpu_stat)); + if (!vcpu_stats) + return -ENOMEM; + + mutex_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) { + kvm_for_each_vcpu(i, vcpu, kvm) { + if (index >= vcpu_nr) + break; + memcpy(vcpu_stats + index, &(vcpu->stat), + sizeof(struct kvm_vcpu_stat)); + kvm_arch_vcpu_stat_reset(&vcpu->stat); + ++index; + } + } + mutex_unlock(&kvm_lock); + for (i = 0; i < vcpu_nr; i++) { + for (dp = dfx_sw64_debugfs_entries; dp->name; ++dp) { + switch (dp->dfx_kind) { + case DFX_SW64_STAT_U64: + seq_put_decimal_ull(p, " ", + *(u64 *)((void *)&vcpu_stats[i] + dp->offset)); + break; + case DFX_SW64_STAT_CPUTIME: + pr_warn("DFX_SW64_STAT_CPUTIME not supported currently!"); + break; + default: + pr_warn("Bad dfx_sw64_kind in dfx_debugfs_entries!"); + break; + } + } + seq_putc(p, '\n'); + } + + vfree(vcpu_stats); + return 0; +} + +static int dfx_sw64_vcpu_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, __dfx_sw64_vcpu_stats_get, NULL); +} + +static const struct file_operations dfx_sw64_stat_fops = { + .open = dfx_sw64_vcpu_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) { struct kobj_uevent_env *env; -- Gitee From 7aa008efd58d04a1a46baa4c668d38392f56da87 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:34:00 +0800 Subject: [PATCH 340/953] anolis: sw64: fix PCI support ANBZ: #4688 Modify generic headers for SW64 PCI support. 
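Note that the header change below only exports the symbol; the sw64_pci_ecam_ops definition itself lives in the SW64 arch tree and is not part of this patch. For orientation, a pci_ecam_ops instance is typically built from the generic ECAM accessors like this sketch (the bus_shift value is an assumption):

	#include <linux/pci-ecam.h>

	const struct pci_ecam_ops sw64_pci_ecam_ops = {
		.bus_shift	= 20,			/* assumed ECAM geometry */
		.pci_ops	= {
			.map_bus	= pci_ecam_map_bus,
			.read		= pci_generic_config_read,
			.write		= pci_generic_config_write,
		},
	};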
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/linux/pci-ecam.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 6b1301e2498e..863e572202e2 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -88,6 +88,7 @@ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */ +extern const struct pci_ecam_ops sw64_pci_ecam_ops; /* SW64 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) -- Gitee From 0fd299485edcbcb7ecca4e00104fd8d3fe6148fe Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:36:22 +0800 Subject: [PATCH 341/953] anolis: sw64: fix kexec support ANBZ: #4688 Modify generic headers for SW64 kexec support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/uapi/linux/kexec.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 01766dd839b0..3be3e81c67ae 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -45,6 +45,7 @@ #define KEXEC_ARCH_AARCH64 (183 << 16) #define KEXEC_ARCH_RISCV (243 << 16) #define KEXEC_ARCH_LOONGARCH (258 << 16) +#define KEXEC_ARCH_SW64 (0x9916UL << 16) /* The artificial cap on the number of segments passed to kexec_load. */ #define KEXEC_SEGMENT_MAX 16 -- Gitee From 19e19141803b1c542a65fce9ac7c7aae0bed9c96 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:36:41 +0800 Subject: [PATCH 342/953] anolis: sw64: fix audit support ANBZ: #4688 Modify generic headers for SW64 audit support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/uapi/linux/audit.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d676ed2b246e..f428015e85de 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -441,6 +441,7 @@ enum { #define AUDIT_ARCH_XTENSA (EM_XTENSA) #define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE) #define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_SW64 (EM_SW64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_PERM_EXEC 1 #define AUDIT_PERM_WRITE 2 -- Gitee From 39fbe2994d5d3068c6e7f63195a46e3f23f45b0c Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:38:04 +0800 Subject: [PATCH 343/953] anolis: sw64: fix ftrace support ANBZ: #4688 Modify scripts for SW64 ftrace support. 
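The recordmcount heuristic added below drops compiler-generated duplicate mcount relocations: a relocation landing exactly SW64_FAKEMCOUNT_OFFSET (4) bytes after the previous one is treated as fake. A worked illustration of the logic (offsets invented for the example):

	/* Relocation offsets seen while scanning a section:
	 *   0x0100 -> first site, old_r_offset still ~0, kept
	 *   0x0104 -> 0x0104 - 0x0100 == 4, flagged fake and skipped
	 *   0x01a0 -> delta != 4, kept
	 * mcount_adjust_64 = -12 then biases each recorded address back to
	 * the start of the mcount call sequence.
	 */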
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- scripts/recordmcount.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 40ae6b2c7a6d..73558f7eb690 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -52,6 +52,12 @@ #define R_AARCH64_CALL26 283 +#ifndef EM_SW64 +#define EM_SW64 0x9916 +#define R_SW64_NONE 0 +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#endif + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. */ static char gpfx; /* prefix for global symbol name (sometimes '_') */ @@ -326,6 +332,16 @@ static int make_nop_arm64(void *map, size_t const offset) return 0; } +static unsigned char ideal_nop4_sw64[4] = {0x5f, 0x07, 0xff, 0x43}; + +static int make_nop_sw64(void *map, size_t const offset) +{ + /* Convert to nop */ + ulseek(offset, SEEK_SET); + uwrite(ideal_nop, 4); + return 0; +} + static int write_file(const char *fname) { char tmp_file[strlen(fname) + 4]; @@ -475,6 +491,21 @@ static int LARCH64_is_fake_mcount(Elf64_Rel const *rp) return 1; } +#define SW64_FAKEMCOUNT_OFFSET 4 + +static int sw64_is_fake_mcount(Elf64_Rel const *rp) +{ + static Elf64_Addr old_r_offset = ~(Elf64_Addr)0; + Elf64_Addr current_r_offset = _w(rp->r_offset); + int is_fake; + + is_fake = (old_r_offset != ~(Elf64_Addr)0) && + (current_r_offset - old_r_offset == SW64_FAKEMCOUNT_OFFSET); + old_r_offset = current_r_offset; + + return is_fake; +} + /* 64-bit EM_MIPS has weird ELF64_Rela.r_info. * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40] @@ -598,6 +629,14 @@ static int do_file(char const *const fname) case EM_S390: /* reltype: e_class */ break; case EM_SH: reltype = R_SH_DIR32; gpfx = 0; break; case EM_SPARCV9: reltype = R_SPARC_64; break; + case EM_SW64: + reltype = R_SW64_REFQUAD; + make_nop = make_nop_sw64; + rel_type_nop = R_SW64_NONE; + ideal_nop = ideal_nop4_sw64; + mcount_adjust_64 = -12; + is_fake_mcount64 = sw64_is_fake_mcount; + break; case EM_X86_64: make_nop = make_nop_x86; ideal_nop = ideal_nop5_x86_64; -- Gitee From 1625356dd0321150a55edb7c4e7f51fb0e62d324 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:23:00 +0800 Subject: [PATCH 344/953] anolis: tools: add basic sw64 support ANBZ: #4688 Add common headers and routines for SW64 support. 
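Of the headers added below, barrier.h carries the behavioral part: like Alpha, SW64 implements mb()/rmb()/wmb() with the single "mb" instruction. A sketch of the producer-side pattern these barriers serve in tools/ (perf-style ring buffer; names are illustrative only):

	#include <asm/barrier.h>	/* resolves to the sw_64 copy on SW64 builds */

	static unsigned long ring[64];
	static volatile unsigned long head;

	static void publish(unsigned long v)	/* illustrative producer */
	{
		ring[head % 64] = v;	/* write the payload first */
		wmb();			/* order payload before the index update */
		head = head + 1;	/* now a consumer may read ring[head - 1] */
	}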
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/arch/sw_64/include/asm/barrier.h | 9 ++ .../arch/sw_64/include/uapi/asm/bitsperlong.h | 9 ++ tools/arch/sw_64/include/uapi/asm/errno.h | 128 ++++++++++++++++++ tools/arch/sw_64/include/uapi/asm/mman.h | 46 +++++++ tools/arch/sw_64/include/uapi/asm/perf_regs.h | 41 ++++++ tools/build/feature/test-libunwind-sw_64.c | 27 ++++ 6 files changed, 260 insertions(+) create mode 100644 tools/arch/sw_64/include/asm/barrier.h create mode 100644 tools/arch/sw_64/include/uapi/asm/bitsperlong.h create mode 100644 tools/arch/sw_64/include/uapi/asm/errno.h create mode 100644 tools/arch/sw_64/include/uapi/asm/mman.h create mode 100644 tools/arch/sw_64/include/uapi/asm/perf_regs.h create mode 100644 tools/build/feature/test-libunwind-sw_64.c diff --git a/tools/arch/sw_64/include/asm/barrier.h b/tools/arch/sw_64/include/asm/barrier.h new file mode 100644 index 000000000000..bc4aeffeb681 --- /dev/null +++ b/tools/arch/sw_64/include/asm/barrier.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TOOLS_LINUX_ASM_SW64_BARRIER_H +#define _TOOLS_LINUX_ASM_SW64_BARRIER_H + +#define mb() __asm__ __volatile__("mb" : : : "memory") +#define rmb() __asm__ __volatile__("mb" : : : "memory") +#define wmb() __asm__ __volatile__("mb" : : : "memory") + +#endif /* _TOOLS_LINUX_ASM_SW64_BARRIER_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/bitsperlong.h b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000000..f6a510c28233 --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_SW64_BITSPERLONG_H +#define __ASM_SW64_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include <asm-generic/bitsperlong.h> + +#endif /* __ASM_SW64_BITSPERLONG_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/errno.h b/tools/arch/sw_64/include/uapi/asm/errno.h new file mode 100644 index 000000000000..2a43a943581a --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/errno.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _SW64_ERRNO_H +#define _SW64_ERRNO_H + +#include <asm-generic/errno-base.h> + +#undef EAGAIN /* 11 in errno-base.h */ + +#define EDEADLK 11 /* Resource deadlock would occur */ + +#define EAGAIN 35 /* Try again */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. */ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* 
Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _SW64_ERRNO_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/mman.h b/tools/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 000000000000..a9603c93a34b --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define MADV_DODUMP 17 +#define MADV_DOFORK 11 +#define MADV_DONTDUMP 16 +#define MADV_DONTFORK 10 +#define MADV_DONTNEED 6 +#define MADV_FREE 8 +#define MADV_HUGEPAGE 14 +#define MADV_MERGEABLE 12 +#define MADV_NOHUGEPAGE 15 +#define MADV_NORMAL 0 +#define MADV_RANDOM 1 +#define MADV_REMOVE 9 +#define MADV_SEQUENTIAL 2 +#define MADV_UNMERGEABLE 13 +#define MADV_WILLNEED 3 +#define MAP_ANONYMOUS 0x10 +#define MAP_DENYWRITE 0x02000 +#define MAP_EXECUTABLE 0x04000 +#define MAP_FILE 0 +#define MAP_FIXED 0x100 +#define MAP_GROWSDOWN 0x01000 +#define MAP_HUGETLB 0x100000 +#define MAP_LOCKED 0x08000 +#define MAP_NONBLOCK 0x40000 +#define MAP_NORESERVE 0x10000 +#define MAP_POPULATE 0x20000 +#define MAP_STACK 0x80000 +#define PROT_EXEC 0x4 +#define PROT_GROWSDOWN 0x01000000 +#define PROT_GROWSUP 0x02000000 +#define PROT_NONE 0x0 +#define PROT_READ 0x1 +#define PROT_SEM 0x8 +#define PROT_WRITE 0x2 +/* MADV_HWPOISON is undefined on sw_64, fix it for perf */ +#define MADV_HWPOISON 100 +/* MADV_SOFT_OFFLINE is undefined on sw_64, fix it for perf */ +#define MADV_SOFT_OFFLINE 101 +/* MAP_32BIT is undefined on sw_64, fix it for perf */ +#define MAP_32BIT 0 +/* MAP_UNINITIALIZED is undefined on sw_64, fix it for perf */ +#define MAP_UNINITIALIZED 0 +#endif /* TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/perf_regs.h b/tools/arch/sw_64/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..892be5261026 --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/perf_regs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _ASM_SW64_PERF_REGS_H +#define _ASM_SW64_PERF_REGS_H + +enum perf_event_sw64_regs { + PERF_REG_SW64_R0, + PERF_REG_SW64_R1, + PERF_REG_SW64_R2, + PERF_REG_SW64_R3, + PERF_REG_SW64_R4, + PERF_REG_SW64_R5, + PERF_REG_SW64_R6, + PERF_REG_SW64_R7, + PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, + PERF_REG_SW64_R19, + PERF_REG_SW64_R20, + PERF_REG_SW64_R21, + PERF_REG_SW64_R22, + PERF_REG_SW64_R23, + PERF_REG_SW64_R24, + PERF_REG_SW64_R25, + PERF_REG_SW64_R26, + PERF_REG_SW64_R27, + PERF_REG_SW64_R28, + PERF_REG_SW64_GP, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, + PERF_REG_SW64_MAX, +}; +#endif /* _ASM_SW64_PERF_REGS_H */ diff --git a/tools/build/feature/test-libunwind-sw_64.c b/tools/build/feature/test-libunwind-sw_64.c new file mode 100644 index 000000000000..274948b961f4 --- /dev/null +++ 
b/tools/build/feature/test-libunwind-sw_64.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <libunwind.h> +#include <stdlib.h> + +extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, + unw_word_t ip, + unw_dyn_info_t *di, + unw_proc_info_t *pi, + int need_unwind_info, void *arg); + +#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) + +static unw_accessors_t accessors; + +int main(void) +{ + unw_addr_space_t addr_space; + + addr_space = unw_create_addr_space(&accessors, 0); + if (addr_space) + return 0; + + unw_init_remote(NULL, addr_space, NULL); + dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); + + return 0; +} -- Gitee From aa39647130c49f6451242192570d6443a132dc35 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:28:12 +0800 Subject: [PATCH 345/953] anolis: tools: fix basic sw64 support ANBZ: #4688 Modify generic headers and Makefiles for SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/build/Makefile.feature | 1 + tools/build/feature/Makefile | 9 +++++++++ tools/include/uapi/asm/bitsperlong.h | 2 ++ tools/include/uapi/asm/errno.h | 2 ++ 4 files changed, 14 insertions(+) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 934e2777a2db..fb290a90b263 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -54,6 +54,7 @@ FEATURE_TESTS_BASIC := \ libtracefs \ libcrypto \ libunwind \ + libunwind-sw_64 \ pthread-attr-setaffinity-np \ pthread-barrier \ reallocarray \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dad79ede4e0a..cb57e46cec4b 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 include ../../scripts/Makefile.include +ARCH ?= $(shell uname -m) FILES= \ test-all.bin \ test-backtrace.bin \ @@ -45,6 +46,7 @@ FILES= \ test-libunwind-x86_64.bin \ test-libunwind-arm.bin \ test-libunwind-aarch64.bin \ + test-libunwind-sw_64.bin \ test-libunwind-debug-frame-arm.bin \ test-libunwind-debug-frame-aarch64.bin \ test-pthread-attr-setaffinity-np.bin \ @@ -86,7 +88,11 @@ all: $(FILES) __BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS) BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1 +ifeq ($(ARCH),sw_64) + BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz +else BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl +endif BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS) @@ -189,6 +195,9 @@ $(OUTPUT)test-libunwind-arm.bin: $(OUTPUT)test-libunwind-aarch64.bin: $(BUILD) -lelf -lunwind-aarch64 +$(OUTPUT)test-libunwind-sw_64.bin: + $(BUILD) -lelf -lunwind-sw_64 + $(OUTPUT)test-libunwind-debug-frame-arm.bin: $(BUILD) -lelf -lunwind-arm diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index c65267afc341..036e2fc92d1a 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h @@ -13,6 +13,8 @@ #include "../../../arch/ia64/include/uapi/asm/bitsperlong.h" #elif defined(__alpha__) #include "../../../arch/alpha/include/uapi/asm/bitsperlong.h" +#elif defined(__sw_64__) +#include
"../../../arch/sw_64/include/uapi/asm/bitsperlong.h" #else #include #endif diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h index 869379f91fe4..bcfa3d742933 100644 --- a/tools/include/uapi/asm/errno.h +++ b/tools/include/uapi/asm/errno.h @@ -11,6 +11,8 @@ #include "../../../arch/mips/include/uapi/asm/errno.h" #elif defined(__hppa__) #include "../../../arch/parisc/include/uapi/asm/errno.h" +#elif defined(__sw_64__) +#include "../../../arch/sw_64/include/uapi/asm/errno.h" #else #include #endif -- Gitee From b85a3c334401012178b4bcd4319ca64779dd749e Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:27:19 +0800 Subject: [PATCH 346/953] anolis: perf: add sw64 support ANBZ: #4688 Add Build, Makefiles, common headers and routines for SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/perf/arch/sw_64/Build | 2 + tools/perf/arch/sw_64/Makefile | 4 + tools/perf/arch/sw_64/include/arch-tests.h | 12 +++ tools/perf/arch/sw_64/include/perf_regs.h | 92 ++++++++++++++++++ tools/perf/arch/sw_64/tests/Build | 3 + tools/perf/arch/sw_64/tests/arch-tests.c | 16 ++++ tools/perf/arch/sw_64/tests/dwarf-unwind.c | 63 +++++++++++++ tools/perf/arch/sw_64/tests/regs_load.S | 47 ++++++++++ tools/perf/arch/sw_64/util/Build | 4 + tools/perf/arch/sw_64/util/dwarf-regs.c | 94 +++++++++++++++++++ tools/perf/arch/sw_64/util/perf_regs.c | 6 ++ tools/perf/arch/sw_64/util/unwind-libdw.c | 60 ++++++++++++ tools/perf/arch/sw_64/util/unwind-libunwind.c | 84 +++++++++++++++++ tools/perf/util/libunwind/sw64.c | 33 +++++++ 14 files changed, 520 insertions(+) create mode 100644 tools/perf/arch/sw_64/Build create mode 100644 tools/perf/arch/sw_64/Makefile create mode 100644 tools/perf/arch/sw_64/include/arch-tests.h create mode 100644 tools/perf/arch/sw_64/include/perf_regs.h create mode 100644 tools/perf/arch/sw_64/tests/Build create mode 100644 tools/perf/arch/sw_64/tests/arch-tests.c create mode 100644 tools/perf/arch/sw_64/tests/dwarf-unwind.c create mode 100644 tools/perf/arch/sw_64/tests/regs_load.S create mode 100644 tools/perf/arch/sw_64/util/Build create mode 100644 tools/perf/arch/sw_64/util/dwarf-regs.c create mode 100644 tools/perf/arch/sw_64/util/perf_regs.c create mode 100644 tools/perf/arch/sw_64/util/unwind-libdw.c create mode 100644 tools/perf/arch/sw_64/util/unwind-libunwind.c create mode 100644 tools/perf/util/libunwind/sw64.c diff --git a/tools/perf/arch/sw_64/Build b/tools/perf/arch/sw_64/Build new file mode 100644 index 000000000000..36222e64bbf7 --- /dev/null +++ b/tools/perf/arch/sw_64/Build @@ -0,0 +1,2 @@ +perf-y += util/ +perf-$(CONFIG_DWARF_UNWIND) += tests/ diff --git a/tools/perf/arch/sw_64/Makefile b/tools/perf/arch/sw_64/Makefile new file mode 100644 index 000000000000..1aa9dd772489 --- /dev/null +++ b/tools/perf/arch/sw_64/Makefile @@ -0,0 +1,4 @@ +ifndef NO_DWARF +PERF_HAVE_DWARF_REGS := 1 +endif +PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 diff --git a/tools/perf/arch/sw_64/include/arch-tests.h b/tools/perf/arch/sw_64/include/arch-tests.h new file mode 100644 index 000000000000..90ec4c8cb880 --- /dev/null +++ b/tools/perf/arch/sw_64/include/arch-tests.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_TESTS_H +#define ARCH_TESTS_H + +#ifdef HAVE_DWARF_UNWIND_SUPPORT +struct thread; +struct perf_sample; +#endif + +extern struct test arch_tests[]; + +#endif diff --git 
a/tools/perf/arch/sw_64/include/perf_regs.h b/tools/perf/arch/sw_64/include/perf_regs.h new file mode 100644 index 000000000000..e0c1b15375b5 --- /dev/null +++ b/tools/perf/arch/sw_64/include/perf_regs.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_PERF_REGS_H +#define ARCH_PERF_REGS_H + +#include <stdlib.h> +#include <linux/types.h> +#include <asm/perf_regs.h> + +void perf_regs_load(u64 *regs); + +#define PERF_REGS_MASK ((1ULL << PERF_REG_SW64_MAX) - 1) +#define PERF_REGS_MAX PERF_REG_SW64_MAX +#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64 + +#define PERF_REG_IP PERF_REG_SW64_PC +#define PERF_REG_SP PERF_REG_SW64_SP + +static inline const char *perf_reg_name(int id) +{ + switch (id) { + case PERF_REG_SW64_R0: + return "r0"; + case PERF_REG_SW64_R1: + return "r1"; + case PERF_REG_SW64_R2: + return "r2"; + case PERF_REG_SW64_R3: + return "r3"; + case PERF_REG_SW64_R4: + return "r4"; + case PERF_REG_SW64_R5: + return "r5"; + case PERF_REG_SW64_R6: + return "r6"; + case PERF_REG_SW64_R7: + return "r7"; + case PERF_REG_SW64_R8: + return "r8"; + case PERF_REG_SW64_R9: + return "r9"; + case PERF_REG_SW64_R10: + return "r10"; + case PERF_REG_SW64_R11: + return "r11"; + case PERF_REG_SW64_R12: + return "r12"; + case PERF_REG_SW64_R13: + return "r13"; + case PERF_REG_SW64_R14: + return "r14"; + case PERF_REG_SW64_R15: + return "r15"; + case PERF_REG_SW64_R16: + return "r16"; + case PERF_REG_SW64_R17: + return "r17"; + case PERF_REG_SW64_R18: + return "r18"; + case PERF_REG_SW64_R19: + return "r19"; + case PERF_REG_SW64_R20: + return "r20"; + case PERF_REG_SW64_R21: + return "r21"; + case PERF_REG_SW64_R22: + return "r22"; + case PERF_REG_SW64_R23: + return "r23"; + case PERF_REG_SW64_R24: + return "r24"; + case PERF_REG_SW64_R25: + return "r25"; + case PERF_REG_SW64_R26: + return "r26"; + case PERF_REG_SW64_R27: + return "r27"; + case PERF_REG_SW64_R28: + return "r28"; + case PERF_REG_SW64_GP: + return "gp"; + case PERF_REG_SW64_SP: + return "sp"; + case PERF_REG_SW64_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +#endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/sw_64/tests/Build b/tools/perf/arch/sw_64/tests/Build new file mode 100644 index 000000000000..b8a38eadfb35 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/Build @@ -0,0 +1,3 @@ +perf-y += regs_load.o +perf-y += dwarf-unwind.o +perf-y += arch-tests.o diff --git a/tools/perf/arch/sw_64/tests/arch-tests.c b/tools/perf/arch/sw_64/tests/arch-tests.c new file mode 100644 index 000000000000..5b1543c98022 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/arch-tests.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <string.h> +#include "tests/tests.h" +#include "arch-tests.h" + +struct test arch_tests[] = { +#ifdef HAVE_DWARF_UNWIND_SUPPORT + { + .desc = "DWARF unwind", + .func = test__dwarf_unwind, + }, +#endif + { + .func = NULL, + }, +}; diff --git a/tools/perf/arch/sw_64/tests/dwarf-unwind.c b/tools/perf/arch/sw_64/tests/dwarf-unwind.c new file mode 100644 index 000000000000..cd7047b7a546 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/dwarf-unwind.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <string.h> +#include "perf_regs.h" +#include "thread.h" +#include "map.h" +#include "maps.h" +#include "event.h" +#include "debug.h" +#include "tests/tests.h" + +#define STACK_SIZE 8192 + +static int sample_ustack(struct perf_sample *sample, + struct thread *thread, u64 *regs) +{ + struct stack_dump *stack = &sample->user_stack; + struct map *map; + unsigned long sp; + u64 stack_size, *buf; + + buf = malloc(STACK_SIZE); 
+ if (!buf) { + printf("failed to allocate sample uregs data\n"); + return -1; + } + + sp = (unsigned long) regs[PERF_REG_SW64_SP]; + + map = maps__find(thread->maps, (u64)sp); + if (!map) { + printf("failed to get stack map\n"); + free(buf); + return -1; + } + + stack_size = map->end - sp; + stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size; + + memcpy(buf, (void *) sp, stack_size); + stack->data = (char *) buf; + stack->size = stack_size; + return 0; +} + +int test__arch_unwind_sample(struct perf_sample *sample, + struct thread *thread) +{ + struct regs_dump *regs = &sample->user_regs; + u64 *buf; + + buf = calloc(1, sizeof(u64) * PERF_REGS_MAX); + if (!buf) { + printf("failed to allocate sample uregs data\n"); + return -1; + } + + perf_regs_load(buf); + regs->abi = PERF_SAMPLE_REGS_ABI; + regs->regs = buf; + regs->mask = PERF_REGS_MASK; + + return sample_ustack(sample, thread, buf); +} diff --git a/tools/perf/arch/sw_64/tests/regs_load.S b/tools/perf/arch/sw_64/tests/regs_load.S new file mode 100644 index 000000000000..8c5aabc2c6fb --- /dev/null +++ b/tools/perf/arch/sw_64/tests/regs_load.S @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/linkage.h> + +.text +.set noat +.type perf_regs_load,%function +#define STL_REG(r) stl $r, (8 * r)($16) +#define LDL_REG(r) ldl $r, (8 * r)($16) +#define SP (8 * 30) +#define PC (8 * 31) +SYM_FUNC_START(perf_regs_load) + STL_REG(0) + STL_REG(1) + STL_REG(2) + STL_REG(3) + STL_REG(4) + STL_REG(5) + STL_REG(6) + STL_REG(7) + STL_REG(8) + STL_REG(9) + STL_REG(10) + STL_REG(11) + STL_REG(12) + STL_REG(13) + STL_REG(14) + STL_REG(15) + STL_REG(16) + STL_REG(17) + STL_REG(18) + STL_REG(19) + STL_REG(20) + STL_REG(21) + STL_REG(22) + STL_REG(23) + STL_REG(24) + STL_REG(25) + STL_REG(26) + STL_REG(27) + STL_REG(28) + STL_REG(29) + mov $30, $17 + stl $17, (SP)($16) + stl $26, (PC)($16) + LDL_REG(17) + ret +SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/sw_64/util/Build b/tools/perf/arch/sw_64/util/Build new file mode 100644 index 000000000000..39f459b636a0 --- /dev/null +++ b/tools/perf/arch/sw_64/util/Build @@ -0,0 +1,4 @@ +perf-y += perf_regs.o +perf-$(CONFIG_DWARF) += dwarf-regs.o +perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o +perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o diff --git a/tools/perf/arch/sw_64/util/dwarf-regs.c b/tools/perf/arch/sw_64/util/dwarf-regs.c new file mode 100644 index 000000000000..11c1ee5444da --- /dev/null +++ b/tools/perf/arch/sw_64/util/dwarf-regs.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Mapping of DWARF debug register numbers into register names. + * + * Copyright (C) 2010 Will Deacon, ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <errno.h> +#include <stddef.h> +#include <string.h> +#include <dwarf-regs.h> +#include <linux/ptrace.h> /* for struct user_pt_regs */ +#include <linux/stringify.h> +#include "util.h" + +struct pt_regs_dwarfnum { + const char *name; + unsigned int dwarfnum; +}; + +#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num} +#define GPR_DWARFNUM_NAME(num) \ + {.name = __stringify(%r##num), .dwarfnum = num} +#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0} +#define DWARFNUM2OFFSET(index) \ + (index * sizeof((struct user_pt_regs *)0)->regs[0]) + +static const struct pt_regs_dwarfnum regdwarfnum_table[] = { + GPR_DWARFNUM_NAME(0), + GPR_DWARFNUM_NAME(1), + GPR_DWARFNUM_NAME(2), + GPR_DWARFNUM_NAME(3), + GPR_DWARFNUM_NAME(4), + GPR_DWARFNUM_NAME(5), + GPR_DWARFNUM_NAME(6), + GPR_DWARFNUM_NAME(7), + GPR_DWARFNUM_NAME(8), + GPR_DWARFNUM_NAME(9), + GPR_DWARFNUM_NAME(10), + GPR_DWARFNUM_NAME(11), + GPR_DWARFNUM_NAME(12), + GPR_DWARFNUM_NAME(13), + GPR_DWARFNUM_NAME(14), + GPR_DWARFNUM_NAME(15), + REG_DWARFNUM_NAME("%fp", 15), + GPR_DWARFNUM_NAME(16), + GPR_DWARFNUM_NAME(17), + GPR_DWARFNUM_NAME(18), + GPR_DWARFNUM_NAME(19), + GPR_DWARFNUM_NAME(20), + GPR_DWARFNUM_NAME(21), + GPR_DWARFNUM_NAME(22), + GPR_DWARFNUM_NAME(23), + GPR_DWARFNUM_NAME(24), + GPR_DWARFNUM_NAME(25), + GPR_DWARFNUM_NAME(26), + GPR_DWARFNUM_NAME(27), + GPR_DWARFNUM_NAME(28), + REG_DWARFNUM_NAME("%gp", 29), + REG_DWARFNUM_NAME("%sp", 30), + REG_DWARFNUM_END, +}; + +/** + * get_arch_regstr() - lookup register name from its DWARF register number + * @n: the DWARF register number + * + * get_arch_regstr() returns the name of the register in struct + * regdwarfnum_table from its DWARF register number. If the register is not + * found in the table, this returns NULL. + */ +const char *get_arch_regstr(unsigned int n) +{ + const struct pt_regs_dwarfnum *roff; + + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (roff->dwarfnum == n) + return roff->name; + return NULL; +} + +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_dwarfnum *roff; + + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return DWARFNUM2OFFSET(roff->dwarfnum); + return -EINVAL; +} diff --git a/tools/perf/arch/sw_64/util/perf_regs.c b/tools/perf/arch/sw_64/util/perf_regs.c new file mode 100644 index 000000000000..2833e101a7c6 --- /dev/null +++ b/tools/perf/arch/sw_64/util/perf_regs.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "../../../util/perf_regs.h" + +const struct sample_reg sample_reg_masks[] = { + SMPL_REG_END +}; diff --git a/tools/perf/arch/sw_64/util/unwind-libdw.c b/tools/perf/arch/sw_64/util/unwind-libdw.c new file mode 100644 index 000000000000..3e2b6acc40ac --- /dev/null +++ b/tools/perf/arch/sw_64/util/unwind-libdw.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <elfutils/libdwfl.h> +#include "../../util/unwind-libdw.h" +#include "../../util/perf_regs.h" +#include "../../util/event.h" + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) +{ + struct unwind_info *ui = arg; + struct regs_dump *user_regs = &ui->sample->user_regs; + Dwarf_Word dwarf_regs[PERF_REG_SW64_MAX], dwarf_pc; + +#define REG(r) ({ \ + Dwarf_Word val = 0; \ + perf_reg_value(&val, user_regs, PERF_REG_SW64_##r); \ + val; \ +}) + + dwarf_regs[0] = REG(R0); + dwarf_regs[1] = REG(R1); + dwarf_regs[2] = REG(R2); + dwarf_regs[3] = REG(R3); + dwarf_regs[4] = REG(R4); + dwarf_regs[5] = REG(R5); + dwarf_regs[6] = REG(R6); + dwarf_regs[7] = REG(R7); + dwarf_regs[8] = REG(R8); + dwarf_regs[9] = REG(R9); + dwarf_regs[10] = REG(R10); 
+ dwarf_regs[11] = REG(R11); + dwarf_regs[12] = REG(R12); + dwarf_regs[13] = REG(R13); + dwarf_regs[14] = REG(R14); + dwarf_regs[15] = REG(R15); + dwarf_regs[16] = REG(R16); + dwarf_regs[17] = REG(R17); + dwarf_regs[18] = REG(R18); + dwarf_regs[19] = REG(R19); + dwarf_regs[20] = REG(R20); + dwarf_regs[21] = REG(R21); + dwarf_regs[22] = REG(R22); + dwarf_regs[23] = REG(R23); + dwarf_regs[24] = REG(R24); + dwarf_regs[25] = REG(R25); + dwarf_regs[26] = REG(R26); + dwarf_regs[27] = REG(R27); + dwarf_regs[28] = REG(R28); + dwarf_regs[29] = REG(GP); + dwarf_regs[30] = REG(SP); + dwarf_regs[31] = 0; /* $31 is hardwired to zero */ + + if (!dwfl_thread_state_registers(thread, 0, PERF_REG_SW64_MAX, + dwarf_regs)) + return false; + + dwarf_pc = REG(PC); + dwfl_thread_state_register_pc(thread, dwarf_pc); + + return true; +} diff --git a/tools/perf/arch/sw_64/util/unwind-libunwind.c b/tools/perf/arch/sw_64/util/unwind-libunwind.c new file mode 100644 index 000000000000..134e3c2280d2 --- /dev/null +++ b/tools/perf/arch/sw_64/util/unwind-libunwind.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <errno.h> + +#ifndef REMOTE_UNWIND_LIBUNWIND +#include <libunwind.h> +#include "perf_regs.h" +#include "../../util/unwind.h" +#include "../../util/debug.h" +#endif + +int LIBUNWIND__ARCH_REG_ID(int regnum) +{ + switch (regnum) { + case UNW_SW_64_R0: + return PERF_REG_SW64_R0; + case UNW_SW_64_R1: + return PERF_REG_SW64_R1; + case UNW_SW_64_R2: + return PERF_REG_SW64_R2; + case UNW_SW_64_R3: + return PERF_REG_SW64_R3; + case UNW_SW_64_R4: + return PERF_REG_SW64_R4; + case UNW_SW_64_R5: + return PERF_REG_SW64_R5; + case UNW_SW_64_R6: + return PERF_REG_SW64_R6; + case UNW_SW_64_R7: + return PERF_REG_SW64_R7; + case UNW_SW_64_R8: + return PERF_REG_SW64_R8; + case UNW_SW_64_R9: + return PERF_REG_SW64_R9; + case UNW_SW_64_R10: + return PERF_REG_SW64_R10; + case UNW_SW_64_R11: + return PERF_REG_SW64_R11; + case UNW_SW_64_R12: + return PERF_REG_SW64_R12; + case UNW_SW_64_R13: + return PERF_REG_SW64_R13; + case UNW_SW_64_R14: + return PERF_REG_SW64_R14; + case UNW_SW_64_R15: + return PERF_REG_SW64_R15; + case UNW_SW_64_R16: + return PERF_REG_SW64_R16; + case UNW_SW_64_R17: + return PERF_REG_SW64_R17; + case UNW_SW_64_R18: + return PERF_REG_SW64_R18; + case UNW_SW_64_R19: + return PERF_REG_SW64_R19; + case UNW_SW_64_R20: + return PERF_REG_SW64_R20; + case UNW_SW_64_R21: + return PERF_REG_SW64_R21; + case UNW_SW_64_R22: + return PERF_REG_SW64_R22; + case UNW_SW_64_R23: + return PERF_REG_SW64_R23; + case UNW_SW_64_R24: + return PERF_REG_SW64_R24; + case UNW_SW_64_R25: + return PERF_REG_SW64_R25; + case UNW_SW_64_R26: + return PERF_REG_SW64_R26; + case UNW_SW_64_R27: + return PERF_REG_SW64_R27; + case UNW_SW_64_R28: + return PERF_REG_SW64_R28; + case UNW_SW_64_R29: + return PERF_REG_SW64_GP; + case UNW_SW_64_R30: + return PERF_REG_SW64_SP; + case UNW_SW_64_PC: + return PERF_REG_SW64_PC; + default: + pr_err("unwind: invalid reg id %d\n", regnum); + return -EINVAL; + } + + return -EINVAL; +} diff --git a/tools/perf/util/libunwind/sw64.c b/tools/perf/util/libunwind/sw64.c new file mode 100644 index 000000000000..12452bf2ab8b --- /dev/null +++ b/tools/perf/util/libunwind/sw64.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file sets up defines to compile the arch specific binary from the + * generic one. 
+ * + * The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch + * name and the definition of this function is included directly from + * 'arch/sw_64/util/unwind-libunwind.c', to make sure that this function + * is defined no matter what arch the host is. + * + * Finally, the arch specific unwind methods are exported which will + * be assigned to each sw_64 thread. + */ + +#define REMOTE_UNWIND_LIBUNWIND + +/* Define arch specific functions & regs for libunwind, should be + * defined before including "unwind.h" + */ +#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__sw_64_reg_id(regnum) +#define LIBUNWIND__ARCH_REG_IP PERF_REG_SW64_PC +#define LIBUNWIND__ARCH_REG_SP PERF_REG_SW64_SP + +#include "unwind.h" +#include "debug.h" +#include "libunwind-sw_64.h" +#include <../../../arch/sw_64/include/uapi/asm/perf_regs.h> +#include "../../arch/sw_64/util/unwind-libunwind.c" + +#include "util/unwind-libunwind-local.c" + +struct unwind_libunwind_ops * +sw64_unwind_libunwind_ops = &_unwind_libunwind_ops; -- Gitee From 57ec1896c8d72f8382eeb97a5a957ce8ad4f0662 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:27:37 +0800 Subject: [PATCH 347/953] anolis: perf: fix sw64 support ANBZ: #4688 Modify generic Build, Makefiles and routines for SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/perf/Makefile.config | 13 +++++++++++++ tools/perf/tests/Build | 2 +- tools/perf/util/Build | 1 + tools/perf/util/annotate.c | 3 +++ tools/perf/util/env.c | 2 ++ tools/perf/util/unwind-libunwind.c | 4 ++++ 6 files changed, 24 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index d66b52407e19..a48258330cc0 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -94,6 +94,12 @@ ifeq ($(SRCARCH),csky) NO_PERF_REGS := 0 endif +ifeq ($(SRCARCH),sw_64) + NO_PERF_REGS := 0 + CFLAGS += -mieee + LIBUNWIND_LIBS = -lunwind -lunwind-sw_64 +endif + ifeq ($(ARCH),s390) NO_PERF_REGS := 0 CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated @@ -640,6 +646,13 @@ ifndef NO_LIBUNWIND CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME_AARCH64 endif endif + ifeq ($(feature-libunwind-sw_64), 1) + $(call detected,CONFIG_LIBUNWIND_SW64) + CFLAGS += -DHAVE_LIBUNWIND_SW_64_SUPPORT + LDFLAGS += -lunwind-sw_64 + EXTLIBS_LIBUNWIND += -lunwind-sw_64 + have_libunwind = 1 + endif ifneq ($(feature-libunwind), 1) msg := $(warning No libunwind found. 
Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR); diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index 63d5e6d5f165..5b51f34b5bf3 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -67,7 +67,7 @@ perf-y += sigtrap.o perf-y += event_groups.o perf-y += symbols.o -ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) +ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc sw_64)) perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o endif diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 6d657c9927f7..89a051732e87 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -199,6 +199,7 @@ perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o +perf-$(CONFIG_LIBUNWIND_SW64) += libunwind/sw64.o ifeq ($(CONFIG_LIBTRACEEVENT),y) perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 82956adf9963..88440bc7cc71 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -180,6 +180,9 @@ static struct arch architectures[] = { .comment_char = '#', }, }, + { + .name = "sw_64", + }, { .name = "x86", .init = x86__annotate_init, diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index d2c7b6e6eae5..8175df5df556 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -436,6 +436,8 @@ static const char *normalize_arch(char *arch) return "arm64"; if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) return "arm"; + if (!strncmp(arch, "sw_64", 5)) + return "sw_64"; if (!strncmp(arch, "s390", 4)) return "s390"; if (!strncmp(arch, "parisc", 6)) diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c index 76cd63de80a8..c2e84a827e33 100644 --- a/tools/perf/util/unwind-libunwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -11,6 +11,7 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; +struct unwind_libunwind_ops __weak *sw64_unwind_libunwind_ops; static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops) { @@ -53,6 +54,9 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) { if (dso_type == DSO__TYPE_64BIT) ops = arm64_unwind_libunwind_ops; + } else if (!strcmp(arch, "sw_64")) { + if (dso_type == DSO__TYPE_64BIT) + ops = sw64_unwind_libunwind_ops; } if (!ops) { -- Gitee From d80167f9234995d18efba3cc80ee05ce0bda2eb3 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:29:33 +0800 Subject: [PATCH 348/953] anolis: selftests: fix sw64 support ANBZ: #4688 Modify generic routines for SW64 support. 
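The seccomp_bpf change below maps both SYSCALL_NUM and SYSCALL_RET to regs[0] of struct user_pt_regs, which is why SYSCALL_RET_SET only logs that the return value cannot be modified; SW64 is also added to the PTRACE_GETREGS/SETREGS branch. A sketch of what the harness effectively does on SW64 (the helper is illustrative):

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace.h>		/* struct user_pt_regs on sw_64 */

	static long read_syscall_reg(pid_t tracee)
	{
		struct user_pt_regs regs;

		ptrace(PTRACE_GETREGS, tracee, 0, &regs);
		/* On entry regs[0] holds the syscall number, on exit the
		 * return value: one register, so the two cannot be set
		 * independently. */
		return regs.regs[0];
	}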
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- .../ftrace/test.d/kprobe/kprobe_args_string.tc | 3 +++ .../ftrace/test.d/kprobe/kprobe_args_syntax.tc | 4 ++++ .../testing/selftests/mm/virtual_address_range.c | 5 +++++ .../testing/selftests/seccomp/seccomp_benchmark.c | 5 +++++ tools/testing/selftests/seccomp/seccomp_bpf.c | 15 ++++++++++++++- 5 files changed, 31 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index a202b2ea4baf..f1f2f3722e93 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -31,6 +31,9 @@ mips*) loongarch*) ARG1=%r4 ;; +sw_64) + ARG1=%r16 +;; *) echo "Please implement other architecture here" exit_untested diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc index 1df61e13a812..8de38fb00bae 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc @@ -44,6 +44,10 @@ loongarch*) GOODREG=%r4 BADREG=%r12 ;; +sw_64) + GOODREG=%r16 + BADREG=%ps +;; *) echo "Please implement other architecture here" exit_untested diff --git a/tools/testing/selftests/mm/virtual_address_range.c b/tools/testing/selftests/mm/virtual_address_range.c index bae0ceaf95b1..76efbd5637cb 100644 --- a/tools/testing/selftests/mm/virtual_address_range.c +++ b/tools/testing/selftests/mm/virtual_address_range.c @@ -54,6 +54,11 @@ #define HIGH_ADDR_SHIFT 49 #define NR_CHUNKS_LOW NR_CHUNKS_256TB #define NR_CHUNKS_HIGH NR_CHUNKS_3840TB +#elif defined __sw_64__ +#define HIGH_ADDR_MARK ADDR_MARK_128TB * 32UL +#define HIGH_ADDR_SHIFT 53 +#define NR_CHUNKS_LOW NR_CHUNKS_128TB * 32UL +#define NR_CHUNKS_HIGH 0 #else #define HIGH_ADDR_MARK ADDR_MARK_128TB #define HIGH_ADDR_SHIFT 48 diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c index 5b5c9d558dee..7004099ce11b 100644 --- a/tools/testing/selftests/seccomp/seccomp_benchmark.c +++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c @@ -20,6 +20,11 @@ #include "../kselftest.h" +#ifdef __sw_64__ +#define __NR_getpid 174 +#define __NR_getppid 175 +#endif + unsigned long long timing(clockid_t clk_id, unsigned long long samples) { struct timespec start, finish; diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 38f651469968..66954558220e 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -66,6 +66,11 @@ # define PR_SET_PTRACER 0x59616d61 #endif +#ifdef __sw_64__ +#define __NR_getpid 174 +#define __NR_getppid 175 +#endif + #ifndef PR_SET_NO_NEW_PRIVS #define PR_SET_NO_NEW_PRIVS 38 #define PR_GET_NO_NEW_PRIVS 39 @@ -142,6 +147,8 @@ struct seccomp_data { # define __NR_seccomp 372 # elif defined(__mc68000__) # define __NR_seccomp 380 +# elif defined(__sw_64__) +# define __NR_seccomp 514 # else # warning "seccomp syscall number unknown for this architecture" # define __NR_seccomp 0xffff @@ -1850,6 +1857,12 @@ TEST_F(TRACE_poke, getpid_runs_normally) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM(_regs) (_regs).orig_d0 # define 
SYSCALL_RET(_regs) (_regs).d0 +#elif defined(__sw_64__) +# define ARCH_REGS struct user_pt_regs +# define SYSCALL_NUM(_regs) (_regs).regs[0] +# define SYSCALL_RET(_regs) (_regs).regs[0] +# define SYSCALL_RET_SET(_regs, _val) \ + TH_LOG("Can't modify syscall return on this architecture") #else # error "Do not know how to find your architecture's registers and syscalls" #endif @@ -1914,7 +1927,7 @@ const bool ptrace_entry_set_syscall_ret = * Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). */ -#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) +#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) || defined(__sw_64__) # define ARCH_GETREGS(_regs) ptrace(PTRACE_GETREGS, tracee, 0, &(_regs)) # define ARCH_SETREGS(_regs) ptrace(PTRACE_SETREGS, tracee, 0, &(_regs)) #else -- Gitee From 2e6a48e4324b8e2d6321bc0b77602971544de431 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:15 +0800 Subject: [PATCH 349/953] anolis: drivers: acpi: add sw64 support ANBZ: #4688 Add acpi drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/acpi/acpi_apd.c | 19 ++++++++++++++++++- drivers/acpi/numa/Kconfig | 2 +- drivers/acpi/numa/srat.c | 2 +- drivers/acpi/pci_mcfg.c | 26 ++++++++++++++++++++++++++ 4 files changed, 46 insertions(+), 3 deletions(-) diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 80f945cbec8a..791f4b234e02 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -40,7 +40,8 @@ struct apd_private_data { const struct apd_device_desc *dev_desc; }; -#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64) +#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || \ +defined(CONFIG_ARM64) || defined(CONFIG_SW64) #define APD_ADDR(desc) ((unsigned long)&desc) static int acpi_apd_setup(struct apd_private_data *pdata) @@ -178,6 +179,18 @@ static const struct apd_device_desc hip08_spi_desc = { }; #endif /* CONFIG_ARM64 */ +#ifdef CONFIG_SW64 +static const struct apd_device_desc sunway_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 25000000, +}; + +static const struct apd_device_desc sunway_spi_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 25000000, +}; +#endif + #endif /* @@ -246,6 +259,10 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) }, { "HISI0173", APD_ADDR(hip08_spi_desc) }, { "NXP0001", APD_ADDR(nxp_i2c_desc) }, +#endif +#ifdef CONFIG_SW64 + { "HISI02A1", APD_ADDR(sunway_i2c_desc) }, + { "HISI0173", APD_ADDR(sunway_spi_desc) }, #endif { } }; diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig index 39b1f34c21df..67d1f40bfa9f 100644 --- a/drivers/acpi/numa/Kconfig +++ b/drivers/acpi/numa/Kconfig @@ -2,7 +2,7 @@ config ACPI_NUMA bool "NUMA support" depends on NUMA - depends on (X86 || IA64 || ARM64 || LOONGARCH) + depends on (X86 || IA64 || ARM64 || LOONGARCH || SW64) default y if IA64 || ARM64 config ACPI_HMAT diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index b57de78fbf14..aad85ccae2e0 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -206,7 +206,7 @@ int __init srat_disabled(void) return acpi_numa < 0; } -#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) +#if 
defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) /* * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for * I/O localities since SRAT does not list them. I/O localities are diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 860014b89b8e..1dccb26b2b7f 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -182,6 +182,32 @@ static struct mcfg_fixup mcfg_quirks[] = { LOONGSON_ECAM_MCFG("\0", 1), LOONGSON_ECAM_MCFG("LOONGSON", 1), #endif /* LOONGARCH */ + +#ifdef CONFIG_SW64 +#define _SW64_ECAM_QUIRK(rev, seg) \ + { "SUNWAY", "SUNWAY. ", rev, seg, MCFG_BUS_ANY, &sw64_pci_ecam_ops } +#define SW64_ECAM_QUIRK(rev, node) _SW64_ECAM_QUIRK(rev, node * 8 + 0),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 1),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 2),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 3),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 4),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 5),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 6),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 7) + + /** + * According to the address space of sw64, up to 8 nodes supported + * with a maximum of 8 pcie controllers per node + */ + SW64_ECAM_QUIRK(1, 0x00), + SW64_ECAM_QUIRK(1, 0x01), + SW64_ECAM_QUIRK(1, 0x02), + SW64_ECAM_QUIRK(1, 0x03), + SW64_ECAM_QUIRK(1, 0x04), + SW64_ECAM_QUIRK(1, 0x05), + SW64_ECAM_QUIRK(1, 0x06), + SW64_ECAM_QUIRK(1, 0x07), +#endif /* SW64 */ }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; -- Gitee From e93a480ddb92d3ec15d4ee8461ef608ab35acba5 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:42:24 +0800 Subject: [PATCH 350/953] anolis: drivers: clocksource: add sw64 support ANBZ: #4688 Add clocksource driver for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/clocksource/Kconfig | 3 + drivers/clocksource/Makefile | 1 + drivers/clocksource/timer-sw64.c | 411 +++++++++++++++++++++++++++++++ 3 files changed, 415 insertions(+) create mode 100644 drivers/clocksource/timer-sw64.c diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 0ba0dc4ecf06..2f25ee006164 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -732,4 +732,7 @@ config GOLDFISH_TIMER help Support for the timer/counter of goldfish-rtc +config SW64_TIMER + bool + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 368c3461dab8..b9ef4c79915e 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -89,3 +89,4 @@ obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o obj-$(CONFIG_GXP_TIMER) += timer-gxp.o obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o +obj-$(CONFIG_SW64_TIMER) += timer-sw64.o diff --git a/drivers/clocksource/timer-sw64.c b/drivers/clocksource/timer-sw64.c new file mode 100644 index 000000000000..a124b6d8fed9 --- /dev/null +++ b/drivers/clocksource/timer-sw64.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define SHTCLK_RATE_KHZ 25000 +#define SHTCLK_RATE (SHTCLK_RATE_KHZ * 1000) + +#if defined(CONFIG_SUBARCH_C4) +static u64 read_longtime(struct clocksource *cs) +{ + return read_csr(CSR_SHTCLOCK); +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = 
CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 notrace read_sched_clock(void) +{ + return read_csr(CSR_SHTCLOCK); +} + +void __init sw64_setup_clocksource(void) +{ + clocksource_register_khz(&clocksource_longtime, SHTCLK_RATE_KHZ); + sched_clock_register(read_sched_clock, BITS_PER_LONG, SHTCLK_RATE); +} + +void __init setup_sched_clock(void) { } +#elif defined(CONFIG_SUBARCH_C3B) +#ifdef CONFIG_SMP +static u64 read_longtime(struct clocksource *cs) +{ + unsigned long node; + + node = __this_cpu_read(hard_node_id); + return __io_read_longtime(node); +} + +static int longtime_enable(struct clocksource *cs) +{ + switch (cpu_desc.model) { + case CPU_SW3231: + sw64_io_write(0, GPIO_SWPORTA_DR, 0); + sw64_io_write(0, GPIO_SWPORTA_DDR, 0xff); + break; + case CPU_SW831: + __io_write_longtime_start_en(0, 0x1); + break; + default: + break; + } + + return 0; +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .enable = longtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 read_vtime(struct clocksource *cs) +{ + unsigned long vtime_addr; + + vtime_addr = IO_BASE | LONG_TIME; + return rdio64(vtime_addr); +} + +static int vtime_enable(struct clocksource *cs) +{ + return 0; +} + +static struct clocksource clocksource_vtime = { + .name = "vtime", + .rating = 100, + .enable = vtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_vtime, +}; +#else /* !SMP */ +static u64 read_tc(struct clocksource *cs) +{ + return rdtc(); +} + +static struct clocksource clocksource_tc = { + .name = "tc", + .rating = 300, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 22, + .mult = 0, /* To be filled in */ + .read = read_tc, +}; +#endif /* SMP */ + +#define DEFAULT_MCLK 25 /* Mhz */ + +void __init sw64_setup_clocksource(void) +{ + unsigned int mclk = *((unsigned char *)__va(MB_MCLK)); + + if (!mclk) + mclk = DEFAULT_MCLK; + +#ifdef CONFIG_SMP + if (is_in_host()) + clocksource_register_khz(&clocksource_longtime, mclk * 1000); + else + clocksource_register_khz(&clocksource_vtime, DEFAULT_MCLK * 1000); +#else + clocksource_register_hz(&clocksource_tc, get_cpu_freq()); + pr_info("Setup clocksource TC, mult = %d\n", clocksource_tc.mult); +#endif +} + +DECLARE_PER_CPU(u64, tc_offset); +static u64 sc_start, sc_shift, sc_multi; +DEFINE_STATIC_KEY_FALSE(use_tc_as_sched_clock); + +static int __init sched_clock_setup(char *opt) +{ + if (!opt) + return -EINVAL; + + if (!strncmp(opt, "on", 2)) { + static_branch_enable(&use_tc_as_sched_clock); + pr_info("Using TC instead of jiffies as source of sched_clock()\n"); + } + + return 0; +} +early_param("tc_sched_clock", sched_clock_setup); + +static void __init calibrate_sched_clock(void) +{ + sc_start = rdtc(); +} + +void __init setup_sched_clock(void) +{ + unsigned long step; + + sc_shift = 7; + step = 1UL << sc_shift; + sc_multi = step * NSEC_PER_SEC / get_cpu_freq(); + calibrate_sched_clock(); + + pr_info("sched_clock: sc_multi=%llu, sc_shift=%llu\n", sc_multi, sc_shift); +} + +#ifdef CONFIG_GENERIC_SCHED_CLOCK +static u64 notrace read_sched_clock(void) +{ + return (rdtc() - sc_start) >> sc_shift; +} + +void __init sw64_sched_clock_init(void) +{ + sched_clock_register(sched_clock_read, BITS_PER_LONG, get_cpu_freq() >> sc_shift); +} +#else /* !CONFIG_GENERIC_SCHED_CLOCK */ +/* + * scheduler clock 
- returns current time in nanoseconds. + */ +unsigned long long notrace sched_clock(void) +{ + if (static_branch_likely(&use_tc_as_sched_clock)) + return ((rdtc() - sc_start + __this_cpu_read(tc_offset)) >> sc_shift) * sc_multi; + else + return (jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ); +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t sched_clock_status_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[2]; + + if (static_key_enabled(&use_tc_as_sched_clock)) + buf[0] = 'Y'; + else + buf[0] = 'N'; + buf[1] = '\n'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t sched_clock_status_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int r; + bool bv; + bool val = static_key_enabled(&use_tc_as_sched_clock); + + r = kstrtobool_from_user(user_buf, count, &bv); + if (!r) { + if (val != bv) { + if (bv) { + static_branch_enable(&use_tc_as_sched_clock); + pr_info("source of sched_clock() switched from jiffies to TC\n"); + } else { + static_branch_disable(&use_tc_as_sched_clock); + pr_info("source of sched_clock() switched from TC to jiffies\n"); + } + } else { + if (val) + pr_info("source of sched_clock() unchanged (using TC)\n"); + else + pr_info("source of sched_clock() unchanged (using jiffies)\n"); + } + } + + return count; +} + +static const struct file_operations sched_clock_status_fops = { + .read = sched_clock_status_read, + .write = sched_clock_status_write, + .open = nonseekable_open, + .llseek = no_llseek, +}; + +static int __init sched_clock_debug_init(void) +{ + struct dentry *sched_clock_status; + + if (!sw64_debugfs_dir) + return -ENODEV; + + sched_clock_status = debugfs_create_file("tc_sched_clock", + 0644, sw64_debugfs_dir, NULL, + &sched_clock_status_fops); + + if (!sched_clock_status) + return -ENOMEM; + + return 0; +} +late_initcall(sched_clock_debug_init); +#endif /* CONFIG_DEBUG_FS */ +#endif /* CONFIG_GENERIC_SCHED_CLOCK */ + +#endif + + + +static int timer_next_event(unsigned long delta, + struct clock_event_device *evt); +static int timer_set_shutdown(struct clock_event_device *evt); +static int timer_set_oneshot(struct clock_event_device *evt); + +/* + * The local apic timer can be used for any function which is CPU local. 
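+ *
+ * One clock_event_device is kept per CPU (timer_events below); only
+ * one-shot operation is provided. set_next_event() programs the
+ * hardware timer with a tick delta via wrtimer(), and shutdown simply
+ * writes 0. Guests use the vtimer variant, which goes through a
+ * hypercall instead.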
+ */
+static struct clock_event_device timer_clockevent = {
+	.name = "timer",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.shift = 20,
+	.mult = 0,
+	.set_state_shutdown = timer_set_shutdown,
+	.set_state_oneshot = timer_set_oneshot,
+	.set_next_event = timer_next_event,
+	.rating = 300,
+	.irq = -1,
+};
+
+static int vtimer_next_event(unsigned long delta,
+		struct clock_event_device *evt)
+{
+	hcall(HCALL_SET_CLOCKEVENT, delta, 0, 0);
+	return 0;
+}
+
+static int vtimer_shutdown(struct clock_event_device *evt)
+{
+	hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0);
+	return 0;
+}
+
+static int vtimer_set_oneshot(struct clock_event_device *evt)
+{
+	return 0;
+}
+
+static struct clock_event_device vtimer_clockevent = {
+	.name = "vtimer",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.shift = 20,
+	.mult = 0,
+	.set_state_shutdown = vtimer_shutdown,
+	.set_state_oneshot = vtimer_set_oneshot,
+	.set_next_event = vtimer_next_event,
+	.rating = 300,
+	.irq = -1,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, timer_events);
+
+/*
+ * Program the next event, relative to now.
+ */
+static int timer_next_event(unsigned long delta,
+		struct clock_event_device *evt)
+{
+	wrtimer(delta);
+	return 0;
+}
+
+static int timer_set_shutdown(struct clock_event_device *evt)
+{
+	wrtimer(0);
+	return 0;
+}
+
+static int timer_set_oneshot(struct clock_event_device *evt)
+{
+	/*
+	 * The SW64 timer supports CLOCK_EVT_MODE_ONESHOT only, and selects
+	 * it automatically. Unlike the PIT or HPET, which choose ONESHOT or
+	 * PERIODIC through PIT_MOD or HPET_Tn_CFG, there is no mode register
+	 * to program, so there is nothing to do here.
+	 */
+	return 0;
+}
+
+void sw64_update_clockevents(unsigned long cpu, u32 freq)
+{
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	if (cpu == smp_processor_id())
+		clockevents_update_freq(swevt, freq);
+	else {
+		clockevents_calc_mult_shift(swevt, freq, 4);
+		swevt->min_delta_ns = clockevent_delta2ns(swevt->min_delta_ticks, swevt);
+		swevt->max_delta_ns = clockevent_delta2ns(swevt->max_delta_ticks, swevt);
+	}
+}
+
+/*
+ * Setup the local timer for this CPU. Copy the initialized values
+ * of the boot CPU and register the clock event in the framework.
+ */
+void sw64_setup_timer(void)
+{
+	unsigned long min_delta;
+	int cpu = smp_processor_id();
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	/* min_delta ticks => 100ns */
+	min_delta = get_cpu_freq()/1000/1000/10;
+
+	if (is_in_guest()) {
+		memcpy(swevt, &vtimer_clockevent, sizeof(*swevt));
+		/*
+		 * CUIWEI: This value is very important.
+		 * If it is too small, the timer may expire before the
+		 * interrupt enable register (IER) has been set up.
+		 */
+		min_delta *= 4;
+	} else {
+		memcpy(swevt, &timer_clockevent, sizeof(*swevt));
+	}
+	swevt->cpumask = cpumask_of(cpu);
+	swevt->set_state_shutdown(swevt);
+	clockevents_config_and_register(swevt, get_cpu_freq(), min_delta, ULONG_MAX);
+}
+
+void sw64_timer_interrupt(void)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&timer_events);
+
+	irq_enter();
+	if (!evt->event_handler) {
+		pr_warn("Spurious local timer interrupt on cpu %d\n",
+			smp_processor_id());
+		timer_set_shutdown(evt);
+		irq_exit();
+		return;
+	}
+
+	inc_irq_stat(timer_irqs_event);
+
+	evt->event_handler(evt);
+
+	irq_exit();
+}
-- 
Gitee

From 8c1531a2124d0065e6b8a862a4f958868cef6e9f Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Thu, 18 Jan 2024 16:42:49 +0800
Subject: [PATCH 351/953] anolis: drivers: cpufreq: add sw64 support

ANBZ: #4688

Add cpufreq drivers for SW64.
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/cpufreq/Kconfig | 23 ++++ drivers/cpufreq/Makefile | 2 + drivers/cpufreq/sw64_cpufreq.c | 175 +++++++++++++++++++++++++ drivers/cpufreq/sw64_cpufreq_debugfs.c | 101 ++++++++++++++ 4 files changed, 301 insertions(+) create mode 100644 drivers/cpufreq/sw64_cpufreq.c create mode 100644 drivers/cpufreq/sw64_cpufreq_debugfs.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index b14584bfdf3f..d1fdea27eb0d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -314,6 +314,29 @@ config SH_CPU_FREQ If unsure, say N. endif +if SW64 +config SW64_CPUFREQ + bool "SW64 CPU Frequency interface" + depends on UNCORE_XUELANG + default y + help + This adds the CPUFreq driver for SW64 processor which supports + software configurable cpu frequency. + + For details, take a look at . + + If unsure, say N. + +config SW64_CPUFREQ_DEBUGFS + bool "SW64 CPU Frequency debugfs interface" + depends on SW64_CPUFREQ && DEBUG_FS + default y + help + Turns on the DebugFS interface for CPU Frequency. + + If you don't know what to do here, say N. +endif + config QORIQ_CPUFREQ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" depends on OF && COMMON_CLK diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 076ea3ac1b56..f9c1c9012ce7 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -108,3 +108,5 @@ obj-$(CONFIG_LOONGSON3_ACPI_CPUFREQ) += loongson3-acpi-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ_DEBUGFS) += sw64_cpufreq_debugfs.o diff --git a/drivers/cpufreq/sw64_cpufreq.c b/drivers/cpufreq/sw64_cpufreq.c new file mode 100644 index 000000000000..f4bf5f3cc550 --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +/* + * Cpufreq driver for the sw64 processors + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include /* set_cpus_allowed() */ +#include +#include +#include + +#include +#include +#include + +static uint nowait; + +static struct clk *cpuclk; + + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block sw64_cpufreq_notifier_block = { + .notifier_call = sw64_cpu_freq_notifier +}; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; + unsigned long cpu = freqs->policy->cpu; + + if (val == CPUFREQ_POSTCHANGE) + sw64_update_clockevents(cpu, freqs->new * 1000); + + return 0; +} + +static unsigned int sw64_cpufreq_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", cpu); + return 0; + } + + return __sw64_cpufreq_get(policy); +} + +/* + * Here we notify other drivers of the proposed change and the final change. 
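+ *
+ * The cpufreq core emits the pre/post transition notifications around
+ * this target_index() callback; on CPUFREQ_POSTCHANGE, the
+ * sw64_cpu_freq_notifier() above rescales the per-CPU clockevents
+ * through sw64_update_clockevents().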
+ */ +static int sw64_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned int cpu = policy->cpu; + + if (!cpu_online(cpu)) + return -ENODEV; + + /* setting the cpu frequency */ + sw64_set_rate(index); + update_cpu_freq(freq_table[index].frequency); + + return 0; +} + +static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + cpuclk = sw64_clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + pr_err("couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + policy->clk = cpuclk; + + cpufreq_generic_init(policy, freq_table, 0); + + return 0; +} + +static int sw64_cpufreq_verify(struct cpufreq_policy_data *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static int sw64_cpufreq_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct freq_attr *sw64_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, NULL, +}; + +static struct cpufreq_driver sw64_cpufreq_driver = { + .name = "sw64", + .init = sw64_cpufreq_cpu_init, + .verify = sw64_cpufreq_verify, + .target_index = sw64_cpufreq_target, + .get = sw64_cpufreq_get, + .exit = sw64_cpufreq_exit, + .attr = sw64_table_attr, +}; + +static const struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpufreq", + }, + .id_table = platform_device_ids, +}; + + +static int __init cpufreq_init(void) +{ + int ret; + + if (is_in_guest()) { + pr_warn("Now sw_64 CPUFreq does not support virtual machines\n"); + return -ENODEV; + } + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("SW-64 CPU frequency driver\n"); + + cpufreq_register_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + return cpufreq_register_driver(&sw64_cpufreq_driver); +} + +static void __exit cpufreq_exit(void) +{ + cpufreq_unregister_driver(&sw64_cpufreq_driver); + cpufreq_unregister_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable SW-64 specific wait"); + +MODULE_DESCRIPTION("cpufreq driver for sw64"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/sw64_cpufreq_debugfs.c b/drivers/cpufreq/sw64_cpufreq_debugfs.c new file mode 100644 index 000000000000..bb4ae26bc22b --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq_debugfs.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + +static int cpufreq_show(struct seq_file *m, void *v) +{ + int i; + u64 val; + int freq; + + val = sw64_io_read(0, CLK_CTL); + val = val >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (val == i) + seq_printf(m, "[%d] ", freq); + else + seq_printf(m, "%d ", freq); + } + seq_puts(m, "\n"); + + return 0; +} + +static int cpufreq_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpufreq_show, NULL); +} + +static ssize_t cpufreq_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + char buf[5]; + size_t size; + int cf, i, err, index, freq; + + size = 
min(sizeof(buf) - 1, len); + if (copy_from_user(buf, user_buf, size)) + return -EFAULT; + buf[size] = '\0'; + + err = kstrtoint(buf, 10, &cf); + if (err) + return err; + + index = -1; + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (cf == freq) { + index = i; + break; + } + } + + if (index < 0) + return -EINVAL; + + sw64_set_rate(index); + update_cpu_freq(freq); + return len; +} + +static const struct file_operations set_cpufreq_fops = { + .open = cpufreq_open, + .read = seq_read, + .write = cpufreq_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init cpufreq_debugfs_init(void) +{ + struct dentry *cpufreq_entry; + + if (!sw64_debugfs_dir) + return -ENODEV; + + cpufreq_entry = debugfs_create_file("cpufreq", 0600, + sw64_debugfs_dir, NULL, + &set_cpufreq_fops); + if (!cpufreq_entry) + return -ENOMEM; + + return 0; +} +late_initcall(cpufreq_debugfs_init); -- Gitee From 1da1b0cc60b631c57308298dc3d1d2dc7b9e1348 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:47:32 +0800 Subject: [PATCH 352/953] anolis: drivers: efi: add sw64 support ANBZ: #4688 Add efi drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/firmware/efi/Kconfig | 2 +- drivers/firmware/efi/Makefile | 2 + drivers/firmware/efi/efi.c | 2 +- drivers/firmware/efi/sunway-init.c | 221 ++++++++++++++++++++++++++ drivers/firmware/efi/sunway-runtime.c | 85 ++++++++++ 5 files changed, 310 insertions(+), 2 deletions(-) create mode 100644 drivers/firmware/efi/sunway-init.c create mode 100644 drivers/firmware/efi/sunway-runtime.c diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 231f1c70d1db..138491a4b494 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -224,7 +224,7 @@ config EFI_DISABLE_PCI_DMA config EFI_EARLYCON def_bool y - depends on SERIAL_EARLYCON && !ARM && !IA64 + depends on SERIAL_EARLYCON && !ARM && !IA64 && !SW64 select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index b4528af86517..7c1b924e8ea3 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -35,6 +35,8 @@ obj-$(CONFIG_SYSFB) += sysfb_efi.o arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) +sw64-obj-$(CONFIG_EFI) := sunway-init.o sunway-runtime.o +obj-$(CONFIG_SW64) += $(sw64-obj-y) riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o obj-$(CONFIG_RISCV) += $(riscv-obj-y) #obj-$(CONFIG_LOONGARCH) += efi-init.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2c1095dcc2f2..f5b7f34e8069 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -809,7 +809,7 @@ int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr) return 0; } -#ifndef CONFIG_IA64 +#if !defined(CONFIG_IA64) && !defined(CONFIG_SW64) static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor, size_t size) { diff --git a/drivers/firmware/efi/sunway-init.c b/drivers/firmware/efi/sunway-init.c new file mode 100644 index 000000000000..870abc2f5afe --- /dev/null +++ b/drivers/firmware/efi/sunway-init.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 
Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013 - 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#define pr_fmt(fmt) "efi: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +unsigned long entSuspend; +unsigned long bios_version; + +static int __init is_memory(efi_memory_desc_t *md) +{ + if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC)) + return 1; + return 0; +} +static efi_config_table_type_t arch_tables[] __initdata = { + {SMBIOS3_TABLE_GUID, NULL, ""}, + {SLEEP_ENTRY_GUID, &entSuspend, "SLEEP ENTRY"}, + {BIOS_VERSION_GUID, &bios_version, "BIOS VERSION"}, + {}, +}; + +static int __init uefi_init(u64 efi_system_table) +{ + efi_char16_t *c16; + efi_config_table_t *config_tables; + efi_system_table_t *systab; + size_t table_size; + char vendor[100] = "unknown"; + int i, retval; + + systab = early_memremap(efi_system_table, + sizeof(efi_system_table_t)); + if (systab == NULL) { + pr_warn("Unable to map EFI system table.\n"); + return -ENOMEM; + } + + set_bit(EFI_BOOT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + + /* + * Verify the EFI Table + */ + if (systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect\n"); + retval = -EINVAL; + goto out; + } + if ((systab->hdr.revision >> 16) < 2) + pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", + systab->hdr.revision >> 16, + systab->hdr.revision & 0xffff); + + efi.runtime = systab->runtime; + efi.runtime_version = systab->hdr.revision; + + /* Show what we know for posterity */ + c16 = early_memremap(systab->fw_vendor, + sizeof(vendor) * sizeof(efi_char16_t)); + if (c16) { + for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) + vendor[i] = c16[i]; + vendor[i] = '\0'; + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } + + pr_info("EFI v%u.%.02u by %s\n", + systab->hdr.revision >> 16, + systab->hdr.revision & 0xffff, vendor); + + table_size = sizeof(efi_config_table_64_t) * systab->nr_tables; + config_tables = early_memremap(systab->tables, table_size); + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } + + retval = efi_config_parse_tables(config_tables, systab->nr_tables, + arch_tables); + + early_memunmap(config_tables, table_size); +out: + early_memunmap(systab, sizeof(efi_system_table_t)); + + if (!bios_version) + retval = -EINVAL; + + return retval; +} + +/* + * Return true for regions that can be used as System RAM. + */ +static __init int is_usable_memory(efi_memory_desc_t *md) +{ + switch (md->type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_ACPI_RECLAIM_MEMORY: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: + case EFI_PERSISTENT_MEMORY: + /* + * According to the spec, these regions are no longer reserved + * after calling ExitBootServices(). However, we can only use + * them as System RAM if they can be mapped writeback cacheable. 
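+		 * EFI_MEMORY_WB in md->attribute is what encodes that,
+		 * hence the check below.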
+ */ + return (md->attribute & EFI_MEMORY_WB); + default: + break; + } + return false; +} + +static __init void reserve_regions(void) +{ + efi_memory_desc_t *md; + u64 paddr, npages, size; + + if (efi_enabled(EFI_DBG)) + pr_info("Processing EFI memory map:\n"); + + for_each_efi_memory_desc(md) { + paddr = md->phys_addr; + npages = md->num_pages; + + if (efi_enabled(EFI_DBG)) { + char buf[64]; + + pr_info(" 0x%012llx-0x%012llx %s\n", + paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, + efi_md_typeattr_format(buf, sizeof(buf), md)); + } + + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (is_memory(md)) { + early_init_dt_add_memory_arch(paddr, size); + + if (!is_usable_memory(md)) + memblock_mark_nomap(paddr, size); + + /* keep ACPI reclaim memory intact for kexec etc. */ + if (md->type == EFI_ACPI_RECLAIM_MEMORY) + memblock_reserve(paddr, size); + } + } +} + +void __init efi_init(void) +{ + struct efi_memory_map_data data; + u64 efi_system_table; + + if (sunway_boot_params->efi_systab == 0) { + pr_info("System Table is not exist, disabling EFI.\n"); + return; + } + + /* Grab UEFI information placed in struct boot_params by stub */ + efi_system_table = sunway_boot_params->efi_systab; + if (!efi_system_table) + return; + + data.desc_version = sunway_boot_params->efi_memdesc_version; + data.desc_size = sunway_boot_params->efi_memdesc_size; + data.size = sunway_boot_params->efi_memmap_size; + data.phys_map = sunway_boot_params->efi_memmap; + + if (efi_memmap_init_early(&data) < 0) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. + */ + panic("Unable to map EFI memory map.\n"); + } + + WARN(efi.memmap.desc_version != 1, + "Unexpected EFI_MEMORY_DESCRIPTOR version %ld", + efi.memmap.desc_version); + + if (uefi_init(efi_system_table) < 0) { + efi_memmap_unmap(); + return; + } + + reserve_regions(); + + memblock_reserve(sunway_boot_params->efi_memmap & PAGE_MASK, + PAGE_ALIGN(sunway_boot_params->efi_memmap_size + + (sunway_boot_params->efi_memmap & ~PAGE_MASK))); + +} diff --git a/drivers/firmware/efi/sunway-runtime.c b/drivers/firmware/efi/sunway-runtime.c new file mode 100644 index 000000000000..6bd96cff7d5d --- /dev/null +++ b/drivers/firmware/efi/sunway-runtime.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013, 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., + * non-early mapping of the UEFI system table and virtual mappings for all + * EFI_MEMORY_RUNTIME regions. 
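+ *
+ * The early mapping is dropped and rebuilt with efi_memmap_init_late()
+ * first; if the runtime has been disabled on the command line, or is
+ * already reachable via paravirt, the native function pointers are
+ * left unset.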
+ */ +static int __init sunway_enable_runtime_services(void) +{ + u64 mapsize; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } + + efi_memmap_unmap(); + + mapsize = efi.memmap.desc_size * efi.memmap.nr_map; + + if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) { + pr_err("Failed to remap EFI memory map\n"); + return 0; + } + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return 0; + } + + if (efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_info("EFI runtime services access via paravirt.\n"); + return 0; + } + + /* Set up runtime services function pointers */ + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + return 0; +} +early_initcall(sunway_enable_runtime_services); + + +static int __init sunway_dmi_init(void) +{ + /* + * On SW64, DMI depends on UEFI, and dmi_scan_machine() needs to + * be called early because dmi_id_init(), which is an arch_initcall + * itself, depends on dmi_scan_machine() having been called already. + */ + dmi_setup(); + return 0; +} +core_initcall(sunway_dmi_init); -- Gitee From 9f0950922029e051b90921974eb5fd96ab33b522 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:43:45 +0800 Subject: [PATCH 353/953] anolis: drivers: gpio: add sw64 support ANBZ: #4688 Add gpio drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/gpio/Kconfig | 9 + drivers/gpio/Makefile | 1 + drivers/gpio/gpio-sunway.c | 861 ++++++++++++++++++++++ include/linux/platform_data/gpio-sunway.h | 33 + 4 files changed, 904 insertions(+) create mode 100644 drivers/gpio/gpio-sunway.c create mode 100644 include/linux/platform_data/gpio-sunway.h diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d56b835359d3..e52e8b5ae88e 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -246,6 +246,15 @@ config GPIO_DWAPB Say Y or M here to build support for the Synopsys DesignWare APB GPIO block. +config GPIO_SUNWAY + tristate "Sunway gpio driver" + depends on SW64 + select GPIO_GENERIC + select GENERIC_IRQ_CHIP + help + Say Y or M here to build support for the Sunway + GPIO block. + config GPIO_EIC_SPRD tristate "Spreadtrum EIC support" depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index eb73b5d633eb..e44a700ec7d3 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -195,3 +195,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o obj-$(CONFIG_GPIO_ZYNQMP_MODEPIN) += gpio-zynqmp-modepin.o +obj-$(CONFIG_GPIO_SUNWAY) += gpio-sunway.o diff --git a/drivers/gpio/gpio-sunway.c b/drivers/gpio/gpio-sunway.c new file mode 100644 index 000000000000..b9c6848317db --- /dev/null +++ b/drivers/gpio/gpio-sunway.c @@ -0,0 +1,861 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011 Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * All enquiries to support@picochip.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpiolib.h" +#include "gpiolib-acpi.h" + + +#define GPIO_SWPORTA_DR (0x00UL<<7) +#define GPIO_SWPORTA_DDR (0X04UL<<7) +#define GPIO_SWPORTB_DR (0X0CUL<<7) +#define GPIO_SWPORTB_DDR (0X10UL<<7) +#define GPIO_SWPORTC_DR (0x18UL<<7) +#define GPIO_SWPORTC_DDR (0x1cUL<<7) +#define GPIO_SWPORTD_DR (0x24UL<<7) +#define GPIO_SWPORTD_DDR (0x28UL<<7) +#define GPIO_INTEN (0x30UL<<7) +#define GPIO_INTMASK (0x34UL<<7) +#define GPIO_INTTYPE_LEVEL (0x38UL<<7) +#define GPIO_INT_POLARITY (0x3cUL<<7) +#define GPIO_INTSTATUS (0x40UL<<7) +#define GPIO_PORTA_DEBOUNCE (0x48UL<<7) +#define GPIO_PORTA_EOI (0x4cUL<<7) +#define GPIO_EXT_PORTA (0x50UL<<7) +#define GPIO_EXT_PORTB (0x54UL<<7) +#define GPIO_EXT_PORTC (0x58UL<<7) +#define GPIO_EXT_PORTD (0x5cUL<<7) + +#define DWAPB_MAX_PORTS 4 +#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ +#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ +#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ + +#define GPIO_REG_OFFSET_V2 1 + +#define GPIO_INTMASK_V2 0x44 +#define GPIO_INTTYPE_LEVEL_V2 0x34 +#define GPIO_INT_POLARITY_V2 0x38 +#define GPIO_INTSTATUS_V2 0x3c +#define GPIO_PORTA_EOI_V2 0x40 + +struct sunway_gpio; + +#ifdef CONFIG_PM_SLEEP +/* Store GPIO context across system-wide suspend/resume transitions */ +struct sunway_context { + u32 data; + u32 dir; + u32 ext; + u32 int_en; + u32 int_mask; + u32 int_type; + u32 int_pol; + u32 int_deb; + u32 wake_en; +}; +#endif + +struct sunway_gpio_port { + struct gpio_chip gc; + bool is_registered; + struct sunway_gpio *gpio; +#ifdef CONFIG_PM_SLEEP + struct sunway_context *ctx; +#endif + unsigned int idx; +}; + +struct sunway_gpio { + struct device *dev; + void __iomem *regs; + struct sunway_gpio_port *ports; + unsigned int nr_ports; + struct irq_domain *domain; + unsigned int flags; + struct reset_control *rst; + struct clk *clk; +}; + +static inline u32 gpio_reg_v2_convert(unsigned int offset) +{ + switch (offset) { + case GPIO_INTMASK: + return GPIO_INTMASK_V2; + case GPIO_INTTYPE_LEVEL: + return GPIO_INTTYPE_LEVEL_V2; + case GPIO_INT_POLARITY: + return GPIO_INT_POLARITY_V2; + case GPIO_INTSTATUS: + return GPIO_INTSTATUS_V2; + case GPIO_PORTA_EOI: + return GPIO_PORTA_EOI_V2; + } + + return offset; +} + +static inline u32 gpio_reg_convert(struct sunway_gpio *gpio, unsigned int offset) +{ + if (gpio->flags & GPIO_REG_OFFSET_V2) + return gpio_reg_v2_convert(offset); + + return offset; +} + +static inline u32 sunway_read(struct sunway_gpio *gpio, unsigned int offset) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset)); +} + +static inline void sunway_write(struct sunway_gpio *gpio, unsigned int offset, + u32 val) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val); +} + +static int sunway_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + + return irq_find_mapping(gpio->domain, offset); +} + +static struct sunway_gpio_port *sunway_offs_to_port(struct sunway_gpio *gpio, unsigned int offs) +{ + struct 
sunway_gpio_port *port; + int i; + + for (i = 0; i < gpio->nr_ports; i++) { + port = &gpio->ports[i]; + if (port->idx == offs / 32) + return port; + } + + return NULL; +} + +static void sunway_toggle_trigger(struct sunway_gpio *gpio, unsigned int offs) +{ + struct sunway_gpio_port *port = sunway_offs_to_port(gpio, offs); + struct gpio_chip *gc; + u32 pol; + int val; + + if (!port) + return; + gc = &port->gc; + + pol = sunway_read(gpio, GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offs % 32); + if (val) + pol &= ~BIT(offs); + else + pol |= BIT(offs); + + sunway_write(gpio, GPIO_INT_POLARITY, pol); +} + +static u32 sunway_do_irq(struct sunway_gpio *gpio) +{ + u32 irq_status = sunway_read(gpio, GPIO_INTSTATUS); + u32 ret = irq_status; + + while (irq_status) { + int hwirq = fls(irq_status) - 1; + int gpio_irq = irq_find_mapping(gpio->domain, hwirq); + + generic_handle_irq(gpio_irq); + irq_status &= ~BIT(hwirq); + + if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK) + == IRQ_TYPE_EDGE_BOTH) + sunway_toggle_trigger(gpio, hwirq); + } + + return ret; +} + +static void sunway_irq_handler(struct irq_desc *desc) +{ + struct sunway_gpio *gpio = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + + sunway_do_irq(gpio); + + if (chip->irq_eoi) + chip->irq_eoi(irq_desc_get_irq_data(desc)); +} + +static void sunway_irq_enable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val |= BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static void sunway_irq_disable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val &= ~BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static int sunway_irq_reqres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + int ret; + + ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d)); + if (ret) { + dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n", + irqd_to_hwirq(d)); + return ret; + } + return 0; +} + +static void sunway_irq_relres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + + gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d)); +} + +static int sunway_irq_set_type(struct irq_data *d, u32 type) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + int bit = d->hwirq; + unsigned long level, polarity, flags; + + if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | + IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + return -EINVAL; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + level = sunway_read(gpio, GPIO_INTTYPE_LEVEL); + polarity = sunway_read(gpio, GPIO_INT_POLARITY); + + switch (type) { + case IRQ_TYPE_EDGE_BOTH: 
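+		/*
+		 * The controller has no native both-edge mode: arm edge
+		 * triggering here and let sunway_toggle_trigger() flip
+		 * GPIO_INT_POLARITY away from the current pin level after
+		 * each interrupt.
+		 */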
+ level |= BIT(bit); + sunway_toggle_trigger(gpio, bit); + break; + case IRQ_TYPE_EDGE_RISING: + level |= BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_EDGE_FALLING: + level |= BIT(bit); + polarity &= ~BIT(bit); + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~BIT(bit); + polarity &= ~BIT(bit); + break; + } + + irq_setup_alt_chip(d, type); + + sunway_write(gpio, GPIO_INTTYPE_LEVEL, level); + if (type != IRQ_TYPE_EDGE_BOTH) + sunway_write(gpio, GPIO_INT_POLARITY, polarity); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway_irq_set_wake(struct irq_data *d, unsigned int enable) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct sunway_context *ctx = gpio->ports[0].ctx; + + if (enable) + ctx->wake_en |= BIT(d->hwirq); + else + ctx->wake_en &= ~BIT(d->hwirq); + + return 0; +} +#endif + +static int sunway_gpio_set_debounce(struct gpio_chip *gc, + unsigned int offset, unsigned int debounce) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + unsigned long flags, val_deb; + unsigned long mask = BIT(offset); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + + val_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE); + if (debounce) + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb | mask); + else + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb & ~mask); + + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +static int sunway_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) +{ + u32 debounce; + + if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) + return -ENOTSUPP; + + debounce = pinconf_to_config_argument(config); + return sunway_gpio_set_debounce(gc, offset, debounce); +} + +static irqreturn_t sunway_irq_handler_mfd(int irq, void *dev_id) +{ + u32 worked; + struct sunway_gpio *gpio = dev_id; + + worked = sunway_do_irq(gpio); + + return worked ? 
IRQ_HANDLED : IRQ_NONE; +} + +static void sunway_configure_irqs(struct sunway_gpio *gpio, + struct sunway_gpio_port *port, + struct sunway_port_property *pp) +{ + struct gpio_chip *gc = &port->gc; + struct fwnode_handle *fwnode = pp->fwnode; + struct irq_chip_generic *irq_gc = NULL; + unsigned int hwirq, ngpio = gc->ngpio; + struct irq_chip_type *ct; + int err, i; + + gpio->domain = irq_domain_create_linear(fwnode, ngpio, + &irq_generic_chip_ops, gpio); + if (!gpio->domain) + return; + + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, + "gpio-dwapb", handle_level_irq, + IRQ_NOREQUEST, 0, + IRQ_GC_INIT_NESTED_LOCK); + if (err) { + dev_info(gpio->dev, "irq_alloc_domain_generic_chips failed\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); + if (!irq_gc) { + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc->reg_base = gpio->regs; + irq_gc->private = gpio; + + for (i = 0; i < 2; i++) { + ct = &irq_gc->chip_types[i]; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_set_type = sunway_irq_set_type; + ct->chip.irq_enable = sunway_irq_enable; + ct->chip.irq_disable = sunway_irq_disable; + ct->chip.irq_request_resources = sunway_irq_reqres; + ct->chip.irq_release_resources = sunway_irq_relres; +#ifdef CONFIG_PM_SLEEP + ct->chip.irq_set_wake = sunway_irq_set_wake; +#endif + ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI); + ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK); + ct->type = IRQ_TYPE_LEVEL_MASK; + } + + irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + irq_gc->chip_types[1].handler = handle_edge_irq; + + if (!pp->irq_shared) { + int i; + + for (i = 0; i < pp->ngpio; i++) { + if (pp->irq[i] >= 0) + irq_set_chained_handler_and_data(pp->irq[i], + sunway_irq_handler, gpio); + } + } else { + /* + * Request a shared IRQ since where MFD would have devices + * using the same irq pin + */ + err = devm_request_irq(gpio->dev, pp->irq[0], + sunway_irq_handler_mfd, + IRQF_SHARED, "gpio-dwapb-mfd", gpio); + if (err) { + dev_err(gpio->dev, "error requesting IRQ\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + } + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_create_mapping(gpio->domain, hwirq); + + port->gc.to_irq = sunway_gpio_to_irq; +} + +static void sunway_irq_teardown(struct sunway_gpio *gpio) +{ + struct sunway_gpio_port *port = &gpio->ports[0]; + struct gpio_chip *gc = &port->gc; + unsigned int ngpio = gc->ngpio; + irq_hw_number_t hwirq; + + if (!gpio->domain) + return; + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq)); + + irq_domain_remove(gpio->domain); + gpio->domain = NULL; +} + +static int sunway_gpio_add_port(struct sunway_gpio *gpio, + struct sunway_port_property *pp, + unsigned int offs) +{ + struct sunway_gpio_port *port; + void __iomem *dat, *set, *dirout; + int err; + + port = &gpio->ports[offs]; + port->gpio = gpio; + port->idx = pp->idx; + +#ifdef CONFIG_PM_SLEEP + port->ctx = devm_kzalloc(gpio->dev, sizeof(*port->ctx), GFP_KERNEL); + if (!port->ctx) + return -ENOMEM; +#endif + + dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE); + set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE); + dirout = gpio->regs + GPIO_SWPORTA_DDR + + (pp->idx * GPIO_SWPORT_DDR_STRIDE); + + /* 
This registers 32 GPIO lines per port */ + err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout, + NULL, 0); + if (err) { + dev_err(gpio->dev, "failed to init gpio chip for port%d\n", + port->idx); + return err; + } + +#ifdef CONFIG_OF_GPIO + port->gc.of_node = to_of_node(pp->fwnode); +#endif + port->gc.ngpio = pp->ngpio; + port->gc.base = pp->gpio_base; + + /* Only port A support debounce */ + if (pp->idx == 0) + port->gc.set_config = sunway_gpio_set_config; + + if (pp->has_irq) + sunway_configure_irqs(gpio, port, pp); + + err = gpiochip_add_data(&port->gc, port); + if (err) + dev_err(gpio->dev, "failed to register gpiochip for port%d\n", + port->idx); + else + port->is_registered = true; + + /* Add GPIO-signaled ACPI event support */ + if (pp->has_irq) + acpi_gpiochip_request_interrupts(&port->gc); + + return err; +} + +static void sunway_gpio_unregister(struct sunway_gpio *gpio) +{ + unsigned int m; + + for (m = 0; m < gpio->nr_ports; ++m) + if (gpio->ports[m].is_registered) + gpiochip_remove(&gpio->ports[m].gc); +} + +static struct sunway_platform_data * +sunway_gpio_get_pdata(struct device *dev) +{ + struct fwnode_handle *fwnode; + struct sunway_platform_data *pdata; + struct sunway_port_property *pp; + int nports; + int i, j; + + nports = device_get_child_node_count(dev); + if (nports == 0) + return ERR_PTR(-ENODEV); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->properties = devm_kcalloc(dev, nports, sizeof(*pp), GFP_KERNEL); + if (!pdata->properties) + return ERR_PTR(-ENOMEM); + + pdata->nports = nports; + + i = 0; + device_for_each_child_node(dev, fwnode) { + struct device_node *np = NULL; + + pp = &pdata->properties[i++]; + pp->fwnode = fwnode; + + if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) || + pp->idx >= DWAPB_MAX_PORTS) { + dev_err(dev, + "missing/invalid port index for port%d\n", i); + fwnode_handle_put(fwnode); + return ERR_PTR(-EINVAL); + } + + if (fwnode_property_read_u32(fwnode, "snps,nr-gpios", + &pp->ngpio)) { + dev_info(dev, + "failed to get number of gpios for port%d\n", + i); + pp->ngpio = 32; + } + + pp->irq_shared = false; + pp->gpio_base = -1; + + /* + * Only port A can provide interrupts in all configurations of + * the IP. 
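+		 * Ports other than A are therefore skipped here and never
+		 * have interrupt lines parsed for them.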
+ */ + if (pp->idx != 0) + continue; + + if (dev->of_node && fwnode_property_read_bool(fwnode, + "interrupt-controller")) { + np = to_of_node(fwnode); + } + + for (j = 0; j < pp->ngpio; j++) { + pp->irq[j] = -ENXIO; + + if (np) + pp->irq[j] = of_irq_get(np, j); + else if (has_acpi_companion(dev)) + pp->irq[j] = platform_get_irq(to_platform_device(dev), j); + + if (pp->irq[j] >= 0) + pp->has_irq = true; + } + + if (!pp->has_irq) + dev_warn(dev, "no irq for port%d\n", pp->idx); + } + + return pdata; +} + +static const struct of_device_id sunway_of_match[] = { + { .compatible = "snps,sw-gpio", .data = (void *)0}, + { .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2}, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunway_of_match); + +static const struct acpi_device_id sunway_acpi_match[] = { + {"HISI0181", 0}, + {"APMC0D07", 0}, + {"APMC0D81", GPIO_REG_OFFSET_V2}, + { } +}; +MODULE_DEVICE_TABLE(acpi, sunway_acpi_match); + +static int sunway_gpio_probe(struct platform_device *pdev) +{ + unsigned int i; + struct resource *res; + struct sunway_gpio *gpio; + int err; + struct device *dev = &pdev->dev; + struct sunway_platform_data *pdata = dev_get_platdata(dev); + + if (!pdata) { + pdata = sunway_gpio_get_pdata(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + + if (!pdata->nports) + return -ENODEV; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + gpio->dev = &pdev->dev; + gpio->nr_ports = pdata->nports; + + gpio->rst = devm_reset_control_get_optional_shared(dev, NULL); + if (IS_ERR(gpio->rst)) + return PTR_ERR(gpio->rst); + + reset_control_deassert(gpio->rst); + + gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports, + sizeof(*gpio->ports), GFP_KERNEL); + if (!gpio->ports) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + /* Optional bus clock */ + gpio->clk = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(gpio->clk)) { + err = clk_prepare_enable(gpio->clk); + if (err) { + dev_info(&pdev->dev, "Cannot enable clock\n"); + return err; + } + } + + gpio->flags = 0; + if (dev->of_node) { + gpio->flags = (uintptr_t)of_device_get_match_data(dev); + } else if (has_acpi_companion(dev)) { + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(sunway_acpi_match, dev); + if (acpi_id) { + if (acpi_id->driver_data) + gpio->flags = acpi_id->driver_data; + } + } + + for (i = 0; i < gpio->nr_ports; i++) { + err = sunway_gpio_add_port(gpio, &pdata->properties[i], i); + if (err) + goto out_unregister; + } + platform_set_drvdata(pdev, gpio); + + return 0; + +out_unregister: + sunway_gpio_unregister(gpio); + sunway_irq_teardown(gpio); + clk_disable_unprepare(gpio->clk); + + return err; +} + +static int sunway_gpio_remove(struct platform_device *pdev) +{ + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + + sunway_gpio_unregister(gpio); + sunway_irq_teardown(gpio); + reset_control_assert(gpio->rst); + clk_disable_unprepare(gpio->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + int i; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + for (i = 0; i < gpio->nr_ports; i++) { + unsigned int offset; + unsigned int idx = gpio->ports[i].idx; + 
struct sunway_context *ctx = gpio->ports[i].ctx; + + BUG_ON(!ctx); + + offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; + ctx->dir = sunway_read(gpio, offset); + + offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; + ctx->data = sunway_read(gpio, offset); + + offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; + ctx->ext = sunway_read(gpio, offset); + + /* Only port A can provide interrupts */ + if (idx == 0) { + ctx->int_mask = sunway_read(gpio, GPIO_INTMASK); + ctx->int_en = sunway_read(gpio, GPIO_INTEN); + ctx->int_pol = sunway_read(gpio, GPIO_INT_POLARITY); + ctx->int_type = sunway_read(gpio, GPIO_INTTYPE_LEVEL); + ctx->int_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE); + + /* Mask out interrupts */ + sunway_write(gpio, GPIO_INTMASK, + 0xffffffff & ~ctx->wake_en); + } + } + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + clk_disable_unprepare(gpio->clk); + + return 0; +} + +static int sunway_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + int i; + + if (!IS_ERR(gpio->clk)) + clk_prepare_enable(gpio->clk); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + for (i = 0; i < gpio->nr_ports; i++) { + unsigned int offset; + unsigned int idx = gpio->ports[i].idx; + struct sunway_context *ctx = gpio->ports[i].ctx; + + BUG_ON(!ctx); + + offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; + sunway_write(gpio, offset, ctx->data); + + offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; + sunway_write(gpio, offset, ctx->dir); + + offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; + sunway_write(gpio, offset, ctx->ext); + + /* Only port A can provide interrupts */ + if (idx == 0) { + sunway_write(gpio, GPIO_INTTYPE_LEVEL, ctx->int_type); + sunway_write(gpio, GPIO_INT_POLARITY, ctx->int_pol); + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, ctx->int_deb); + sunway_write(gpio, GPIO_INTEN, ctx->int_en); + sunway_write(gpio, GPIO_INTMASK, ctx->int_mask); + + /* Clear out spurious interrupts */ + sunway_write(gpio, GPIO_PORTA_EOI, 0xffffffff); + } + } + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sunway_gpio_pm_ops, sunway_gpio_suspend, + sunway_gpio_resume); + +static struct platform_driver sunway_gpio_driver = { + .driver = { + .name = "gpio-sunway", + .pm = &sunway_gpio_pm_ops, + .of_match_table = of_match_ptr(sunway_of_match), + .acpi_match_table = ACPI_PTR(sunway_acpi_match), + }, + .probe = sunway_gpio_probe, + .remove = sunway_gpio_remove, +}; + +module_platform_driver(sunway_gpio_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jamie Iles"); +MODULE_DESCRIPTION("Sunway GPIO driver"); diff --git a/include/linux/platform_data/gpio-sunway.h b/include/linux/platform_data/gpio-sunway.h new file mode 100644 index 000000000000..58b1bddeb409 --- /dev/null +++ b/include/linux/platform_data/gpio-sunway.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef GPIO_SUNWAY_H +#define GPIO_SUNWAY_H + +struct sunway_port_property { + struct fwnode_handle *fwnode; + unsigned int idx; + unsigned int ngpio; + unsigned int gpio_base; + int irq[32]; + bool has_irq; + bool irq_shared; +}; + +struct sunway_platform_data { + struct sunway_port_property *properties; + unsigned int nports; +}; + +#endif -- Gitee From 32879fd0e6f2764419352719bdc8f11499d51daf Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:35 +0800 Subject: [PATCH 354/953] anolis: drivers: hwmon: add sw64 support ANBZ: #4688 Add hwmon drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/hwmon/Kconfig | 10 ++ drivers/hwmon/Makefile | 1 + drivers/hwmon/sw64_pvt.c | 224 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 235 insertions(+) create mode 100644 drivers/hwmon/sw64_pvt.c diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index b9f3e18a3fda..55de59abe3ec 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -38,6 +38,16 @@ config HWMON_DEBUG_CHIP comment "Native drivers" +config SENSORS_PVT + tristate "SW64 PVT monitor" + depends on SW64 + help + If you say yes here you get support for the voltage + sensor inside your CPU. + + This driver can also be built as a module. If so, the module + will be called PVT. + config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" depends on X86 && DMI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index cab312e74d3c..f7da084cfc46 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -221,6 +221,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o +obj-$(CONFIG_SENSORS_PVT) += sw64_pvt.o obj-$(CONFIG_SENSORS_OCC) += occ/ obj-$(CONFIG_SENSORS_PECI) += peci/ diff --git a/drivers/hwmon/sw64_pvt.c b/drivers/hwmon/sw64_pvt.c new file mode 100644 index 000000000000..aedc29d44077 --- /dev/null +++ b/drivers/hwmon/sw64_pvt.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PVT device driver. + * + * Part of lm_sensors, Linux kernel modules + * for hardware monitoring in sunway. 
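+ *
+ * The driver samples the on-die PVT (process/voltage/temperature)
+ * block through the PVT0_CTRL register window and reports the system
+ * voltage as a single hwmon channel, e.g.:
+ *
+ *   # cat /sys/class/hwmon/hwmonX/in0_input
+ *
+ * where hwmonX is whatever index the hwmon core assigns.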
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define PVT_VSYS 0 +#define PVT0_CTRL 0x7c00 +#define PVT02SPBU_DATA_OUT (0x1 << 26) +#define PVT_READ 0xc000 +#define PVT_WADDR 0xc800 +#define PVT_WDATA 0xcc00 + +/* The PVT registers */ +#define PVT_SAFECTRL 0x0 +#define CLK_SEL 0x1 +#define PVT_RUN 0x2 +#define PVT_CONFIG 0x3 +#define PVT_WAIT_TIME 0x4 +#define TS_ALARM_HVALUE_L 0x5 +#define TS_ALARM_HVALUE_H 0x6 +#define TS_ALARM_LVALUE_L 0x7 +#define TS_ALARM_LVALUE_H 0x8 +#define TS_ALARM_TIMES 0x9 +#define TRIMG 0xa +#define TRIMO 0xb +#define VS_ALARM_HVALUE_L 0xc +#define VS_ALARM_HVALUE_H 0xd +#define VS_ALARM_LVALUE_L 0xe +#define VS_ALARM_LVALUE_H 0xf +#define VS_ALARM_TIMES 0x10 +#define PVT_ALARM_CLEAR 0x11 +#define PVT_ALARM_MASK 0x12 +#define PVT_DATA_OUT_L 0x13 +#define PVT_DATA_OUT_H 0x14 +#define PVT_STATE_INFO 0x15 +#define PVT_ALARM_INFO 0x16 +#define COFFICIENT 71 +#define FIXEDVAL 45598 + +#define vol_algorithm(m, n) (((((m >> 16) & 0x3) * 0x100) +\ + ((n >> 16) & 0xff)) * COFFICIENT + FIXEDVAL) + + +struct pvt_hwmon { + struct pvt *pvt; + void __iomem *base; +}; + +static const char * const input_names[] = { + [PVT_VSYS] = "voltage", +}; + +static inline void pvt_write_reg(struct pvt_hwmon *pvtvol, u64 a, + u64 b, unsigned int offset) +{ + writel(a | b, pvtvol->base + offset); +} + +static inline u64 pvt_read_reg(struct pvt_hwmon *pvtvol, unsigned int offset) +{ + u64 value; + + value = readl(pvtvol->base + offset); + return value; +} + +void pvt_configure(struct pvt_hwmon *pvtvol, u64 value, u64 reg) +{ + pvt_write_reg(pvtvol, PVT_WDATA, value, PVT0_CTRL); + pvt_write_reg(pvtvol, PVT_WADDR, reg, PVT0_CTRL); +} + +static inline u64 pvt_read_vol(struct pvt_hwmon *pvtvol, u64 data, + u64 reg, unsigned int offset) +{ + unsigned int value; + + pvt_write_reg(pvtvol, data, reg, offset); + msleep(100); + value = pvt_read_reg(pvtvol, offset); + return value; +} + +static int pvt_get_vol(struct pvt_hwmon *pvtvol) +{ + unsigned long long data_h, data_l; + + pvt_configure(pvtvol, 0x1, PVT_SAFECTRL); + + /* configure PVT mode */ + pvt_configure(pvtvol, 0x3, PVT_CONFIG); + + /* PVT monitor enable */ + pvt_configure(pvtvol, 0x1, PVT_RUN); + + /* get the upper 2 bits of the PVT voltage */ + data_h = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_H, PVT0_CTRL); + if ((data_h & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: the Voltage_h is error\n"); + return false; + } + + /* get the lower 8 bits of the PVT voltage */ + data_l = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_L, PVT0_CTRL); + if ((data_l & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: the Voltage_l is error\n"); + return false; + } + + return vol_algorithm(data_h, data_l); +} + +static ssize_t pvt_read(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pvt_hwmon *pvtvol = dev_get_drvdata(dev); + unsigned long long pvt_vol; + + pvt_vol = pvt_get_vol(pvtvol); + return sprintf(buf, "%lld\n", (pvt_vol / 100)); +} + +static ssize_t show_label(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", + input_names[to_sensor_dev_attr(devattr)->index]); +} + +static SENSOR_DEVICE_ATTR(in0_input, 0444, pvt_read, NULL, + PVT_VSYS); +static SENSOR_DEVICE_ATTR(in0_label, 0444, show_label, NULL, + PVT_VSYS); + +static struct attribute *pvt_attrs[] = { + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_label.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(pvt); + +static int pvt_vol_plat_probe(struct platform_device 
*pdev)
+{
+	struct resource *res;
+	struct pvt_hwmon *pvtvol;
+	struct device *hwmon_dev;
+	unsigned long long value;
+	struct device *dev = &pdev->dev;
+
+	pvtvol = devm_kzalloc(&pdev->dev, sizeof(*pvtvol), GFP_KERNEL);
+	if (!pvtvol)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		goto err;
+
+	pvtvol->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pvtvol->base))
+		return PTR_ERR(pvtvol->base);
+
+	platform_set_drvdata(pdev, pvtvol);
+	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "pvt",
+							   pvtvol, pvt_groups);
+
+	if (IS_ERR(hwmon_dev))
+		return PTR_ERR(hwmon_dev);
+
+	value = pvt_get_vol(pvtvol);
+	if (!value) {
+		dev_err(&pdev->dev, "failed to read PVT voltage\n");
+		return -ENODEV;
+	}
+
+	return 0;
+
+err:
+	dev_err(&pdev->dev, "no PVT resource\n");
+	return -ENXIO;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id pvt_vol_of_match[] = {
+	{ .compatible = "sw64,pvt-vol", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, pvt_vol_of_match);
+#endif
+
+static struct platform_driver pvt_vol_driver = {
+	.probe = pvt_vol_plat_probe,
+	.driver = {
+		.name = "pvt-sw64",
+		.of_match_table = of_match_ptr(pvt_vol_of_match),
+	},
+};
+
+static int __init pvt_vol_init_driver(void)
+{
+	return platform_driver_register(&pvt_vol_driver);
+}
+subsys_initcall(pvt_vol_init_driver);
+
+static void __exit pvt_vol_exit_driver(void)
+{
+	platform_driver_unregister(&pvt_vol_driver);
+}
+module_exit(pvt_vol_exit_driver);
+
+MODULE_AUTHOR("Wang Yingying ");
+MODULE_DESCRIPTION("pvt controller");
+MODULE_LICENSE("GPL");
--
Gitee

From 561d0ce1db2286c7da7034c14954cb5a6f563dae Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Thu, 18 Jan 2024 16:54:01 +0800
Subject: [PATCH 355/953] anolis: drivers: i2c: add sw64 support

ANBZ: #4688

Add i2c drivers for SW64.

Signed-off-by: Mao Minkai
Reviewed-by: He Sheng
Signed-off-by: Gu Zitao
Reviewed-by: Gu Mi
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2916
---
 drivers/i2c/busses/Kconfig                  |  11 +
 drivers/i2c/busses/Makefile                 |   1 +
 drivers/i2c/busses/i2c-designware-common.c  |   8 +
 drivers/i2c/busses/i2c-designware-core.h    |   3 +-
 drivers/i2c/busses/i2c-designware-platdrv.c |   5 +
 drivers/i2c/busses/i2c-sunway.c             | 405 ++++++++++++++++++++
 6 files changed, 432 insertions(+), 1 deletion(-)
 create mode 100644 drivers/i2c/busses/i2c-sunway.c

diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ac2c09df6189..5cf07397c041 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -345,6 +345,17 @@ config I2C_ZHAOXIN
 	  This driver can also be built as a module. If so, the module
 	  will be called i2c-zhaoxin.
 
+config I2C_SUNWAY
+	tristate "Sunway i2c lib"
+	depends on SW64
+	help
+	  If you say yes to this option, support will be included for the
+	  Sunway SoC I2C interface on the SW64 platform.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called i2c-sunway.
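+
+# On SW64 every DesignWare I2C register sits at 128 times its canonical
+# offset, which is why the MODEL_SUNWAY hunks in i2c-designware-common.c
+# below shift "reg" left by 7: the canonical DW_IC_TAR offset 0x04 becomes
+# 0x200 and DW_IC_ENABLE 0x6c becomes 0x3600, matching the register
+# offsets defined in i2c-sunway.c.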
+ + if ACPI comment "ACPI drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index f8c8a3554427..738519b0a9cb 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o obj-$(CONFIG_I2C_VIA) += i2c-via.o obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o +obj-$(CONFIG_I2C_SUNWAY) += i2c-sunway.o # Mac SMBus host controller drivers obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 35f762872b8a..83803a63e23f 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -63,6 +63,9 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + *val = readl(dev->base + reg); return 0; @@ -72,6 +75,9 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + writel(val, dev->base + reg); return 0; @@ -149,6 +155,8 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) return ret; reg = readl(dev->base + DW_IC_COMP_TYPE); + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = readl(dev->base + (DW_IC_COMP_TYPE << 7)); i2c_dw_release_lock(dev); if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index a7f6f3eafad7..2c97fff25b07 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -308,7 +308,8 @@ struct dw_i2c_dev { #define MODEL_BAIKAL_BT1 BIT(9) #define MODEL_AMD_NAVI_GPU BIT(10) #define MODEL_WANGXUN_SP BIT(11) -#define MODEL_MASK GENMASK(11, 8) +#define MODEL_SUNWAY BIT(12) +#define MODEL_MASK GENMASK(12, 8) /* * Enable UCSI interrupt by writing 0xd at register diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 855b698e99c0..c818e9d14b9a 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -150,9 +150,14 @@ static int dw_i2c_of_configure(struct platform_device *pdev) } static const struct of_device_id dw_i2c_of_match[] = { +#ifdef CONFIG_SW64 + { .compatible = "snps,designware-i2c", .data = (void *)MODEL_SUNWAY }, +#else { .compatible = "snps,designware-i2c", }, +#endif { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, + { .compatible = "sunway,suntai-i2c", .data = (void *)MODEL_SUNWAY }, {}, }; MODULE_DEVICE_TABLE(of, dw_i2c_of_match); diff --git a/drivers/i2c/busses/i2c-sunway.c b/drivers/i2c/busses/i2c-sunway.c new file mode 100644 index 000000000000..cc7268c6a2da --- /dev/null +++ b/drivers/i2c/busses/i2c-sunway.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 WXIAT Platform Software + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * The drivers in this file are synchronous/blocking. 
In addition, + * use poll mode to read/write slave devices on the I2C bus instead + * of the interrupt mode. + */ + +#include +#include +#include +#include + +#include + +#define CPLD_BUSNR 2 + +#define IC_CLK_KHZ 25000 + +/* I2C register definitions */ +#define DW_IC_CON 0x0 +#define DW_IC_STATUS 0x3800 +#define DW_IC_DATA_CMD 0x0800 +#define DW_IC_TAR 0x00200 +#define DW_IC_ENABLE 0x3600 +#define DW_IC_CMD 0x0100 +#define DW_IC_STOP 0x0200 +#define DW_IC_SDA_HOLD 0x3e00 +#define DW_IC_SDA_SETUP 0x4a00 +#define DW_IC_SS_SCL_HCNT 0x0a00 +#define DW_IC_SS_SCL_LCNT 0x0c00 +#define DW_IC_FS_SCL_HCNT 0x0e00 +#define DW_IC_FS_SCL_LCNT 0x1000 +#define DW_IC_TX_TL 0x1e00 +#define DW_IC_RX_TL 0x1c00 +#define DW_IC_INTR_MASK 0x1800 + +#define MAX_RETRY 10000000 + +#define DW_IC_STATUS_ACTIVITY 0x1 +#define DW_IC_STATUS_TFNF 0x2 +#define DW_IC_STATUS_TFE 0x4 +#define DW_IC_STATUS_RFNE 0x8 +#define DW_IC_STATUS_RFF 0x10 + +#define DW_IC_CON_MASTER 0x1 +#define DW_IC_CON_SPEED_STD 0x2 +#define DW_IC_CON_SPEED_FAST 0x4 +#define DW_IC_CON_10BITADDR_MASTER 0x10 +#define DW_IC_CON_RESTART_EN 0x20 +#define DW_IC_CON_SLAVE_DISABLE 0x40 + +#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \ + DW_IC_CON_SLAVE_DISABLE | \ + DW_IC_CON_RESTART_EN) + +#define DW_IC_INTR_RX_UNDER 0x001 +#define DW_IC_INTR_RX_OVER 0x002 +#define DW_IC_INTR_RX_FULL 0x004 +#define DW_IC_INTR_TX_OVER 0x008 +#define DW_IC_INTR_TX_EMPTY 0x010 +#define DW_IC_INTR_RD_REQ 0x020 +#define DW_IC_INTR_TX_ABRT 0x040 +#define DW_IC_INTR_RX_DONE 0x080 +#define DW_IC_INTR_ACTIVITY 0x100 +#define DW_IC_INTR_STOP_DET 0x200 +#define DW_IC_INTR_START_DET 0x400 +#define DW_IC_INTR_GEN_CALL 0x800 + +#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ + DW_IC_INTR_TX_EMPTY | \ + DW_IC_INTR_TX_ABRT | \ + DW_IC_INTR_STOP_DET) + +enum i2c_bus_operation { + I2C_BUS_READ, + I2C_BUS_WRITE, +}; + +static void __iomem *m_i2c_base_address; + +/* + * This function get I2Cx controller base address + * + * @param i2c_controller_index Bus Number of I2C controller. + * @return I2C BAR. + */ +void __iomem *get_i2c_bar_addr(uint8_t i2c_controller_index) +{ + switch (i2c_controller_index) { + case 0: + return __va(IO_BASE | IIC0_BASE); + case 1: + return __va(IO_BASE | IIC1_BASE); + case 2: + return __va(IO_BASE | IIC2_BASE); + default: + return NULL; + } +} + +static inline void write_cpu_i2c_controller(uint64_t offset, uint32_t data) +{ + writel(data, m_i2c_base_address + offset); +} + +static inline uint32_t read_cpu_i2c_controller(uint64_t offset) +{ + return readl(m_i2c_base_address + offset); +} + +static int poll_for_status_set0(uint16_t status_bit) +{ + uint64_t retry = 0; + uint32_t temp = read_cpu_i2c_controller(DW_IC_STATUS); + + temp = read_cpu_i2c_controller(DW_IC_STATUS); + + while (retry < MAX_RETRY) { + if (read_cpu_i2c_controller(DW_IC_STATUS) & status_bit) + break; + retry++; + } + + if (retry == MAX_RETRY) + return -ETIME; + + return 0; +} + +static uint32_t i2c_dw_scl_lcnt(uint32_t ic_clk, uint32_t t_low, + uint32_t tf, uint32_t offset) +{ + /* + * Conditional expression: + * + * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (t_low + tf) + * + * DW I2C core starts counting the SCL CNTs for the LOW period + * of the SCL clock (t_low) as soon as it pulls the SCL line. + * In order to meet the t_low timing spec, we need to take into + * account the fall time of SCL signal (tf). Default tf value + * should be 0.3 us, for safety. 
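+	 *
+	 * Worked example with the standard-mode values this driver uses
+	 * below (IC_CLK = 25000 kHz, t_low = 4700 ns, tf = 300 ns):
+	 *
+	 *   LCNT = (25000 * (4700 + 300) + 500000) / 1000000 - 1 = 124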
+	 */
+	return ((ic_clk * (t_low + tf) + 500000) / 1000000) - 1 + offset;
+}
+
+static uint32_t i2c_dw_scl_hcnt(uint32_t ic_clk, uint32_t t_symbol,
+				uint32_t tf, uint32_t cond, uint32_t offset)
+{
+	/*
+	 * DesignWare I2C core doesn't seem to have a solid strategy to meet
+	 * the tHD;STA timing spec.  Configuring _HCNT based on the tHIGH spec
+	 * will result in violation of the tHD;STA spec.
+	 */
+	if (cond)
+		/*
+		 * Conditional expression:
+		 *
+		 *   IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
+		 *
+		 * This is based on the DW manuals, and represents an ideal
+		 * configuration.  The resulting I2C bus speed will be faster
+		 * than any of the others.
+		 *
+		 * If your hardware is free from the tHD;STA issue, try this one.
+		 */
+		return (ic_clk * t_symbol + 500000) / 1000000 - 8 + offset;
+	/*
+	 * Conditional expression:
+	 *
+	 *   IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
+	 *
+	 * This is just an experimental rule; the tHD;STA period turned
+	 * out to be proportional to (_HCNT + 3).  With this setting,
+	 * we could meet both tHIGH and tHD;STA timing specs.
+	 *
+	 * If unsure, you'd better take this alternative.
+	 *
+	 * The reason why we need to take into account "tf" here,
+	 * is the same as described in i2c_dw_scl_lcnt().
+	 */
+	return (ic_clk * (t_symbol + tf) + 500000) / 1000000 - 3 + offset;
+}
+
+static int wait_for_cpu_i2c_bus_busy(void)
+{
+	uint64_t retry = 0;
+	uint32_t status = 0;
+
+	do {
+		retry++;
+		status = !!(read_cpu_i2c_controller(DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY);
+	} while ((retry < MAX_RETRY) && status);
+
+	if (retry == MAX_RETRY)
+		return -ETIME;
+
+	return 0;
+}
+
+static int i2c_read(uint8_t reg_offset, uint8_t *buffer, uint32_t length)
+{
+	int status;
+	uint32_t i;
+
+	status = poll_for_status_set0(DW_IC_STATUS_TFE);
+	if (status)
+		return status;
+
+	write_cpu_i2c_controller(DW_IC_DATA_CMD, reg_offset);
+
+	for (i = 0; i < length; i++) {
+		if (i == length - 1)
+			write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD | DW_IC_STOP);
+		else
+			write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD);
+
+		if (poll_for_status_set0(DW_IC_STATUS_RFNE) == 0)
+			buffer[i] = readb(m_i2c_base_address + DW_IC_DATA_CMD);
+		else
+			pr_err("Read timeout line %d.\n", __LINE__);
+	}
+
+	return 0;
+}
+
+static int i2c_write(uint8_t reg_offset, uint8_t *buffer, uint32_t length)
+{
+	int status;
+	uint32_t i;
+
+	/* Data transfer, poll till transmit ready bit is set */
+	status = poll_for_status_set0(DW_IC_STATUS_TFE);
+	if (status) {
+		pr_err("In i2c-sunway.c, line %d.\n", __LINE__);
+		return status;
+	}
+
+	write_cpu_i2c_controller(DW_IC_DATA_CMD, reg_offset);
+
+	for (i = 0; i < length; i++) {
+		if (poll_for_status_set0(DW_IC_STATUS_TFNF) == 0) {
+			if (i == length - 1)
+				write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i] | DW_IC_STOP);
+			else
+				write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i]);
+		} else {
+			pr_err("Write timeout line %d.\n", __LINE__);
+		}
+	}
+
+	mdelay(200);
+	status = poll_for_status_set0(DW_IC_STATUS_TFE);
+	if (status) {
+		pr_err("In i2c-sunway.c, line %d.\n", __LINE__);
+		return status;
+	}
+
+	return 0;
+}
+
+/* Initialize I2C controller */
+void init_cpu_i2c_controller(void)
+{
+	uint32_t h_cnt;
+	uint32_t l_cnt;
+	uint32_t input_ic_clk_rate = IC_CLK_KHZ; /* by unit KHz ie.
25MHz */ + uint32_t sda_falling_time = 300; + uint32_t scl_falling_time = 300; + + /* + * The I2C protocol specification requires 300ns of hold time on the + * SDA signal (tHD;DAT) in standard and fast speed modes, and a hold + * time long enough to bridge the undefined part between logic 1 and + * logic 0 of the falling edge of SCL in high speed mode. + */ + uint32_t sda_hold_time = 432; + uint32_t sda_hold = 0; + + /* Firstly disable the controller. */ + pr_debug("Initialize CPU I2C controller\n"); + + write_cpu_i2c_controller(DW_IC_ENABLE, 0); + + sda_hold = (input_ic_clk_rate * sda_hold_time + 500000) / 1000000; + write_cpu_i2c_controller(DW_IC_SDA_HOLD, sda_hold); + + /* Set standard and fast speed deviders for high/low periods. */ + /* Standard-mode */ + h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 4000, sda_falling_time, 0, 0); + l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 4700, scl_falling_time, 0); + + write_cpu_i2c_controller(DW_IC_SS_SCL_HCNT, h_cnt); + write_cpu_i2c_controller(DW_IC_SS_SCL_LCNT, l_cnt); + + pr_debug("Standard-mode HCNT=%x, LCNT=%x\n", h_cnt, l_cnt); + + /* Fast-mode */ + h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 600, sda_falling_time, 0, 0); + l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 1300, scl_falling_time, 0); + + write_cpu_i2c_controller(DW_IC_FS_SCL_HCNT, h_cnt); + write_cpu_i2c_controller(DW_IC_FS_SCL_LCNT, l_cnt); + + pr_debug("Fast-mode HCNT=%x, LCNT=%d\n\n", h_cnt, l_cnt); + + /* Configure Tx/Rx FIFO threshold levels, since we will be working + * in polling mode set both thresholds to their minimum + */ + write_cpu_i2c_controller(DW_IC_TX_TL, 0); + write_cpu_i2c_controller(DW_IC_RX_TL, 0); + write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK); + + /* Configure the i2c master */ + write_cpu_i2c_controller(DW_IC_CON, + INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD); + +} + +/* + * This function enables I2C controllers. + * + * @param i2c_controller_index Bus Number of I2C controllers. + */ +void enable_i2c_controller(uint8_t i2c_controller_index) +{ + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + init_cpu_i2c_controller(); +} + +/* + * Write/Read data from I2C device. + * + * @i2c_controller_index: i2c bus number + * @slave_address: slave address + * @operation: to read or write + * @length: number of bytes + * @reg_offset: register offset + * @buffer: in/out buffer + */ +int i2c_bus_rw(uint8_t i2c_controller_index, uint8_t slave_address, + enum i2c_bus_operation operation, uint32_t length, + uint8_t reg_offset, void *buffer) +{ + uint8_t *byte_buffer = buffer; + int status = 0; + uint32_t databuffer, temp; + + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + status = wait_for_cpu_i2c_bus_busy(); + if (status) { + pr_err("%d\n", __LINE__); + return status; + } + + mdelay(1000); + + /* Set the slave address. */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); /* Disable controller */ + databuffer = read_cpu_i2c_controller(DW_IC_CON); + databuffer &= ~DW_IC_CON_10BITADDR_MASTER; + write_cpu_i2c_controller(DW_IC_CON, databuffer); + + /* Fill the target addr. */ + write_cpu_i2c_controller(DW_IC_TAR, slave_address); + + temp = read_cpu_i2c_controller(DW_IC_TAR); + + /* Configure Tx/Rx FIFO threshold levels. 
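+	 * The thresholds themselves were programmed to their minimum in
+	 * init_cpu_i2c_controller(); at this point the adapter is simply
+	 * re-enabled and the interrupt mask restored before the transfer.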
*/ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x1); /* Enable the adapter */ + write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK); + + if (operation == I2C_BUS_READ) + status = i2c_read(reg_offset, byte_buffer, length); + else if (operation == I2C_BUS_WRITE) + status = i2c_write(reg_offset, byte_buffer, length); + + /* Disable controller */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); + + return status; +} + +void disable_i2c_controller(uint8_t i2c_controller_index) +{ + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + + /* Disable controller */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); + m_i2c_base_address = 0; +} + +void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data) +{ + enable_i2c_controller(CPLD_BUSNR); + i2c_bus_rw(CPLD_BUSNR, slave_addr, I2C_BUS_WRITE, sizeof(uint8_t), reg, &data); + disable_i2c_controller(CPLD_BUSNR); +} -- Gitee From a096bbe19482ce9523dcc637e43aef29b0e23384 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:49:04 +0800 Subject: [PATCH 356/953] anolis: drivers: iommu: add sw64 support ANBZ: #4688 Add iommu drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/iommu/Kconfig | 1 + drivers/iommu/Makefile | 2 +- drivers/iommu/sw64/Kconfig | 21 + drivers/iommu/sw64/Makefile | 3 + drivers/iommu/sw64/iommu.c | 1277 +++++++++++++++++++++ drivers/iommu/sw64/iommu_v2.c | 1780 +++++++++++++++++++++++++++++ drivers/iommu/sw64/sunway_iommu.h | 79 ++ 7 files changed, 3162 insertions(+), 1 deletion(-) create mode 100644 drivers/iommu/sw64/Kconfig create mode 100644 drivers/iommu/sw64/Makefile create mode 100644 drivers/iommu/sw64/iommu.c create mode 100644 drivers/iommu/sw64/iommu_v2.c create mode 100644 drivers/iommu/sw64/sunway_iommu.h diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d57c5adf932e..b1df0a09601b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -187,6 +187,7 @@ config MSM_IOMMU source "drivers/iommu/amd/Kconfig" source "drivers/iommu/intel/Kconfig" source "drivers/iommu/iommufd/Kconfig" +source "drivers/iommu/sw64/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 769e43d780ce..f74b08c2fb00 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ iommufd/ +obj-y += amd/ intel/ arm/ iommufd/ sw64/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o diff --git a/drivers/iommu/sw64/Kconfig b/drivers/iommu/sw64/Kconfig new file mode 100644 index 000000000000..3a6a1e994f31 --- /dev/null +++ b/drivers/iommu/sw64/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# SW64 IOMMU SUPPORT +config SUNWAY_IOMMU + bool "Sunway IOMMU Support" + select IOMMU_API + select IOMMU_IOVA + select IOMMU_DMA + depends on SW64 && PCI && SUBARCH_C3B + help + Support for IOMMU on SW64 platform. It can enable or bypass specific device by + adding boot param "iommu_enable" and "iommu.passthrough". + +# SW64 IOMMU V2 SUPPORT +config SUNWAY_IOMMU_V2 + bool "Sunway IOMMU V2 Support" + select IOMMU_API + select IOMMU_IOVA + depends on SW64 && PCI && SUBARCH_C4 + help + Support for IOMMU V2 on SW64 platform. It can enable or bypass specific device by + adding boot param "iommu_enable" and "iommu.passthrough". 
diff --git a/drivers/iommu/sw64/Makefile b/drivers/iommu/sw64/Makefile new file mode 100644 index 000000000000..e61b343490aa --- /dev/null +++ b/drivers/iommu/sw64/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SUNWAY_IOMMU) += iommu.o +obj-$(CONFIG_SUNWAY_IOMMU_V2) += iommu_v2.o diff --git a/drivers/iommu/sw64/iommu.c b/drivers/iommu/sw64/iommu.c new file mode 100644 index 000000000000..32b18f726fd9 --- /dev/null +++ b/drivers/iommu/sw64/iommu.c @@ -0,0 +1,1277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * iommu.c: Generic sw64 IOMMU support + * + * This is designed and tested for 3231. If there are no changes in hardware + * in later chips, then it should work just as well. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "sunway_iommu.h" + +#define MAX_DOMAIN_NUM 65536 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define SW64_DMA_LIMIT (0xe0000000 - 1) +#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE) + +#define SW64_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW64_IOMMU_LEVEL2_OFFSET 0x3ff + +#define SW64_IOMMU_GRN_8K ((0UL) << 4) /* page size as 8KB */ +#define SW64_IOMMU_GRN_8M ((0x2UL) << 4) /* page size as 8MB */ +#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) | ((1ULL) << PAGE_8M_SHIFT)) + +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + +/* IOMMU Exceptional Status */ +enum exceptype { + DTE_LEVEL1 = 0x0, + DTE_LEVEL2, + PTE_LEVEL1, + PTE_LEVEL2, + UNAUTHORIZED_ACCESS, + ILLEGAL_RESPONSE, + DTE_LEVEL1_VAL, + DTE_LEVEL2_VAL, + PTE_LEVEL1_VAL, + PTE_LEVEL2_VAL, +}; + +u64 iommu_enable_cmd; /* default IOMMU boot param: 0 */ + +unsigned long *sunway_iommu_domain_bitmap; + +static DEFINE_SPINLOCK(domain_bitmap_lock); +static DEFINE_SPINLOCK(sunway_iommu_device_table_lock); +spinlock_t sunway_domain_lock; + +static LLIST_HEAD(dev_data_list); +LIST_HEAD(sunway_domain_list); + +struct dma_domain { + struct sunway_iommu_domain sdomain; + struct iova_domain iovad; +}; +const struct iommu_ops sunway_iommu_ops; + +static int iommu_identity_mapping; + +/* flush helpers */ +static void piu_flush_all(struct pci_controller *hose) +{ + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0); +} + +void dev_flush_dtlb(struct sunway_iommu_domain *sdomain, + struct sunway_iommu_dev *sdev_data) +{ + struct pci_controller *hose; + int devid; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = pci_bus_to_pci_controller(sdev_data->pdev->bus); + devid = sdev_data->devid; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, devid); + } +} + +void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) +{ + struct pci_controller *hose; + struct sunway_iommu_dev *sdev_data; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = pci_bus_to_pci_controller(sdev_data->pdev->bus); + + flush_addr = __pa(flush_addr); + write_piu_ior0(hose->node, hose->index, + PCACHE_FLUSHPADDR, flush_addr); + } +} + +void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) +{ + struct pci_controller *hose; + struct pci_dev *pdev; + struct sunway_iommu_dev *sdev_data; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + pdev = sdev_data->pdev; + hose = 
pci_bus_to_pci_controller(pdev->bus); + + flush_addr = (pdev->bus->number << 8) + | pdev->devfn | (flush_addr << 16); + write_piu_ior0(hose->node, hose->index, + PTLB_FLUSHVADDR, flush_addr); + } +} + +/* domain helpers */ +static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct sunway_iommu_domain, domain); +} + +static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain) +{ + return container_of(sdomain, struct dma_domain, sdomain); +} + +static void add_domain_to_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_add(&sdomain->list, &sunway_domain_list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void del_domain_from_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_del(&sdomain->list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void free_pagetable(struct sunway_iommu_domain *sdomain) +{ + unsigned long pde; + unsigned long *pde_ptr; + int i, pdes_one_page; + + pde_ptr = sdomain->pt_root; + if (!pde_ptr) + return; + + pdes_one_page = PAGE_SIZE/sizeof(pde); + for (i = 0; i < pdes_one_page; i++, pde_ptr++) { + pde = *pde_ptr; + if ((pde & SW64_IOMMU_ENTRY_VALID) == 0) + continue; + + pde &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK; + pde |= PAGE_OFFSET; + free_page(pde); + } + + free_page((unsigned long)sdomain->pt_root); +} + +static void domain_id_free(int id) +{ + spin_lock(&domain_bitmap_lock); + if (id > 0) + __clear_bit(id, sunway_iommu_domain_bitmap); + spin_unlock(&domain_bitmap_lock); +} + +static void dma_domain_free(struct dma_domain *dma_dom) +{ + if (!dma_dom) + return; + + del_domain_from_list(&dma_dom->sdomain); + put_iova_domain(&dma_dom->iovad); + free_pagetable(&dma_dom->sdomain); + if (dma_dom->sdomain.id) + domain_id_free(dma_dom->sdomain.id); + + kfree(dma_dom); +} + +static void sunway_domain_free(struct sunway_iommu_domain *sdomain) +{ + if (!sdomain) + return; + + del_domain_from_list(sdomain); + if (sdomain->id) + domain_id_free(sdomain->id); + + kfree(sdomain); +} + +static u16 sunway_domain_id_alloc(void) +{ + int id; + + spin_lock(&domain_bitmap_lock); + id = find_first_zero_bit(sunway_iommu_domain_bitmap, MAX_DOMAIN_NUM); + if (id > 0 && id < MAX_DOMAIN_NUM) + __set_bit(id, sunway_iommu_domain_bitmap); + else + id = 0; + spin_unlock(&domain_bitmap_lock); + + return id; +} + +static int sunway_domain_init(struct sunway_iommu_domain *sdomain) +{ + spin_lock_init(&sdomain->lock); + mutex_init(&sdomain->api_lock); + sdomain->id = sunway_domain_id_alloc(); + if (!sdomain->id) + return -ENOMEM; + INIT_LIST_HEAD(&sdomain->dev_list); + + return 1; +} + +static struct sunway_iommu_domain *sunway_domain_alloc(void) +{ + struct sunway_iommu_domain *sdomain; + + sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL); + if (!sdomain) + return NULL; + + if (!sunway_domain_init(sdomain)) { + kfree(sdomain); + return NULL; + } + + add_domain_to_list(sdomain); + return sdomain; +} + +static struct dma_domain *dma_domain_alloc(void) +{ + struct dma_domain *dma_dom; + struct page; + + dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL); + if (!dma_dom) + return NULL; + + sunway_domain_init(&dma_dom->sdomain); + dma_dom->sdomain.type = IOMMU_DOMAIN_DMA; + + dma_dom->sdomain.pt_root = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (dma_dom->sdomain.pt_root == NULL) { + pr_err("Allocating a new 
sdomain pt_root failed!\n"); + dma_domain_free(dma_dom); + return NULL; + } + + add_domain_to_list(&dma_dom->sdomain); + + return dma_dom; +} + +static void device_flush_all(struct sunway_iommu_dev *sdata) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(sdata->pdev->bus); + + if (hose == NULL) + return; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid); +} + +/* iommu_ops device attach/unattach helpers */ +static void +set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct page *page; + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_base, dte_l2_val; + + pdev = sdev->pdev; + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return; + + sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); + iommu = sdev->iommu; + dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); + dte_l1_val = *dte_l1; + + if (!dte_l1_val) { + /* Alloc a new level-2 device table page */ + page = alloc_pages_node(iommu->node, __GFP_ZERO, + get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new level-2 device table page failed.\n"); + return; + } + + dte_l2_base = (unsigned long)page_address(page); + dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + *dte_l1 = dte_l1_val; + } + + dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3); + dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + if (iommu_identity_mapping) { + dte_l2_val |= 0x1; + sdev->passthrough = IDENTMAP_ALL; + } + *dte_l2 = dte_l2_val; + + device_flush_all(sdev); +} + +static void +do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + sdev_data->domain = sdomain; + list_add(&sdev_data->list, &sdomain->dev_list); + + sdomain->dev_cnt++; + set_dte_entry(sdev_data, sdomain); + + pr_debug("iommu: device %d add to domain: %d\n", + sdev_data->devid, sdomain->id); +} + +static void do_detach(struct sunway_iommu_dev *sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev 
*sdev_data; + unsigned long flags; + + sdev_data = dev_iommu_priv_get(dev); + sunway_domain = sdev_data->domain; + + if (WARN_ON(!sdev_data->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev_data); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + +/********************************************************************** + * + * Following functions describe IOMMU init ops + * + **********************************************************************/ + +static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +{ + struct sunway_iommu *iommu; + struct page *page; + unsigned long base; + + hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); + if (!hose->pci_iommu) + return 0; + + iommu = hose->pci_iommu; + spin_lock_init(&iommu->dt_lock); + + iommu->node = hose->node; + if (!node_online(hose->node)) + iommu->node = -1; + + page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new iommu_dtbr page failed.\n"); + kfree(hose->pci_iommu); + return NULL; + } + + iommu->iommu_dtbr = page_address(page); + + iommu->hose_pt = hose; + iommu->index = hose->index; + + iommu->enabled = true; + + base = __pa(iommu->iommu_dtbr) & PAGE_MASK; + write_piu_ior0(hose->node, hose->index, DTBASEADDR, base); + + return iommu; +} + +unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, + enum exceptype type) +{ + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_val; + + if (!iommu) + return 0; + dte_l1 = iommu->iommu_dtbr + (devid >> 8); + if (type == DTE_LEVEL1) + return (unsigned long)dte_l1; + + dte_l1_val = *dte_l1; + if (type == DTE_LEVEL1_VAL) + return dte_l1_val; + + dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + dte_l1_val |= PAGE_OFFSET; + dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3)); + if (type == DTE_LEVEL2) + return (unsigned long)dte_l2; + + dte_l2_val = *dte_l2; + if (type == DTE_LEVEL2_VAL) + return dte_l2_val; + + return dte_l2_val; +} + +unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova, + enum exceptype type) +{ + unsigned long iova_pfn, pte_l1_val, pte_l2_val; + unsigned long *pte_l1, *pte_l2; + unsigned long pte_root; + unsigned long offset; + + if (!sdomain) + return -EINVAL; + + pte_root = __pa(sdomain->pt_root) & PAGE_MASK; + iova_pfn = iova >> PAGE_SHIFT; + pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)); + pte_root |= PAGE_OFFSET; + offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pte_l1 = (unsigned long *)(pte_root + offset); + if (type == PTE_LEVEL1) + return (unsigned long)pte_l1; + + pte_l1_val = *pte_l1; + if (type == PTE_LEVEL1_VAL) + return pte_l1_val; + + pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l1_val |= PAGE_OFFSET; + offset = (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + pte_l2 = (unsigned long *)(pte_l1_val + offset); + + if (type == PTE_LEVEL2) + return (unsigned long)pte_l2; + + pte_l2_val = *pte_l2; + if (type == PTE_LEVEL2_VAL) + return pte_l2_val; + + return pte_l2_val; +} + +/* IOMMU 
Interrupt handle */ +irqreturn_t iommu_interrupt(int irq, void *dev) +{ + struct pci_controller *hose = (struct pci_controller *)dev; + struct sunway_iommu_domain *sdomain; + struct sunway_iommu_dev *sdev; + unsigned long iommu_status; + unsigned long type; + unsigned long devid, dva; + + iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (!(iommu_status >> 63)) + return IRQ_NONE; + + type = (iommu_status >> 59) & 0x7; + devid = (iommu_status >> 37) & 0xffff; + dva = iommu_status & 0xffffffff; + pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", + __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); + break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case UNAUTHORIZED_ACCESS: + pr_info("unauthorized access\n"); + break; + case ILLEGAL_RESPONSE: + pr_info("illegal response\n"); + break; + default: + pr_info("unknown error\n"); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + .suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int 
type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", + iommu_index); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + + iommu_device_register(&iommu->iommu, &sunway_iommu_ops, NULL); + } + + ret = iova_cache_get(); + if (ret) + return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +device_initcall(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long *pte_l2, unmapped; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, page_size); + BUG_ON(!is_power_of_2(page_size)); + + unmapped = 0; + while (unmapped < page_size) { + pte_l2 = (unsigned long *)fetch_pte(sunway_domain, iova, PTE_LEVEL2); + *pte_l2 = 0; + + flush_pcache_by_addr(sunway_domain, (unsigned long)pte_l2); + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + + iova += PAGE_SIZE; + unmapped += PAGE_SIZE; + } + + return unmapped; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + /* + * pde: page table entry + * pte: level 2 page table entry + * pte_root: page table root + */ + struct page *page; + struct sunway_iommu *iommu; + unsigned long pde, pte, iova_pfn; + unsigned long pdebaseaddr; + u64 *ptebasecond, ptebaseaddr; + u64 pte_root = (__pa(sunway_domain->pt_root) & PAGE_MASK); + + iova_pfn = (unsigned long)(bus_addr >> PAGE_SHIFT); + + pdebaseaddr = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pdebaseaddr += ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)) + + PAGE_OFFSET; + + pde = *(unsigned long *)pdebaseaddr; + if (pde) { + ptebaseaddr = (pde & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + + goto direct_map; + } + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating pages failed.\n"); + return -1; + } + + ptebasecond = page_address(page); + pde = (__pa(ptebasecond) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + + /* + * If pde exists, no need to allocate a new page. 
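+	 * Two CPUs may race to populate the same level-1 slot, so the
+	 * slot is claimed atomically and the loser frees the level-2
+	 * page it allocated above.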
+ * Atomic compare and exchange, compare the value the pointer points to + * with 0UL. If identical, store pde where the pointer points to, return + * 0UL. Otherwise, return the value the pointer points to. + */ + if (cmpxchg64((volatile u64 *)pdebaseaddr, 0ULL, pde)) { + ptebaseaddr = ((*(volatile u64 *)pdebaseaddr) + & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + free_page((unsigned long)ptebasecond); + } else { + flush_pcache_by_addr(sunway_domain, pdebaseaddr); + ptebaseaddr = (unsigned long)ptebasecond + + ((iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3); + } + +direct_map: + /* case 8K */ + if (page_size == (1UL << PAGE_SHIFT)) { + if (*(volatile u64 *)ptebaseaddr) { + pr_err("IOVA 4G overlap. IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8K | SW64_IOMMU_ENABLE; + *(volatile u64 *)ptebaseaddr = pte; + flush_pcache_by_addr(sunway_domain, ptebaseaddr); + /* case 8M */ + } else if (page_size == (1UL << PAGE_8M_SHIFT)) { + unsigned long *ptr; + int i, ptes_one_page, ptes_one_cache; + + ptr = (unsigned long *)ptebaseaddr; + ptes_one_page = PAGE_SIZE/sizeof(pte); + ptes_one_cache = L1_CACHE_BYTES/sizeof(pte); + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8M | SW64_IOMMU_ENABLE; + + for (i = 0; i < ptes_one_page; i++) { + if (*ptr) { + pr_err("IOVA 4G overlap. IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + *ptr = pte; + + /* just do once flush per cache line */ + if (i % ptes_one_cache == (ptes_one_cache - 1)) + flush_pcache_by_addr(sunway_domain, (unsigned long)ptr); + ptr++; + } + } +#ifdef CONFIG_SW64_GUEST + flush_ptlb_by_addr(sunway_domain, pfn | SW64_IOMMU_MAP_FLAG); +#endif + return 0; +} + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->domain.geometry.aperture_start = 0ULL; + sdomain->domain.geometry.aperture_end = (~0ULL); + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + iommu_identity_mapping = 1; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = 
list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev_data; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + if (!dev_is_pci(dev)) + return -ENODEV; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev_data = dev_iommu_priv_get(dev); + if (!sdev_data) + return -EINVAL; + + if (sdev_data->domain) + detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + + if (iova >= SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + paddr &= ~SW64_IOMMU_ENTRY_VALID; + grn = paddr & SW64_PTE_GRN_MASK; /* get page granularity */ + paddr &= PAGE_MASK; + + switch (grn) { + case SW64_IOMMU_GRN_8M: + paddr += (iova & ~HPAGE_MASK); + break; + case SW64_IOMMU_GRN_8K: + default: + paddr += (iova & ~PAGE_MASK); + break; + } + + return paddr; +} + +static int +sunway_iommu_map_pages(struct iommu_domain *dom, unsigned long iova, + phys_addr_t paddr, size_t page_size, size_t pgcount, + int iommu_prot, gfp_t gfp, size_t *mapped) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + size_t size = pgcount << PAGE_SHIFT; + int ret; + + /* + * As VFIO cannot distinguish between normal DMA request + * and pci device BAR, check should be introduced manually + * to avoid VFIO trying to map pci config space. 
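+	 * The same cutoff is applied in sunway_iommu_iova_to_phys() above
+	 * and sunway_iommu_unmap_pages() below: IOVAs at or above
+	 * SW64_BAR_ADDRESS are treated as untranslated BAR space.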
+ */ + if (iova >= SW64_BAR_ADDRESS) + return 0; + + mutex_lock(&sdomain->api_lock); + while (pgcount--) { + ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size); + if (ret) { + pr_info("Failed to map page from IOVA %lx.\n", iova); + return ret; + } + iova += page_size; + paddr += page_size; + } + mutex_unlock(&sdomain->api_lock); + + if (!ret && mapped) + *mapped = size; + + return ret; +} + +static size_t +sunway_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova, + size_t page_size, size_t pgcount, + struct iommu_iotlb_gather *gather) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + size_t unmap_size; + size_t total_unmap = 0; + + if (iova >= SW64_BAR_ADDRESS) + return page_size; + + mutex_lock(&sdomain->api_lock); + while (pgcount--) { + unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size); + iova += page_size; + total_unmap += page_size; + } + mutex_unlock(&sdomain->api_lock); + + return total_unmap; +} + +static struct iommu_group *sunway_iommu_device_group(struct device *dev) +{ + return generic_device_group(dev); +} + +static int iommu_init_device(struct device *dev) +{ + struct sunway_iommu_dev *sdev; + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct pci_controller *hose; + + if (dev_iommu_priv_get(dev)) + return 0; + + sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL); + if (!sdev) + return -ENOMEM; + + pdev = to_pci_dev(dev); + hose = pci_bus_to_pci_controller(pdev->bus); + iommu = hose->pci_iommu; + llist_add(&sdev->dev_data_list, &dev_data_list); + sdev->pdev = pdev; + sdev->iommu = iommu; + + dev_iommu_priv_set(dev, sdev); + + return 0; +} + +static void iommu_uninit_device(struct device *dev) +{ + struct sunway_iommu_dev *sdev; + + sdev = dev_iommu_priv_get(dev); + if (!sdev) + return; + + if (sdev->domain) + detach_device(dev); + + dev_iommu_priv_set(dev, NULL); +} + +static void sunway_iommu_release_device(struct device *dev) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose->iommu_enable) + return; + + iommu_uninit_device(dev); +} + +static struct iommu_device *sunway_iommu_probe_device(struct device *dev) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + + if (!dev_is_pci(dev)) + return ERR_PTR(-ENODEV); + + pdev = to_pci_dev(dev); + if (!pdev) + return ERR_PTR(-ENODEV); + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return ERR_PTR(-ENODEV); + + if (!hose->iommu_enable) + return ERR_PTR(-ENODEV); + + if (dev_iommu_priv_get(dev)) + return &iommu->iommu; + + ret = iommu_init_device(dev); + if (ret) + return ERR_PTR(ret); + + iommu = hose->pci_iommu; + + return &iommu->iommu; +} + +static int sunway_iommu_def_domain_type(struct device *dev) +{ + if (dev_is_pci(dev)) { + if (iommu_identity_mapping) + return IOMMU_DOMAIN_IDENTITY; + } + + return 0; +} + +static bool sunway_iommu_capable(struct device *dev, enum iommu_cap cap) +{ + return false; +} + +static void sunway_iommu_probe_finalize(struct device *dev) +{ + set_dma_ops(dev, NULL); + iommu_setup_dma_ops(dev, 0, SW64_DMA_LIMIT); +} + +const struct iommu_ops sunway_iommu_ops = { + .capable = sunway_iommu_capable, + .domain_alloc = sunway_iommu_domain_alloc, + .probe_device = sunway_iommu_probe_device, + .probe_finalize = sunway_iommu_probe_finalize, + .release_device = sunway_iommu_release_device, + .device_group = sunway_iommu_device_group, + .pgsize_bitmap = 
SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sunway_iommu_attach_device, + .map_pages = sunway_iommu_map_pages, + .unmap_pages = sunway_iommu_unmap_pages, + .iova_to_phys = sunway_iommu_iova_to_phys, + .free = sunway_iommu_domain_free, + } +}; + +/***************************************************************************** + * + * Boot param handle + * Each bit of iommu_enable bitmap represents an rc enable, and every 8 bits + * represents one cpu node. For example, iommu_enable=0x0100 means enabling + * rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +early_param("iommu_enable", iommu_enable_setup); diff --git a/drivers/iommu/sw64/iommu_v2.c b/drivers/iommu/sw64/iommu_v2.c new file mode 100644 index 000000000000..f3e19e524210 --- /dev/null +++ b/drivers/iommu/sw64/iommu_v2.c @@ -0,0 +1,1780 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * iommu.c: Generic sw64 IOMMU support + * + * This is designed and tested for 3231. If there are no changes in hardware + * in later chips, then it should work just as well. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sunway_iommu.h" + +#define MAX_DOMAIN_NUM 65536 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define SW64_32BIT_DMA_LIMIT (0xe0000000 - 1) +#define SW64_64BIT_DMA_LIMIT ((1UL << 41) - 1) +#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE) + +#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) \ + | ((1ULL) << PAGE_8M_SHIFT) \ + | ((1ULL) << PAGE_512M_SHIFT) \ + | ((1ULL) << PAGE_8G_SHIFT)) + +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + +#define PTE_VALID 0x8000000000000000UL +#define LAST_STAGE 0x100UL +#define PTE_GRN_8M 0x10UL +#define PTE_GRN_512M 0x20UL +#define PTE_GRN_8G 0x30UL +#define PTE_WRITEE 0x2UL +#define PTE_READE 0x1UL +#define PTE_RWE 0x3UL +#define PTE_FLAGS_MASK 0x8000000000000133UL +#define PAGE_8G_OFFSET_MASK ((1UL << PAGE_8G_SHIFT) - 1) +#define PAGE_512M_OFFSET_MASK ((1UL << PAGE_512M_SHIFT) - 1) +#define PAGE_8M_OFFSET_MASK ((1UL << PAGE_8M_SHIFT) - 1) + +/* IOMMU Exceptional Status */ +enum exceptype { + DTE_LEVEL1 = 0x0, + DTE_LEVEL2, + PTE_LEVEL1, + PTE_LEVEL2, + PTE_LEVEL3, + UNAUTHORIZED_ACCESS, + ILLEGAL_RESPONSE, + DTE_LEVEL1_VAL, + DTE_LEVEL2_VAL, + PTE_LEVEL1_VAL, + PTE_LEVEL2_VAL, + PTE_LEVEL3_VAL, +}; + +u64 iommu_enable_cmd; /* default IOMMU boot param: 0 */ + +unsigned long *sunway_iommu_domain_bitmap; + +static DEFINE_SPINLOCK(domain_bitmap_lock); +static DEFINE_SPINLOCK(sunway_iommu_device_table_lock); +spinlock_t sunway_domain_lock; + +static LLIST_HEAD(dev_data_list); +LIST_HEAD(sunway_domain_list); + +struct dma_domain { + struct sunway_iommu_domain sdomain; + struct iova_domain iovad; +}; +const struct iommu_ops sunway_iommu_ops; +static const struct dma_map_ops sunway_dma_ops; + + +/* flush helpers */ +static void piu_flush_all(struct pci_controller *hose) +{ + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, 
PCACHE_FLUSHALL, 0); +} + +void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr) +{ + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + list_for_each_entry(sdev, &sdomain->dev_list, list) { + hose = pci_bus_to_pci_controller(sdev->pdev->bus); + + flush_addr = __pa(flush_addr); + /* Set memory bar here */ + mb(); + write_piu_ior0(hose->node, hose->index, + PCACHE_FLUSHPADDR, flush_addr); + } +} + +void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr) +{ + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev; + + list_for_each_entry(sdev, &sdomain->dev_list, list) { + pdev = sdev->pdev; + hose = pci_bus_to_pci_controller(pdev->bus); + + flush_addr = (pdev->bus->number << 8) + | pdev->devfn | (flush_addr << 16); + write_piu_ior0(hose->node, hose->index, + PTLB_FLUSHVADDR, flush_addr); + } +} + +/* domain helpers */ +static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct sunway_iommu_domain, domain); +} + +static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain) +{ + return container_of(sdomain, struct dma_domain, sdomain); +} + +static void add_domain_to_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_add(&sdomain->list, &sunway_domain_list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void del_domain_from_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_del(&sdomain->list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void free_pagetable(struct sunway_iommu_domain *sdomain) +{ + unsigned long *l2_pte, *l3_pte; + unsigned long l2_pte_val, l3_pte_val; + int l2_index, l3_index, ptes_one_page; + + l2_pte = sdomain->pt_root; + if (!l2_pte) + return; + + ptes_one_page = PAGE_SIZE/sizeof(unsigned long); + for (l2_index = 0; l2_index < ptes_one_page; l2_index++, l2_pte++) { + l2_pte_val = *l2_pte; + if ((l2_pte_val & SW64_IOMMU_ENTRY_VALID) == 0) + continue; + + l2_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK; + l2_pte_val |= PAGE_OFFSET; + l3_pte = (unsigned long *)l2_pte_val; + for (l3_index = 0; l3_index < ptes_one_page; l3_index++, l3_pte++) { + l3_pte_val = *l3_pte; + if ((l3_pte_val & SW64_IOMMU_ENTRY_VALID) == 0) + continue; + + l3_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK; + l3_pte_val |= PAGE_OFFSET; + free_page(l3_pte_val); + } + free_page(l2_pte_val); + } + + free_page((unsigned long)sdomain->pt_root); +} + +static void domain_id_free(int id) +{ + spin_lock(&domain_bitmap_lock); + if (id > 0) + __clear_bit(id, sunway_iommu_domain_bitmap); + spin_unlock(&domain_bitmap_lock); +} + +static void dma_domain_free(struct dma_domain *dma_dom) +{ + if (!dma_dom) + return; + + del_domain_from_list(&dma_dom->sdomain); + put_iova_domain(&dma_dom->iovad); + free_pagetable(&dma_dom->sdomain); + if (dma_dom->sdomain.id) + domain_id_free(dma_dom->sdomain.id); + + kfree(dma_dom); +} + +static void sunway_domain_free(struct sunway_iommu_domain *sdomain) +{ + if (!sdomain) + return; + + del_domain_from_list(sdomain); + if (sdomain->id) + domain_id_free(sdomain->id); + + kfree(sdomain); +} + +static u16 sunway_domain_id_alloc(void) +{ + int id; + + spin_lock(&domain_bitmap_lock); + id = find_first_zero_bit(sunway_iommu_domain_bitmap, MAX_DOMAIN_NUM); + if (id > 0 && id < MAX_DOMAIN_NUM) + __set_bit(id, 
sunway_iommu_domain_bitmap);
+	else
+		id = 0;
+	spin_unlock(&domain_bitmap_lock);
+
+	return id;
+}
+
+static int sunway_domain_init(struct sunway_iommu_domain *sdomain)
+{
+	spin_lock_init(&sdomain->lock);
+	mutex_init(&sdomain->api_lock);
+	sdomain->id = sunway_domain_id_alloc();
+	if (!sdomain->id)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&sdomain->dev_list);
+
+	return 0;
+}
+
+static struct sunway_iommu_domain *sunway_domain_alloc(void)
+{
+	struct sunway_iommu_domain *sdomain;
+
+	sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL);
+	if (!sdomain)
+		return NULL;
+
+	/* sunway_domain_init() returns 0 on success, -ENOMEM on failure */
+	if (sunway_domain_init(sdomain)) {
+		kfree(sdomain);
+		return NULL;
+	}
+
+	add_domain_to_list(sdomain);
+	return sdomain;
+}
+
+static struct dma_domain *dma_domain_alloc(void)
+{
+	struct dma_domain *dma_dom;
+
+	dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL);
+	if (!dma_dom)
+		return NULL;
+
+	if (sunway_domain_init(&dma_dom->sdomain)) {
+		kfree(dma_dom);
+		return NULL;
+	}
+	dma_dom->sdomain.type = IOMMU_DOMAIN_DMA;
+	init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_PFN(SW64_DMA_START));
+	reserve_iova(&dma_dom->iovad, (0xe0000000UL >> PAGE_SHIFT), (0x100000000UL >> PAGE_SHIFT));
+
+	add_domain_to_list(&dma_dom->sdomain);
+
+	return dma_dom;
+}
+
+static void device_flush_all(struct sunway_iommu_dev *sdata)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(sdata->pdev->bus);
+
+	if (hose == NULL)
+		return;
+
+	write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid);
+	write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid);
+	write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid);
+}
+
+/* iommu_ops device attach/unattach helpers */
+static void
+set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain)
+{
+	struct sunway_iommu *iommu;
+	struct pci_dev *pdev;
+	struct page *dt_page, *pt_page;
+	unsigned long *dte_l1, *dte_l2;
+	unsigned long dte_l1_val, dte_l2_base, dte_l2_val;
+
+	pdev = sdev->pdev;
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		return;
+
+	sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+	iommu = sdev->iommu;
+	dte_l1 = iommu->iommu_dtbr + (pdev->bus->number);
+	dte_l1_val = *dte_l1;
+
+	if (!dte_l1_val) {
+		/* Alloc a new level-2 device table page */
+		dt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
+				get_order(PAGE_SIZE));
+		if (!dt_page) {
+			pr_err("Allocating a new level-2 device table page failed.\n");
+			return;
+		}
+
+		dte_l2_base = (unsigned long)page_address(dt_page);
+		dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID;
+		*dte_l1 = dte_l1_val;
+	}
+
+	if (!sdomain->pt_root) {
+		pt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, 0);
+		if (!pt_page) {
+			pr_err("Allocating pt_root failed!\n");
+			return;
+		}
+
+		sdomain->pt_root = page_address(pt_page);
+	}
+
+	dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3);
+	dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID;
+	if (sdomain->type == IOMMU_DOMAIN_IDENTITY) {
+		dte_l2_val |= 0x1;
+		sdev->passthrough = IDENTMAP_ALL;
+	}
+	*dte_l2 = dte_l2_val;
+	device_flush_all(sdev);
+}
+
+static void
+do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain)
+{
+	sdev_data->domain = sdomain;
+	list_add(&sdev_data->list, &sdomain->dev_list);
+
+	sdomain->dev_cnt++;
+	set_dte_entry(sdev_data, sdomain);
+
+	pr_debug("iommu: device %d added to domain %d\n",
+		sdev_data->devid, sdomain->id);
+}
+
+static void do_detach(struct sunway_iommu_dev
*sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev *sdev; + unsigned long flags; + + sdev = dev_iommu_priv_get(dev); + sunway_domain = sdev->domain; + + if (WARN_ON(!sdev->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + +/* dma_ops helpers*/ +static struct sunway_iommu_domain *get_sunway_domain(struct device *dev) +{ + struct sunway_iommu_domain *sdomain; + struct iommu_domain *domain; + struct pci_dev *pdev; + struct sunway_iommu_dev *sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return ERR_PTR(-ENODEV); + + sdev = dev_iommu_priv_get(dev); + sdomain = sdev->domain; + if (sdomain == NULL) { + domain = iommu_get_domain_for_dev(dev); + sdomain = to_sunway_domain(domain); + attach_device(dev, sdomain); + } + + if (sdomain == NULL) + return ERR_PTR(-EBUSY); + + return sdomain; +} + +/********************************************************************** + * + * Following functions describe IOMMU init ops + * + **********************************************************************/ + +static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +{ + struct sunway_iommu *iommu; + struct page *page; + unsigned long base; + + hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); + if (!hose->pci_iommu) + return 0; + + iommu = hose->pci_iommu; + spin_lock_init(&iommu->dt_lock); + + iommu->node = hose->node; + if (!node_online(hose->node)) + iommu->node = -1; + + page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new iommu_dtbr page failed.\n"); + kfree(hose->pci_iommu); + return NULL; + } + iommu->iommu_dtbr = page_address(page); + + iommu->hose_pt = hose; + iommu->index = hose->index; + + iommu->enabled = true; + + base 
= __pa(iommu->iommu_dtbr) & PAGE_MASK; + write_piu_ior0(hose->node, hose->index, DTBASEADDR, base); + + return iommu; +} + +unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, + enum exceptype type) +{ + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_val; + + if (!iommu) + return 0; + dte_l1 = iommu->iommu_dtbr + (devid >> 8); + if (type == DTE_LEVEL1) + return (unsigned long)dte_l1; + + dte_l1_val = *dte_l1; + if (type == DTE_LEVEL1_VAL) + return dte_l1_val; + + dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + dte_l1_val |= PAGE_OFFSET; + dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3)); + if (type == DTE_LEVEL2) + return (unsigned long)dte_l2; + + dte_l2_val = *dte_l2; + if (type == DTE_LEVEL2_VAL) + return dte_l2_val; + + return dte_l2_val; +} + +unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova, + enum exceptype type) +{ + unsigned long iova_pfn; + unsigned long pte_l1_val, pte_l2_val, pte_l3_val; + unsigned long *pte_l1, *pte_l2, *pte_l3; + unsigned long pte_root; + unsigned long offset; + + if (!sdomain) + return -EINVAL; + + pte_root = __pa(sdomain->pt_root) & PAGE_MASK; + iova_pfn = iova >> PAGE_SHIFT; + pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)); + pte_root |= PAGE_OFFSET; + offset = ((iova_pfn >> 20) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pte_l1 = (unsigned long *)(pte_root + offset); + if (type == PTE_LEVEL1) + return (unsigned long)pte_l1; + + pte_l1_val = *pte_l1; + if (type == PTE_LEVEL1_VAL) + return pte_l1_val; + + pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l1_val |= PAGE_OFFSET; + offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL2_OFFSET) << 3; + pte_l2 = (unsigned long *)(pte_l1_val + offset); + + if (type == PTE_LEVEL2) + return (unsigned long)pte_l2; + + pte_l2_val = *pte_l2; + if (type == PTE_LEVEL2_VAL) + return pte_l2_val; + + pte_l2_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l2_val |= PAGE_OFFSET; + offset = (iova_pfn & SW64_IOMMU_LEVEL3_OFFSET) << 3; + pte_l3 = (unsigned long *)(pte_l2_val + offset); + if (type == PTE_LEVEL3) + return (unsigned long)pte_l3; + + pte_l3_val = *pte_l3; + if (type == PTE_LEVEL3_VAL) + return pte_l3_val; + + return pte_l3_val; +} + +/* IOMMU Interrupt handle */ +irqreturn_t iommu_interrupt(int irq, void *dev) +{ + struct pci_controller *hose = (struct pci_controller *)dev; + struct sunway_iommu_domain *sdomain; + struct sunway_iommu_dev *sdev; + unsigned long iommu_status; + unsigned long type; + unsigned long devid, dva; + + iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (!(iommu_status >> 63)) + return IRQ_NONE; + + type = (iommu_status >> 58) & 0xf; + devid = (iommu_status >> 36) & 0xffff; + dva = ((iommu_status & 0xffffffff) >> 3) << 13; + pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", + __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); 
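+		/*
+		 * DTE-level faults are only logged; the PTE-level cases
+		 * below additionally clear bit 62 of IOMMUEXCPT_STATUS to
+		 * acknowledge the exception.
+		 */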
+ break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case PTE_LEVEL3: + pr_info("invalid level3 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL3), + fetch_pte(sdomain, dva, PTE_LEVEL3_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + default: + pr_info("iommu exception type %ld\n", type); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + .suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", + iommu_index); + iommu_device_set_ops(&iommu->iommu, &sunway_iommu_ops); + iommu_device_register(&iommu->iommu); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + } + + ret = iova_cache_get(); + if (ret) + return ret; + + ret = 
bus_set_iommu(&pci_bus_type, &sunway_iommu_ops); + if (ret) + return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +subsys_initcall_sync(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long offset, iova_pfn; + unsigned long *pte_base, *pte; + unsigned long grn; + int level, current_level; + int tmp = 1; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, page_size); + BUG_ON(!is_power_of_2(page_size)); + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + level = 2; + grn = PTE_GRN_8M; + break; + default: + level = 3; + break; + } + + pte_base = sunway_domain->pt_root; + iova_pfn = iova >> PAGE_SHIFT; + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + if (current_level == level) { + if (grn == PTE_GRN_512M) { + int i; + + for (i = 0; i < 64; i++) { + *(pte + i) = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + + } else { + *pte = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + break; + } + + pte_base = (unsigned long *)((*pte & (~PTE_FLAGS_MASK)) | PAGE_OFFSET); + offset = (iova_pfn >> (tmp--) * 10) & 0x3ff; + current_level++; + } + + return page_size; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + struct page *page; + struct sunway_iommu *iommu; + unsigned long iova_pfn, pte_val; + unsigned long *pte_base, *pte; + unsigned long offset, grn = 0; + int level = 0, current_level; + int tmp = 1; + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + iova_pfn = bus_addr >> PAGE_SHIFT; + pte_base = sunway_domain->pt_root; + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + grn = PTE_GRN_8M; + level = 2; + break; + default: + level = 3; + break; + } + + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + + if (!(*pte) || (current_level == level)) { + pte_val = PTE_VALID | PTE_RWE | grn; + if (current_level == level) { + *(volatile u64 *)(pte) = 0; + pte_val |= ((paddr & PAGE_MASK) | LAST_STAGE); + } else { + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating level%d page table pages failed.\n", (level + 1)); + return -ENOMEM; + } + + pte_val |= (page_to_phys(page) & PAGE_MASK); + } + + if ((grn == PTE_GRN_512M) && (current_level == 2)) { + int i; + + for (i = 0; i < 64; i++) { + cmpxchg64((volatile u64 *)(pte + i), 0UL, 
pte_val);
+					flush_pcache_by_addr(sunway_domain, (unsigned long)(pte + i));
+				}
+			} else {
+				if (cmpxchg64((volatile u64 *)pte, 0UL, pte_val))
+					free_page((unsigned long)page_address(page));
+				else
+					flush_pcache_by_addr(sunway_domain, (unsigned long)pte);
+			}
+		}
+
+		pte_base = (unsigned long *)__va((*pte) & (~PTE_FLAGS_MASK));
+		offset = (iova_pfn >> (tmp--) * 10) & 0x3ff;
+		current_level++;
+	}
+
+	return 0;
+}
+
+static unsigned long
+sunway_alloc_iova(struct dma_domain *dma_dom, unsigned long pages, struct pci_dev *pdev)
+{
+	struct device *dev;
+	unsigned long pfn = 0;
+
+	pages = __roundup_pow_of_two(pages);
+	dev = &(pdev->dev);
+	if (min(dev->coherent_dma_mask, *dev->dma_mask) == DMA_BIT_MASK(32)) {
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				IOVA_PFN(SW64_32BIT_DMA_LIMIT), true);
+	} else {
+		/* IOVA boundary should be 16M ~ 3.5G */
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				IOVA_PFN(SW64_64BIT_DMA_LIMIT), true);
+	}
+
+	return (pfn << PAGE_SHIFT);
+}
+
+static void sunway_free_iova(struct dma_domain *dma_dom,
+			unsigned long address, unsigned long pages)
+{
+	pages = __roundup_pow_of_two(pages);
+	address >>= PAGE_SHIFT;
+
+	free_iova_fast(&dma_dom->iovad, address, pages);
+}
+
+static dma_addr_t
+__sunway_map_single(struct dma_domain *dma_dom,
+		struct pci_dev *pdev, phys_addr_t paddr, size_t size)
+{
+	dma_addr_t ret, address, start;
+	unsigned long npages, i;
+
+	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
+
+	address = sunway_alloc_iova(dma_dom, npages, pdev);
+	if (!address)
+		return 0;
+
+	start = address;
+	for (i = 0; i < npages; ++i) {
+		ret = sunway_iommu_map_page(&dma_dom->sdomain, start,
+				paddr, PAGE_SIZE);
+		if (ret) {
+			pr_info("failed to map page\n");
+			goto out_unmap;
+		}
+
+		start += PAGE_SIZE;
+		paddr += PAGE_SIZE;
+	}
+
+	address += paddr & ~PAGE_MASK;
+	return address;
+
+out_unmap:
+	/* i is unsigned, so count down with i-- rather than testing i >= 0 */
+	while (i--) {
+		start -= PAGE_SIZE;
+		sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE);
+	}
+
+	sunway_free_iova(dma_dom, address, npages);
+	return 0;
+}
+
+static dma_addr_t
+pci_iommu_map_single(struct pci_dev *pdev,
+		struct dma_domain *dma_dom, void *cpu_addr, size_t size)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus);
+	unsigned long paddr;
+
+	if (hose == NULL) {
+		pr_err("%s: hose does not exist!\n", __func__);
+		return 0;
+	}
+
+	paddr = __sunway_map_single(dma_dom, pdev, __pa(cpu_addr), size);
+
+	pr_debug("pci_alloc_consistent: %zx -> [%px,%lx] from %ps\n",
+			size, cpu_addr, paddr, __builtin_return_address(0));
+
+	return paddr;
+}
+
+static void *sunway_alloc_coherent(struct device *dev,
+		size_t size,
+		dma_addr_t *dma_addr, gfp_t gfp,
+		unsigned long attrs)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_controller *hose;
+	struct sunway_iommu_domain *sdomain;
+	struct dma_domain *dma_dom;
+	struct sunway_iommu_dev *sdev;
+	struct page *page;
+	void *cpu_addr;
+
+	if (!pdev)
+		return NULL;
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose)
+		return NULL;
+
+	gfp &= ~GFP_DMA;
+
+try_again:
+	page = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, get_order(size));
+	if (!page) {
+		pr_err("Allocating pages failed.\n");
+		return NULL;
+	}
+
+	cpu_addr = page_address(page);
+	if (!cpu_addr) {
+		pr_info("pci_alloc_consistent: get_free_pages failed from %ps\n",
+				__builtin_return_address(0));
+
+		__free_pages(page, get_order(size));
+		return NULL;
+	}
+
+	*dma_addr = __pa(cpu_addr);
+	if (!(hose->iommu_enable))
+		return cpu_addr;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (sdev->passthrough & DMA_MASK64)
+		return
cpu_addr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return cpu_addr; + } + + __free_pages(page, get_order(size)); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->alloc(dev, size, dma_addr, gfp, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + *dma_addr = pci_iommu_map_single(pdev, dma_dom, cpu_addr, size); + if (*dma_addr == 0) { + free_pages((unsigned long)cpu_addr, get_order(size)); + if (gfp & GFP_DMA) + return NULL; + + gfp |= GFP_DMA; + goto try_again; + } + + return cpu_addr; +} + +static void +__sunway_unmap_single(struct dma_domain *dma_dom, dma_addr_t dma_addr, size_t size) +{ + dma_addr_t start; + unsigned long npages; + int i; + + npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); + dma_addr &= PAGE_MASK; + start = dma_addr; + + for (i = 0; i < npages; i++) { + sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE); + start += PAGE_SIZE; + } + + sunway_free_iova(dma_dom, dma_addr, npages); + pr_debug("pci_free_consistent: %zx -> [%llx] from %ps\n", + size, dma_addr, __builtin_return_address(0)); + +} + +static void +sunway_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + if (!pdev) + goto out_unmap; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + goto out_unmap; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + goto out_unmap; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); + goto out_free; + +out_unmap: + pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + +out_free: + pr_debug("sunway_free_consistent: [%llx,%zx] from %ps\n", + dma_addr, size, __builtin_return_address(0)); + + free_pages((unsigned long)vaddr, get_order(size)); +} + +static dma_addr_t +sunway_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + phys_addr_t paddr = page_to_phys(page) + offset; + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + return paddr; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + return paddr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return paddr; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_page(dev, page, offset, size, dir, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + return pci_iommu_map_single(pdev, dma_dom, + (char *)page_address(page) + offset, size); +} + +static void +sunway_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = 
pci_bus_to_pci_controller(pdev->bus); + if (hose == NULL) + return; + + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); +} + +#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) +static int +sunway_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom = NULL; + struct scatterlist *sg; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + int i, out_nents = 0; + + if (dir == PCI_DMA_NONE) + BUG(); + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return 0; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + + sg_dma_address(sg) = __pa(SG_ENT_VIRT_ADDRESS(sg)); + if (!(hose->iommu_enable)) + goto check; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + goto check; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + goto check; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_sg(dev, sgl, nents, dir, attrs); + } + + sg_dma_address(sg) = + pci_iommu_map_single(pdev, dma_dom, + SG_ENT_VIRT_ADDRESS(sg), sg->length); +check: + if (sg_dma_address(sg) == 0) + goto error; + + sg_dma_len(sg) = sg->length; + out_nents++; + } + + return nents; + +error: + pr_warn("pci_map_sg failed:"); + pr_warn("could not allocate dma page tables\n"); + + if (out_nents) + pci_unmap_sg(pdev, sgl, out_nents, dir); + return 0; +} + +static void +sunway_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct scatterlist *sg; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + dma_addr_t dma_addr; + long size; + int j; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, j) { + dma_addr = sg->dma_address; + size = sg->dma_length; + if (!size) + break; + + __sunway_unmap_single(dma_dom, dma_addr, size); + } +} + +static const struct dma_map_ops sunway_dma_ops = { + .alloc = sunway_alloc_coherent, + .free = sunway_free_coherent, + .map_sg = sunway_map_sg, + .unmap_sg = sunway_unmap_sg, + .map_page = sunway_map_page, + .unmap_page = sunway_unmap_page, + .dma_supported = dma_direct_supported, +}; + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + sdomain->domain.geometry.aperture_start = 0UL; + 
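+		/*
+		 * Advertise a full 64-bit aperture here;
+		 * sunway_iommu_map() itself ignores IOVAs above
+		 * SW64_BAR_ADDRESS so that VFIO cannot map PCI BAR space.
+		 */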
sdomain->domain.geometry.aperture_end = ~0ULL; + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + BUG_ON(!entry->domain); + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + BUG_ON(sdomain->dev_cnt != 0); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev = dev_iommu_priv_get(dev); + if (!sdev) + return -EINVAL; + + if (sdev->domain) + detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static void sunway_iommu_detach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev = to_pci_dev(dev); + + if (!pdev) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->domain != NULL) + detach_device(dev); +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + unsigned long is_last; + + if (iova > SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL1_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == PTE_GRN_8G) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8G_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == PTE_GRN_512M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_512M_OFFSET_MASK; + return paddr; + } + + if (grn == PTE_GRN_8M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8M_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL3_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + grn = paddr 
& SW64_PTE_GRN_MASK;
+	if (grn != 0)
+		return 0;
+
+	paddr &= ~PTE_FLAGS_MASK;
+	paddr += iova & ~PAGE_MASK;
+	return paddr;
+}
+
+static int
+sunway_iommu_map(struct iommu_domain *dom, unsigned long iova,
+		phys_addr_t paddr, size_t page_size, int iommu_prot, gfp_t gfp)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	int ret;
+
+	/*
+	 * As VFIO cannot distinguish a normal DMA request from a PCI
+	 * device BAR access, a manual check is needed here to keep
+	 * VFIO from mapping PCI config space.
+	 */
+	if (iova > SW64_BAR_ADDRESS)
+		return 0;
+
+	mutex_lock(&sdomain->api_lock);
+	ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size);
+	mutex_unlock(&sdomain->api_lock);
+
+	return ret;
+}
+
+static size_t
+sunway_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+		size_t page_size,
+		struct iommu_iotlb_gather *gather)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	size_t unmap_size;
+
+	if (iova > SW64_BAR_ADDRESS)
+		return page_size;
+
+	mutex_lock(&sdomain->api_lock);
+	unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size);
+	mutex_unlock(&sdomain->api_lock);
+
+	return unmap_size;
+}
+
+static struct iommu_group *sunway_iommu_device_group(struct device *dev)
+{
+	return pci_device_group(dev);
+}
+
+static void iommu_uninit_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev)
+		return;
+
+	if (sdev->domain)
+		detach_device(dev);
+
+	dev_iommu_priv_set(dev, NULL);
+}
+
+static void sunway_iommu_release_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return;
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose->iommu_enable)
+		return;
+
+	iommu_uninit_device(dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+	struct sunway_iommu *iommu;
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	if (dev_iommu_priv_get(dev))
+		return 0;
+
+	sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL);
+	if (!sdev)
+		return -ENOMEM;
+
+	pdev = to_pci_dev(dev);
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	iommu = hose->pci_iommu;
+	llist_add(&sdev->dev_data_list, &dev_data_list);
+	sdev->pdev = pdev;
+	sdev->iommu = iommu;
+
+	dev_iommu_priv_set(dev, sdev);
+
+	return 0;
+}
+
+static struct iommu_device *sunway_iommu_probe_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+	struct sunway_iommu *iommu;
+	int ret;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		return ERR_PTR(-ENODEV);
+
+	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+		return ERR_PTR(-ENODEV);
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose)
+		return ERR_PTR(-ENODEV);
+
+	if (!hose->iommu_enable)
+		return ERR_PTR(-ENODEV);
+
+	if (dev_iommu_priv_get(dev)) {
+		iommu = hose->pci_iommu;
+		return &iommu->iommu;
+	}
+
+	ret = iommu_init_device(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	iommu = hose->pci_iommu;
+
+	return &iommu->iommu;
+}
+
+static int sunway_iommu_def_domain_type(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev->domain)
+		return 0;
+
+	return sdev->domain->type;
+}
+
+static bool sunway_iommu_capable(enum iommu_cap cap)
+{
+	switch (cap) {
+	case IOMMU_CAP_INTR_REMAP:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void sunway_iommu_probe_finalize(struct device *dev)
+{
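+	/*
+	 * Runs after the IOMMU core has attached the device to its
+	 * default domain; switch the device's DMA path to the
+	 * sunway_dma_ops defined above.
+	 */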
+ struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (domain) + set_dma_ops(dev, &sunway_dma_ops); +} + +const struct iommu_ops sunway_iommu_ops = { + .capable = sunway_iommu_capable, + .domain_alloc = sunway_iommu_domain_alloc, + .domain_free = sunway_iommu_domain_free, + .attach_dev = sunway_iommu_attach_device, + .detach_dev = sunway_iommu_detach_device, + .probe_device = sunway_iommu_probe_device, + .probe_finalize = sunway_iommu_probe_finalize, + .release_device = sunway_iommu_release_device, + .map = sunway_iommu_map, + .unmap = sunway_iommu_unmap, + .iova_to_phys = sunway_iommu_iova_to_phys, + .device_group = sunway_iommu_device_group, + .pgsize_bitmap = SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, +}; + +/***************************************************************************** + * + * Boot param handle + * Each bit of iommu_enable bitmap represents an rc enable, and every 8 bits + * represents one cpu node. For example, iommu_enable=0x0100 means enabling + * rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +__setup("iommu_enable=", iommu_enable_setup); diff --git a/drivers/iommu/sw64/sunway_iommu.h b/drivers/iommu/sw64/sunway_iommu.h new file mode 100644 index 000000000000..94a155001d1b --- /dev/null +++ b/drivers/iommu/sw64/sunway_iommu.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. + */ +#include +#include +#include +#include + +struct sunway_iommu_bypass_id { + unsigned int vendor; + unsigned int device; +}; + +struct sunway_iommu { + int index; + bool enabled; + unsigned long *iommu_dtbr; + spinlock_t dt_lock; /* Device Table Lock */ + int node; /* NUMA node */ + + struct pci_controller *hose_pt; + struct iommu_device iommu; /* IOMMU core code handle */ +}; + +struct sunway_iommu_dev { + struct list_head list; /* For domain->dev_list */ + struct llist_node dev_data_list; /* Global device list */ + u16 devid; + int alias; + unsigned int passthrough; + struct sunway_iommu *iommu; + struct pci_dev *pdev; + + spinlock_t lock; /* Lock the page table mainly */ + struct sunway_iommu_domain *domain; /* Domain device is bound to */ +}; + +struct sunway_iommu_domain { + unsigned int type; + spinlock_t lock; + struct mutex api_lock; + u16 id; /* Domain ID */ + struct list_head list; /* For list of all SW domains */ + struct list_head dev_list; /* List of devices in this domain */ + struct iommu_domain domain; /* IOMMU domain handle */ + unsigned long *pt_root; /* Page Table root */ + unsigned int dev_cnt; /* Number of devices in this domain */ +}; + +struct sw64dev_table_entry { + u64 data; +}; + +struct sunway_iommu_group { + struct pci_dev *dev; + struct iommu_group *group; +}; + +#define SW64_IOMMU_ENTRY_VALID ((1UL) << 63) +#define SW64_PTE_LAST_MASK ((1UL) << 8) /*last stage valid*/ +#define SW64_DMA_START 0x1000000 +#define SW64_PTE_GRN_MASK ((0x3UL) << 4) +#define PAGE_8M_SHIFT 23 +#define PAGE_512M_SHIFT 29 +#define PAGE_8G_SHIFT 33 +#define SW64_IOMMU_ENABLE 3 +#define SW64_IOMMU_DISABLE 0 +#define SW64_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW64_IOMMU_LEVEL2_OFFSET 0x3ff +#define SW64_IOMMU_LEVEL3_OFFSET 0x3ff +#define SW64_IOMMU_BYPASS 0x1 +#define 
SW64_IOMMU_MAP_FLAG ((0x1UL) << 20) + +#define PAGE_SHIFT_IOMMU 18 +#define PAGE_SIZE_IOMMU (_AC(1, UL) << PAGE_SHIFT_IOMMU) + +#define PCACHE_FLUSHPADDR_MASK 0xffffffffff80UL -- Gitee From 04f96becc7ee870b40060dc27323ab0a712bd85a Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:48:45 +0800 Subject: [PATCH 357/953] anolis: drivers: irqchip: add sw64 support ANBZ: #4688 Add irqchip drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/irqchip/Kconfig | 33 ++ drivers/irqchip/Makefile | 11 + drivers/irqchip/irq-sunway-cpu.c | 213 ++++++++++++ drivers/irqchip/irq-sunway-msi-v2.c | 512 ++++++++++++++++++++++++++++ drivers/irqchip/irq-sunway-msi-vt.c | 280 +++++++++++++++ drivers/irqchip/irq-sunway-msi.c | 472 +++++++++++++++++++++++++ drivers/irqchip/irq-sw64-intc-v2.c | 89 +++++ drivers/irqchip/irq-sw64-lpc-intc.c | 137 ++++++++ 8 files changed, 1747 insertions(+) create mode 100644 drivers/irqchip/irq-sunway-cpu.c create mode 100644 drivers/irqchip/irq-sunway-msi-v2.c create mode 100644 drivers/irqchip/irq-sunway-msi-vt.c create mode 100644 drivers/irqchip/irq-sunway-msi.c create mode 100644 drivers/irqchip/irq-sw64-intc-v2.c create mode 100644 drivers/irqchip/irq-sw64-lpc-intc.c diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index f7149d0f3d45..a75378efc2c2 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -11,6 +11,39 @@ config ARM_GIC select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP +config SW64_INTC_V2 + bool "SW64 Interrupt Controller V2" + depends on UNCORE_XUELANG + default y + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + help + This enables support for the INTC chip found in SW CHIP3 systems. + The INTC controls devices interrupts and connects them to each + core's local interrupt controller. + +config SW64_LPC_INTC + bool "SW64 cpu builtin LPC Interrupt Controller" + depends on SW64_INTC_V2 + help + Say yes here to add support for the SW64 cpu builtin LPC + IRQ controller. 
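+# The three entries below are non-visible symbols: they carry no prompt
+# and are enabled through their dependencies (SW64, PCI_MSI) rather
+# than by the user.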
+ +config SW64_IRQ_CPU + bool + depends on SW64 + default y + +config SW64_IRQ_MSI + bool + depends on SW64 && PCI_MSI + default y + +config SW64_IRQ_MSI_VT + bool + depends on SW64_IRQ_MSI + default y + config ARM_GIC_PM bool depends on PM diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index ffd945fe71aa..466eb0bd2b52 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -27,6 +27,17 @@ obj-$(CONFIG_SUN6I_R_INTC) += irq-sun6i-r.o obj-$(CONFIG_SUNXI_NMI_INTC) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o +obj-$(CONFIG_SW64_INTC_V2) += irq-sw64-intc-v2.o +obj-$(CONFIG_SW64_LPC_INTC) += irq-sw64-lpc-intc.o +obj-$(CONFIG_SW64_IRQ_CPU) += irq-sunway-cpu.o + +ifeq ($(CONFIG_UNCORE_XUELANG),y) +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi.o +else +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi-v2.o +endif + +obj-$(CONFIG_SW64_IRQ_MSI_VT) += irq-sunway-msi-vt.o obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o diff --git a/drivers/irqchip/irq-sunway-cpu.c b/drivers/irqchip/irq-sunway-cpu.c new file mode 100644 index 000000000000..ff7455c0f3ec --- /dev/null +++ b/drivers/irqchip/irq-sunway-cpu.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include +#include + +static void handle_intx(unsigned int offset) +{ + struct pci_controller *hose; + unsigned long value; + + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) { + value = read_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7)); + if (value >> 63) { + value = value & (~(1UL << 62)); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + handle_irq(hose->int_irq); + value = value | (1UL << 62); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + } + + if (IS_ENABLED(CONFIG_PCIE_PME)) { + value = read_piu_ior0(hose->node, hose->index, PMEINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, value); + } + } + + if (IS_ENABLED(CONFIG_PCIEAER)) { + value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, value); + } + } + + if (hose->iommu_enable) { + value = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (value >> 63) + handle_irq(hose->int_irq); + } + } +} + +static void handle_device_interrupt(unsigned long irq_info) +{ + unsigned int i; + + if (is_guest_or_emul()) { + handle_irq(irq_info); + return; + } + + for (i = 0; i < 4; i++) { + if ((irq_info >> i) & 0x1) + handle_intx(i); + } +} + +/* Performance counter hook. A module can override this to do something useful. 
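+ * Until a PMU driver installs its own handler, dummy_perf() below just
+ * counts the interrupt in irq_err_count and logs a critical message.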
*/ +static void dummy_perf(unsigned long vector, struct pt_regs *regs) +{ + irq_err_count++; + pr_crit("Performance counter interrupt!\n"); +} + +void (*perf_irq)(unsigned long vector, struct pt_regs *regs) = dummy_perf; +EXPORT_SYMBOL(perf_irq); + +static void handle_fault_int(void) +{ + int node; + unsigned long value; + + node = __this_cpu_read(hard_node_id); + pr_info("enter fault int, si_fault_stat = %#lx\n", + sw64_io_read(node, SI_FAULT_STAT)); + sw64_io_write(node, SI_FAULT_INT_EN, 0); + sw64_io_write(node, DLI_RLTD_FAULT_INTEN, 0); +#if defined(CONFIG_UNCORE_XUELANG) + value = 0; +#elif defined(CONFIG_UNCORE_JUNZHANG) + value = sw64_io_read(node, FAULT_INT_CONFIG); + value |= (1 << 8); +#endif + __io_write_fault_int_en(node, value); +} + +static void handle_mt_int(void) +{ + pr_info("enter mt int\n"); +} + +static void handle_nmi_int(void) +{ + pr_info("enter nmi int\n"); +} + +static void handle_dev_int(struct pt_regs *regs) +{ + unsigned long config_val, val, stat; + int node = 0; + unsigned int hwirq; + + config_val = sw64_io_read(node, DEV_INT_CONFIG); + val = config_val & (~(1UL << 8)); + sw64_io_write(node, DEV_INT_CONFIG, val); + stat = sw64_io_read(node, MCU_DVC_INT); + + while (stat) { + hwirq = ffs(stat) - 1; + generic_handle_domain_irq(NULL, hwirq); + stat &= ~(1UL << hwirq); + } + /*do handle irq */ + + sw64_io_write(node, DEV_INT_CONFIG, config_val); +} + +asmlinkage void do_entInt(unsigned long type, unsigned long vector, + unsigned long irq_arg, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + extern char __idle_start[], __idle_end[]; + + if (is_guest_or_emul()) { + if ((type & 0xffff) > 15) { + vector = type; + if (vector == 16) + type = INT_INTx; + else + type = INT_MSI; + } + } + + /* restart idle routine if it is interrupted */ + if (regs->pc > (u64)__idle_start && regs->pc < (u64)__idle_end) + regs->pc = (u64)__idle_start; + + switch (type & 0xffff) { + case INT_MSI: + old_regs = set_irq_regs(regs); + handle_pci_msi_interrupt(type, vector, irq_arg); + set_irq_regs(old_regs); + return; + case INT_INTx: + old_regs = set_irq_regs(regs); + handle_device_interrupt(vector); + set_irq_regs(old_regs); + return; + + case INT_IPI: +#ifdef CONFIG_SMP + handle_ipi(regs); + return; +#else + irq_err_count++; + pr_crit("Interprocessor interrupt? You must be kidding!\n"); +#endif + break; + case INT_RTC: + old_regs = set_irq_regs(regs); + sw64_timer_interrupt(); + set_irq_regs(old_regs); + return; + case INT_VT_SERIAL: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_VT_HOTPLUG: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_PC0: + perf_irq(PMC_PC0, regs); + return; + case INT_PC1: + perf_irq(PMC_PC1, regs); + return; + case INT_DEV: + old_regs = set_irq_regs(regs); + handle_dev_int(regs); + set_irq_regs(old_regs); + return; + case INT_FAULT: + old_regs = set_irq_regs(regs); + handle_fault_int(); + set_irq_regs(old_regs); + return; + case INT_MT: + old_regs = set_irq_regs(regs); + handle_mt_int(); + set_irq_regs(old_regs); + return; + case INT_NMI: + old_regs = set_irq_regs(regs); + handle_nmi_int(); + set_irq_regs(old_regs); + return; + default: + pr_crit("Hardware intr %ld %lx? 
uh?\n", type, vector); + } + pr_crit("PC = %016lx PS = %04lx\n", regs->pc, regs->ps); +} +EXPORT_SYMBOL(do_entInt); diff --git a/drivers/irqchip/irq-sunway-msi-v2.c b/drivers/irqchip/irq-sunway-msi-v2.c new file mode 100644 index 000000000000..36790dfedb33 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-v2.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + int rcid; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + rcid = cpu_to_rcid(chip_data->dst_cpu); + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = + (unsigned int)chip_data->msiaddr | + (rcid_to_msicid(rcid) << MSI_ADDR_DEST_ID_SHIFT); + msg->data = chip_data->vector; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static bool find_free_cpu_vectors(const struct cpumask *search_mask, int *found_cpu, int *found_vector, unsigned int nr_irqs) +{ + int i, vector, cpu; + bool found = false, find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + for (vector = 0; vector < 256; vector++) { + for (i = 0; i < nr_irqs; i++) + if (per_cpu(vector_irq, cpu)[vector + i]) + break; + + if (i == nr_irqs) { + found = true; + *found_cpu = cpu; + *found_vector = vector; + return found; + } + + vector += i; + } + + cpu = cpumask_next(cpu, search_mask); + if (cpu < nr_cpu_ids) + goto try_again; + else { + if (find_once_global) { + pr_warn("No global free vectors\n"); + return found; + } + pr_warn("No local free vectors\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags; + int vector, cpu; + int i; + struct msi_msg msg; + + /* Is this valid ? 
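+	 * An affinity request whose mask contains no online CPU cannot be
+	 * honoured, so reject it before touching any vector bookkeeping.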
*/ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (cdata->multi_msi > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, cdata->multi_msi)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } else { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + spin_lock(&cdata->cdata_lock); + for (i = 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cpu)[vector + i] = entry->irq + i; + BUG_ON(irq_chip_compose_msi_msg(irqd, &msg)); + __pci_write_msi_msg(entry, &msg); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy(irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int node; + int i, vector, cpu; + unsigned long msiaddr; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (type == IRQ_ALLOC_TYPE_MSI && nr_irqs > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, nr_irqs)) + return -ENOSPC; + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + for (i = 0; i < nr_irqs; i++) { + per_cpu(vector_irq, cpu)[vector + i] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + irq_data->chip_data = cdata; + } + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = nr_irqs; + cdata->move_in_progress = false; + } else { + for (i = 0; i < nr_irqs; i++) { + 
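+			/*
+			 * MSI-X or single-MSI path: vectors need not be
+			 * contiguous, so place each one independently with
+			 * its own chip data.
+			 */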
if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = 1; + cdata->move_in_progress = false; + } + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + int err; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, nr_irqs, domain, type); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i, j; + struct irq_data *irq_data; + unsigned long flags; + unsigned int multi_msi; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_domain_reset_irq_data(irq_data); + multi_msi = cdata->multi_msi; + for (j = 0; j < multi_msi; j++) + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector + j] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + if (multi_msi > 1) + break; + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + for_each_pci_msi_entry(desc, dev) { + if (desc->irq) { + for (i = 0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + enum irq_alloc_type msi_type; + + if (arg == NULL) + return -ENODEV; + msi_type = info->type; + err = assign_irq_vector(virq, nr_irqs, domain, msi_type); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct msi_desc *desc = first_pci_msi_entry(pdev); + + memset(arg, 0, sizeof(*arg)); + arg->msi_dev = pdev; + if (desc->msi_attrib.is_msix) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + 
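+	/* Wired (non-MSI) interrupts mapped through this domain are level triggered. */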
irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + err = msi_domain_alloc_irqs(msi_default_domain, &pdev->dev, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain *parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (cdata->dst_cpu == cpu) { + if (vector >= cdata->vector && + vector < cdata->vector + cdata->multi_msi) { + int i; + + raw_spin_lock(&vector_lock); + cdata->move_in_progress = false; + for (i = 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector + i] = 0; + raw_spin_unlock(&vector_lock); + } + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, msi_index = 0; + int cpu, vector_index = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + int irq = 0; + + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, vector_index + msi_index); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sunway-msi-vt.c b/drivers/irqchip/irq-sunway-msi-vt.c new file mode 100644 index 000000000000..df8c7d72671b --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-vt.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +static DEFINE_RAW_SPINLOCK(vector_lock); + +static void __vt_irq_msi_compose_msg(struct sw64_msi_chip_data *cdata, + struct msi_msg *msg) +{ + msg->address_hi = (u32)(VT_MSIX_MSG_ADDR >> 32); + msg->address_lo = (u32)(VT_MSIX_MSG_ADDR & 0xffffffff) + | VT_MSIX_ADDR_DEST_ID(cdata->dst_cpu); + msg->data = cdata->vector; +} + +static void vt_irq_msi_compose_msg(struct irq_data *irqd, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *cdata; + + cdata = irqd->chip_data; + __vt_irq_msi_compose_msg(cdata, msg); +} + +static void vt_irq_msi_update_msg(struct irq_data *irqd, + struct sw64_msi_chip_data *cdata) +{ 
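+	/*
+	 * Recompose the MSI message from the current (dst_cpu, vector)
+	 * pair and write it back to the device, so that subsequent
+	 * interrupts are steered to the new destination.
+	 */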
+	struct msi_msg msg[2] = { [1] = { }, };
+
+	__vt_irq_msi_compose_msg(cdata, msg);
+	pci_write_msi_msg(irqd->irq, msg);
+}
+
+static int
+vt_set_affinity(struct irq_data *irqd, const struct cpumask *cpumask,
+		bool force)
+{
+	struct sw64_msi_chip_data *cdata;
+	struct cpumask searchmask;
+	int cpu, vector;
+
+	/* Is this valid ? */
+	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_is_started(irqd))
+		return IRQ_SET_MASK_OK;
+
+	cdata = irqd->chip_data;
+	if (!cdata)
+		return -ENOMEM;
+
+	/*
+	 * If the existing target CPU is already in the new mask
+	 * and is online, do nothing.
+	 */
+	if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask))
+		return IRQ_SET_MASK_OK;
+
+	cpumask_and(&searchmask, cpumask, cpu_online_mask);
+	if (!find_free_cpu_vector(&searchmask, &cpu, &vector))
+		return -ENOSPC;
+
+	per_cpu(vector_irq, cpu)[vector] = irqd->irq;
+	spin_lock(&cdata->cdata_lock);
+	/* save the old (cpu, vector) pair before overwriting it */
+	cdata->prev_cpu = cdata->dst_cpu;
+	cdata->prev_vector = cdata->vector;
+	cdata->dst_cpu = cpu;
+	cdata->vector = vector;
+	cdata->move_in_progress = true;
+	spin_unlock(&cdata->cdata_lock);
+	cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask);
+	vt_irq_msi_update_msg(irqd, irqd->chip_data);
+
+	return 0;
+}
+
+static struct irq_chip vt_pci_msi_controller = {
+	.name = "PCI-MSI",
+	.irq_unmask = pci_msi_unmask_irq,
+	.irq_mask = pci_msi_mask_irq,
+	.irq_ack = sw64_irq_noop,
+	.irq_compose_msi_msg = vt_irq_msi_compose_msg,
+	.irq_set_affinity = vt_set_affinity,
+};
+
+int chip_setup_vt_msix_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+	int virq, val_node = 0;
+	struct irq_data *irq_data;
+	struct sw64_msi_chip_data *cdata;
+	struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus);
+	unsigned long flags, node, rc_index;
+	const struct cpumask *mask;
+
+	struct cpumask searchmask;
+	int cpu, vector;
+
+	node = hose->node;
+	rc_index = hose->index;
+	mask = cpumask_of_node(node);
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	/* Find unused msi config reg in PIU-IOR0 */
+	if (!node_online(node))
+		val_node = next_node_in(node, node_online_map);
+	else
+		val_node = node;
+
+	virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node);
+	if (virq < 0) {
+		pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used);
+		raw_spin_unlock_irqrestore(&vector_lock, flags);
+		return virq;
+	}
+
+	irq_data = irq_get_irq_data(virq);
+
+	if (irqd_affinity_is_managed(irq_data)) {
+		mask = irq_data_get_affinity_mask(irq_data);
+		cpumask_and(&searchmask, mask, cpu_online_mask);
+	} else {
+		node = irq_data_get_node(irq_data);
+		cpumask_copy(&searchmask, cpumask_of_node(node));
+	}
+	if (cpumask_first(&searchmask) >= nr_cpu_ids)
+		cpumask_copy(&searchmask, cpu_online_mask);
+
+	if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) {
+		raw_spin_unlock_irqrestore(&vector_lock, flags);
+		return -ENOSPC;
+	}
+
+	/* atomic allocation: vector_lock is held with interrupts disabled */
+	cdata = kzalloc(sizeof(*cdata), GFP_ATOMIC);
+	if (!cdata) {
+		raw_spin_unlock_irqrestore(&vector_lock, flags);
+		return -ENOMEM;
+	}
+
+	per_cpu(vector_irq, cpu)[vector] = virq;
+
+	irq_set_msi_desc(virq, desc);
+	irq_set_chip_and_handler_name(virq, &vt_pci_msi_controller,
+			handle_edge_irq, "edge");
+
+	cdata->dst_cpu = cpu;
+	cdata->vector = vector;
+	cdata->rc_index = hose->index;
+	cdata->rc_node = hose->node;
+	cdata->prev_cpu = cpu;
+	cdata->prev_vector = vector;
+
+	irq_data->chip_data = cdata;
+
+	vt_irq_msi_update_msg(irq_data, irq_data->chip_data);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(chip_setup_vt_msix_irq);
+
+int chip_setup_vt_msi_irqs(struct pci_dev
*dev, int nvec, int type) +{ + struct msi_desc *desc; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + unsigned long node, rc_index; + int virq = -1, val_node = 0; + unsigned long flags; + + const struct cpumask *mask; + struct cpumask searchmask; + int i, vector, cpu; + + if (type == PCI_CAP_ID_MSI && nvec > 32) + return 1; + + node = hose->node; + rc_index = hose->index; + raw_spin_lock_irqsave(&vector_lock, flags); + msi_for_each_desc(desc, &(dev->dev), MSI_DESC_ALL) { + /* Find unused msi config reg in PIU-IOR0 */ + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); + if (virq < 0) { + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return virq; + } + + irq_data = irq_get_irq_data(virq); + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < desc->nvec_used; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + cdata = kzalloc(sizeof(*cdata), GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + irq_set_msi_desc_off(virq, i, desc); + irq_set_chip_and_handler_name(virq + i, &vt_pci_msi_controller, handle_edge_irq, "edge"); + irq_data = irq_get_irq_data(virq + i); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); + } + } + + raw_spin_unlock_irqrestore(&vector_lock, flags); + return 0; +} +EXPORT_SYMBOL(chip_setup_vt_msi_irqs); + +void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs) +{ + int i; + unsigned long flags; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_get_irq_data(virq + i); + if (irq_data && irq_data->chip_data) { + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_data->hwirq = 0; + irq_data->chip = &no_irq_chip; + irq_data->chip_data = NULL; + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +int __arch_setup_vt_msix_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *entry; + int ret; + + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + ret = chip_setup_vt_msix_irq(dev, entry); + if (ret) + return ret; + } + + return 0; +} + +int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int ret = 0; + + if (type == PCI_CAP_ID_MSI) + ret = chip_setup_vt_msi_irqs(dev, nvec, type); + else if (type == PCI_CAP_ID_MSIX) + ret = __arch_setup_vt_msix_irqs(dev, nvec, type); + else + pr_info("SW arch do not identify ID:%d\n", type); + + return ret; +} diff --git a/drivers/irqchip/irq-sunway-msi.c b/drivers/irqchip/irq-sunway-msi.c new file mode 100644 index 000000000000..060aa96711b7 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: 
GPL-2.0 +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO; + msg->data = chip_data->msi_config_index; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static unsigned long set_piu_msi_config(struct pci_controller *hose, int cpu, + int msiconf_index, int vector) +{ + unsigned int reg; + unsigned long msi_config; + int phy_cpu; + + msi_config = (1UL << 62) | ((unsigned long)vector << 10); + phy_cpu = cpu_to_rcid(cpu); + msi_config |= ((phy_cpu >> 5) << 6) | (phy_cpu & 0x1f); + reg = MSICONFIG0 + (unsigned long)(msiconf_index << 7); + write_piu_ior0(hose->node, hose->index, reg, msi_config); + msi_config = read_piu_ior0(hose->node, hose->index, reg); + set_bit(msiconf_index, hose->piu_msiconfig); + + return msi_config; +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct pci_controller *hose; + struct pci_dev *pdev; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags, msi_config; + int vector, cpu; + + /* Is this valid ? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. 
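+	 * Otherwise a new (cpu, vector) pair is allocated below; the old
+	 * pair is released in irq_move_complete() once the first interrupt
+	 * arrives on the new CPU.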
+ */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + spin_lock(&cdata->cdata_lock); + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + msi_config = set_piu_msi_config(hose, cpu, cdata->msi_config_index, vector); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msi_config = msi_config; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int msiconf_index, node; + int i, vector, cpu; + unsigned long msi_config; + int start_index; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + msiconf_index = bitmap_find_next_zero_area(hose->piu_msiconfig, 256, 0, + nr_irqs, nr_irqs - 1); + + if (msiconf_index >= 256) { + pr_warn("No free msi on PIU!\n"); + return -ENOSPC; + } + + start_index = msiconf_index; + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < nr_irqs; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + msiconf_index = start_index + i; + msi_config = set_piu_msi_config(hose, cpu, msiconf_index, vector); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->msi_config = msi_config; + cdata->msi_config_index = msiconf_index; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->move_in_progress = false; + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + int err; + unsigned long flags; + + 
raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, nr_irqs, domain, hose); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i; + struct irq_data *irq_data; + struct pci_dev *pdev; + unsigned long flags; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + struct msi_desc *entry; + struct pci_controller *hose; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + entry = irq_get_msi_desc(virq + i); + if (entry) { + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + clear_bit(cdata->msi_config_index, hose->piu_msiconfig); + } + irq_domain_reset_irq_data(irq_data); + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { + if (desc->irq) { + for (i = 0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + struct pci_controller *hose; + + if (arg == NULL) + return -ENODEV; + + hose = pci_bus_to_pci_controller(info->msi_dev->bus); + err = assign_irq_vector(virq, nr_irqs, domain, hose); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + memset(arg, 0, sizeof(*arg)); + arg->msi_dev = pdev; + if (pdev->msix_enabled) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + + err = msi_domain_alloc_irqs_all_locked(&pdev->dev, MSI_DEFAULT_DOMAIN, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain *parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + 
return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +int pcibios_device_add(struct pci_dev *dev) +{ + if (msi_default_domain) + dev_set_msi_domain(&dev->dev, msi_default_domain); + return 0; +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (vector == cdata->vector && cdata->dst_cpu == cpu) { + raw_spin_lock(&vector_lock); + cdata->move_in_progress = 0; + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector] = 0; + raw_spin_unlock(&vector_lock); + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, piu_index, msi_index = 0; + int cpu, vector_index = 0; + unsigned long value = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, vector_index + msi_index); + piu_index = cdata->msi_config_index; + value = cdata->msi_config | (1UL << 63); + write_piu_ior0(cdata->rc_node, cdata->rc_index, MSICONFIG0 + (piu_index << 7), value); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sw64-intc-v2.c b/drivers/irqchip/irq-sw64-intc-v2.c new file mode 100644 index 000000000000..bc2c8ef3ed2f --- /dev/null +++ b/drivers/irqchip/irq-sw64-intc-v2.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +static void fake_irq_mask(struct irq_data *data) +{ +} + +static void fake_irq_unmask(struct irq_data *data) +{ +} + +static struct irq_chip onchip_intc = { + .name = "SW fake Intc", + .irq_mask = fake_irq_mask, + .irq_unmask = fake_irq_unmask, +}; + +static int sw64_intc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + + irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + return 0; +} + +static const struct irq_domain_ops sw64_intc_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = sw64_intc_domain_map, +}; + +#ifdef CONFIG_OF +static struct irq_domain *root_domain; + +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) +{ + + int node = 0; + int hwirq = 0, nirq = 8; + + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_linear(intc, 8, + &sw64_intc_domain_ops, NULL); + + if (!root_domain) + 
panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + for (hwirq = 0 ; hwirq < nirq ; hwirq++) + irq_create_mapping(root_domain, hwirq); + + /*enable MCU_DVC_INT_EN*/ + sw64_io_write(node, MCU_DVC_INT_EN, 0xff); + + return 0; +} + +IRQCHIP_DECLARE(sw64_intc, "sw64,sw6_irq_controller", init_onchip_IRQ); + +static int __init +init_onchip_vt_IRQ(struct device_node *intc, struct device_node *parent) +{ + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_legacy(intc, 16, 0, 0, + &sw64_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + return 0; +} + +IRQCHIP_DECLARE(sw64_vt_intc, "sw64,sw6_irq_vt_controller", init_onchip_vt_IRQ); +#endif diff --git a/drivers/irqchip/irq-sw64-lpc-intc.c b/drivers/irqchip/irq-sw64-lpc-intc.c new file mode 100644 index 000000000000..1cbf87478242 --- /dev/null +++ b/drivers/irqchip/irq-sw64-lpc-intc.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#define LPC_NR_IRQS 16 +#define LPC_IRQ 0x4 +#define LPC_IRQ_MASK 0x8 + +struct lpc_intc_data { + struct irq_domain *domain; + struct irq_chip_generic *gc; +}; + +static void lpc_irq_mask_ack(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = irq_data_get_chip_type(data); + unsigned int mask = data->mask; + + irq_gc_lock(gc); + *ct->mask_cache |= mask; + irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); +} + +static void lpc_irq_handler(struct irq_desc *desc) +{ + struct lpc_intc_data *b = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq; + u32 status; + + chained_irq_enter(chip, desc); + + status = irq_reg_readl(b->gc, LPC_IRQ); + + if (status == 0) { + raw_spin_lock(&desc->lock); + handle_bad_irq(desc); + raw_spin_unlock(&desc->lock); + goto out; + } + + while (status) { + irq = __ffs(status); + status &= ~BIT(irq); + generic_handle_irq(irq_find_mapping(b->domain, irq)); + } + +out: + chained_irq_exit(chip, desc); +} + +static int __init lpc_intc_of_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int set = IRQ_NOPROBE | IRQ_LEVEL; + struct lpc_intc_data *data; + struct irq_chip_type *ct; + int parent_irq, ret; + void __iomem *base; + int hwirq = 0; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + base = of_iomap(np, 0); + if (!base) { + pr_err("failed to remap lpc intc registers\n"); + ret = -ENOMEM; + goto out_free; + } + + parent_irq = irq_of_parse_and_map(np, 0); + if (!parent_irq) { + pr_err("failed to find parent interrupt\n"); + ret = -EINVAL; + goto out_unmap; + } + + data->domain = irq_domain_add_linear(np, LPC_NR_IRQS, + &irq_generic_chip_ops, NULL); + if (!data->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + /* Allocate a single Generic IRQ chip for this node */ + ret = irq_alloc_domain_generic_chips(data->domain, 16, 1, np->name, + handle_level_irq, 0, set, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("failed to allocate generic irq chip\n"); + goto out_free_domain; + } + + /* Set the IRQ chaining logic */ + irq_set_chained_handler_and_data(parent_irq, + lpc_irq_handler, data); + + data->gc = 
irq_get_domain_generic_chip(data->domain, 0); + data->gc->reg_base = base; + data->gc->private = data; + + ct = data->gc->chip_types; + + ct->regs.ack = LPC_IRQ; + ct->regs.mask = LPC_IRQ_MASK; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask_ack = lpc_irq_mask_ack; + + for (hwirq = 0 ; hwirq < 16 ; hwirq++) + irq_create_mapping(data->domain, hwirq); + + /* Enable LPC interrupts */ + writel(0xffffebdd, base + LPC_IRQ_MASK); + + return 0; + +out_free_domain: + irq_domain_remove(data->domain); +out_unmap: + iounmap(base); +out_free: + kfree(data); + return ret; +} +IRQCHIP_DECLARE(sw_lpc_intc, "sw64,lpc_intc", lpc_intc_of_init); -- Gitee From 11f944b824120a69f2d5708b35ada66508b5265b Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:53:23 +0800 Subject: [PATCH 358/953] anolis: drivers: mfd: add sw64 support ANBZ: #4688 Add mfd drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/mfd/Kconfig | 16 ++ drivers/mfd/Makefile | 3 + drivers/mfd/lpc_sunway_chip3.c | 325 +++++++++++++++++++++++++++++++++ drivers/mfd/sunway_ast2400.c | 223 ++++++++++++++++++++++ 4 files changed, 567 insertions(+) create mode 100644 drivers/mfd/lpc_sunway_chip3.c create mode 100644 drivers/mfd/sunway_ast2400.c diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 68d71b4b55bd..6b653487d954 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -604,6 +604,22 @@ config LPC_SCH LPC bridge function of the Intel SCH provides support for System Management Bus and General Purpose I/O. +config LPC_CHIP3 + tristate "CHIP3 LPC" + depends on UNCORE_XUELANG + select MFD_CORE + help + LPC bridge function of the chip3 provides support for + System Management Bus and General Purpose I/O. + +config SUNWAY_SUPERIO_AST2400 + tristate "SUNWAY SUPERIO AST2400" + depends on SW64 + select MFD_CORE + help + Nuvoton AST2400 Super I/O chip platform driver written + for SUNWAY LPC controller. + config INTEL_SOC_PMIC bool "Support for Crystal Cove PMIC" depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index c66f07edcd0e..700b3600eb79 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -269,6 +269,9 @@ obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_MFD_ACER_A500_EC) += acer-ec-a500.o obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o +obj-$(CONFIG_LPC_CHIP3) += lpc_sunway_chip3.o +obj-$(CONFIG_SUNWAY_SUPERIO_AST2400) += sunway_ast2400.o + obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o obj-$(CONFIG_MFD_SMPRO) += smpro-core.o diff --git a/drivers/mfd/lpc_sunway_chip3.c b/drivers/mfd/lpc_sunway_chip3.c new file mode 100644 index 000000000000..1bcf40d6a6f7 --- /dev/null +++ b/drivers/mfd/lpc_sunway_chip3.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * lpc_sunway_chip3.c - LPC interface for SUNWAY CHIP3 + * + * LPC bridge function contains many other functional units, + * such as Interrupt controllers, Timers, Power Management, + * System Management, GPIO, RTC, and LPC Configuration + * Registers. 
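+ * On SW64 the bridge additionally exposes LPC DMA channels and the
+ * memory/firmware-hub flash decode windows (see the LPC_* register
+ * offsets below).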
+ * + * Copyright (c) 2014 JN + * Author: Weiqiang Su + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum features { + LPC_USE_MSI = (1 << 0), + LPC_USE_INTX = (1 << 1), +}; + +enum { + LPC_HST_BAR = 0, + LPC_MEM_BAR = 2, + LPC_FWH_BAR = 4, +}; + +enum { + LPC_CTL = 0x0, + LPC_IRQ = 0x4, + LPC_IRQ_MASK = 0x8, + LPC_STAT = 0xc, + LPC_ERR_INF = 0x10, + LPC_MEM_HADDR = 0x14, + LPC_FWH_IDSEL_R1 = 0x18, + LPC_FWH_IDSEL_R2 = 0x1c, + LPC_FWH_IDSEL_R3 = 0x20, + LPC_FWH_IDSEL_R4 = 0x24, + LPC_FWH_IDSEL_R5 = 0x28, + LPC_FWH_DEC_EN1 = 0x2c, + LPC_FWH_DEC_EN2 = 0x30, + LPC_DMA_CTL = 0x34, + LPC_CH_STAT = 0x38, + LPC_CH0_ADDR = 0x3c, + LPC_CH1_ADDR = 0x40, + LPC_CH2_ADDR = 0x44, + LPC_CH3_ADDR = 0x48, + LPC_CH0_LENG = 0x4c, + LPC_CH1_LENG = 0x50, + LPC_CH2_LENG = 0x54, + LPC_CH3_LENG = 0x58, + LPC_CH0_MODE = 0x5c, + LPC_CH1_MODE = 0x60, + LPC_CH2_MODE = 0x64, + LPC_CH3_MODE = 0x68, + LPC_CH_MASK = 0x6c, + LPC_DMA_SWRST = 0x70, +}; + +struct lpc_chip3_adapter { + void __iomem *hst_regs; + struct device *dev; + int irq; + unsigned int features; +}; + +static struct resource superio_chip3_resources[] = { + { + .flags = IORESOURCE_IO, + } +}; + +static struct resource mem_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct resource fw_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct physmap_flash_data mem_flash_data = { + .width = 1, +}; + +static struct physmap_flash_data fw_flash_data = { + .width = 1, +}; + +static struct mfd_cell lpc_chip3_cells[] = { + { + .name = "sunway_superio_ast2400", + .id = 0, + .num_resources = ARRAY_SIZE(superio_chip3_resources), + .resources = superio_chip3_resources, + }, + { + .name = "chip3-flash", + .id = 0, + .num_resources = 1, + .resources = &mem_flash_resource, + .platform_data = &mem_flash_data, + .pdata_size = sizeof(mem_flash_data), + }, + { + .name = "chip3_fwh-flash", + .id = 0, + .num_resources = 1, + .resources = &fw_flash_resource, + .platform_data = &fw_flash_data, + .pdata_size = sizeof(fw_flash_data), + } +}; + +static inline void lpc_writel(void *address, int reg_base, int value) +{ + unsigned long addr = (unsigned long)address + reg_base; + + writel(value, (void *)addr); +} + +static inline int lpc_readl(void *address, int reg_base) +{ + unsigned long addr = (unsigned long)address + reg_base; + int value = readl((void *)addr); + + return value; +} + +static void lpc_enable(struct lpc_chip3_adapter *lpc_adapter) +{ + unsigned int value; + + value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + value |= 0x1600; + + /* LPC host enable */ + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, value); +} + +static void lpc_mem_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + mem_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x2UL << 28)); + mem_flash_resource.end = mem_flash_resource.start + SZ_256M - 1; + + writel(0x1f, lpc_adapter->hst_regs + LPC_MEM_HADDR); +} + +static void lpc_fw_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + fw_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x3UL << 28)); + fw_flash_resource.end = fw_flash_resource.start + SZ_256M - 1; + + writel(0xff0f, lpc_adapter->hst_regs + LPC_FWH_DEC_EN1); + writel(0xffff11ff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R5); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R4); + writel(0xffffffff, lpc_adapter->hst_regs + 
LPC_FWH_IDSEL_R3); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R2); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R1); + +} + +static int lpc_chip3_probe(struct platform_device *pdev) +{ + int ret; + struct lpc_chip3_adapter *lpc_adapter; + struct resource *mem; + + lpc_adapter = kzalloc(sizeof(*lpc_adapter), GFP_KERNEL); + if (lpc_adapter == NULL) { + dev_err(&pdev->dev, "%s kzalloc failed !\n", __func__); + return -ENOMEM; + } + + platform_set_drvdata(pdev, lpc_adapter); + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -EINVAL; + } + + lpc_adapter->hst_regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(lpc_adapter->hst_regs)) { + dev_err(&pdev->dev, "lpc region map failed\n"); + return PTR_ERR(lpc_adapter->hst_regs); + } + + lpc_adapter->dev = &pdev->dev; + lpc_adapter->features = 0; + + lpc_enable(lpc_adapter); + + lpc_mem_flash_init(pdev, lpc_adapter); + lpc_fw_flash_init(pdev, lpc_adapter); + + ret = mfd_add_devices(&pdev->dev, 0, + lpc_chip3_cells, ARRAY_SIZE(lpc_chip3_cells), + NULL, 0, NULL); + if (ret) + goto out_dev; + + dev_info(lpc_adapter->dev, "probe succeed !\n"); + + return ret; + +out_dev: + dev_info(lpc_adapter->dev, "probe failed !\n"); + + mfd_remove_devices(&pdev->dev); + kfree(lpc_adapter); + + return ret; +} + +static int lpc_chip3_remove(struct platform_device *pdev) +{ + struct lpc_chip3_adapter *lpc_adapter = platform_get_drvdata(pdev); + + mfd_remove_devices(&pdev->dev); + iounmap(lpc_adapter->hst_regs); + kfree(lpc_adapter); + + return 0; +} + +static const struct of_device_id chip3_lpc_of_match[] = { + {.compatible = "sunway,chip3_lpc",}, + { /* end of table */ } +}; + +MODULE_DEVICE_TABLE(of, chip3_lpc_of_match); + +#ifdef CONFIG_PM_SLEEP +unsigned int lpc_irq_ctrl_value; +unsigned int lpc_irq_irq_value; +unsigned int lpc_irq_mask_value; + +/** + * chip3_lpc_platform_suspend - Suspend an chip3_lpc-platform device + * @dev: the platform device to suspend + * + * This function stores the lpc controller register values and + * restores them when the machine wakes up. + */ +int chip3_lpc_platform_suspend(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_irq_ctrl_value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + lpc_irq_irq_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ); + lpc_irq_mask_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ_MASK); + + return 0; +} + +/** + * chip3_lpc_platform_resume - Resume an chip3_lpc-platform device + * @dev: the platform device to resume + * + * This function restores the register value before the suspend. 
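+ * Only the LPC_CTL, LPC_IRQ and LPC_IRQ_MASK values captured at suspend
+ * time are written back; the flash decode windows programmed at probe
+ * time are left untouched.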
+ */ +int chip3_lpc_platform_resume(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, lpc_irq_ctrl_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ, lpc_irq_irq_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ_MASK, lpc_irq_mask_value); + + return 0; +} +static SIMPLE_DEV_PM_OPS(chip3_lpc_pm_ops, chip3_lpc_platform_suspend, + chip3_lpc_platform_resume); +#endif + + +static struct platform_driver chip3_lpc_platform_driver = { + .driver = { + .name = "chip3_lpc", + .of_match_table = chip3_lpc_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &chip3_lpc_pm_ops, +#endif + }, + .remove = lpc_chip3_remove, +}; + +static int __init chip3_lpc_drvinit(void) +{ + return platform_driver_probe(&chip3_lpc_platform_driver, + lpc_chip3_probe); +} + +/* + * lpc controller init configure before serial drivers; + * The lpc & ast2400 should be initialized much before + * the serial initialized functions are called. + */ +subsys_initcall_sync(chip3_lpc_drvinit); + +static void __exit chip3_lpc_drvexit(void) +{ + platform_driver_unregister(&chip3_lpc_platform_driver); +} + +module_exit(chip3_lpc_drvexit); + +MODULE_AUTHOR("Weiqiang Su "); +MODULE_DESCRIPTION("LPC Interface for CHIP3"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/sunway_ast2400.c b/drivers/mfd/sunway_ast2400.c new file mode 100644 index 000000000000..fbea07813643 --- /dev/null +++ b/drivers/mfd/sunway_ast2400.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/drivers/mfd/sunway_ast2400.c + * + * Copyright (C) 20014 - 2015 JN + * Author: Weiqiang Su + * + * Nuvoton AST2400 Super I/O chip platform driver written for + * SUNWAY LPC controller. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int superio_uart0_irq; +static int superio_uart1_irq; +static void pnp_enable(device_t dev) +{ + pnp_enter_conf_mode(dev); + pnp_set_logical_device(dev); + pnp_set_enable(dev, 1); + pnp_exit_conf_mode(dev); +} + +const struct pnp_mode_ops pnp_conf_mode_8787_aa = { + .enter_conf_mode = pnp_enter_conf_mode_a5a5, + .exit_conf_mode = pnp_exit_conf_mode_aa, +}; + +static struct device_operations ops = { + .enable = pnp_enable, + .ops_pnp_mode = &pnp_conf_mode_8787_aa, +}; + +static struct pnp_info pnp_dev_info[] = { + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_FDC}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_PP }, + { true, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP1}, + { true, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP2}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_KBC}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIR}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_ACPI}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_HWM_FPLED}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_VID}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIRWKUP}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO_PP_OD}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SVID}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_DSLP}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA_LDN}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_WDT1}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOBASE}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO0}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO1}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO2}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO3}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO4}, + { 
false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO5}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO6}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO7}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO8}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO9}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA}, +}; + +static void superio_com1_init(struct pnp_device *device) +{ + pnp_enter_conf_mode(device); + pnp_set_logical_device(device); + pnp_set_enable(device, 1); + + pnp_write_config(device, 0x60, 0x3); + pnp_write_config(device, 0x61, 0xf8); + + pnp_write_config(device, 0x70, superio_uart0_irq); + pnp_write_config(device, 0x71, 0x1); + + pnp_write_config(device, 0xf0, 0x0); + + pnp_exit_conf_mode(device); +} + +static void superio_com2_init(struct pnp_device *device) +{ + pnp_enter_conf_mode(device); + pnp_set_logical_device(device); + pnp_set_enable(device, 1); + + pnp_write_config(device, 0x60, 0x2); + pnp_write_config(device, 0x61, 0xf8); + + pnp_write_config(device, 0x70, superio_uart1_irq); + pnp_write_config(device, 0x71, 0x1); + + pnp_write_config(device, 0xf0, 0x0); + + pnp_exit_conf_mode(device); +} + +static void pnp_enable_devices(superio_device_t superio_device, + struct device_operations *ops, + unsigned int functions, struct pnp_info *info) +{ + int i = 0; + struct pnp_info *each_info; + struct pnp_device *each_device; + + /* Setup the ops and resources on the newly allocated devices. */ + for (i = 0; i < functions; i++) { + each_info = info + i; + each_device = &each_info->pnp_device; + + /* Skip logical devices this Super I/O doesn't enable. */ + if (each_info->enabled == false) + continue; + + each_device->device = each_info->function; + each_device->ops = ops; + each_device->port = superio_device->superio_ast2400_efir; + + switch (each_device->device) { + case AST2400_SP1: + each_device->ops->init = superio_com1_init; + break; + case AST2400_SP2: + each_device->ops->init = superio_com2_init; + break; + } + + if (each_device->ops->init) + each_device->ops->init(each_device); + } +} + +static void superio_enable_devices(superio_device_t superio_device) +{ + pnp_enable_devices(superio_device, &ops, + ARRAY_SIZE(pnp_dev_info), pnp_dev_info); +} + +static int superio_ast2400_probe(struct platform_device *pdev) +{ + int err = 0; + superio_device_t superio_device; + struct resource *res; + resource_size_t physaddr = 0; + + /* allocate space for device info */ + superio_device = kzalloc(sizeof(struct superio_ast2400_device), GFP_KERNEL); + if (superio_device == NULL) { + err = -ENOMEM; + return err; + } + + res = platform_get_resource(pdev, IORESOURCE_IO, 1); + if (res) { + physaddr = res->start; + dev_info(&pdev->dev, "request memory region %pR\n", res); + } + + superio_device->dev = &pdev->dev; + superio_device->enabled = 1; + superio_device->superio_ast2400_efir = physaddr + SUPERIO_PNP_PORT; + superio_device->superio_ast2400_efdr = physaddr + SUPERIO_PNP_PORT + 1; + superio_uart0_irq = platform_get_irq_byname(pdev, "uart0_irq"); + superio_uart1_irq = platform_get_irq_byname(pdev, "uart1_irq"); + + superio_enable_devices(superio_device); + + platform_set_drvdata(pdev, superio_device); + + dev_info(superio_device->dev, "probe succeed !\n"); + + return 0; +} + +static int superio_ast2400_remove(struct platform_device *pdev) +{ + superio_device_t superio_device = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + kfree(superio_device); + + return 0; +} + +static struct platform_driver superio_nuvoton_ast2400_driver = { + .probe = 
superio_ast2400_probe, + .remove = superio_ast2400_remove, + .driver = { + .name = "sunway_superio_ast2400" + }, +}; + +static int __init superio_nuvoton_ast2400_init(void) +{ + return platform_driver_register(&superio_nuvoton_ast2400_driver); +} + +subsys_initcall_sync(superio_nuvoton_ast2400_init); + +static void __exit superio_nuvoton_ast2400_exit(void) +{ + platform_driver_unregister(&superio_nuvoton_ast2400_driver); +} + +module_exit(superio_nuvoton_ast2400_exit); + +MODULE_DESCRIPTION("NUVOTON AST2400 Super I/O DRIVER"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Weiqiang Su"); -- Gitee From de38441f5f2a79d218b6c8ab7aaf5209eb119c6f Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:48 +0800 Subject: [PATCH 359/953] anolis: drivers: misc: add sw64 support ANBZ: #4688 Add misc drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/misc/Kconfig | 8 ++ drivers/misc/Makefile | 1 + drivers/misc/kgdbts.c | 3 +- drivers/misc/sunway-ged.c | 253 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 264 insertions(+), 1 deletion(-) create mode 100644 drivers/misc/sunway-ged.c diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index cadd4a820c03..1e9def44eb09 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -376,6 +376,14 @@ config HMC6352 This driver provides support for the Honeywell HMC6352 compass, providing configuration and heading data via sysfs. +config SUNWAY_GED + tristate "sunway generic device driver for memhotplug" + depends on SW64 + depends on MEMORY_HOTPLUG + help + This driver provides support for sunway generic device driver for + memhotplug, providing configuration and heading data via sysfs. + config DS1682 tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" depends on I2C diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f2a4d1ff65d4..ccf5456e1d88 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o obj-$(CONFIG_DS1682) += ds1682.o obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_HMC6352) += hmc6352.o +obj-$(CONFIG_SUNWAY_GED) += sunway-ged.o obj-y += eeprom/ obj-y += cb710/ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 88b91ad8e541..6cd73f2a487f 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -130,7 +130,8 @@ static int hw_break_val2; static int cont_instead_of_sstep; static unsigned long cont_thread_id; static unsigned long sstep_thread_id; -#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) +#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) \ + || defined(CONFIG_SW64) static int arch_needs_sstep_emulation = 1; #else static int arch_needs_sstep_emulation; diff --git a/drivers/misc/sunway-ged.c b/drivers/misc/sunway-ged.c new file mode 100644 index 000000000000..b4e4ca315852 --- /dev/null +++ b/drivers/misc/sunway-ged.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Generic Event Device for ACPI. 
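+ *
+ * On SW64 the firmware reports memory hot-(un)plug through a small MMIO
+ * window (start address, length, status and slot registers, see the
+ * OFFSET_* macros below) and raises a platform interrupt that is
+ * serviced by a threaded handler.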
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define OFFSET_START_ADDR 0 +#define OFFSET_LENGTH 8 +#define OFFSET_STATUS 16 +#define OFFSET_SLOT 24 + +/* Memory hotplug event */ +#define SUNWAY_MEMHOTPLUG_ADD 0x1 +#define SUNWAY_MEMHOTPLUG_REMOVE 0x2 + +struct sunway_memory_device { + struct sunway_ged_device *device; + unsigned int state; /* State of the memory device */ + struct list_head list; + + u64 start_addr; /* Memory Range start physical addr */ + u64 length; /* Memory Range length */ + u64 slot; /* Memory Range slot */ + unsigned int enabled:1; +}; + +struct sunway_ged_device { + struct device *dev; + void __iomem *membase; + void *driver_data; + spinlock_t lock; + struct list_head dev_list; +}; + +static int sunway_memory_enable_device(struct sunway_memory_device *mem_device) +{ + int num_enabled = 0; + int result = 0; + + if (mem_device->enabled) { /* just sanity check...*/ + num_enabled++; + goto out; + } + + /* + * If the memory block size is zero, please ignore it. + * Don't try to do the following memory hotplug flowchart. + */ + if (!mem_device->length) + goto out; + + lock_device_hotplug(); + /* suppose node = 0, fix me! */ + result = __add_memory(0, mem_device->start_addr, mem_device->length); + unlock_device_hotplug(); + /* + * If the memory block has been used by the kernel, add_memory() + * returns -EEXIST. If add_memory() returns the other error, it + * means that this memory block is not used by the kernel. + */ + if (result && result != -EEXIST) + goto out; + + mem_device->enabled = 1; + + /* + * Add num_enable even if add_memory() returns -EEXIST, so the + * device is bound to this driver. + */ + num_enabled++; +out: + if (!num_enabled) { + dev_err(mem_device->device->dev, "add_memory failed\n"); + return -EINVAL; + } + + return 0; +} + +static int sunway_memory_get_meminfo(struct sunway_memory_device *mem_device) +{ + struct sunway_ged_device *geddev; + + if (!mem_device) + return -EINVAL; + + if (mem_device->enabled) + return 0; + + geddev = mem_device->device; + + mem_device->start_addr = readq(geddev->membase + OFFSET_START_ADDR); + mem_device->length = readq(geddev->membase + OFFSET_LENGTH); + + return 0; +} + +static void sunway_memory_device_remove(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_dev, *n; + unsigned long start_addr, length, slot; + + if (!device) + return; + + start_addr = readq(device->membase + OFFSET_START_ADDR); + length = readq(device->membase + OFFSET_LENGTH); + slot = readq(device->membase + OFFSET_SLOT); + + list_for_each_entry_safe(mem_dev, n, &device->dev_list, list) { + if (!mem_dev->enabled) + continue; + + if ((start_addr == mem_dev->start_addr) && + (length == mem_dev->length)) { + /* suppose node = 0, fix me! 
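+			 * (the same node-0 assumption made in
+			 * sunway_memory_enable_device())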
+			 */
+			remove_memory(0, start_addr, length);
+			list_del(&mem_dev->list);
+			kfree(mem_dev);
+		}
+	}
+
+	writeq(slot, device->membase + OFFSET_SLOT);
+}
+
+static int sunway_memory_device_add(struct sunway_ged_device *device)
+{
+	struct sunway_memory_device *mem_device;
+	int result;
+
+	if (!device)
+		return -EINVAL;
+
+	mem_device = kzalloc(sizeof(struct sunway_memory_device), GFP_KERNEL);
+	if (!mem_device)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mem_device->list);
+	mem_device->device = device;
+
+	/* Get the range from the IO */
+	mem_device->start_addr = readq(device->membase + OFFSET_START_ADDR);
+	mem_device->length = readq(device->membase + OFFSET_LENGTH);
+	mem_device->slot = readq(device->membase + OFFSET_SLOT);
+
+	result = sunway_memory_enable_device(mem_device);
+	if (result) {
+		dev_err(device->dev, "sunway_memory_enable_device() error\n");
+		sunway_memory_device_remove(device);
+
+		return result;
+	}
+
+	list_add_tail(&mem_device->list, &device->dev_list);
+	dev_dbg(device->dev, "Memory device configured\n");
+
+	hcall(HCALL_MEMHOTPLUG, mem_device->start_addr, 0, 0);
+
+	return 1;
+}
+
+static irqreturn_t sunwayged_ist(int irq, void *data)
+{
+	struct sunway_ged_device *sunwayged_dev = data;
+	unsigned int status;
+
+	status = readl(sunwayged_dev->membase + OFFSET_STATUS);
+
+	/* dispatch on the IO status bits to add or remove memory devices */
+	if (status & SUNWAY_MEMHOTPLUG_ADD)
+		sunway_memory_device_add(sunwayged_dev);
+
+	if (status & SUNWAY_MEMHOTPLUG_REMOVE)
+		sunway_memory_device_remove(sunwayged_dev);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sunwayged_irq_handler(int irq, void *data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int sunwayged_probe(struct platform_device *pdev)
+{
+	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	int irq = platform_get_irq(pdev, 0);
+	struct sunway_ged_device *geddev;
+	struct device *dev = &pdev->dev;
+	int irqflags;
+
+	if (!regs) {
+		dev_err(dev, "no registers defined\n");
+		return -EINVAL;
+	}
+
+	geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+	if (!geddev)
+		return -ENOMEM;
+
+	spin_lock_init(&geddev->lock);
+	geddev->membase = devm_ioremap(&pdev->dev,
+			regs->start, resource_size(regs));
+	if (!geddev->membase)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&geddev->dev_list);
+	geddev->dev = &pdev->dev;
+	irqflags = IRQF_SHARED;
+
+	if (request_threaded_irq(irq, sunwayged_irq_handler, sunwayged_ist,
+				irqflags, "SUNWAY:Ged", geddev)) {
+		dev_err(dev, "failed to setup event handler for irq %u\n", irq);
+
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, geddev);
+
+	return 0;
+}
+
+static int sunwayged_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id sunwayged_of_match[] = {
+	{.compatible = "sw6,sunway-ged", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sunwayged_of_match);
+
+static struct platform_driver sunwayged_platform_driver = {
+	.driver = {
+		.name = "sunway-ged",
+		.of_match_table = sunwayged_of_match,
+	},
+	.probe = sunwayged_probe,
+	.remove = sunwayged_remove,
+};
+module_platform_driver(sunwayged_platform_driver);
+
+MODULE_AUTHOR("Lu Feifei");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Sunway ged driver");
-- 
Gitee

From 5702422eaa7bfe96d7638b099bc6df202534ee88 Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Thu, 18 Jan 2024 16:44:12 +0800
Subject: [PATCH 360/953] anolis: drivers: pci: add sw64 support

ANBZ: #4688

Add common pci and pci controller drivers for SW64.
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/pci/controller/Kconfig | 4 + drivers/pci/controller/Makefile | 1 + drivers/pci/controller/pci-sunway.c | 805 ++++++++++++++++++++++++++++ drivers/pci/pci.c | 4 + drivers/pci/probe.c | 1 + drivers/pci/quirks.c | 2 + 6 files changed, 817 insertions(+) create mode 100644 drivers/pci/controller/pci-sunway.c diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index c0c3f2824990..2a2a3ccd66ad 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -342,6 +342,10 @@ config PCIE_XILINX_CPM Say 'Y' here if you want kernel support for the Xilinx Versal CPM host bridge. +config PCI_SW64 + bool + depends on SW64 && PCI + source "drivers/pci/controller/cadence/Kconfig" source "drivers/pci/controller/dwc/Kconfig" source "drivers/pci/controller/mobiveil/Kconfig" diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index 37c8663de7fe..9d161c053bc4 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o obj-$(CONFIG_PCIE_APPLE) += pcie-apple.o obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o +obj-$(CONFIG_PCI_SW64) += pci-sunway.o # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ diff --git a/drivers/pci/controller/pci-sunway.c b/drivers/pci/controller/pci-sunway.c new file mode 100644 index 000000000000..036994ffde38 --- /dev/null +++ b/drivers/pci/controller/pci-sunway.c @@ -0,0 +1,805 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include + +void set_devint_wken(int node) +{ + unsigned long val; + + /* enable INTD wakeup */ + val = 0x80; + sw64_io_write(node, DEVINT_WKEN, val); + sw64_io_write(node, DEVINTWK_INTEN, val); +} + +#ifdef CONFIG_UNCORE_JUNZHANG +void set_adr_int(int node) +{ + sw64_io_write(node, ADR_INT_CONFIG, (0x0 << 16 | 0x3f)); + sw64_io_write(node, ADR_CTL, 0xc); +} +#endif + +void set_pcieport_service_irq(int node, int index) +{ + if (IS_ENABLED(CONFIG_PCIE_PME)) + write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0); + + if (IS_ENABLED(CONFIG_PCIEAER)) + write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0); +} + +int chip_pcie_configure(struct pci_controller *hose) +{ + struct pci_dev *dev; + struct pci_bus *bus, *top; + struct list_head *next; + unsigned int max_read_size, smallest_max_payload; + int max_payloadsize; + unsigned long rc_index, node; + unsigned long piuconfig0, value; + unsigned int pcie_caps_offset; + unsigned int rc_conf_value; + u16 devctl, new_values; + bool rc_ari_disabled = false, found = false; + unsigned char bus_max_num; + + node = hose->node; + rc_index = hose->index; + smallest_max_payload = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + smallest_max_payload &= PCI_EXP_DEVCAP_PAYLOAD; + bus_max_num = hose->busn_space->start; + + top = hose->bus; + bus = top; + next = top->devices.next; + + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = 
dev->bus_list.next; + + if (!found) { + if (pci_is_root_bus(dev->bus)) { + if (list_empty(&dev->subordinate->devices)) + rc_ari_disabled = true; + } else { + if (!pci_ari_enabled(dev->bus)) { + rc_ari_disabled = true; + found = true; + } + } + } + + if (bus->busn_res.end > bus_max_num) + bus_max_num = bus->busn_res.end; + + /* Query device PCIe capability register */ + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + continue; + max_payloadsize = dev->pcie_mpss; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + } + + if (rc_ari_disabled) { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value &= ~PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } else { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value |= PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } + + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + rc_conf_value &= PCI_EXP_DEVCAP_PAYLOAD; + max_payloadsize = rc_conf_value; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + + max_read_size = 0x2; /* Limit to 512B */ + value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL); + value &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + value |= (max_read_size << 12) | (smallest_max_payload << 5); + write_rc_conf(node, rc_index, RC_EXP_DEVCTL, value); + new_values = (max_read_size << 12) | (smallest_max_payload << 5); + + piuconfig0 = read_piu_ior0(node, rc_index, PIUCONFIG0); + piuconfig0 &= ~(0x7fUL << 9); + if (smallest_max_payload == 0x2) { + piuconfig0 |= (0x20UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } else { + piuconfig0 |= (0x40UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } + + pr_info("Node%ld RC%ld MPSS %luB, MRRS %luB, Piuconfig0 %#lx, ARI %s\n", + node, rc_index, (1UL << smallest_max_payload) << 7, + (1UL << max_read_size) << 7, piuconfig0, + rc_ari_disabled ? "disabled" : "enabled"); + + /* Now, set the max_payload_size for all devices to that value. 
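+	 * In DEVCTL, MRRS lives in bits 14:12 and MPS in bits 7:5, which
+	 * is why new_values above packs (max_read_size << 12) |
+	 * (smallest_max_payload << 5).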
*/ + bus = top; + next = top->devices.next; + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + continue; + + pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, &devctl); + devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + devctl |= new_values; + pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl); + } + + return bus_max_num; +} + +static int check_pci_linkup(unsigned long node, unsigned long index) +{ + unsigned long rc_debug; + + if (is_guest_or_emul()) { + if (node == 0 && index == 0) + return 0; + else + return 1; + } else { + rc_debug = read_piu_ior1(node, index, RCDEBUGINF1); + } + + return !(rc_debug == 0x111); +} + +static void set_rc_piu(unsigned long node, unsigned long index) +{ + unsigned int i __maybe_unused; + unsigned int value; + u32 rc_misc_ctrl; + + if (is_guest_or_emul()) + return; + + /* configure RC, set PCI-E root controller */ + write_rc_conf(node, index, RC_COMMAND, 0x00100007); + write_rc_conf(node, index, RC_PORT_LINK_CTL, 0x1f0020); + write_rc_conf(node, index, RC_EXP_DEVCTL, 0x2850); + write_rc_conf(node, index, RC_EXP_DEVCTL2, 0x6); + write_rc_conf(node, index, RC_ORDER_RULE_CTL, 0x0100); + + /* enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* fix up DEVICE_ID_VENDOR_ID register */ + value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN; + write_rc_conf(node, index, RC_VENDOR_ID, value); + + /* set PCI-E root class code */ + value = read_rc_conf(node, index, RC_REVISION_ID); + write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value); + + /* disable DBI_RO_WR_EN */ + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl); + + write_rc_conf(node, index, RC_PRIMARY_BUS, 0xffffff); + write_piu_ior0(node, index, PIUCONFIG0, PIUCONFIG0_INIT_VAL); + + write_piu_ior1(node, index, PIUCONFIG1, 0x2); + write_piu_ior1(node, index, ERRENABLE, -1); + + /* set DMA offset value PCITODMA_OFFSET */ + write_piu_ior0(node, index, EPDMABAR, PCITODMA_OFFSET); + if (IS_ENABLED(CONFIG_PCI_MSI)) { + write_piu_ior0(node, index, MSIADDR, MSIX_MSG_ADDR); +#ifdef CONFIG_UNCORE_XUELANG + for (i = 0; i < 256; i++) + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), 0); +#endif + } +} + +static void set_intx(unsigned long node, unsigned long index, + unsigned long int_conf) +{ + if (is_guest_or_emul()) + return; + +#if defined(CONFIG_UNCORE_XUELANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x8UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x1UL << 10)); +#elif defined(CONFIG_UNCORE_JUNZHANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x1UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x8UL << 10)); +#endif +} + +static unsigned long 
get_rc_enable(unsigned long node) +{ + unsigned long rc_enable; + + if (is_guest_or_emul()) + return 1; + + rc_enable = sw64_io_read(node, IO_START); + + return rc_enable; +} + +static int map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return hose->service_irq; + else + return hose->int_irq; +} + +static void hose_init(struct pci_controller *hose) +{ + unsigned long pci_io_base; + + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + pci_io_base = IO_BASE | (hose->node << IO_NODE_SHIFT) + | PCI_BASE | (hose->index << IO_RC_SHIFT); + + hose->dense_mem_base = pci_io_base; + hose->dense_io_base = pci_io_base | PCI_LEGACY_IO; + hose->ep_config_space_base = __va(pci_io_base | PCI_EP_CFG); + hose->rc_config_space_base = __va(pci_io_base | PCI_RC_CFG); + + hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; + hose->mem_space->end = hose->mem_space->start + PCI_32BIT_MEMIO_SIZE - 1; + hose->mem_space->name = "pci memory space"; + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, hose->mem_space) < 0) + pr_err("Failed to request MEM on hose %ld\n", hose->index); + hose->pre_mem_space->start = pci_io_base | PCI_64BIT_MEMIO; + hose->pre_mem_space->end = hose->pre_mem_space->start + PCI_64BIT_MEMIO_SIZE - 1; + hose->pre_mem_space->name = "pci pre mem space"; + hose->pre_mem_space->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64; + + if (request_resource(&iomem_resource, hose->pre_mem_space) < 0) + pr_err("Failed to request 64bit MEM on hose %ld\n", hose->index); + hose->io_space->start = pci_io_base | PCI_LEGACY_IO; + hose->io_space->end = hose->io_space->start + PCI_LEGACY_IO_SIZE - 1; + hose->io_space->name = "pci io space"; + hose->io_space->flags = IORESOURCE_IO; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + pr_err("Failed to request IO on hose %ld\n", hose->index); + hose->busn_space->name = "PCI busn"; + hose->busn_space->start = 0xff; + hose->busn_space->end = 0xff; + hose->busn_space->flags = IORESOURCE_BUS; + hose->first_busno = hose->self_busno = hose->busn_space->start; + hose->last_busno = hose->busn_space->end; + + if (is_in_host()) { + if (IS_ENABLED(CONFIG_PCI_MSI)) + memset(hose->piu_msiconfig, 0, 256/8); + } +}; + +static struct sw64_pci_init_ops chip_pci_init_ops = { + .map_irq = map_irq, + .get_rc_enable = get_rc_enable, + .hose_init = hose_init, + .set_rc_piu = set_rc_piu, + .check_pci_linkup = check_pci_linkup, + .set_intx = set_intx, +}; + +void __init setup_chip_pci_ops(void) +{ + sw64_chip_init->pci_init = chip_pci_init_ops; +} + +static unsigned long rc_linkup; +static struct pci_controller *head, **tail = &head; + +static void pci_mark_rc_linkup(unsigned long node, unsigned long index) +{ + set_bit(node * 8 + index, &rc_linkup); +} + +static int pci_get_rc_linkup(unsigned long node, unsigned long index) +{ + return test_bit(node * 8 + index, &rc_linkup); +} + +/** + * Link the specified pci controller to list + */ +extern struct pci_controller *hose_head; +static void pci_link_controller(struct pci_controller *hose) +{ + if (unlikely(!hose)) + return; + + *tail = hose; + tail = &hose->next; + + if (!hose_head) + hose_head = head; +} + +struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num) +{ + struct pci_controller *hose; + + for (hose = head; hose; hose = hose->next) { + if (bus_num >= hose->first_busno && bus_num <= hose->last_busno) + return hose; 
+ } + + return NULL; +} + +struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus) +{ + struct pci_config_window *cfg = NULL; + + if (unlikely(!bus)) + return NULL; + + if (acpi_disabled) + return (struct pci_controller *)(bus->sysdata); + + cfg = (struct pci_config_window *)bus->sysdata; + return (struct pci_controller *)(cfg->priv); +} + +/** + * PCIe Root Complex read config space operations + */ +static int sw64_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + /** + * Workaround for sw6a chipset due to only support scan with devfn = 0, + * while sw6b does not have this limit. + */ + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n ", *val); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * PCIe Root Complex write config space operations + */ +int sw64_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * sw64_pcie_valid_device - check if a valid device is present on bus + * @bus : PCI bus structure + * @devfn: device/function + * + * @return: 'true' on success and 'false' if invalid device is found + */ +static bool sw64_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/** + * sw64_pcie_config_read - read val from config space of PCI host controller or device + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * @size : size of val + * @val[out]: the value read from PCI host controller or device + * + * @return: Whether read operation success + */ +static int sw64_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret = 
PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (is_guest_or_emul())
+		return pci_generic_config_read(bus, devfn, where, size, val);
+
+	hose->self_busno = hose->busn_space->start;
+
+	if (unlikely(bus->number == hose->self_busno)) {
+		ret = sw64_pcie_read_rc_cfg(bus, devfn, where, size, val);
+	} else {
+		if (pci_get_rc_linkup(hose->node, hose->index))
+			ret = pci_generic_config_read(bus, devfn, where, size, val);
+		else
+			return ret;
+	}
+	return ret;
+}
+
+/**
+ * sw64_pcie_config_write - write val to config space of PCI host controller or device
+ * @bus  : PCI bus structure
+ * @devfn: device/function
+ * @where: offset from base
+ * @size : size of val
+ * @val  : the value to write to the PCI host controller or device
+ *
+ * @return: whether the write operation succeeded
+ */
+static int sw64_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+				  int where, int size, u32 val)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	if (is_guest_or_emul())
+		return pci_generic_config_write(bus, devfn, where, size, val);
+
+	hose->self_busno = hose->busn_space->start;
+
+	if (unlikely(bus->number == hose->self_busno))
+		return sw64_pcie_write_rc_cfg(bus, devfn, where, size, val);
+	else
+		return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+/**
+ * sw64_pcie_map_bus - get configuration base address
+ * @bus  : PCI bus structure
+ * @devfn: device/function
+ * @where: offset from base
+ *
+ * @return: base address of the configuration space needed to be
+ *	    accessed.
+ */
+static void __iomem *sw64_pcie_map_bus(struct pci_bus *bus,
+				       unsigned int devfn, int where)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+	void __iomem *cfg_iobase;
+	unsigned long relbus;
+
+	if (!sw64_pcie_valid_device(bus, devfn))
+		return NULL;
+
+	/**
+	 * ECAM of the Sunway PCI host controller is slightly
+	 * different from the standard:
+	 * [31:24]: bus number
+	 * [23:19]: device number
+	 * [18:16]: function number
+	 * [15:12]: reserved
+	 * [11:8] : extended config space registers
+	 * [7:2]  : legacy config space registers
+	 */
+	relbus = (bus->number << 24) | (devfn << 16) | where;
+
+	cfg_iobase = hose->ep_config_space_base + relbus;
+
+	if (IS_ENABLED(CONFIG_PCI_DEBUG))
+		pr_debug("addr:%px bus %d, devfn %d, where %d\n",
+			 cfg_iobase, bus->number, devfn, where);
+	return cfg_iobase;
+}
+
+#ifdef CONFIG_ACPI
+int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	return map_irq(dev, slot, pin);
+}
+
+static void setup_intx_irqs(struct pci_controller *hose)
+{
+	unsigned long int_conf, node, val_node;
+	unsigned long index, irq;
+	int rcid;
+
+	node = hose->node;
+	index = hose->index;
+
+	if (!node_online(node))
+		val_node = next_node_in(node, node_online_map);
+	else
+		val_node = node;
+	irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node);
+	WARN_ON(irq < 0);
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+	irq_set_status_flags(irq, IRQ_LEVEL);
+	hose->int_irq = irq;
+	irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq);
+	hose->service_irq = irq + 1;
+	rcid = cpu_to_rcid(0);
+
+	pr_info_once("INTx are directed to node %d core %d.\n",
+		     ((rcid >> 6) & 0x3), (rcid & 0x1f));
+	int_conf = 1UL << 62 | rcid;	/* rebase all intx on the first logical cpu */
+
+	set_intx(node, index, int_conf);
+
+	set_pcieport_service_irq(node, index);
+}
+
+static int sw64_pci_prepare_controller(struct pci_controller *hose,
+				       struct acpi_device *adev)
+{
+	unsigned long long index, node;
+	unsigned long long rc_config_base_addr;
+	unsigned long long
pci_io_base_addr;
+	unsigned long long ep_io_base_addr;
+	acpi_status rc;
+
+	/* Get node from ACPI namespace */
+	node = acpi_get_node(adev->handle);
+	if (node == NUMA_NO_NODE) {
+		dev_err(&adev->dev, "unable to get node ID\n");
+		return -EEXIST;
+	}
+
+	/* Get index from ACPI namespace */
+	rc = acpi_evaluate_integer(adev->handle, "INDX", NULL, &index);
+	if (rc != AE_OK) {
+		dev_err(&adev->dev, "unable to retrieve INDX\n");
+		return -EEXIST;
+	}
+
+	/**
+	 * Get Root Complex config space base address.
+	 *
+	 * For sw64, the Root Complex config space base address is different
+	 * from the Endpoint config space base address. Use the MCFG table to
+	 * pass the Endpoint config space base address, and define the Root
+	 * Complex config space base address ("RCCB") separately in the ACPI
+	 * namespace.
+	 */
+	rc = acpi_evaluate_integer(adev->handle, "RCCB", NULL, &rc_config_base_addr);
+	if (rc != AE_OK) {
+		dev_err(&adev->dev, "unable to retrieve RCCB\n");
+		return -EEXIST;
+	}
+
+	/* Get Root Complex I/O space base addr from ACPI namespace */
+	rc = acpi_evaluate_integer(adev->handle, "RCIO", NULL, &pci_io_base_addr);
+	if (rc != AE_OK) {
+		dev_err(&adev->dev, "unable to retrieve RCIO\n");
+		return -EEXIST;
+	}
+
+	/* Get Endpoint I/O space base addr from ACPI namespace */
+	rc = acpi_evaluate_integer(adev->handle, "EPIO", NULL, &ep_io_base_addr);
+	if (rc != AE_OK) {
+		dev_err(&adev->dev, "unable to retrieve EPIO\n");
+		return -EEXIST;
+	}
+
+	hose->iommu_enable = false;
+	hose->index = index;
+	hose->node = node;
+
+	hose->sparse_mem_base = 0;
+	hose->sparse_io_base = 0;
+	hose->dense_mem_base = pci_io_base_addr;
+	hose->dense_io_base = ep_io_base_addr;
+
+	hose->rc_config_space_base = __va(rc_config_base_addr);
+
+	hose->first_busno = 0xff;
+	hose->last_busno = 0xff;
+	hose->self_busno = 0xff;
+
+	hose->need_domain_info = 0;
+
+#if IS_ENABLED(CONFIG_PCI_MSI)
+	if (is_in_host())
+		memset(hose->piu_msiconfig, 0, 256 / 8);	/* 256 bits bitmap */
+#endif
+
+	/**
+	 * There are two prerequisites for the Root Complex
+	 * of Sunway to work:
+	 * 1. the Root Complex is enabled
+	 * 2. the Root Complex has linked up
+	 */
+	set_rc_piu(hose->node, hose->index);
+	if (check_pci_linkup(hose->node, hose->index)) {
+		/**
+		 * Root Complex link up failed.
+		 * This usually means that there is no device on the slot.
+		 */
+		dev_info(&adev->dev, "node %ld RC %ld failed to link up\n",
+			 hose->node, hose->index);
+	} else {
+		pci_mark_rc_linkup(hose->node, hose->index);
+		dev_info(&adev->dev, "node %ld RC %ld linked up successfully\n",
+			 hose->node, hose->index);
+	}
+
+	setup_intx_irqs(hose);
+
+	pci_link_controller(hose);
+
+	return 0;
+}
+
+/**
+ * Use the info from ACPI to init pci_controller
+ */
+static int sw64_pci_ecam_init(struct pci_config_window *cfg)
+{
+	struct pci_controller *hose = NULL;
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	phys_addr_t mcfg_addr;
+	int ret;
+
+	/**
+	 * First, check whether the Root Complex is enabled.
+	 * If the Root Complex is disabled, there's no need to continue.
+	 *
+	 * In the ACPI namespace, the _STA method indicates
+	 * whether the Root Complex is enabled.
+	 *
+	 * _STA has already been checked when the acpi_device was created.
+	 * Double check here to get the latest hardware status.
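+	 * acpi_bus_get_status() re-evaluates _STA and refreshes
+	 * adev->status before it is consulted below.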
+ */ + ret = acpi_bus_get_status(adev); + if (ret) { + dev_err(dev, "unable to retrieve _STA\n"); + return ret; + } + + if (!adev->status.present) { + dev_err(dev, "RC is not enabled\n"); + return -ENODEV; + } + + hose = kzalloc(sizeof(*hose), GFP_KERNEL); + if (!hose) { + dev_err(dev, "out of memory when alloc mem for pci_controller\n"); + return -ENOMEM; + } + + /* Get Endpoint config space base address from MCFG table */ + mcfg_addr = cfg->res.start - (cfg->busr.start << cfg->ops->bus_shift); + + /** + * "__va(mcfg_addr)" is equal to "cfg->win", so we can also use + * "hose->ep_config_space_base = cfg->win" here + */ + hose->ep_config_space_base = __va(mcfg_addr); + + /* Init pci_controller */ + ret = sw64_pci_prepare_controller(hose, adev); + if (ret) { + kfree(hose); + dev_err(&adev->dev, "failed to init pci controller\n"); + return ret; + } + + cfg->priv = (void *)hose; + + return 0; +} + +const struct pci_ecam_ops sw64_pci_ecam_ops = { + .bus_shift = 24, + .init = sw64_pci_ecam_init, + .pci_ops = { + .map_bus = sw64_pcie_map_bus, + .read = sw64_pcie_config_read, + .write = sw64_pcie_config_write, + } +}; +#endif diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 70e61f719040..d278d56824b1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4803,7 +4803,11 @@ int pcie_flr(struct pci_dev *dev) * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device. */ +#ifdef CONFIG_SW64 + msleep(1000); +#else msleep(100); +#endif return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 43159965e09e..622c53bf6abe 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -142,6 +142,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; flags |= IORESOURCE_MEM; + if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) flags |= IORESOURCE_PREFETCH; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b3976dcb71f1..352284a22e0d 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4537,6 +4537,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); +#ifndef CONFIG_SW64 /* * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same * values for the Attribute as were supplied in the header of the @@ -4593,6 +4594,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); +#endif /* * pci_acs_ctrl_enabled - compare desired ACS controls with those provided -- Gitee From 1e0c8008b3816d8ad9b2dd40de400b75c656bb69 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:48:27 +0800 Subject: [PATCH 361/953] anolis: drivers: platform: add sw64 support ANBZ: #4688 Add platform drivers for SW64. 
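
The legacy_xuelang driver wires up the pm_halt/pm_power_off/pm_restart
hooks: on bare metal they go through cpld_write() (with a JM585 fixup
before restart), while in a guest the requests are forwarded to the
hypervisor via hcall().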
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/platform/Makefile | 1 + drivers/platform/sw64/Makefile | 2 + drivers/platform/sw64/legacy_xuelang.c | 63 ++++++++++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 drivers/platform/sw64/Makefile create mode 100644 drivers/platform/sw64/legacy_xuelang.c diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 41640172975a..8296d4c41eb7 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_X86) += x86/ obj-$(CONFIG_LOONGARCH) += loongarch/ +obj-$(CONFIG_SW64) += sw64/ obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/ obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_OLPC_EC) += olpc/ diff --git a/drivers/platform/sw64/Makefile b/drivers/platform/sw64/Makefile new file mode 100644 index 000000000000..28922224fb17 --- /dev/null +++ b/drivers/platform/sw64/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += legacy_xuelang.o diff --git a/drivers/platform/sw64/legacy_xuelang.c b/drivers/platform/sw64/legacy_xuelang.c new file mode 100644 index 000000000000..8a63d9edf9f2 --- /dev/null +++ b/drivers/platform/sw64/legacy_xuelang.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include +#include + +static void vt_mode_kill_arch(int mode) +{ + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + + switch (mode) { + case LINUX_REBOOT_CMD_RESTART: + hcall(HCALL_RESTART, 0, 0, 0); + mb(); + break; + case LINUX_REBOOT_CMD_HALT: + case LINUX_REBOOT_CMD_POWER_OFF: + hcall(HCALL_SHUTDOWN, 0, 0, 0); + mb(); + break; + default: + break; + } +} + +void sw64_halt(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + vt_mode_kill_arch(LINUX_REBOOT_CMD_HALT); +} + +void sw64_poweroff(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + vt_mode_kill_arch(LINUX_REBOOT_CMD_POWER_OFF); +} + +void sw64_restart(void) +{ + if (is_in_host()) { + fix_jm585_reset(); + cpld_write(0x64, 0x00, 0xc3); + } else + vt_mode_kill_arch(LINUX_REBOOT_CMD_RESTART); +} + +static int sw64_reset_init(void) +{ +#ifdef CONFIG_EFI + if (BIOS_SUPPORT_RESET_CLALLBACK((void *)bios_version)) + return 0; +#endif + pm_restart = sw64_restart; + pm_power_off = sw64_poweroff; + pm_halt = sw64_halt; + return 0; +} +subsys_initcall(sw64_reset_init); -- Gitee From 0278260db98a1596fdf992f79e8e4d4fff9298b3 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:46:49 +0800 Subject: [PATCH 362/953] anolis: drivers: qemu_fw_cfg: add sw64 support ANBZ: #4688 Add qemu_fw_cfg drivers for SW64. 
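
Both hunks are additive: SW64 joins the Kconfig dependency list of
FW_CFG_SYSFS and reuses the ARM/ARM64 MMIO register layout (ctrl at
offset 0x08, data at 0x00, DMA at 0x10).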
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/firmware/Kconfig | 2 +- drivers/firmware/qemu_fw_cfg.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index b59e3041fd62..9cb284a6e7a1 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -155,7 +155,7 @@ config RASPBERRYPI_FIRMWARE config FW_CFG_SYSFS tristate "QEMU fw_cfg device support in sysfs" - depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86) + depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86 || SW64) depends on HAS_IOPORT_MAP default n help diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index a69399a6b7c0..f4fea1ec3201 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void) /* arch-specific ctrl & data register offsets are not available in ACPI, DT */ #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF)) -# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64)) +# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_SW64)) # define FW_CFG_CTRL_OFF 0x08 # define FW_CFG_DATA_OFF 0x00 # define FW_CFG_DMA_OFF 0x10 -- Gitee From 719d30532d6111dc764b0f3459e58e4747aec343 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:54:17 +0800 Subject: [PATCH 363/953] anolis: drivers: rtc: add sw64 rtc support ANBZ: #4688 Add virtual rtc drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/rtc/Kconfig | 7 +++ drivers/rtc/Makefile | 5 ++ drivers/rtc/rtc-sw64-guest.c | 54 +++++++++++++++++++ drivers/rtc/rtc-sw64-virt-platform.c | 25 +++++++++ drivers/rtc/rtc-sw64-virt.c | 77 ++++++++++++++++++++++++++++ 5 files changed, 168 insertions(+) create mode 100644 drivers/rtc/rtc-sw64-guest.c create mode 100644 drivers/rtc/rtc-sw64-virt-platform.c create mode 100644 drivers/rtc/rtc-sw64-virt.c diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 92f46a6312c2..6f270577df86 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -984,6 +984,13 @@ config RTC_DRV_ALPHA Direct support for the real-time clock found on every Alpha system, specifically MC146818 compatibles. If in doubt, say Y. +config RTC_DRV_SW64_VIRT + bool "SW64 Hypervisor based RTC" + depends on SW64 + default y + help + Get support for the Hypervisor based RTC on SW64 systems. 
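+	  The driver only registers its platform device when running as
+	  a guest, so it is safe to say Y on bare-metal systems as well.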
+ config RTC_DRV_DS1216 tristate "Dallas DS1216" depends on SNI_RM diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index fd209883ee2e..7711f79787ac 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_CLASS) += rtc-core.o obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o rtc-core-y := class.o interface.o +ifdef CONFIG_RTC_DRV_SW64_VIRT +rtc-core-y += rtc-sw64-virt-platform.o +endif + rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o @@ -168,6 +172,7 @@ obj-$(CONFIG_RTC_DRV_ST_LPC) += rtc-st-lpc.o obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o +obj-$(CONFIG_RTC_DRV_SW64_VIRT) += rtc-sw64-virt.o obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o diff --git a/drivers/rtc/rtc-sw64-guest.c b/drivers/rtc/rtc-sw64-guest.c new file mode 100644 index 000000000000..5d86ce20a1fb --- /dev/null +++ b/drivers/rtc/rtc-sw64-guest.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Lu Feifei + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define RTC_IO_ADDR (0x804910000000ULL) + +static int sw_guest_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + rtc_time64_to_tm(*ioaddr, tm); + return 0; +} + +static const struct rtc_class_ops rtc_sw_guest_ops = { + .read_time = sw_guest_read_time, +}; + +static int __init rtc_sw_guest_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_device_register(&pdev->dev, "sw_guest", + &rtc_sw_guest_ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} + +static struct platform_driver rtc_sw_guest_driver = { + .driver = { + .name = "rtc_sw_guest", + }, +}; + +module_platform_driver_probe(rtc_sw_guest_driver, rtc_sw_guest_probe); + +MODULE_AUTHOR("Lu Feifei "); +MODULE_DESCRIPTION("SW GUEST RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc_sw_guest"); diff --git a/drivers/rtc/rtc-sw64-virt-platform.c b/drivers/rtc/rtc-sw64-virt-platform.c new file mode 100644 index 000000000000..3db9ff2f0e64 --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt-platform.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +static struct platform_device rtc_sw64_virt_device = { + .name = "rtc_sw64_virt", + .id = -1, +}; + +static int __init rtc_sw64_virt_init(void) +{ + if (is_in_host()) + return 0; + + if (platform_device_register(&rtc_sw64_virt_device) < 0) + pr_err("unable to register rtc device...\n"); + /* not necessarily an error */ + return 0; +} +module_init(rtc_sw64_virt_init); diff --git a/drivers/rtc/rtc-sw64-virt.c b/drivers/rtc/rtc-sw64-virt.c new file mode 100644 index 000000000000..23c93d7ddbae --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* rtc-sw64-virt.c: Hypervisor based RTC for SW64 systems. 
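+ *
+ * The hypervisor time register is read-only from the guest, so
+ * set_time is emulated: it records the requested time (vtime_new)
+ * and the hardware value at that moment (vtime_old), and read_time
+ * then applies the difference to the current hardware value.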
+ * + * Copyright (C) 2021 Lu Feifei + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define RTC_IO_ADDR (0x804910000000ULL) +unsigned long vtime_old, vtime_new; + +static int sw64_virt_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + unsigned long vtime_now; + long vtime_offset; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + if (!vtime_new) { + rtc_time64_to_tm(*ioaddr, tm); + } else { + vtime_now = *ioaddr; + vtime_offset = vtime_new - vtime_old; + vtime_now += vtime_offset; + rtc_time64_to_tm(vtime_now, tm); + } + return 0; +} + +static int sw64_virt_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + vtime_old = *ioaddr; + + vtime_new = rtc_tm_to_time64(tm); + return 0; +} + +static const struct rtc_class_ops rtc_sw64_virt_ops = { + .read_time = sw64_virt_read_time, + .set_time = sw64_virt_set_time, +}; + +static int __init rtc_sw64_virt_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_device_register(&pdev->dev, "sw64_virt", + &rtc_sw64_virt_ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} + +static struct platform_driver rtc_sw64_virt_driver = { + .driver = { + .name = "rtc_sw64_virt", + }, +}; + +module_platform_driver_probe(rtc_sw64_virt_driver, rtc_sw64_virt_probe); + +MODULE_AUTHOR("Lu Feifei "); +MODULE_DESCRIPTION("Sunway virtual RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc_sw64_virt"); -- Gitee From adc02998952b47f88cb29156d1220a9ea5677cc8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:56:09 +0800 Subject: [PATCH 364/953] anolis: drivers: scsi: add sw64 support ANBZ: #4688 Add scsi drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 8a83f3fc2b86..d4b97f0a5013 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3589,6 +3589,14 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, d_val.u.high != cpu_to_le32(UINT_MAX)) { smid = le16_to_cpu(reply_desc->SMID); +#ifdef CONFIG_SUBARCH_C3B + if (smid == 0xffff) { + smid = d_val.u.low >> 16; + if (smid == 0xffff) + break; + } +#endif + cmd_fusion = fusion->cmd_list[smid - 1]; scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) cmd_fusion->io_request; -- Gitee From c64288d935d8329f87408cc9477c5c134b34fbcb Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:53:34 +0800 Subject: [PATCH 365/953] anolis: drivers: spi: add sw64 support ANBZ: #4688 Add spi drivers for SW64. 
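
The chip3 core is DesignWare-style. The driver runs in poll mode with
all interrupts masked, bounds every message by the FIFO depth, and
implements spi-mem ops, using the controller's EEPROM-read mode for
ops that both transmit and receive.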
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/spi/Kconfig | 6 + drivers/spi/Makefile | 1 + drivers/spi/spi-chip3-mmio.c | 147 +++++++++++++ drivers/spi/spi-chip3.c | 404 +++++++++++++++++++++++++++++++++++ drivers/spi/spi-chip3.h | 245 +++++++++++++++++++++ 5 files changed, 803 insertions(+) create mode 100644 drivers/spi/spi-chip3-mmio.c create mode 100644 drivers/spi/spi-chip3.c create mode 100644 drivers/spi/spi-chip3.h diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3ce0fd5df8e9..60826b7ed21e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1179,6 +1179,12 @@ config SPI_AMD # # Add new SPI master controllers in alphabetical order above this line # +config SPI_CHIP3 + tristate "Memory-mapped io interface driver for SUNWAY CHIP3 SPI core" + depends on UNCORE_XUELANG + help + general driver for SPI controller core from DesignWare + comment "SPI Multiplexer support" diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6af54842b9fa..26bf16fcf890 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ spi-dw-y := spi-dw-core.o spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o +obj-$(CONFIG_SPI_CHIP3) += spi-chip3.o spi-chip3-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o diff --git a/drivers/spi/spi-chip3-mmio.c b/drivers/spi/spi-chip3-mmio.c new file mode 100644 index 000000000000..a907f13d4ae5 --- /dev/null +++ b/drivers/spi/spi-chip3-mmio.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Memory-mapped interface driver for SUNWAY CHIP3 SPI Core + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-chip3.h" + +#define DRIVER_NAME "sunway_chip3_spi" + +struct chip3_spi_mmio { + struct chip3_spi dws; + struct clk *clk; + void *priv; +}; + +static int chip3_spi_mmio_probe(struct platform_device *pdev) +{ + int (*init_func)(struct platform_device *pdev, + struct chip3_spi_mmio *dwsmmio); + struct chip3_spi_mmio *dwsmmio; + struct chip3_spi *dws; + struct resource *mem; + int ret; + int num_cs; + + dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct chip3_spi_mmio), + GFP_KERNEL); + if (!dwsmmio) + return -ENOMEM; + + dws = &dwsmmio->dws; + + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dws->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(dws->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(dws->regs); + } + + dws->irq = platform_get_irq(pdev, 0); + if (dws->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return dws->irq; /* -ENXIO */ + } + + dwsmmio->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dwsmmio->clk)) + return PTR_ERR(dwsmmio->clk); + ret = clk_prepare_enable(dwsmmio->clk); + if (ret) + return ret; + + dws->bus_num = pdev->id; + dws->max_freq = clk_get_rate(dwsmmio->clk); + + device_property_read_u32(&pdev->dev, "reg-io-width", + &dws->reg_io_width); + + num_cs = 4; + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + dws->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < dws->num_cs; i++) { + int cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio 
== -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } + + init_func = device_get_match_data(&pdev->dev); + if (init_func) { + ret = init_func(pdev, dwsmmio); + if (ret) + goto out; + } + + ret = chip3_spi_add_host(&pdev->dev, dws); + if (ret) + goto out; + + platform_set_drvdata(pdev, dwsmmio); + + return 0; +out: + clk_disable_unprepare(dwsmmio->clk); + return ret; +} + +static int chip3_spi_mmio_remove(struct platform_device *pdev) +{ + struct chip3_spi_mmio *dwsmmio = platform_get_drvdata(pdev); + + chip3_spi_remove_host(&dwsmmio->dws); + clk_disable_unprepare(dwsmmio->clk); + + return 0; +} + +static const struct of_device_id chip3_spi_mmio_of_match[] = { + { .compatible = "sunway,chip3-spi", }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, chip3_spi_mmio_of_match); + +static struct platform_driver chip3_spi_mmio_driver = { + .probe = chip3_spi_mmio_probe, + .remove = chip3_spi_mmio_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = chip3_spi_mmio_of_match, + }, +}; +module_platform_driver(chip3_spi_mmio_driver); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for Sunway CHIP3"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.c b/drivers/spi/spi-chip3.c new file mode 100644 index 000000000000..8186c84eca8c --- /dev/null +++ b/drivers/spi/spi-chip3.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SUNWAY CHIP3 SPI core controller driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-chip3.h" + +/* Slave spi_dev related */ +struct chip_data { + u8 tmode; /* TR/TO/RO/EEPROM */ + u8 type; /* SPI/SSP/MicroWire */ + + u8 poll_mode; /* 1 means use poll mode */ + + u16 clk_div; /* baud rate divider */ + u32 speed_hz; /* baud rate */ + void (*cs_control)(u32 command); +}; + +static void chip3_spi_handle_err(struct spi_controller *master, + struct spi_message *msg) +{ + struct chip3_spi *dws = spi_controller_get_devdata(master); + + spi_reset_chip(dws); +} + +static size_t chip3_spi_max_length(struct spi_device *spi) +{ + struct chip3_spi *dws = spi_controller_get_devdata(spi->master); + + return dws->fifo_len; +} + +static int chip3_spi_transfer_one_message(struct spi_controller *master, + struct spi_message *m) +{ + struct chip3_spi *dws = spi_controller_get_devdata(master); + struct spi_transfer *t = NULL; + u16 clk_div; + u32 freq; + u32 speed_hz; + u32 status; + u32 len = 0; + int ret = 0; + int i = 0; + + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed. 
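+	 * The DesignWare-style baud-rate divider does not support odd
+	 * values, so clk_div below is rounded up to an even number with
+	 * "& 0xfffe".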
 */
+	freq = clamp(m->spi->max_speed_hz, 0U, dws->max_freq);
+	clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe;
+	speed_hz = dws->max_freq / clk_div;
+
+	if (dws->current_freq != speed_hz) {
+		spi_set_clk(dws, clk_div);
+		dws->current_freq = speed_hz;
+	}
+
+	dws->n_bytes = 1;
+
+	/* For poll mode just disable all interrupts */
+	spi_mask_intr(dws, 0xff);
+
+	chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_RECEIVE);
+
+	spi_enable_chip(dws, 1);
+
+	list_for_each_entry(t, &m->transfers, transfer_list) {
+		len += t->len;
+		/* Bail out if the message would overflow the FIFO buffer */
+		if (len > dws->fifo_len) {
+			pr_err("SPI transfer overflow.\n");
+			m->actual_length = 0;
+			m->status = -EIO;
+			ret = -EIO;
+			goto way_out;
+		}
+
+		/* Append this transfer after the data copied so far */
+		if (t->tx_buf)
+			memcpy(&dws->buf[len - t->len], t->tx_buf, t->len);
+		else
+			memset(&dws->buf[len - t->len], 0, t->len);
+	}
+
+	chip3_writel(dws, CHIP3_SPI_SER, 0x0);
+	for (i = 0; i < len; i++)
+		chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]);
+	chip3_writel(dws, CHIP3_SPI_SER, BIT(m->spi->chip_select));
+
+	do {
+		status = chip3_readl(dws, CHIP3_SPI_SR);
+	} while (status & SR_BUSY);
+
+	list_for_each_entry(t, &m->transfers, transfer_list) {
+		if (t->rx_buf) {
+			for (i = 0; i < t->len; i++, t->rx_buf += 1)
+				*(u8 *)t->rx_buf = chip3_readl(dws, CHIP3_SPI_DR);
+		} else {
+			for (i = 0; i < t->len; i++)
+				chip3_readl(dws, CHIP3_SPI_DR);
+		}
+	}
+
+	m->actual_length = len;
+	m->status = 0;
+	spi_finalize_current_message(master);
+
+way_out:
+	return ret;
+}
+
+static int chip3_spi_adjust_mem_op_size(struct spi_mem *mem,
+					struct spi_mem_op *op)
+{
+	struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller);
+	size_t len;
+
+	len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+
+	op->data.nbytes = min((size_t)op->data.nbytes, (dws->fifo_len - len));
+	if (!op->data.nbytes)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int chip3_spi_init_mem_buf(struct chip3_spi *dws,
+				  const struct spi_mem_op *op)
+{
+	int ret = 0;
+	int i, j, len;
+
+	/* Calculate the total length of the transfer. */
+	len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+
+	/* Bail out if the op would overflow the FIFO buffer */
+	if (len + op->data.nbytes > dws->fifo_len) {
+		ret = -EIO;
+		goto way_out;
+	}
+
+	/*
+	 * Collect the operation code, address and dummy bytes into the single
+	 * buffer. If it's a transfer with data to be sent, also copy it into
+	 * the single buffer.
+	 */
+	for (i = 0; i < sizeof(op->cmd.opcode); i++)
+		dws->buf[i] = op->cmd.opcode;
+	for (j = 0; j < op->addr.nbytes; i++, j++)
+		dws->buf[i] = op->addr.val >> (8 * (op->addr.nbytes - i));
+	for (j = 0; j < op->dummy.nbytes; i++, j++)
+		dws->buf[i] = 0xff;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT) {
+		memcpy(&dws->buf[i], op->data.buf.out, op->data.nbytes);
+		len += op->data.nbytes;
+	}
+
+	dws->tx_len = len;
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		dws->rx = op->data.buf.in;
+		dws->rx_len = op->data.nbytes;
+	} else {
+		dws->rx = NULL;
+		dws->rx_len = 0;
+	}
+
+way_out:
+	return ret;
+}
+
+static int chip3_spi_exec_mem_op(struct spi_mem *mem,
+				 const struct spi_mem_op *op)
+{
+	struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller);
+	u16 clk_div;
+	int ret = 0;
+	int i;
+	unsigned short value;
+	u32 freq;
+	u32 speed_hz;
+
+	ret = chip3_spi_init_mem_buf(dws, op);
+	if (ret)
+		return ret;
+
+	spi_enable_chip(dws, 0);
+
+	/* Handle per transfer options for bpw and speed.
*/ + freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_freq); + clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe; + speed_hz = dws->max_freq / clk_div; + + if (dws->current_freq != speed_hz) { + spi_set_clk(dws, clk_div); + dws->current_freq = speed_hz; + } + + dws->n_bytes = 1; + + /* For poll mode just disable all interrupts */ + spi_mask_intr(dws, 0xff); + + if ((dws->tx_len != 0) && (dws->rx_len != 0)) { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_EEPROM_READ); + chip3_writel(dws, CHIP3_SPI_CTRL1, (dws->rx_len - 1)); + } else { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_ONLY); + } + + spi_enable_chip(dws, 1); + + chip3_writel(dws, CHIP3_SPI_SER, 0x0); + for (i = 0; i < dws->tx_len; i++) + chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]); + chip3_writel(dws, CHIP3_SPI_SER, BIT(mem->spi->chip_select)); + + value = chip3_readl(dws, CHIP3_SPI_SR); + while (value & SR_BUSY) + value = chip3_readl(dws, CHIP3_SPI_SR); + + for (i = 0; i < dws->rx_len; dws->rx += dws->n_bytes, i++) + *(u8 *)dws->rx = chip3_readl(dws, CHIP3_SPI_DR); + + return ret; +} + +/* This may be called twice for each spi dev */ +static int chip3_spi_setup(struct spi_device *spi) +{ + struct chip3_spi_chip *chip_info = NULL; + struct chip_data *chip; + u32 poll_mode = 0; + struct device_node *np = spi->dev.of_node; + + /* Only alloc on first setup */ + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + /* + * Protocol drivers may change the chip settings, so... + * if chip_info exists, use it + */ + chip_info = spi->controller_data; + + /* chip_info doesn't always exist */ + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } else { + if (np) { + of_property_read_u32(np, "poll_mode", &poll_mode); + chip->poll_mode = poll_mode; + } + + } + + chip->tmode = SPI_TMOD_TR; + return 0; +} + +static void chip3_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +/* Restart the controller, disable all interrupts, clean rx fifo */ +static void spi_hw_init(struct device *dev, struct chip3_spi *dws) +{ + spi_reset_chip(dws); + + /* + * Try to detect the FIFO depth if not set by interface driver, + * the depth could be from 2 to 256 from HW spec + */ + if (!dws->fifo_len) { + u32 fifo; + + for (fifo = 1; fifo < 256; fifo++) { + chip3_writel(dws, CHIP3_SPI_TXFLTR, fifo); + if (fifo != chip3_readl(dws, CHIP3_SPI_TXFLTR)) + break; + } + chip3_writel(dws, CHIP3_SPI_TXFLTR, 0); + + dws->fifo_len = (fifo == 1) ? 
0 : fifo; + dev_info(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); + } +} + +static const struct spi_controller_mem_ops chip3_mem_ops = { + .adjust_op_size = chip3_spi_adjust_mem_op_size, + .exec_op = chip3_spi_exec_mem_op, +}; + + +int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws) +{ + struct spi_controller *master; + int ret; + + BUG_ON(dws == NULL); + + master = spi_alloc_master(dev, 0); + if (!master) + return -ENOMEM; + + dws->master = master; + dws->type = SSI_MOTO_SPI; + + spi_controller_set_devdata(master, dws); + + master->mode_bits = SPI_CPOL | SPI_CPHA; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->bus_num = dws->bus_num; + master->num_chipselect = dws->num_cs; + master->setup = chip3_spi_setup; + master->cleanup = chip3_spi_cleanup; + master->transfer_one_message = chip3_spi_transfer_one_message; + master->handle_err = chip3_spi_handle_err; + master->max_speed_hz = dws->max_freq; + master->dev.of_node = dev->of_node; + master->flags = SPI_CONTROLLER_GPIO_SS; + master->max_transfer_size = chip3_spi_max_length; + master->max_message_size = chip3_spi_max_length; + + master->mem_ops = &chip3_mem_ops; + + /* Basic HW init */ + spi_hw_init(dev, dws); + + ret = devm_spi_register_controller(dev, master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + spi_enable_chip(dws, 0); + free_irq(dws->irq, master); + } + + return 0; +} +EXPORT_SYMBOL_GPL(chip3_spi_add_host); + +void chip3_spi_remove_host(struct chip3_spi *dws) +{ + spi_shutdown_chip(dws); + + free_irq(dws->irq, dws->master); +} +EXPORT_SYMBOL_GPL(chip3_spi_remove_host); + +int chip3_spi_suspend_host(struct chip3_spi *dws) +{ + int ret; + + ret = spi_controller_suspend(dws->master); + if (ret) + return ret; + + spi_shutdown_chip(dws); + return 0; +} +EXPORT_SYMBOL_GPL(chip3_spi_suspend_host); + +int chip3_spi_resume_host(struct chip3_spi *dws) +{ + int ret; + + spi_hw_init(&dws->master->dev, dws); + ret = spi_controller_resume(dws->master); + if (ret) + dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(chip3_spi_resume_host); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Driver for Sunway CHIP3 SPI controller core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.h b/drivers/spi/spi-chip3.h new file mode 100644 index 000000000000..88e49a9091a5 --- /dev/null +++ b/drivers/spi/spi-chip3.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CHIP3_SPI_HEADER_H +#define CHIP3_SPI_HEADER_H + +#include +#include +#include +#include + +/* Register offsets */ +#define CHIP3_SPI_CTRL0 (0x00<<7) +#define CHIP3_SPI_CTRL1 (0x04<<7) +#define CHIP3_SPI_SSIENR (0x08<<7) +#define CHIP3_SPI_MWCR (0x0c<<7) +#define CHIP3_SPI_SER (0x10<<7) +#define CHIP3_SPI_BAUDR (0x14<<7) +#define CHIP3_SPI_TXFLTR (0x18<<7) +#define CHIP3_SPI_RXFLTR (0x1c<<7) +#define CHIP3_SPI_TXFLR (0x20<<7) +#define CHIP3_SPI_RXFLR (0x24<<7) +#define CHIP3_SPI_SR (0x28<<7) +#define CHIP3_SPI_IMR (0x2c<<7) +#define CHIP3_SPI_ISR (0x30<<7) +#define CHIP3_SPI_RISR (0x34<<7) +#define CHIP3_SPI_TXOICR (0x38<<7) +#define CHIP3_SPI_RXOICR (0x3c<<7) +#define CHIP3_SPI_RXUICR (0x40<<7) +#define CHIP3_SPI_MSTICR (0x44<<7) +#define CHIP3_SPI_ICR (0x48<<7) +#define CHIP3_SPI_DMACR (0x4c<<7) +#define CHIP3_SPI_DMATDLR (0x50<<7) +#define CHIP3_SPI_DMARDLR (0x54<<7) +#define CHIP3_SPI_IDR (0x58<<7) +#define CHIP3_SPI_VERSION (0x5c<<7) +#define CHIP3_SPI_DR (0x60<<7) + +/* Bit fields in CTRLR0 */ +#define SPI_DFS_OFFSET 0 
+ +#define SPI_FRF_OFFSET 4 +#define SPI_FRF_SPI 0x0 +#define SPI_FRF_SSP 0x1 +#define SPI_FRF_MICROWIRE 0x2 +#define SPI_FRF_RESV 0x3 + +#define SPI_MODE_OFFSET 6 +#define SPI_SCPH_OFFSET 6 +#define SPI_SCOL_OFFSET 7 + +#define SPI_TMOD_OFFSET 8 +#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) +#define SPI_TMOD_TR 0x0 /* xmit & recv */ +#define SPI_TMOD_TO 0x1 /* xmit only */ +#define SPI_TMOD_RO 0x2 /* recv only */ +#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ + +#define SPI_SLVOE_OFFSET 10 +#define SPI_SRL_OFFSET 11 +#define SPI_CFS_OFFSET 12 + +/* Bit fields in SR, 7 bits */ +#define SR_MASK 0x7f /* cover 7 bits */ +#define SR_BUSY (1 << 0) +#define SR_TF_NOT_FULL (1 << 1) +#define SR_TF_EMPT (1 << 2) +#define SR_RF_NOT_EMPT (1 << 3) +#define SR_RF_FULL (1 << 4) +#define SR_TX_ERR (1 << 5) +#define SR_DCOL (1 << 6) + +/* Bit fields in ISR, IMR, RISR, 7 bits */ +#define SPI_INT_TXEI (1 << 0) +#define SPI_INT_TXOI (1 << 1) +#define SPI_INT_RXUI (1 << 2) +#define SPI_INT_RXOI (1 << 3) +#define SPI_INT_RXFI (1 << 4) +#define SPI_INT_MSTI (1 << 5) + +/* Bit fields in DMACR */ +#define SPI_DMA_RDMAE (1 << 0) +#define SPI_DMA_TDMAE (1 << 1) + +/* TX RX interrupt level threshold, max can be 256 */ +#define SPI_INT_THRESHOLD 32 + +/* The depth of the FIFO buffer is 256, so the max transfer length is 256. */ +#define MAX_LEN 256 + +/* The mode of spi controller. */ +#define SPI_TRANSMIT_RECEIVE 0x0c7 +#define SPI_EEPROM_READ 0x3c7 +#define SPI_TRANSMIT_ONLY 0x1c7 + +enum chip3_ssi_type { + SSI_MOTO_SPI = 0, + SSI_TI_SSP, + SSI_NS_MICROWIRE, +}; + +struct chip3_spi; + +struct chip3_spi { + struct spi_controller *master; + enum chip3_ssi_type type; + + void __iomem *regs; + unsigned long paddr; + int irq; + u32 fifo_len; /* depth of the FIFO buffer */ + u32 max_freq; /* max bus freq supported */ + + u32 reg_io_width; /* DR I/O width in bytes */ + u16 bus_num; + u16 num_cs; /* supported slave numbers */ + void (*set_cs)(struct spi_device *spi, bool enable); + + /* Current message transfer state info */ + size_t len; + void *tx; + unsigned int tx_len; + void *rx; + unsigned int rx_len; + u8 n_bytes; /* current is a 1/2 bytes op */ + u32 current_freq; /* frequency in hz */ + + u8 buf[MAX_LEN]; + + /* Bus interface info */ + void *priv; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif +}; + +static inline u32 chip3_readl(struct chip3_spi *dws, u32 offset) +{ + return __raw_readl(dws->regs + offset); +} + +static inline u16 chip3_readw(struct chip3_spi *dws, u32 offset) +{ + return __raw_readw(dws->regs + offset); +} + +static inline void chip3_writel(struct chip3_spi *dws, u32 offset, u32 val) +{ + __raw_writel(val, dws->regs + offset); +} + +static inline void chip3_writew(struct chip3_spi *dws, u32 offset, u16 val) +{ + __raw_writew(val, dws->regs + offset); +} + +static inline u32 chip3_read_io_reg(struct chip3_spi *dws, u32 offset) +{ + switch (dws->reg_io_width) { + case 2: + return chip3_readw(dws, offset); + case 4: + default: + return chip3_readl(dws, offset); + } +} + +static inline void chip3_write_io_reg(struct chip3_spi *dws, u32 offset, u32 val) +{ + switch (dws->reg_io_width) { + case 2: + chip3_writew(dws, offset, val); + break; + case 4: + default: + chip3_writel(dws, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct chip3_spi *dws, int enable) +{ + chip3_writel(dws, CHIP3_SPI_SSIENR, (enable ? 
1 : 0)); +} + +static inline void spi_set_clk(struct chip3_spi *dws, u16 div) +{ + chip3_writel(dws, CHIP3_SPI_BAUDR, div); +} + +/* Disable IRQ bits */ +static inline void spi_mask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) & ~mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* Enable IRQ bits */ +static inline void spi_umask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) | mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* + * This does disable the SPI controller, interrupts, and re-enable the + * controller back. Transmit and receive FIFO buffers are cleared when the + * device is disabled. + */ +static inline void spi_reset_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_mask_intr(dws, 0xff); + spi_enable_chip(dws, 1); +} + +static inline void spi_shutdown_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_set_clk(dws, 0); +} + +/* + * Each SPI slave device to work with chip3_api controller should + * has such a structure claiming its working mode (poll or PIO/DMA), + * which can be save in the "controller_data" member of the + * struct spi_device. + */ +struct chip3_spi_chip { + u8 poll_mode; /* 1 for controller polling mode */ + u8 type; /* SPI/SSP/MicroWire */ + u8 chip_select; + void (*cs_control)(u32 command); +}; + +extern int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws); +extern void chip3_spi_remove_host(struct chip3_spi *dws); +extern int chip3_spi_suspend_host(struct chip3_spi *dws); +extern int chip3_spi_resume_host(struct chip3_spi *dws); + +/* platform related setup */ +extern int chip3_spi_mid_init(struct chip3_spi *dws); /* Intel MID platforms */ +#endif /* CHIP3_SPI_HEADER_H */ -- Gitee From 0076772030654cb87bed528b68a8a08798252ca5 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:53:47 +0800 Subject: [PATCH 366/953] anolis: drivers: tty: add sw64 support ANBZ: #4688 Add tty drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/tty/serial/8250/8250_sunway.c | 786 ++++++++++++++++++++++++++ drivers/tty/serial/8250/Kconfig | 7 + drivers/tty/serial/8250/Makefile | 1 + 3 files changed, 794 insertions(+) create mode 100644 drivers/tty/serial/8250/8250_sunway.c diff --git a/drivers/tty/serial/8250/8250_sunway.c b/drivers/tty/serial/8250/8250_sunway.c new file mode 100644 index 000000000000..9e3db232c832 --- /dev/null +++ b/drivers/tty/serial/8250/8250_sunway.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Synopsys SUNWAY 8250 driver. + * + * Copyright 2011 Picochip, Jamie Iles. + * Copyright 2013 Intel Corporation + * + * The Synopsys SUNWAY 8250 has an extra feature whereby it detects if the + * LCR is written whilst busy. If it is, then a busy detect interrupt is + * raised, the LCR needs to be rewritten and the uart status register read. 
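+ *
+ * This mirrors the busy-detect handling of the upstream 8250_dw
+ * driver: see sunway8250_check_lcr() and the UART_IIR_BUSY branch
+ * in sunway8250_handle_irq() below.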
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "8250.h" + +/* Offsets for the DesignWare specific registers */ +#define SUNWAY_UART_USR 0x1f /* UART Status Register */ +#define SUNWAY_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ +#define SUNWAY_UART_CPR 0xf4 /* Component Parameter Register */ +#define SUNWAY_UART_UCV 0xf8 /* UART Component Version */ + +/* Component Parameter Register bits */ +#define SUNWAY_UART_CPR_ABP_DATA_WIDTH (3 << 0) +#define SUNWAY_UART_CPR_AFCE_MODE (1 << 4) +#define SUNWAY_UART_CPR_THRE_MODE (1 << 5) +#define SUNWAY_UART_CPR_SIR_MODE (1 << 6) +#define SUNWAY_UART_CPR_SIR_LP_MODE (1 << 7) +#define SUNWAY_UART_CPR_ADDITIONAL_FEATURES (1 << 8) +#define SUNWAY_UART_CPR_FIFO_ACCESS (1 << 9) +#define SUNWAY_UART_CPR_FIFO_STAT (1 << 10) +#define SUNWAY_UART_CPR_SHADOW (1 << 11) +#define SUNWAY_UART_CPR_ENCODED_PARMS (1 << 12) +#define SUNWAY_UART_CPR_DMA_EXTRA (1 << 13) +#define SUNWAY_UART_CPR_FIFO_MODE (0xff << 16) +/* Helper for fifo size calculation */ +#define SUNWAY_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16) + +/* DesignWare specific register fields */ +#define SUNWAY_UART_MCR_SIRE BIT(6) + +struct sunway8250_data { + u8 usr_reg; + u8 dlf_size; + int line; + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct reset_control *rst; + struct uart_8250_dma dma; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +static inline u32 sunway8250_readl_ext(struct uart_port *p, int offset) +{ + if (p->iotype == UPIO_MEM32BE) + return ioread32be(p->membase + offset); + return readl(p->membase + offset); +} + +static inline void sunway8250_writel_ext(struct uart_port *p, int offset, u32 reg) +{ + if (p->iotype == UPIO_MEM32BE) + iowrite32be(reg, p->membase + offset); + else + writel(reg, p->membase + offset); +} + +static inline int sunway8250_modify_msr(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Override any modem control signals if needed */ + if (offset == UART_MSR) { + value |= d->msr_mask_on; + value &= ~d->msr_mask_off; + } + + return value; +} + +static void sunway8250_force_idle(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + + serial8250_clear_and_reinit_fifos(up); + (void)p->serial_in(p, UART_RX); +} + +static void sunway8250_check_lcr(struct uart_port *p, int value) +{ + void __iomem *offset = p->membase + (UART_LCR << p->regshift); + int tries = 1000; + + /* Make sure LCR write wasn't ignored */ + while (tries--) { + unsigned int lcr = p->serial_in(p, UART_LCR); + + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) + return; + + sunway8250_force_idle(p); + +#ifdef CONFIG_64BIT + if (p->type == PORT_OCTEON) { + __raw_writeq(value & 0xff, offset); + continue; + } +#endif + if (p->iotype == UPIO_MEM32) + writel(value, offset); + else if (p->iotype == UPIO_MEM32BE) + iowrite32be(value, offset); + else + writeb(value, offset); + } + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ +} + +/* Returns once the transmitter is empty or we run out of retries */ +static void sunway8250_tx_wait_empty(struct uart_port *p) +{ + unsigned int tries = 20000; + unsigned int delay_threshold = tries - 1000; + unsigned int lsr; + + while (tries--) { + lsr = readb(p->membase + (UART_LSR << p->regshift)); + if (lsr & UART_LSR_TEMT) 
+ break; + + /* + * The device is first given a chance to empty without delay, + * to avoid slowdowns at high bitrates. If after 1000 tries + * the buffer has still not emptied, allow more time for low- + * speed links. + */ + if (tries < delay_threshold) + udelay(1); + } +} + +static void sunway8250_serial_out38x(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Allow the TX to drain before we reconfigure */ + if (offset == UART_LCR) + sunway8250_tx_wait_empty(p); + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + + +static void sunway8250_serial_out(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in(struct uart_port *p, int offset) +{ + unsigned int value = readb(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +#ifdef CONFIG_64BIT +static unsigned int sunway8250_serial_inq(struct uart_port *p, int offset) +{ + unsigned int value; + + value = (u8)__raw_readq(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_outq(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + value &= 0xff; + __raw_writeq(value, p->membase + (offset << p->regshift)); + /* Read back to ensure register write ordering. */ + __raw_readq(p->membase + (UART_LCR << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} +#endif /* CONFIG_64BIT */ + +static void sunway8250_serial_out32(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writel(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32(struct uart_port *p, int offset) +{ + unsigned int value = readl(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_out32be(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + iowrite32be(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32be(struct uart_port *p, int offset) +{ + unsigned int value = ioread32be(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + + +static int sunway8250_handle_irq(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + struct sunway8250_data *d = p->private_data; + unsigned int iir = p->serial_in(p, UART_IIR); + unsigned int status; + unsigned long flags; + + /* + * There are ways to get Designware-based UARTs into a state where + * they are asserting UART_IIR_RX_TIMEOUT but there is no actual + * data available. If we see such a case then we'll do a bogus + * read. If we don't do this then the "RX TIMEOUT" interrupt will + * fire forever. + * + * This problem has only been observed so far when not in DMA mode + * so we limit the workaround only to non-DMA mode. 
+ */ + if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) { + spin_lock_irqsave(&p->lock, flags); + status = p->serial_in(p, UART_LSR); + + if (!(status & (UART_LSR_DR | UART_LSR_BI))) + (void) p->serial_in(p, UART_RX); + + spin_unlock_irqrestore(&p->lock, flags); + } + + if (serial8250_handle_irq(p, iir)) + return 1; + + if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { + /* Clear the USR */ + (void)p->serial_in(p, d->usr_reg); + + return 1; + } + + return 0; +} + +static void +sunway8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +{ + if (!state) + pm_runtime_get_sync(port->dev); + + serial8250_do_pm(port, state, old); + + if (state) + pm_runtime_put_sync_suspend(port->dev); +} + +static void sunway8250_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud = tty_termios_baud_rate(termios); + struct sunway8250_data *d = p->private_data; + long rate; + int ret; + + if (IS_ERR(d->clk)) + goto out; + + clk_disable_unprepare(d->clk); + rate = clk_round_rate(d->clk, baud * 16); + if (rate < 0) + ret = rate; + else if (rate == 0) + ret = -ENOENT; + else + ret = clk_set_rate(d->clk, rate); + clk_prepare_enable(d->clk); + + if (!ret) + p->uartclk = rate; + +out: + p->status &= ~UPSTAT_AUTOCTS; + if (termios->c_cflag & CRTSCTS) + p->status |= UPSTAT_AUTOCTS; + + serial8250_do_set_termios(p, termios, old); +} + +static void sunway8250_set_ldisc(struct uart_port *p, struct ktermios *termios) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int mcr = p->serial_in(p, UART_MCR); + + if (up->capabilities & UART_CAP_IRDA) { + if (termios->c_line == N_IRDA) + mcr |= SUNWAY_UART_MCR_SIRE; + else + mcr &= ~SUNWAY_UART_MCR_SIRE; + + p->serial_out(p, UART_MCR, mcr); + } + serial8250_do_set_ldisc(p, termios); +} + +/* + * sunway8250_fallback_dma_filter will prevent the UART from getting just any free + * channel on platforms that have DMA engines, but don't have any channels + * assigned to the UART. + * + * REVISIT: This is a work around for limitation in the DMA Engine API. Once the + * core problem is fixed, this function is no longer needed. 
+ */ +static bool sunway8250_fallback_dma_filter(struct dma_chan *chan, void *param) +{ + return false; +} + +static bool sunway8250_idma_filter(struct dma_chan *chan, void *param) +{ + return param == chan->device->dev; +} + +/* + * divisor = div(I) + div(F) + * "I" means integer, "F" means fractional + * quot = div(I) = clk / (16 * baud) + * frac = div(F) * 2^dlf_size + * + * let rem = clk % (16 * baud) + * we have: div(F) * (16 * baud) = rem + * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud) + */ +static unsigned int sunway8250_get_divisor(struct uart_port *p, + unsigned int baud, + unsigned int *frac) +{ + unsigned int quot, rem, base_baud = baud * 16; + struct sunway8250_data *d = p->private_data; + + quot = p->uartclk / base_baud; + rem = p->uartclk % base_baud; + *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud); + + return quot; +} + +static void sunway8250_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + sunway8250_writel_ext(p, SUNWAY_UART_DLF, quot_frac); + serial8250_do_set_divisor(p, baud, quot, quot_frac); +} + +static void sunway8250_quirks(struct uart_port *p, struct sunway8250_data *data) +{ + if (p->dev->of_node) { + struct device_node *np = p->dev->of_node; + int id; + + /* get index of serial line, if found in DT aliases */ + id = of_alias_get_id(np, "serial"); + if (id >= 0) + p->line = id; +#ifdef CONFIG_64BIT + if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) { + p->serial_in = sunway8250_serial_inq; + p->serial_out = sunway8250_serial_outq; + p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; + p->type = PORT_OCTEON; + data->usr_reg = 0x27; + data->skip_autocfg = true; + } +#endif + if (of_device_is_big_endian(p->dev->of_node)) { + p->iotype = UPIO_MEM32BE; + p->serial_in = sunway8250_serial_in32be; + p->serial_out = sunway8250_serial_out32be; + } + if (of_device_is_compatible(np, "marvell,armada-38x-uart")) + p->serial_out = sunway8250_serial_out38x; + + } else if (acpi_dev_present("APMC0D08", NULL, -1)) { + p->iotype = UPIO_MEM32; + p->regshift = 2; + p->serial_in = sunway8250_serial_in32; + data->uart_16550_compatible = true; + } + + /* Platforms with iDMA 64-bit */ + if (platform_get_resource_byname(to_platform_device(p->dev), + IORESOURCE_MEM, "lpss_priv")) { + data->dma.rx_param = p->dev->parent; + data->dma.tx_param = p->dev->parent; + data->dma.fn = sunway8250_idma_filter; + } +} + +static void sunway8250_setup_port(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg; + + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. 
+ */ + reg = sunway8250_readl_ext(p, SUNWAY_UART_UCV); + if (!reg) + return; + + dev_dbg(p->dev, "Designware UART version %c.%c%c\n", + (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + + sunway8250_writel_ext(p, SUNWAY_UART_DLF, ~0U); + reg = sunway8250_readl_ext(p, SUNWAY_UART_DLF); + sunway8250_writel_ext(p, SUNWAY_UART_DLF, 0); + + if (reg) { + struct sunway8250_data *d = p->private_data; + + d->dlf_size = fls(reg); + p->get_divisor = sunway8250_get_divisor; + p->set_divisor = sunway8250_set_divisor; + } + + reg = sunway8250_readl_ext(p, SUNWAY_UART_CPR); + if (!reg) + return; + + /* Select the type based on fifo */ + if (reg & SUNWAY_UART_CPR_FIFO_MODE) { + p->type = PORT_16550A; + p->flags |= UPF_FIXED_TYPE; + p->fifosize = SUNWAY_UART_CPR_FIFO_SIZE(reg); + up->capabilities = UART_CAP_FIFO; + } + + if (reg & SUNWAY_UART_CPR_AFCE_MODE) + up->capabilities |= UART_CAP_AFE; + + if (reg & SUNWAY_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; +} + +static int sunway8250_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + struct uart_port *p = &uart.port; + struct device *dev = &pdev->dev; + struct sunway8250_data *data; + int err; + u32 val; + + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + if (irq < 0) { + if (irq != -EPROBE_DEFER) + dev_err(dev, "cannot get irq\n"); + irq = 0; // Set serial poll mode + } + + spin_lock_init(&p->lock); + p->mapbase = regs->start; + p->irq = irq; + p->handle_irq = sunway8250_handle_irq; + p->pm = sunway8250_do_pm; + p->type = PORT_8250; + p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; + p->dev = dev; + p->iotype = UPIO_MEM; + p->serial_in = sunway8250_serial_in; + p->serial_out = sunway8250_serial_out; + p->set_ldisc = sunway8250_set_ldisc; + p->set_termios = sunway8250_set_termios; + + p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); + if (!p->membase) + return -ENOMEM; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dma.fn = sunway8250_fallback_dma_filter; + data->usr_reg = SUNWAY_UART_USR; + p->private_data = data; + + data->uart_16550_compatible = device_property_read_bool(dev, + "snps,uart-16550-compatible"); + + err = device_property_read_u32(dev, "reg-shift", &val); + if (!err) + p->regshift = val; + + err = device_property_read_u32(dev, "reg-io-width", &val); + if (!err && val == 4) { + p->iotype = UPIO_MEM32; + p->serial_in = sunway8250_serial_in32; + p->serial_out = sunway8250_serial_out32; + } + + if (device_property_read_bool(dev, "dcd-override")) { + /* Always report DCD as active */ + data->msr_mask_on |= UART_MSR_DCD; + data->msr_mask_off |= UART_MSR_DDCD; + } + + if (device_property_read_bool(dev, "dsr-override")) { + /* Always report DSR as active */ + data->msr_mask_on |= UART_MSR_DSR; + data->msr_mask_off |= UART_MSR_DDSR; + } + + if (device_property_read_bool(dev, "cts-override")) { + /* Always report CTS as active */ + data->msr_mask_on |= UART_MSR_CTS; + data->msr_mask_off |= UART_MSR_DCTS; + } + + if (device_property_read_bool(dev, "ri-override")) { + /* Always report Ring indicator as inactive */ + data->msr_mask_off |= UART_MSR_RI; + data->msr_mask_off |= UART_MSR_TERI; + } + + /* Always ask for fixed clock rate from a property. */ + device_property_read_u32(dev, "clock-frequency", &p->uartclk); + + /* If there is separate baudclk, get the rate from it. 
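+	 * The lookup order is: a clock named "baudclk", then the device's
+	 * default unnamed clock, keeping any rate already read from the
+	 * "clock-frequency" property as the fallback; a deferred lookup of
+	 * either clock defers the whole probe.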
*/ + data->clk = devm_clk_get(dev, "baudclk"); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER) + data->clk = devm_clk_get(dev, NULL); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!IS_ERR_OR_NULL(data->clk)) { + err = clk_prepare_enable(data->clk); + if (err) + dev_warn(dev, "could not enable optional baudclk: %d\n", + err); + else + p->uartclk = clk_get_rate(data->clk); + } + + /* If no clock rate is defined, fail. */ + if (!p->uartclk) { + dev_err(dev, "clock rate not defined\n"); + err = -EINVAL; + goto err_clk; + } + + data->pclk = devm_clk_get(dev, "apb_pclk"); + if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_clk; + } + if (!IS_ERR(data->pclk)) { + err = clk_prepare_enable(data->pclk); + if (err) { + dev_err(dev, "could not enable apb_pclk\n"); + goto err_clk; + } + } + + data->rst = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(data->rst)) { + err = PTR_ERR(data->rst); + goto err_pclk; + } + reset_control_deassert(data->rst); + + sunway8250_quirks(p, data); + + /* If the Busy Functionality is not implemented, don't handle it */ + if (data->uart_16550_compatible) + p->handle_irq = NULL; + + if (!data->skip_autocfg) + sunway8250_setup_port(p); + + /* If we have a valid fifosize, try hooking up DMA */ + if (p->fifosize) { + data->dma.rxconf.src_maxburst = p->fifosize / 4; + data->dma.txconf.dst_maxburst = p->fifosize / 4; + uart.dma = &data->dma; + } + + data->line = serial8250_register_8250_port(&uart); + if (data->line < 0) { + err = data->line; + goto err_reset; + } + + platform_set_drvdata(pdev, data); + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +err_reset: + reset_control_assert(data->rst); + +err_pclk: + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + +err_clk: + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + return err; +} + +static int sunway8250_remove(struct platform_device *pdev) +{ + struct sunway8250_data *data = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + serial8250_unregister_port(data->line); + + reset_control_assert(data->rst); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway8250_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->line); + + return 0; +} + +static int sunway8250_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_resume_port(data->line); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int sunway8250_runtime_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + return 0; +} + +static int sunway8250_runtime_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->pclk)) + clk_prepare_enable(data->pclk); + + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); + + return 0; +} +#endif + +static const struct dev_pm_ops sunway8250_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sunway8250_suspend, sunway8250_resume) + SET_RUNTIME_PM_OPS(sunway8250_runtime_suspend, 
sunway8250_runtime_resume, NULL) +}; + +static const struct of_device_id sunway8250_of_match[] = { + { .compatible = "sw6,sunway-apb-uart" }, + { .compatible = "cavium,octeon-3860-uart" }, + { .compatible = "marvell,armada-38x-uart" }, + { .compatible = "renesas,rzn1-uart" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunway8250_of_match); + +static const struct acpi_device_id sunway8250_acpi_match[] = { + { "INT33C4", 0 }, + { "INT33C5", 0 }, + { "INT3434", 0 }, + { "INT3435", 0 }, + { "80860F0A", 0 }, + { "8086228A", 0 }, + { "APMC0D08", 0}, + { "AMD0020", 0 }, + { "AMDI0020", 0 }, + { "BRCM2032", 0 }, + { "HISI0031", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, sunway8250_acpi_match); + +static struct platform_driver sunway8250_platform_driver = { + .driver = { + .name = "sunway-apb-uart", + .pm = &sunway8250_pm_ops, + .of_match_table = sunway8250_of_match, + .acpi_match_table = ACPI_PTR(sunway8250_acpi_match), + }, + .probe = sunway8250_probe, + .remove = sunway8250_remove, +}; + +module_platform_driver(sunway8250_platform_driver); + +MODULE_AUTHOR("Jamie Iles"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver"); +MODULE_ALIAS("platform:sunway-apb-uart"); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index ee17cf5c44c6..e8edd9388d76 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -407,6 +407,13 @@ config SERIAL_8250_DW Selecting this option will enable handling of the extra features present in the Synopsys DesignWare APB UART. +config SERIAL_8250_SUNWAY + tristate "Support for SW6B Builtin Synopsys DesignWare 8250 quirks" + depends on SERIAL_8250 && SW64 + help + Selecting this option will enable handling of the extra features + present in the Synopsys DesignWare APB UART of SW6. + config SERIAL_8250_EM tristate "Support for Emma Mobile integrated serial port" depends on SERIAL_8250 && HAVE_CLK diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index 628b75be312e..8186ea891405 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o obj-$(CONFIG_SERIAL_8250_DFL) += 8250_dfl.o obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o +obj-$(CONFIG_SERIAL_8250_SUNWAY) += 8250_sunway.o obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o -- Gitee From fc24e16e95d5f6cfae68f6bcba2f318c3c8e5b69 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:58 +0800 Subject: [PATCH 367/953] anolis: drivers: usb: add sw64 support ANBZ: #4688 Add usb drivers for SW64. 
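The xHCI handoff quirk added below takes controller ownership from
platform firmware by polling MMIO registers until selected bits reach
an expected state. For review context, here is a minimal sketch of that
handshake pattern, assuming <linux/io.h> and <linux/delay.h>; the
helper name and timeout handling are illustrative, not the exact
implementation used by pci-quirks.c:

	/*
	 * Illustrative sketch: poll a 32-bit MMIO register until
	 * (readl(ptr) & mask) == done or wait_usec expires, checking
	 * every delay_usec. Returns 0 on success, -ETIMEDOUT on timeout.
	 */
	static int poll_handshake(void __iomem *ptr, u32 mask, u32 done,
				  int wait_usec, int delay_usec)
	{
		u32 val;

		do {
			val = readl(ptr);
			if ((val & mask) == done)
				return 0;
			udelay(delay_usec);
			wait_usec -= delay_usec;
		} while (wait_usec > 0);

		return -ETIMEDOUT;
	}
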
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/usb/core/Makefile | 2 +- drivers/usb/host/pci-quirks.c | 127 ++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+), 1 deletion(-) diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile index 7d338e9c0657..8ee58be1fb37 100644 --- a/drivers/usb/core/Makefile +++ b/drivers/usb/core/Makefile @@ -9,7 +9,7 @@ usbcore-y += devio.o notify.o generic.o quirks.o devices.o usbcore-y += phy.o port.o usbcore-$(CONFIG_OF) += of.o -usbcore-$(CONFIG_USB_PCI) += hcd-pci.o +usbcore-$(CONFIG_USB_PCI) += hcd-pci.o usbcore-$(CONFIG_ACPI) += usb-acpi.o ifdef CONFIG_USB_ONBOARD_HUB diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 2665832f9add..498497cace20 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -1283,3 +1283,130 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); + +#ifdef CONFIG_SW64 +#include +#define XHCI_STS_FATAL (1 << 2) +#define XHCI_STS_EINT (1 << 3) +#define XHCI_STS_PORT (1 << 4) +#define XHCI_STS_SRE (1 << 10) +#define STS_RW1C_BITS (XHCI_STS_FATAL | XHCI_STS_EINT | XHCI_STS_PORT | XHCI_STS_SRE) + +static void +fixup_usb_xhci_reset(struct pci_dev *dev) +{ + void __iomem *op_reg_base; + int timeout; + u32 xhci_command; + u32 tmp, val; + void __iomem *base; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long offset; + int ext_cap_offset; + int retries = 3; + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &tmp); + if (tmp & PCI_BASE_ADDRESS_MEM_TYPE_MASK) { + pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val); + offset = (unsigned long)(val) << 32 | (tmp & (~0xf)); + } else + offset = (unsigned long)(tmp & (~0xf)); + + if (offset == 0) + return; + + base = (void *)__va(SW64_PCI_IO_BASE(hose->node, hose->index) | offset); + + ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY); + if (!ext_cap_offset) + goto hc_init; + + val = readl(base + ext_cap_offset); + + if ((dev->vendor == PCI_VENDOR_ID_TI && dev->device == 0x8241) || + (dev->vendor == PCI_VENDOR_ID_RENESAS + && dev->device == 0x0014)) { + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; + writel(val, base + ext_cap_offset); + } + + if (val & XHCI_HC_BIOS_OWNED) { + writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); + + timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, + 0, 1000000, 10); + if (timeout) { + pr_err("xHCI BIOS handoff failed (BIOS bug ?) %08x\n", val); + writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset); + } + } + + val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + val &= XHCI_LEGACY_DISABLE_SMI; + val |= XHCI_LEGACY_SMI_EVENTS; + writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + +hc_init: + if (dev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(dev); + + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); + + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, + 5000000, 10); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW not ready after 5 sec (HC bug?) 
status = 0x%x\n", val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + +retry: + val = readl(op_reg_base + XHCI_STS_OFFSET); + val |= STS_RW1C_BITS; + writel(val, op_reg_base + XHCI_STS_OFFSET); + val = readl(op_reg_base + XHCI_STS_OFFSET); + + if ((val & STS_RW1C_BITS) && retries--) { + pr_err("clear USB Status Register (status = %#x) failed, retry\n", val); + goto retry; + } + + val = readl(op_reg_base + XHCI_CMD_OFFSET); + val &= ~(XHCI_CMD_RUN | XHCI_IRQS); + writel(val, op_reg_base + XHCI_CMD_OFFSET); + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, + XHCI_MAX_HALT_USEC, 125); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW did not halt within %d usec status = 0x%x\n", + XHCI_MAX_HALT_USEC, val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_XHCI, 0, fixup_usb_xhci_reset); +#endif -- Gitee From 371428eedc9c0bae7132f9e13a1db3aa6477f512 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:56:19 +0800 Subject: [PATCH 368/953] anolis: drivers: vfio: add sw64 support ANBZ: #4688 Add vfio drivers for SW64. 
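The Kconfig change below lets VFIO_CONTAINER select the type1 IOMMU
backend on SW64. A minimal userspace sketch of what this enables,
assuming the standard <linux/vfio.h> UAPI and an existing
/dev/vfio/vfio node:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	int main(void)
	{
		int container = open("/dev/vfio/vfio", O_RDWR);

		if (container < 0)
			return 1;
		/* Refuse to run against an unexpected container ABI. */
		if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
			return 1;
		/* Non-zero means the type1 IOMMU backend can be used. */
		if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
			return 1;
		printf("VFIO type1 IOMMU supported\n");
		return 0;
	}
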
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/vfio/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 6bda6dbb4878..d80b6ffefd9d 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -39,7 +39,7 @@ config VFIO_GROUP config VFIO_CONTAINER bool "Support for the VFIO container /dev/vfio/vfio" - select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64) depends on VFIO_GROUP default y help -- Gitee From a8b697ef3fece0e3c77cce2b10d79cdeca6200d5 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Mon, 18 Mar 2024 17:11:13 +0800 Subject: [PATCH 369/953] anolis: sw64: fix a compile error for vrt_sigreturn ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/include/asm/linkage.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/sw_64/include/asm/linkage.h b/arch/sw_64/include/asm/linkage.h index 85b279f6211e..1721753b4d98 100644 --- a/arch/sw_64/include/asm/linkage.h +++ b/arch/sw_64/include/asm/linkage.h @@ -6,4 +6,8 @@ #define SYSCALL_ALIAS(alias, name) \ asm (#alias " = " #name "\n\t.globl " #alias) +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name + #endif /* _ASM_SW64_LINKAGE_H */ -- Gitee From 26942d0f146d181ab15a2ee4543e1ff1b0897bb7 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Mon, 18 Mar 2024 17:11:47 +0800 Subject: [PATCH 370/953] anolis: sw64: fix compile errors for CONFIG_FTRACE=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/kernel/kprobes/kprobes-ftrace.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c index 89d7dba9dc25..a0b33a52a9e4 100644 --- a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c +++ b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c @@ -11,14 +11,22 @@ /* Ftrace callback handler for kprobes */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct pt_regs *regs) + struct ftrace_ops *ops, struct ftrace_regs *fregs) { struct kprobe *p; struct kprobe_ctlblk *kcb; + struct pt_regs *regs; + int bit; + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) + return; + + regs = ftrace_get_regs(fregs); + preempt_disable_notrace(); p = get_kprobe((kprobe_opcode_t *)ip); if (unlikely(!p) || kprobe_disabled(p)) - return; + goto out; kcb = get_kprobe_ctlblk(); if (kprobe_running()) { @@ -37,6 +45,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, } __this_cpu_write(current_kprobe, NULL); } +out: + preempt_enable_notrace(); + ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); -- Gitee From debf56b0eafd92e858f0a084fb45f7b5580e80ab Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Mon, 18 Mar 2024 17:12:57 +0800 Subject: [PATCH 371/953] anolis: sw64: fix compile errors for CONFIG_SW64_CPUAUTOPLUG=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: 
https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/kernel/cpuautoplug.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c index a7571a77a72c..b4ea0ef080d8 100644 --- a/arch/sw_64/kernel/cpuautoplug.c +++ b/arch/sw_64/kernel/cpuautoplug.c @@ -234,7 +234,7 @@ static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall) return (cputime64_t)jiffies_to_usecs(idle_time); } -static inline cputime64_t get_idle_time(cputime64_t *wall) +static inline cputime64_t sw64_get_idle_time(cputime64_t *wall) { unsigned int cpu; u64 idle_time = 0; @@ -378,7 +378,7 @@ static void do_autoplug_timer(struct work_struct *work) goto out; } - cur_idle_time = get_idle_time(&cur_wall_time); + cur_idle_time = sw64_get_idle_time(&cur_wall_time); if (cur_wall_time == 0) cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); @@ -441,11 +441,16 @@ static struct platform_driver platform_driver = { static int __init cpuautoplug_init(void) { int i, ret, delay; - - ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, - &cpuclass_attr_group); - if (ret) - return ret; + struct device *dev_root; + + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + ret = sysfs_create_group(&dev_root->kobj, + &cpuclass_attr_group); + put_device(dev_root); + if (ret) + return ret; + } ret = platform_driver_register(&platform_driver); if (ret) -- Gitee From 74e3b532a8d134444f18d73b524381703f1d1e10 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:43:50 +0800 Subject: [PATCH 372/953] anolis: sw64: fix compile errors for CONFIG_KEXEC_CORE=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/kernel/process.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c index fa58a0de4368..9a887140edef 100644 --- a/arch/sw_64/kernel/process.c +++ b/arch/sw_64/kernel/process.c @@ -103,6 +103,24 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) return 0; } +/* + * Fill in the user structure for a ELF core dump. 
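+ * General-purpose registers r0-r30 are copied to dest[0..30], the PC
+ * to dest[31], and the thread pointer saved in the PCB to dest[32].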
+ * @regs: should be signal_pt_regs() or task_pt_reg(task) + */ +void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *regs) +{ + int i; + struct thread_info *ti; + + ti = (void *)((__u64)regs & ~(THREAD_SIZE - 1)); + + for (i = 0; i < 31; i++) + dest[i] = regs->regs[i]; + dest[31] = regs->pc; + dest[32] = ti->pcb.tp; +} +EXPORT_SYMBOL(sw64_elf_core_copy_regs); + unsigned long arch_randomize_brk(struct mm_struct *mm) { return randomize_page(mm->brk, 0x02000000); -- Gitee From 27ceee8e848f63ccbf474abf2bb2d668fbb070e2 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:46:14 +0800 Subject: [PATCH 373/953] anolis: drivers: virtio: add sw64 support ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/virtio/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 0a53a61231c2..da89d498d14e 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -117,7 +117,7 @@ config VIRTIO_BALLOON config VIRTIO_MEM tristate "Virtio mem driver" - depends on X86_64 || ARM64 + depends on X86_64 || ARM64 || SW64 depends on VIRTIO depends on MEMORY_HOTPLUG depends on MEMORY_HOTREMOVE -- Gitee From 86ebbe6e51da60196f93ba8f3018efdad5d6e3a9 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:49:09 +0800 Subject: [PATCH 374/953] anolis: sw64: fix compile errors for CONFIG_ADVISE_SYSCALLS=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/include/uapi/asm/mman.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h index 15cb7bfee3b1..c83c4b50662a 100644 --- a/arch/sw_64/include/uapi/asm/mman.h +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -2,6 +2,8 @@ #ifndef _UAPI_ASM_SW64_MMAN_H #define _UAPI_ASM_SW64_MMAN_H +#include + #define PROT_READ 0x1 /* page can be read */ #define PROT_WRITE 0x2 /* page can be written */ #define PROT_EXEC 0x4 /* page can be executed */ -- Gitee From 8bd20e201455a3f074b191ab768cc57b9f8f989c Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:54:02 +0800 Subject: [PATCH 375/953] anolis: sw64: remove nid parameter from arch_remove_memory() ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/mm/init.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index ca761b602ab6..1f402809128f 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -306,8 +306,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) return ret; } -void arch_remove_memory(int nid, u64 start, u64 size, - struct vmem_altmap *altmap) +void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; -- Gitee From f4a53e47efae433a3f6a4e7be2cc9501c4268f1e Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:57:32 +0800 Subject: [PATCH 376/953] anolis: sw64: add initial anolis_xuelang_defconfig ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: 
Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/configs/anolis_xuelang_defconfig | 1104 +++++++++++++++++++ 1 file changed, 1104 insertions(+) create mode 100644 arch/sw_64/configs/anolis_xuelang_defconfig diff --git a/arch/sw_64/configs/anolis_xuelang_defconfig b/arch/sw_64/configs/anolis_xuelang_defconfig new file mode 100644 index 000000000000..8c9c4dda69ed --- /dev/null +++ b/arch/sw_64/configs/anolis_xuelang_defconfig @@ -0,0 +1,1104 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_LSM=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_NUMA_BALANCING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_SMP=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=256 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_LIVEPATCH=y +CONFIG_NUMA=y +CONFIG_HZ=100 +CONFIG_BINFMT_MISC=m +CONFIG_USE_OF=y +CONFIG_DMI_SYSFS=y +CONFIG_FW_CFG_SYSFS=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_NFIT=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_JUMP_LABEL=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_ALL is not set +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_IOSCHED_BFQ=y +CONFIG_ZSWAP=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_LRU_GEN=y +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +CONFIG_DAMON_DBGFS=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m 
+CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m 
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m 
+CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_ATM=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_X25=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_NET_SWITCHDEV=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# CONFIG_WIRELESS is not set +CONFIG_RFKILL=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_STUB=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_CONNECTOR=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y 
+CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AHA152X=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_SCSI_FDOMAIN_ISA=m +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_QLOGIC_FAS=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_ET131X=m +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_CAVIUM_PTP=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +# CONFIG_NET_VENDOR_CIRRUS is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E1000=y 
+CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_IGC=m +CONFIG_JME=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_QCA7000_SPI=m +CONFIG_QCOM_EMAC=m +CONFIG_RMNET=m +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_SFC=m +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_PHYLIB=y +CONFIG_MDIO_BUS_MUX_MULTIPLEXER=m +CONFIG_MDIO_BUS_MUX_MMIOREG=m +CONFIG_PPP=m +CONFIG_PPPOE=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_INPUT_LEDS=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=m +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_TCG_TIS=y +CONFIG_TCG_ATMEL=m +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_GPIO_GENERIC_PLATFORM=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=m +CONFIG_PMBUS=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_I6300ESB_WDT=m +CONFIG_SSB=y +CONFIG_RC_CORE=m +CONFIG_DRM=m +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_HID_ITE is not set +CONFIG_HID_LOGITECH=m +# CONFIG_HID_REDRAGON is not set +# CONFIG_I2C_HID is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_ERDMA=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_DMADEVICES=y 
+CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_DAX=y +CONFIG_STM=m +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_REISERFS_FS=m +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=y +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_F2FS_FS=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_VIRT_FUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_CACHEFILES_ONDEMAND=y +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ONDEMAND=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y 
+CONFIG_TRUSTED_KEYS=m +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_CRC8=m +CONFIG_DMA_CMA=y +CONFIG_PRINTK_TIME=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y +# CONFIG_FRAME_POINTER is not set +CONFIG_KGDB=y +CONFIG_KGDB_TESTS=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_KEYBOARD=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=1 +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +CONFIG_STACK_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_HIST_TRIGGERS=y +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_BPF=m -- Gitee From 6835ecd8fa30f57e59e3b79be34014914dfe6ffd Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 09:01:27 +0800 Subject: [PATCH 377/953] anolis: sw64: add several options support ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/Kconfig | 25 +++++++++++++++++++++++++ 1 file changed, 25 
insertions(+)

diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig
index 0fd1be7195cc..62f655ceae3f 100644
--- a/arch/sw_64/Kconfig
+++ b/arch/sw_64/Kconfig
@@ -72,6 +72,7 @@ config SW64
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_PREL32_RELOCATIONS
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -121,6 +122,12 @@ config SW64
 	select SW64_TIMER
 	select SWIOTLB
 	select THREAD_INFO_IN_TASK
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select IOMMU_DMA if IOMMU_SUPPORT
+	select ARCH_SUPPORTS_MEMORY_FAILURE
+	select HAVE_CONTEXT_TRACKING
+	select HAVE_NMI
+	select HAVE_DMA_CONTIGUOUS

 config LOCKDEP_SUPPORT
 	def_bool y
@@ -162,6 +169,10 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y

+config ZONE_DMA
+	bool "Support DMA zone" if EXPERT
+	default y
+
 config ZONE_DMA32
 	bool
 	default y
@@ -193,6 +204,10 @@ config SYS_HAS_EARLY_PRINTK
 config HAVE_CSRRW
 	bool

+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 menu "System setup"

 menu "Machine Configuration"
@@ -395,6 +410,13 @@ config SCHED_SMT
 	  MultiThreading at a cost of slightly increased overhead in some
 	  places. If unsure say N here.

+config SCHED_MC
+	bool "Multi-core scheduler support"
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 config NR_CPUS
 	int "Maximum number of CPUs (2-256)"
 	range 2 256
@@ -636,6 +658,9 @@ config ARCH_HIBERNATION_POSSIBLE
 	depends on SW64
 	def_bool y

+config ARCH_SELECT_MEMORY_MODEL
+	def_bool ARCH_SPARSEMEM_ENABLE
+
 source "drivers/cpuidle/Kconfig"
 source "drivers/idle/Kconfig"
--
Gitee

From 0f7f52d871bac90b603818e84cc053639d4f4b7c Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Wed, 9 Mar 2022 14:58:23 +0800
Subject: [PATCH 378/953] anolis: sched: introduce ACPU accounting

ANBZ: #8547

When SMT is on, tasks will be disturbed by the tasks on their SMT
sibling, which makes them run sometimes fast and sometimes slowly. So
far, there isn't any way to assess how much disturbance a task has
received.

To assess the SMT disturbance, we introduce ACPU (assess CPU), which
accounts how long a task is running while its SMT sibling is idle. The
statistical data is shown in /proc/<pid>/sched, row se.core_sibidletime.
The data is counted and shown only when kernel.sched_schedstats is on.
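A minimal usage sketch of the interface described above (assuming a
kernel built with this patch; the sysctl and the procfs row name are
taken from the commit message, so they may differ on other builds):

    # schedstats must be on for the counter to be collected
    sysctl -w kernel.sched_schedstats=1
    # read the per-task value, e.g. for the current shell
    grep sibidletime /proc/$$/sched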
Co-developed-by: Cruz Zhao
Signed-off-by: Tianchen Ding
Signed-off-by: Cruz Zhao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2893
---
 include/linux/kernel_stat.h | 6 +++
 include/linux/sched.h | 10 +++++
 kernel/sched/core.c | 89 +++++++++++++++++++++++++++++++++++++
 kernel/sched/cputime.c | 8 ++++
 kernel/sched/debug.c | 3 ++
 kernel/sched/sched.h | 6 +++
 kernel/smpboot.c | 1 +
 lib/Kconfig.debug | 7 +++
 8 files changed, 130 insertions(+)

diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 9935f7ecbfb9..955b08e976bb 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -30,6 +30,9 @@ enum cpu_usage_stat {
 	CPUTIME_GUEST_NICE,
 #ifdef CONFIG_SCHED_CORE
 	CPUTIME_FORCEIDLE,
+#endif
+#ifdef CONFIG_SCHED_ACPU
+	CPUTIME_SIBIDLE,
 #endif
 	NR_STATS,
 };
@@ -133,5 +136,8 @@ extern void account_idle_ticks(unsigned long ticks);
 #ifdef CONFIG_SCHED_CORE
 extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
 #endif
+#ifdef CONFIG_SCHED_ACPU
+extern void __account_sibidle_time(struct task_struct *tsk, u64 delta);
+#endif

 #endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3f90d9ece7bd..050fc53e1d55 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -542,6 +542,10 @@ struct sched_statistics {
 #ifdef CONFIG_SCHED_CORE
 	u64 core_forceidle_sum;
 #endif
+#ifdef CONFIG_SCHED_ACPU
+	u64 core_sibidle_sum;
+#endif
+
 #endif /* CONFIG_SCHEDSTATS */
 } ____cacheline_aligned;
@@ -2495,3 +2499,9 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }

 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 #endif
+
+#ifdef CONFIG_SCHED_ACPU
+extern void acpu_enable(void);
+#else
+static inline void acpu_enable(void) { }
+#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index de44275a2b56..fdfdf5355ffb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -155,6 +155,10 @@ __read_mostly int scheduler_running;

 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

+#ifdef CONFIG_SCHED_ACPU
+DEFINE_STATIC_KEY_FALSE(acpu_enabled);
+#endif
+
 /* kernel prio, less is more */
 static inline int __task_prio(const struct task_struct *p)
 {
@@ -4989,6 +4993,83 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,

 #endif /* CONFIG_PREEMPT_NOTIFIERS */

+#ifdef CONFIG_SCHED_ACPU
+void acpu_enable(void)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rq *rq = cpu_rq(i);
+
+		/* It may be not that accurate, but useful enough. */
+		rq->last_acpu_update_time = rq->clock;
+	}
+	static_branch_enable(&acpu_enabled);
+}
+
+static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next)
+{
+	const int cpu = cpu_of(rq);
+	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+	u64 now = rq_clock(rq);
+	u64 sibidle_sum, last_update_time;
+	s64 delta, last;
+	int i;
+
+	if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled())
+		return;
+
+	/* Update idle sum and busy sum for current rq. */
+	delta = now - rq->last_acpu_update_time;
+	if (prev == rq->idle)
+		rq->acpu_idle_sum += delta;
+
+	/*
+	 * Be careful, smt_mask may be NULL.
+	 * We only consider the case of two SMT siblings at this stage.
+	 */
+	if (unlikely(!smt_mask) || unlikely(cpumask_weight(smt_mask) != 2))
+		goto out;
+
+	for_each_cpu(i, smt_mask) {
+		if (i != cpu) {
+			struct rq *rq_i = cpu_rq(i);
+			struct task_struct *curr_i = rq_i->curr;
+
+			last = (s64)(rq->last_acpu_update_time -
+				     rq_i->last_acpu_update_time);
+			last_update_time = last >= 0 ? rq->last_acpu_update_time :
+						       rq_i->last_acpu_update_time;
+			/*
+			 * Sibling may update acpu at the same time, and its
+			 * timestamp may be newer than this rq.
+			 */
+			delta = now - last_update_time;
+			delta = delta > 0 ? delta : 0;
+
+			/* Add the delta to improve accuracy. */
+			sibidle_sum = last >= 0 ? rq->sibidle_sum : rq_i->acpu_idle_sum;
+			if (curr_i == rq_i->idle)
+				sibidle_sum += delta;
+		}
+	}
+
+	if (prev != rq->idle) {
+		delta = sibidle_sum - rq->sibidle_sum;
+		delta = delta > 0 ? delta : 0;
+		__account_sibidle_time(prev, delta);
+	}
+
+	rq->sibidle_sum = sibidle_sum;
+out:
+	rq->last_acpu_update_time = now;
+}
+#else
+static inline void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next)
+{
+}
+#endif /* CONFIG_SCHED_ACPU */
+
 static inline void prepare_task(struct task_struct *next)
 {
 #ifdef CONFIG_SMP
@@ -5197,6 +5278,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 {
 	kcov_prepare_switch(prev);
 	sched_info_switch(rq, prev, next);
+	update_acpu(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	rseq_preempt(prev);
 	fire_sched_out_preempt_notifiers(prev, next);
@@ -5669,6 +5751,7 @@ void scheduler_tick(void)
 	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
 	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
 	curr->sched_class->task_tick(rq, curr, 0);
+	update_acpu(rq, curr, curr);
 	if (sched_feat(LATENCY_WARN))
 		resched_latency = cpu_resched_latency(rq);
 	calc_global_load_tick(rq);
@@ -10054,6 +10137,12 @@ void __init sched_init(void)
 		rcuwait_init(&rq->hotplug_wait);
 #endif
 #endif /* CONFIG_SMP */
+
+#ifdef CONFIG_SCHED_ACPU
+		rq->acpu_idle_sum = 0;
+		rq->sibidle_sum = 0;
+		rq->last_acpu_update_time = rq->clock;
+#endif
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index af7952f12e6c..d411d1aaf937 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -244,6 +244,14 @@ void __account_forceidle_time(struct task_struct *p, u64 delta)
 	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
 }
 #endif
+#ifdef CONFIG_SCHED_ACPU
+void __account_sibidle_time(struct task_struct *p, u64 delta)
+{
+	__schedstat_add(p->stats.core_sibidle_sum, delta);
+
+	task_group_account_field(p, CPUTIME_SIBIDLE, delta);
+}
+#endif

 /*
  * When a guest is interrupted for a longer amount of time, missed clock
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4c3d0d9f3db6..464fa6b7c2a9 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1059,6 +1059,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,

 #ifdef CONFIG_SCHED_CORE
 		PN_SCHEDSTAT(core_forceidle_sum);
+#endif
+#ifdef CONFIG_SCHED_ACPU
+		PN_SCHEDSTAT(core_sibidle_sum);
 #endif
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1a02d4c602b1..51ddd8f7e4ce 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1166,6 +1166,12 @@ struct rq {
 	call_single_data_t cfsb_csd;
 	struct list_head cfsb_csd_list;
 #endif
+
+#ifdef CONFIG_SCHED_ACPU
+	u64 acpu_idle_sum;
+	u64 sibidle_sum;
+	u64 last_acpu_update_time;
+#endif
 };

 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index f47d8f375946..95abfe8d6996 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -73,6 +73,7 @@ void __init idle_threads_init(void)
 		if (cpu != boot_cpu)
 			idle_init(cpu);
 	}
+	acpu_enable();
 }
 #endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d2f73bb4121b..1612d79eb33e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1255,6 +1255,13 @@ config SCHEDSTATS
 	  application, you can say N to avoid the very slight overhead
 	  this adds.

+config SCHED_ACPU
+	bool "ACPU info: account idle time of smt to task"
+	depends on DEBUG_KERNEL && PROC_FS && SMP && SCHED_SMT
+	default y
+	help
+	  Add ACPU info in /proc/<pid>/sched.
+
 endmenu

 config DEBUG_TIMEKEEPING
--
Gitee

From 88cf1d4c0684e3e8c020240a1ae9670fb9fbd2e1 Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Tue, 19 Sep 2023 17:22:19 +0800
Subject: [PATCH 379/953] anolis: sched: introduce sysctl_sched_acpu_enabled

ANBZ: #8547

To allow ACPU accounting to be turned on and off dynamically, we
introduce sysctl_sched_acpu_enabled instead of enabling the accounting
by default.

Signed-off-by: Cruz Zhao
Reviewed-by: Tianchen Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2893
---
 include/linux/sched.h | 6 ------
 include/linux/sched/sysctl.h | 6 ++++++
 kernel/sched/core.c | 32 +++++++++++++++++++++++++++++++-
 kernel/smpboot.c | 1 -
 kernel/sysctl.c | 11 +++++++++++
 5 files changed, 48 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 050fc53e1d55..9553be02b888 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2499,9 +2499,3 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }

 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 #endif
-
-#ifdef CONFIG_SCHED_ACPU
-extern void acpu_enable(void);
-#else
-static inline void acpu_enable(void) { }
-#endif
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 5a64582b086b..1c45773304fc 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -29,4 +29,10 @@ extern int sysctl_numa_balancing_mode;
 #define sysctl_numa_balancing_mode 0
 #endif

+#ifdef CONFIG_SCHED_ACPU
+extern unsigned int sysctl_sched_acpu_enabled;
+extern int sched_acpu_enable_handler(struct ctl_table *table, int write,
+				     void __user *buffer, size_t *lenp,
+				     loff_t *ppos);
+#endif
 #endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fdfdf5355ffb..311e7e0b0507 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -157,6 +157,7 @@ DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

 #ifdef CONFIG_SCHED_ACPU
 DEFINE_STATIC_KEY_FALSE(acpu_enabled);
+unsigned int sysctl_sched_acpu_enabled;
 #endif

 /* kernel prio, less is more */
@@ -4994,7 +4995,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 #endif /* CONFIG_PREEMPT_NOTIFIERS */

 #ifdef CONFIG_SCHED_ACPU
-void acpu_enable(void)
+static void acpu_enable(void)
 {
 	int i;
@@ -5007,6 +5008,35 @@ void acpu_enable(void)
 	static_branch_enable(&acpu_enabled);
 }

+static void acpu_disable(void)
+{
+	static_branch_disable(&acpu_enabled);
+}
+
+int sched_acpu_enable_handler(struct ctl_table *table, int write, void __user *buffer,
+			      size_t *lenp, loff_t *ppos)
+{
+	int ret;
+	unsigned int old, new;
+
+	if (!write) {
+		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+		return ret;
+	}
+
+	old = sysctl_sched_acpu_enabled;
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	new = sysctl_sched_acpu_enabled;
+	if (!ret && write && (old != new)) {
+		if (new)
+			acpu_enable();
+		else
+			acpu_disable();
+	}
+
+	return ret;
+}
+
 static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	const int cpu = cpu_of(rq);
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 95abfe8d6996..f47d8f375946 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -73,7 +73,6 @@ void __init idle_threads_init(void)
 		if (cpu != boot_cpu)
 			idle_init(cpu);
 	}
-	acpu_enable();
 }
 #endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 204528a81b43..47bdd8216fc5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2068,6 +2068,17 @@ static struct ctl_table kern_table[] = {
 		.extra2 = &userns_max_level_max,
 	},
 #endif
+#ifdef CONFIG_SCHED_ACPU
+	{
+		.procname = "sched_acpu",
+		.data = &sysctl_sched_acpu_enabled,
+		.maxlen = sizeof(unsigned int),
+		.mode = 0644,
+		.proc_handler = sched_acpu_enable_handler,
+		.extra1 = SYSCTL_ZERO,
+		.extra2 = SYSCTL_ONE,
+	},
+#endif /* CONFIG_SCHED_ACPU */
 	{ }
 };
--
Gitee

From 57acca330d3010576fa0acf84293aec44fea2d03 Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Sat, 30 Sep 2023 14:52:52 +0800
Subject: [PATCH 380/953] anolis: sched: account sibidle for core scheduling

ANBZ: #8547

Account sibling idle time for core scheduling, which is time during
which a cookie'd task runs with an SMT sibling idle, including sibling
forced idle time and sibling real idle time, collectively called
sibidle.

A few details:
- For SMT > 2, we scale the amount of idle charged based on the number
  of idle siblings in function account_sibidle_time(). Additionally, we
  split the time up and evenly charge it to all running tasks, as each
  is equally responsible for the idle.
- When core sched is enabled and the sibidle count is not zero, we
  account sibidle in function account_sibidle_time(), otherwise in
  function update_acpu().

Signed-off-by: Cruz Zhao
Reviewed-by: Tianchen Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2893
---
 include/linux/kernel_stat.h | 9 +++----
 include/linux/sched.h | 2 +-
 kernel/sched/core.c | 50 ++++++++++++++++++++++++-------------
 kernel/sched/core_sched.c | 45 ++++++++++++++++++++-------------
 kernel/sched/cputime.c | 21 +++++++---------
 kernel/sched/sched.h | 13 +++++-----
 6 files changed, 81 insertions(+), 59 deletions(-)

diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 955b08e976bb..01f0c6391a98 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -31,7 +31,7 @@ enum cpu_usage_stat {
 #ifdef CONFIG_SCHED_CORE
 	CPUTIME_FORCEIDLE,
 #endif
-#ifdef CONFIG_SCHED_ACPU
+#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
 	CPUTIME_SIBIDLE,
 #endif
 	NR_STATS,
@@ -133,11 +133,8 @@ extern void account_process_tick(struct task_struct *, int user);

 extern void account_idle_ticks(unsigned long ticks);

-#ifdef CONFIG_SCHED_CORE
-extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
-#endif
-#ifdef CONFIG_SCHED_ACPU
-extern void __account_sibidle_time(struct task_struct *tsk, u64 delta);
+#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
+extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, bool fi);
 #endif

 #endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9553be02b888..3c39c38f78b9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -542,7 +542,7 @@ struct sched_statistics {
 #ifdef CONFIG_SCHED_CORE
 	u64 core_forceidle_sum;
 #endif
-#ifdef CONFIG_SCHED_ACPU
+#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU)
 	u64 core_sibidle_sum;
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 311e7e0b0507..46e13ea43fe4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -373,7 +373,7 @@ static void __sched_core_flip(bool enabled)
 		for_each_cpu(t, smt_mask)
 			cpu_rq(t)->core_enabled = enabled;

-		cpu_rq(cpu)->core->core_forceidle_start = 0;
+		cpu_rq(cpu)->core->core_sibidle_start = 0;
sched_core_unlock(cpu, &flags); @@ -5049,6 +5049,15 @@ static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_str if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled()) return; + /* + * If core sched is enabled and core_sibidle_count is not zero, we update sibidle + * time in function __sched_core_account_sibidle(). + */ +#ifdef CONFIG_SCHED_CORE + if (rq->core->core_sibidle_count) + goto out; +#endif + /* Update idle sum and busy sum for current rq. */ delta = now - rq->last_acpu_update_time; if (prev == rq->idle) @@ -5087,7 +5096,7 @@ static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_str if (prev != rq->idle) { delta = sibidle_sum - rq->sibidle_sum; delta = delta > 0 ? delta : 0; - __account_sibidle_time(prev, delta); + __account_sibidle_time(prev, delta, false); } rq->sibidle_sum = sibidle_sum; @@ -6249,18 +6258,21 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* reset state */ rq->core->core_cookie = 0UL; - if (rq->core->core_forceidle_count) { + if (rq->core->core_sibidle_count) { if (!core_clock_updated) { update_rq_clock(rq->core); core_clock_updated = true; } - sched_core_account_forceidle(rq); + sched_core_account_sibidle(rq); /* reset after accounting force idle */ - rq->core->core_forceidle_start = 0; - rq->core->core_forceidle_count = 0; - rq->core->core_forceidle_occupation = 0; - need_sync = true; - fi_before = true; + rq->core->core_sibidle_start = 0; + rq->core->core_sibidle_count = 0; + rq->core->core_sibidle_occupation = 0; + if (rq->core->core_forceidle_count) { + rq->core->core_forceidle_count = 0; + need_sync = true; + fi_before = true; + } } /* @@ -6336,6 +6348,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) rq_i->core_pick = p; if (p == rq_i->idle) { + rq->core->core_sibidle_count++; if (rq_i->nr_running) { rq->core->core_forceidle_count++; if (!fi_before) @@ -6346,9 +6359,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } } - if (schedstat_enabled() && rq->core->core_forceidle_count) { - rq->core->core_forceidle_start = rq_clock(rq->core); - rq->core->core_forceidle_occupation = occ; + if (schedstat_enabled() && rq->core->core_sibidle_count) { + rq->core->core_sibidle_start = rq_clock(rq->core); + rq->core->core_sibidle_occupation = occ; } rq->core->core_pick_seq = rq->core->core_task_seq; @@ -6390,7 +6403,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (!(fi_before && rq->core->core_forceidle_count)) task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); - rq_i->core_pick->core_occupation = occ; + if (rq->core->core_forceidle_count) + rq_i->core_pick->core_occupation = occ; if (i == cpu) { rq_i->core_pick = NULL; @@ -6605,14 +6619,15 @@ static void sched_core_cpu_deactivate(unsigned int cpu) core_rq->core_cookie = rq->core_cookie; core_rq->core_forceidle_count = rq->core_forceidle_count; core_rq->core_forceidle_seq = rq->core_forceidle_seq; - core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; + core_rq->core_sibidle_occupation = rq->core_sibidle_occupation; + core_rq->core_sibidle_count = rq->core_sibidle_count; /* * Accounting edge for forced idle is handled in pick_next_task(). * Don't need another one here, since the hotplug thread shouldn't * have a cookie. 
*/ - core_rq->core_forceidle_start = 0; + core_rq->core_sibidle_start = 0; /* install new leader */ for_each_cpu(t, smt_mask) { @@ -10182,8 +10197,9 @@ void __init sched_init(void) rq->core_enabled = 0; rq->core_tree = RB_ROOT; rq->core_forceidle_count = 0; - rq->core_forceidle_occupation = 0; - rq->core_forceidle_start = 0; + rq->core_sibidle_count = 0; + rq->core_sibidle_occupation = 0; + rq->core_sibidle_start = 0; rq->core_cookie = 0UL; #endif diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index a57fd8f27498..8db2999e51c8 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -237,7 +237,7 @@ int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, #ifdef CONFIG_SCHEDSTATS /* REQUIRES: rq->core's clock recently updated. */ -void __sched_core_account_forceidle(struct rq *rq) +void __sched_core_account_sibidle(struct rq *rq) { const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); u64 delta, now = rq_clock(rq->core); @@ -247,28 +247,31 @@ void __sched_core_account_forceidle(struct rq *rq) lockdep_assert_rq_held(rq); - WARN_ON_ONCE(!rq->core->core_forceidle_count); + WARN_ON_ONCE(!rq->core->core_sibidle_count); - if (rq->core->core_forceidle_start == 0) - return; + /* can't be forced idle without a running task */ + WARN_ON_ONCE(!rq->core->core_sibidle_occupation && + rq->core->core_forceidle_count); + + if (rq->core->core_sibidle_start == 0 || + rq->core->core_sibidle_occupation == 0) + goto out; - delta = now - rq->core->core_forceidle_start; + delta = now - rq->core->core_sibidle_start; if (unlikely((s64)delta <= 0)) - return; + goto out; - rq->core->core_forceidle_start = now; + rq->core->core_sibidle_start = now; - if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) { - /* can't be forced idle without a running task */ - } else if (rq->core->core_forceidle_count > 1 || - rq->core->core_forceidle_occupation > 1) { + if (rq->core->core_sibidle_count > 1 || + rq->core->core_sibidle_occupation > 1) { /* * For larger SMT configurations, we need to scale the charged * forced idle amount since there can be more than one forced * idle sibling and more than one running cookied task. */ - delta *= rq->core->core_forceidle_count; - delta = div_u64(delta, rq->core->core_forceidle_occupation); + delta *= rq->core->core_sibidle_count; + delta = div_u64(delta, rq->core->core_sibidle_occupation); } for_each_cpu(i, smt_mask) { @@ -279,22 +282,30 @@ void __sched_core_account_forceidle(struct rq *rq) continue; /* - * Note: this will account forceidle to the current cpu, even + * Note: this will account sibidle to the current cpu, even * if it comes from our SMT sibling. 
	 */
-		__account_forceidle_time(p, delta);
+		__account_sibidle_time(p, delta, !!rq->core->core_forceidle_count);
+	}
+
+out:
+#ifdef CONFIG_SCHED_ACPU
+	for_each_cpu(i, smt_mask) {
+		rq_i = cpu_rq(i);
+		rq_i->last_acpu_update_time = now;
 	}
+#endif
 }

 void __sched_core_tick(struct rq *rq)
 {
-	if (!rq->core->core_forceidle_count)
+	if (!rq->core->core_sibidle_count)
 		return;

 	if (rq != rq->core)
 		update_rq_clock(rq->core);

-	__sched_core_account_forceidle(rq);
+	__sched_core_account_sibidle(rq);
 }
 #endif /* CONFIG_SCHEDSTATS */
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index d411d1aaf937..a443663572db 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -231,25 +231,22 @@ void account_idle_time(u64 cputime)
 }

-#ifdef CONFIG_SCHED_CORE
+#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU)
 /*
- * Account for forceidle time due to core scheduling.
+ * Account for sibidle, and for forceidle time due to core scheduling.
  *
  * REQUIRES: schedstat is enabled.
  */
-void __account_forceidle_time(struct task_struct *p, u64 delta)
-{
-	__schedstat_add(p->stats.core_forceidle_sum, delta);
-
-	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
-}
-#endif
-#ifdef CONFIG_SCHED_ACPU
-void __account_sibidle_time(struct task_struct *p, u64 delta)
+void __account_sibidle_time(struct task_struct *p, u64 delta, bool fi)
 {
 	__schedstat_add(p->stats.core_sibidle_sum, delta);

 	task_group_account_field(p, CPUTIME_SIBIDLE, delta);
+#ifdef CONFIG_SCHED_CORE
+	if (fi) {
+		__schedstat_add(p->stats.core_forceidle_sum, delta);
+		task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
+	}
+#endif
 }
 #endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 51ddd8f7e4ce..b99bad980d68 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1155,8 +1155,9 @@ struct rq {
 	unsigned long core_cookie;
 	unsigned int core_forceidle_count;
 	unsigned int core_forceidle_seq;
-	unsigned int core_forceidle_occupation;
-	u64 core_forceidle_start;
+	unsigned int core_sibidle_occupation;
+	u64 core_sibidle_start;
+	unsigned int core_sibidle_count;
 #endif

 	/* Scratch cpumask to be temporarily used under rq_lock */
@@ -1967,12 +1968,12 @@ static inline const struct cpumask *task_user_cpus(struct task_struct *p)

 #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)

-extern void __sched_core_account_forceidle(struct rq *rq);
+extern void __sched_core_account_sibidle(struct rq *rq);

-static inline void sched_core_account_forceidle(struct rq *rq)
+static inline void sched_core_account_sibidle(struct rq *rq)
 {
 	if (schedstat_enabled())
-		__sched_core_account_forceidle(rq);
+		__sched_core_account_sibidle(rq);
 }

 extern void __sched_core_tick(struct rq *rq);
@@ -1985,7 +1986,7 @@ static inline void sched_core_tick(struct rq *rq)

 #else

-static inline void sched_core_account_forceidle(struct rq *rq) {}
+static inline void sched_core_account_sibidle(struct rq *rq) {}

 static inline void sched_core_tick(struct rq *rq) {}
--
Gitee

From adedc5b069af19a2b5b92eeccda0a59bff700a52 Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Mon, 18 Sep 2023 10:24:20 +0800
Subject: [PATCH 381/953] anolis: sched: introduce per cgroup sibidle
 accounting

ANBZ: #8547

This patch extends per-task sibidle accounting into cgroups. rstat is
used for cgroup accounting, except for the root, which uses kcpustat in
order to bypass the need for doing an rstat flush when reading root
stats.

Data is displayed via /sys/fs/cgroup/cpu/<cgroup>/cpu.stat, row
sibidle_sum.
Similar to the task accounting, the cgroup accounting requires that schedstats is enabled. Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2893 --- include/linux/cgroup-defs.h | 3 +++ kernel/cgroup/rstat.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 265da00a1a8b..95bc8b0a231f 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -305,6 +305,9 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_sum; +#endif }; /* diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index d80d7a608141..a29c5275c68e 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -327,6 +327,9 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum += src_bstat->sibidle_sum; +#endif } static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, @@ -338,6 +341,9 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum -= src_bstat->sibidle_sum; +#endif } static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu) @@ -430,6 +436,11 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, case CPUTIME_FORCEIDLE: rstatc->bstat.forceidle_sum += delta_exec; break; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + case CPUTIME_SIBIDLE: + rstatc->bstat.sibidle_sum += delta_exec; + break; #endif default: break; @@ -473,6 +484,9 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + bstat->sibidle_sum += cpustat[CPUTIME_SIBIDLE]; #endif } } @@ -485,6 +499,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE u64 forceidle_time; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_time; +#endif if (cgroup_parent(cgrp)) { cgroup_rstat_flush_hold(cgrp); @@ -493,6 +510,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) &utime, &stime); #ifdef CONFIG_SCHED_CORE forceidle_time = cgrp->bstat.forceidle_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + sibidle_time = cgrp->bstat.sibidle_sum; #endif cgroup_rstat_flush_release(); } else { @@ -502,6 +522,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) stime = bstat.cputime.stime; #ifdef CONFIG_SCHED_CORE forceidle_time = bstat.forceidle_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + sibidle_time = bstat.sibidle_sum; #endif } @@ -511,6 +534,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE do_div(forceidle_time, NSEC_PER_USEC); #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + do_div(sibidle_time, NSEC_PER_USEC); +#endif seq_printf(seq, "usage_usec %llu\n" "user_usec %llu\n" @@ -520,6 +546,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE seq_printf(seq, "core_sched.force_idle_usec %llu\n", 
		   forceidle_time);
 #endif
+#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE)
+	seq_printf(seq, "sibidle_usec %llu\n", sibidle_time);
+#endif
 }

 /* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
--
Gitee

From 476d168139cda3c3b3233dcb7c61e9ed4d73eb4e Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Fri, 10 Nov 2023 11:21:19 +0800
Subject: [PATCH 382/953] anolis: sched: fix percpu account for CPUTIME_*IDLE

ANBZ: #8547

Commit 0a1658bedfa7 ("sched/core: add forced idle accounting for
cgroups") added the percpu state CPUTIME_FORCEIDLE, but
task_group_account_field() adds the time delta on the current cpu
instead of the forceidle cpu. As a result, the total sum is correct,
but the percpu values are wrong.

Fix it by adding the delta to the right cpu.

Signed-off-by: Tianchen Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2893
---
 kernel/sched/cputime.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a443663572db..6e995f4857be 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -239,12 +239,16 @@ void account_idle_time(u64 cputime)
  */
 void __account_sibidle_time(struct task_struct *p, u64 delta, bool fi)
 {
+	unsigned int cpu = task_cpu(p);
+
 	__schedstat_add(p->stats.core_sibidle_sum, delta);

-	task_group_account_field(p, CPUTIME_SIBIDLE, delta);
+	kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE] += delta;
+	cgroup_account_cputime_field(p, CPUTIME_SIBIDLE, delta);
 #ifdef CONFIG_SCHED_CORE
 	if (fi) {
 		__schedstat_add(p->stats.core_forceidle_sum, delta);
-		task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
+		kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE] += delta;
+		cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE, delta);
 	}
 #endif
 }
--
Gitee

From 52cf6e269208cba56fee94b7b0f25c20a45a5c84 Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Wed, 8 Nov 2023 11:22:08 +0800
Subject: [PATCH 383/953] anolis: sched: enable CONFIG_SCHED_ACPU by default

ANBZ: #8547

Enable CONFIG_SCHED_ACPU by default.

Signed-off-by: Cruz Zhao
Reviewed-by: Tianchen Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2893
---
 arch/arm64/configs/anolis-debug_defconfig | 1 +
 arch/arm64/configs/anolis_defconfig | 1 +
 arch/x86/configs/anolis-debug_defconfig | 1 +
 arch/x86/configs/anolis_defconfig | 1 +
 4 files changed, 4 insertions(+)

diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig
index eb020b30c47d..e4df4d2eb48c 100644
--- a/arch/arm64/configs/anolis-debug_defconfig
+++ b/arch/arm64/configs/anolis-debug_defconfig
@@ -114,6 +114,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_PREEMPT_COUNT=y
 # CONFIG_PREEMPT_DYNAMIC is not set
 CONFIG_SCHED_CORE=y
+CONFIG_SCHED_ACPU=y

 #
 # CPU/Task time and stats accounting
diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig
index aba19cdc5358..c2849134d486 100644
--- a/arch/arm64/configs/anolis_defconfig
+++ b/arch/arm64/configs/anolis_defconfig
@@ -112,6 +112,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
 # CONFIG_PREEMPT_DYNAMIC is not set
 CONFIG_SCHED_CORE=y
+CONFIG_SCHED_ACPU=y

 #
 # CPU/Task time and stats accounting
diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig
index 0a31d38d115c..3c4c4795fb2d 100644
--- a/arch/x86/configs/anolis-debug_defconfig
+++ b/arch/x86/configs/anolis-debug_defconfig
@@ -134,6 +134,7 @@ CONFIG_PREEMPT_COUNT=y
 CONFIG_PREEMPTION=y
 CONFIG_PREEMPT_DYNAMIC=y
 CONFIG_SCHED_CORE=y
+CONFIG_SCHED_ACPU=y

 #
 # CPU/Task time and stats accounting
diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index 0b0922337278..35b9d5b8a1cc 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -133,6 +133,7 @@ CONFIG_PREEMPT_COUNT=y
 CONFIG_PREEMPTION=y
 CONFIG_PREEMPT_DYNAMIC=y
 CONFIG_SCHED_CORE=y
+CONFIG_SCHED_ACPU=y

 #
 # CPU/Task time and stats accounting
--
Gitee

From 9d245d83aee37fc5fdc623b78b1bd0246601dd3c Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Wed, 20 Mar 2024 17:11:07 +0800
Subject: [PATCH 384/953] anolis: kfence: fix a NULL pointer dereference

ANBZ: #8499

When the kfence pool is exhausted, object will be NULL. Do a check
before getting its refcnt.

Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6")
Signed-off-by: Tianchen Ding
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2922
---
 mm/kfence/core.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 17f3dda4ebf7..571ef4dcccf8 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -613,7 +613,8 @@ get_free_meta_from_node(struct kfence_freelist_node *kfence_freelist)
 		object = list_entry(kfence_freelist->freelist.next, struct kfence_metadata, list);
 		list_del_init(&object->list);
 	}
-	percpu_ref_get(&object->kpa->refcnt);
+	if (object)
+		percpu_ref_get(&object->kpa->refcnt);
 	raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags);

 	return object;
@@ -684,8 +685,8 @@ static struct kfence_metadata *get_free_meta(int real_node)
 		list_del_init(&object->list);
 		c->count--;
 	}
-
-	percpu_ref_get(&object->kpa->refcnt);
+	if (object)
+		percpu_ref_get(&object->kpa->refcnt);
 	put_cpu_ptr(c);
 	local_irq_restore(flags);
--
Gitee

From 441d28e48c0acb90ce9e7f9ef983fc214508ac4f Mon Sep 17 00:00:00 2001
From: Zelin Deng
Date: Tue, 22 Nov 2022 16:38:20 +0800
Subject: [PATCH 385/953] anolis: mm/early_ioremap.c: Always build
 early_memremap_prot() in x86

commit e1347ea0e4825294d441f6d8b4405412774ef313 OpenAnolis.

ANBZ: #3267

In some scenarios, we want to specify protection attributes when doing
an early memory map. As early_memremap_prot() is also defined in
loongarch, it's better to select ARCH_USE_MEMREMAP_PROT in the X86
config to avoid a redefinition error.

Signed-off-by: Zelin Deng
Reviewed-by: Guanjun
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/932
[ hly: Fix conflict. ]
Signed-off-by: hanliyang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2917
---
 arch/x86/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4c75a7968948..a649d7c04fb8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -99,6 +99,7 @@ config X86
 	select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
 	select ARCH_HAS_SYSCALL_WRAPPER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select ARCH_USE_MEMREMAP_PROT
 	select ARCH_HAS_DEBUG_WX
 	select ARCH_HAS_ZONE_DMA_SET if EXPERT
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
--
Gitee

From 06b793617726607428b807f481bfe8a2955733a9 Mon Sep 17 00:00:00 2001
From: Zelin Deng
Date: Tue, 22 Nov 2022 16:02:20 +0800
Subject: [PATCH 386/953] anolis: x86/setup: Preserve _ENC flag when initrd is
 being relocated

commit ea30196aea830c17565060644034ac7183d27a1a OpenAnolis.

ANBZ: #3267

Since commit 107cd2532181 ("Encrypt the initrd earlier for BSP microcode
update"), when SME is enabled the initrd is encrypted at an earlier
stage. If the initrd is located in an e820 reserved area, it will be
copied to the direct mapping area in relocate_initrd().
In this case the source address of the initrd should be mapped as
encrypted, but copy_from_early_mem() clears the encrypted attribute
because the source address is not in the kernel usable area. The
relocated initrd therefore contains encrypted data and cannot be
unpacked later.

Add a new function copy_early_initrd() in setup.c to preserve the _ENC
flag, and remove copy_from_early_mem() as it is only used once here
by x86.

Signed-off-by: Zelin Deng
Reviewed-by: Guanjun
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/932
Signed-off-by: hanliyang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2917
---
 arch/x86/kernel/setup.c | 30 ++++++++++++++++++++++++++++-
 include/asm-generic/early_ioremap.h | 6 ------
 mm/early_ioremap.c | 21 --------------------
 3 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 09c3ea904ba5..01cf311c02ac 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -250,6 +250,34 @@ static u64 __init get_ramdisk_size(void)
 	return ramdisk_size;
 }

+#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
+
+static void __init copy_early_initrd(void *dest, phys_addr_t src,
+				     unsigned long size)
+{
+	unsigned long slop, clen;
+	char *p;
+
+	while (size) {
+		slop = offset_in_page(src);
+		clen = size;
+		if (clen > MAX_MAP_CHUNK - slop)
+			clen = MAX_MAP_CHUNK - slop;
+		/*
+		 * The _ENC flag should be preserved so that when SME is
+		 * enabled the initrd can be mapped as encrypted, as it had
+		 * been encrypted earlier. This flag won't impact other
+		 * platforms such as TDX/SEV guests.
+		 */
+		p = early_memremap_prot(src & PAGE_MASK, clen + slop,
+					pgprot_val(FIXMAP_PAGE_NORMAL));
+		memcpy(dest, p + slop, clen);
+		early_memunmap(p, clen + slop);
+		dest += clen;
+		src += clen;
+		size -= clen;
+	}
+}
+
 static void __init relocate_initrd(void)
 {
 	/* Assume only end is not page aligned */
@@ -269,7 +297,7 @@ static void __init relocate_initrd(void)
 	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
 	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

-	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
+	copy_early_initrd((void *)initrd_start, ramdisk_image, ramdisk_size);

 	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
 		" [mem %#010llx-%#010llx]\n",
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9d0479f50f97..be1ce406f481 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -32,12 +32,6 @@ extern void early_ioremap_setup(void);
  */
 extern void early_ioremap_reset(void);

-/*
- */ -extern void copy_from_early_mem(void *dest, phys_addr_t src, - unsigned long size); - #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index ce06b2884789..9d4d27399f80 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -243,27 +243,6 @@ early_memremap_prot(resource_size_t phys_addr, unsigned long size, } #endif -#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) - -void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) -{ - unsigned long slop, clen; - char *p; - - while (size) { - slop = offset_in_page(src); - clen = size; - if (clen > MAX_MAP_CHUNK - slop) - clen = MAX_MAP_CHUNK - slop; - p = early_memremap(src & PAGE_MASK, clen + slop); - memcpy(dest, p + slop, clen); - early_memunmap(p, clen + slop); - dest += clen; - src += clen; - size -= clen; - } -} - #else /* CONFIG_MMU */ void __init __iomem * -- Gitee From 617e1d05d24b461d1c31493363f3b92faf210fc7 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 21 Mar 2024 11:18:13 +0800 Subject: [PATCH 387/953] anolis: configs: refresh kconfigs ANBZ: #8598 Refresh kconfigs by follow command, No Functional Change. `ARCH=${arch} CROSS_COMPILE=script/dummy-tools make olddefconfig` Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2938 --- arch/arm64/configs/anolis-debug_defconfig | 6 +++++- arch/arm64/configs/anolis_defconfig | 7 ++++++- arch/x86/configs/anolis-debug_defconfig | 14 ++++++++++++-- arch/x86/configs/anolis_defconfig | 10 ++++++++-- 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index e4df4d2eb48c..4c2362da512f 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -114,7 +114,6 @@ CONFIG_PREEMPT_VOLUNTARY=y CONFIG_PREEMPT_COUNT=y # CONFIG_PREEMPT_DYNAMIC is not set CONFIG_SCHED_CORE=y -CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting @@ -2503,6 +2502,7 @@ CONFIG_ATA_PIIX=y # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set # # PATA SFF controllers with BMDMA @@ -3414,11 +3414,13 @@ CONFIG_I2C_NFORCE2=m # CONFIG_I2C_SIS96X is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_ZHAOXIN is not set # # ACPI drivers # # CONFIG_I2C_SCMI is not set +# CONFIG_I2C_ZHAOXIN_SMBUS is not set # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -4485,6 +4487,7 @@ CONFIG_FB_EFI=y CONFIG_FB_SIMPLE=y CONFIG_FB_SSD1307=m # CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set CONFIG_FB_CORE=y CONFIG_FB_NOTIFY=y # CONFIG_FIRMWARE_EDID is not set @@ -6924,6 +6927,7 @@ CONFIG_WQ_WATCHDOG=y CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index c2849134d486..b659891af7af 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -112,7 +112,6 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set # CONFIG_PREEMPT_DYNAMIC is not set CONFIG_SCHED_CORE=y -CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting @@ -2523,6 +2522,7 @@ CONFIG_ATA_PIIX=y # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN 
is not set # # PATA SFF controllers with BMDMA @@ -3434,11 +3434,13 @@ CONFIG_I2C_NFORCE2=m # CONFIG_I2C_SIS96X is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_ZHAOXIN is not set # # ACPI drivers # # CONFIG_I2C_SCMI is not set +# CONFIG_I2C_ZHAOXIN_SMBUS is not set # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -4505,6 +4507,7 @@ CONFIG_FB_EFI=y CONFIG_FB_SIMPLE=y CONFIG_FB_SSD1307=m # CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set CONFIG_FB_CORE=y CONFIG_FB_NOTIFY=y # CONFIG_FIRMWARE_EDID is not set @@ -6707,6 +6710,7 @@ CONFIG_CMA_ALIGNMENT=8 # CONFIG_DMA_MAP_BENCHMARK is not set CONFIG_SGL_ALLOC=y CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y @@ -6906,6 +6910,7 @@ CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 3c4c4795fb2d..ba42e39c5c62 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -134,7 +134,6 @@ CONFIG_PREEMPT_COUNT=y CONFIG_PREEMPTION=y CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y -CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting @@ -2575,6 +2574,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set # # PATA SFF controllers with BMDMA @@ -3707,6 +3707,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_ZHAOXIN=y CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_APPLICOM is not set @@ -3792,11 +3793,13 @@ CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set # # ACPI drivers # CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -3908,6 +3911,9 @@ CONFIG_PINCTRL_SUNRISEPOINT=m # CONFIG_PINCTRL_TIGERLAKE is not set # end of Intel pinctrl drivers +CONFIG_PINCTRL_ZHAOXIN=m +CONFIG_PINCTRL_KX7000=m + # # Renesas pinctrl drivers # @@ -4239,6 +4245,7 @@ CONFIG_SENSORS_TMP421=m # CONFIG_SENSORS_TMP464 is not set # CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m @@ -4734,6 +4741,7 @@ CONFIG_FB_HYPERV=m # CONFIG_FB_SIMPLE is not set # CONFIG_FB_SSD1307 is not set # CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set CONFIG_FB_CORE=y CONFIG_FB_NOTIFY=y CONFIG_FIRMWARE_EDID=y @@ -5583,6 +5591,7 @@ CONFIG_VMGENID=y CONFIG_EFI_SECRET=m CONFIG_SEV_GUEST=m CONFIG_TDX_GUEST_DRIVER=m +CONFIG_CSV_GUEST=m CONFIG_VIRTIO_ANCHOR=y CONFIG_VIRTIO=y CONFIG_VIRTIO_PCI_LIB=y @@ -7114,11 +7123,11 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m -CONFIG_CRYPTO_DEV_TSSE=m CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_CRYPTO_DEV_TSSE=m CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y CONFIG_X509_CERTIFICATE_PARSER=y @@ -7517,6 +7526,7 @@ CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y 
+CONFIG_SCHED_ACPU=y
 # end of Scheduler Debugging

 # CONFIG_DEBUG_TIMEKEEPING is not set
diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index 35b9d5b8a1cc..128303b14bf0 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -133,7 +133,6 @@ CONFIG_PREEMPT_COUNT=y
 CONFIG_PREEMPTION=y
 CONFIG_PREEMPT_DYNAMIC=y
 CONFIG_SCHED_CORE=y
-CONFIG_SCHED_ACPU=y

 #
 # CPU/Task time and stats accounting
@@ -2570,6 +2569,7 @@ CONFIG_ATA_PIIX=m
 # CONFIG_SATA_ULI is not set
 # CONFIG_SATA_VIA is not set
 # CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_ZHAOXIN is not set

 #
 # PATA SFF controllers with BMDMA
@@ -3701,6 +3701,7 @@ CONFIG_HW_RANDOM_INTEL=m
 CONFIG_HW_RANDOM_AMD=m
 # CONFIG_HW_RANDOM_BA431 is not set
 CONFIG_HW_RANDOM_VIA=m
+CONFIG_HW_RANDOM_ZHAOXIN=y
 CONFIG_HW_RANDOM_VIRTIO=y
 # CONFIG_HW_RANDOM_XIPHERA is not set
 # CONFIG_APPLICOM is not set
@@ -3786,11 +3787,13 @@ CONFIG_I2C_NFORCE2_S4985=m
 CONFIG_I2C_SIS96X=m
 CONFIG_I2C_VIA=m
 CONFIG_I2C_VIAPRO=m
+# CONFIG_I2C_ZHAOXIN is not set

 #
 # ACPI drivers
 #
 CONFIG_I2C_SCMI=m
+# CONFIG_I2C_ZHAOXIN_SMBUS is not set

 #
 # I2C system bus drivers (mostly embedded / system-on-chip)
@@ -4732,6 +4735,7 @@ CONFIG_FB_HYPERV=m
 # CONFIG_FB_SIMPLE is not set
 # CONFIG_FB_SSD1307 is not set
 # CONFIG_FB_SM712 is not set
+# CONFIG_FB_LS2K500 is not set
 CONFIG_FB_CORE=y
 CONFIG_FB_NOTIFY=y
 CONFIG_FIRMWARE_EDID=y
@@ -5580,6 +5584,7 @@ CONFIG_VMGENID=y
 CONFIG_EFI_SECRET=m
 CONFIG_SEV_GUEST=m
 CONFIG_TDX_GUEST_DRIVER=m
+CONFIG_CSV_GUEST=m
 CONFIG_VIRTIO_ANCHOR=y
 CONFIG_VIRTIO=y
 CONFIG_VIRTIO_PCI_LIB=y
@@ -7109,11 +7114,11 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m
 CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
 CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
 CONFIG_CRYPTO_DEV_QAT_C62XVF=m
-CONFIG_CRYPTO_DEV_TSSE=m
 CONFIG_CRYPTO_DEV_CHELSIO=m
 # CONFIG_CRYPTO_DEV_VIRTIO is not set
 # CONFIG_CRYPTO_DEV_SAFEXCEL is not set
 # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
+CONFIG_CRYPTO_DEV_TSSE=m
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
 CONFIG_X509_CERTIFICATE_PARSER=y
@@ -7476,6 +7481,7 @@ CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
 CONFIG_SCHED_DEBUG=y
 CONFIG_SCHED_INFO=y
 CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_ACPU=y
 # end of Scheduler Debugging

 # CONFIG_DEBUG_TIMEKEEPING is not set
--
Gitee

From 60cb85402f42300feac8d18201f2170d49db5726 Mon Sep 17 00:00:00 2001
From: Qiao Ma
Date: Thu, 21 Mar 2024 11:48:57 +0800
Subject: [PATCH 388/953] anolis: configs: adjust some L0 level kconfigs for
 arch x86

ANBZ: #8598

Adjust some L0 level kconfigs to make ANCK compatible with more server
hardware.
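A sketch of how an adjustment like this is typically regenerated and
verified, reusing the dummy-tools invocation quoted in the previous
patch (the ARCH value and the option grepped for are illustrative
assumptions, not part of this patch):

    # start from the shipped defconfig and re-run olddefconfig
    cp arch/x86/configs/anolis_defconfig .config
    ARCH=x86_64 CROSS_COMPILE=scripts/dummy-tools/ make olddefconfig
    # confirm an adjusted option landed as expected
    grep CONFIG_SATA_ZHAOXIN .config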
Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2938 --- arch/arm64/configs/anolis-debug_defconfig | 74 ++---- arch/arm64/configs/anolis_defconfig | 101 ++----- arch/x86/configs/anolis-debug_defconfig | 306 +++++++++++++++++++++- arch/x86/configs/anolis_defconfig | 294 ++++++++++++++++++++- 4 files changed, 635 insertions(+), 140 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 4c2362da512f..af35e1ac7f6a 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -107,12 +107,13 @@ CONFIG_BPF_UNPRIV_DEFAULT_OFF=y CONFIG_BPF_LSM=y # end of BPF subsystem -CONFIG_PREEMPT_VOLUNTARY_BUILD=y +CONFIG_PREEMPT_BUILD=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set CONFIG_PREEMPT_COUNT=y -# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y # @@ -139,9 +140,11 @@ CONFIG_CPU_ISOLATION=y # RCU Subsystem # CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y # CONFIG_RCU_EXPERT is not set CONFIG_TREE_SRCU=y CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y CONFIG_TASKS_RUDE_RCU=y CONFIG_TASKS_TRACE_RCU=y CONFIG_RCU_STALL_COMMON=y @@ -862,7 +865,7 @@ CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_INTEGRITY_T10=m CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y # CONFIG_BLK_DEV_THROTTLING_LOW is not set @@ -920,32 +923,6 @@ CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_PREEMPT_NOTIFIERS=y CONFIG_PADATA=y CONFIG_ASN1=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK=y -CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_READ_LOCK=y -CONFIG_ARCH_INLINE_READ_LOCK_BH=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_READ_UNLOCK=y -CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_WRITE_LOCK=y -CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y CONFIG_UNINLINE_SPIN_UNLOCK=y CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y CONFIG_MUTEX_SPIN_ON_OWNER=y @@ -2355,7 +2332,7 @@ CONFIG_SCSI_PROC_FS=y # # SCSI support type (disk, tape, CD-ROM) # -CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m @@ -2456,7 +2433,7 @@ CONFIG_SCSI_DH_EMC=y CONFIG_SCSI_DH_ALUA=y # end of SCSI device support -CONFIG_ATA=y +CONFIG_ATA=m CONFIG_SATA_HOST=y CONFIG_PATA_TIMINGS=y CONFIG_ATA_VERBOSE_ERROR=y @@ -2468,7 +2445,7 @@ CONFIG_SATA_PMP=y # # Controllers with non-SFF native interface # -CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI=m CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=m # CONFIG_AHCI_DWC is not set @@ -2491,7 +2468,7 @@ CONFIG_ATA_BMDMA=y # # SATA SFF controllers with BMDMA # -CONFIG_ATA_PIIX=y +CONFIG_ATA_PIIX=m # CONFIG_SATA_DWC is not set # CONFIG_SATA_MV is not set # CONFIG_SATA_NV is not set @@ -5163,7 +5140,8 @@ 
CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set CONFIG_RTC_NVMEM=y @@ -5757,7 +5735,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_HISI_PMU=y +CONFIG_HISI_PMU=m # CONFIG_HISI_PCIE_PMU is not set # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set @@ -5842,7 +5820,9 @@ CONFIG_FS_IOMAP=y CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=m CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -5937,7 +5917,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii" # CONFIG_FAT_DEFAULT_UTF8 is not set # CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set -# CONFIG_NTFS3_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set # end of DOS/FAT/EXFAT/NT Filesystems # @@ -6155,9 +6138,8 @@ CONFIG_IO_WQ=y CONFIG_KEYS=y # CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS=y CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_TRUSTED_KEYS_TEE=y CONFIG_ENCRYPTED_KEYS=y # CONFIG_USER_DECRYPTED_DATA is not set # CONFIG_KEY_DH_OPERATIONS is not set @@ -6345,8 +6327,8 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y -CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -6490,11 +6472,7 @@ CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +# CONFIG_CRYPTO_DEV_CCP is not set CONFIG_CRYPTO_DEV_CPT=m CONFIG_CAVIUM_CPT=m CONFIG_CRYPTO_DEV_NITROX=m @@ -6728,7 +6706,7 @@ CONFIG_OBJAGG=m CONFIG_GENERIC_IOREMAP=y CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=m +CONFIG_ASN1_ENCODER=y # # Kernel hacking @@ -6931,6 +6909,7 @@ CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set # # Lock Debugging (spinlocks, mutexes, etc...) 
@@ -7031,6 +7010,7 @@ CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y # CONFIG_FUNCTION_PROFILER is not set CONFIG_STACK_TRACER=y # CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set CONFIG_SCHED_TRACER=y CONFIG_HWLAT_TRACER=y CONFIG_OSNOISE_TRACER=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index b659891af7af..8e118362d2e0 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -106,11 +106,13 @@ CONFIG_BPF_UNPRIV_DEFAULT_OFF=y CONFIG_BPF_LSM=y # end of BPF subsystem -CONFIG_PREEMPT_VOLUNTARY_BUILD=y +CONFIG_PREEMPT_BUILD=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set -# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y # @@ -137,9 +139,11 @@ CONFIG_CPU_ISOLATION=y # RCU Subsystem # CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y # CONFIG_RCU_EXPERT is not set CONFIG_TREE_SRCU=y CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y CONFIG_TASKS_RUDE_RCU=y CONFIG_TASKS_TRACE_RCU=y CONFIG_RCU_STALL_COMMON=y @@ -858,7 +862,7 @@ CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_INTEGRITY_T10=m CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y # CONFIG_BLK_DEV_THROTTLING_LOW is not set @@ -916,57 +920,7 @@ CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_PREEMPT_NOTIFIERS=y CONFIG_PADATA=y CONFIG_ASN1=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK=y -CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_READ_LOCK=y -CONFIG_ARCH_INLINE_READ_LOCK_BH=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_READ_UNLOCK=y -CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_WRITE_LOCK=y -CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_SPIN_TRYLOCK=y -CONFIG_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_INLINE_SPIN_LOCK=y -CONFIG_INLINE_SPIN_LOCK_BH=y -CONFIG_INLINE_SPIN_LOCK_IRQ=y -CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_INLINE_SPIN_UNLOCK_BH=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_READ_LOCK=y -CONFIG_INLINE_READ_LOCK_BH=y -CONFIG_INLINE_READ_LOCK_IRQ=y -CONFIG_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_BH=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_WRITE_LOCK=y -CONFIG_INLINE_WRITE_LOCK_BH=y -CONFIG_INLINE_WRITE_LOCK_IRQ=y -CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_BH=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_UNINLINE_SPIN_UNLOCK=y CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y CONFIG_MUTEX_SPIN_ON_OWNER=y CONFIG_RWSEM_SPIN_ON_OWNER=y @@ -2375,7 +2329,7 @@ CONFIG_SCSI_PROC_FS=y # # SCSI support type (disk, tape, CD-ROM) # -CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m 
CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m @@ -2476,7 +2430,7 @@ CONFIG_SCSI_DH_EMC=y CONFIG_SCSI_DH_ALUA=y # end of SCSI device support -CONFIG_ATA=y +CONFIG_ATA=m CONFIG_SATA_HOST=y CONFIG_PATA_TIMINGS=y CONFIG_ATA_VERBOSE_ERROR=y @@ -2488,7 +2442,7 @@ CONFIG_SATA_PMP=y # # Controllers with non-SFF native interface # -CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI=m CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=m # CONFIG_AHCI_DWC is not set @@ -2511,7 +2465,7 @@ CONFIG_ATA_BMDMA=y # # SATA SFF controllers with BMDMA # -CONFIG_ATA_PIIX=y +CONFIG_ATA_PIIX=m # CONFIG_SATA_DWC is not set # CONFIG_SATA_MV is not set # CONFIG_SATA_NV is not set @@ -5183,7 +5137,8 @@ CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set CONFIG_RTC_NVMEM=y @@ -5776,7 +5731,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_HISI_PMU=y +CONFIG_HISI_PMU=m # CONFIG_HISI_PCIE_PMU is not set # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set @@ -5861,7 +5816,9 @@ CONFIG_FS_IOMAP=y CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=m CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -5956,7 +5913,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii" # CONFIG_FAT_DEFAULT_UTF8 is not set # CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set -# CONFIG_NTFS3_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set # end of DOS/FAT/EXFAT/NT Filesystems # @@ -6174,9 +6134,8 @@ CONFIG_IO_WQ=y CONFIG_KEYS=y # CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS=y CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_TRUSTED_KEYS_TEE=y CONFIG_ENCRYPTED_KEYS=y # CONFIG_USER_DECRYPTED_DATA is not set # CONFIG_KEY_DH_OPERATIONS is not set @@ -6364,8 +6323,8 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y -CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -6509,11 +6468,7 @@ CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +# CONFIG_CRYPTO_DEV_CCP is not set CONFIG_CRYPTO_DEV_CPT=m CONFIG_CAVIUM_CPT=m CONFIG_CRYPTO_DEV_NITROX=m @@ -6745,7 +6700,7 @@ CONFIG_OBJAGG=m CONFIG_GENERIC_IOREMAP=y CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=m +CONFIG_ASN1_ENCODER=y # # Kernel hacking @@ -6914,6 +6869,7 @@ CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set # # Lock Debugging (spinlocks, mutexes, etc...) 
@@ -7000,6 +6956,7 @@ CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y # CONFIG_FUNCTION_PROFILER is not set CONFIG_STACK_TRACER=y # CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set CONFIG_SCHED_TRACER=y CONFIG_HWLAT_TRACER=y CONFIG_OSNOISE_TRACER=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ba42e39c5c62..2e13de357040 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -1807,7 +1807,7 @@ CONFIG_NET_ACT_CT=m CONFIG_NET_TC_SKB_EXT=y CONFIG_NET_SCH_FIFO=y CONFIG_DCB=y -CONFIG_DNS_RESOLVER=y +CONFIG_DNS_RESOLVER=m # CONFIG_BATMAN_ADV is not set CONFIG_OPENVSWITCH=m CONFIG_OPENVSWITCH_GRE=m @@ -2090,6 +2090,7 @@ CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set # CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set @@ -2224,6 +2225,10 @@ CONFIG_MTD_CFI_I2=y # Self-contained MTD device drivers # # CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -2240,6 +2245,7 @@ CONFIG_MTD_CFI_I2=y # # CONFIG_MTD_ONENAND is not set # CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set # # ECC engine support @@ -2256,6 +2262,7 @@ CONFIG_MTD_CFI_I2=y # CONFIG_MTD_LPDDR is not set # end of LPDDR & LPDDR2 PCM memory drivers +# CONFIG_MTD_SPI_NOR is not set CONFIG_MTD_UBI=m CONFIG_MTD_UBI_WL_THRESHOLD=4096 CONFIG_MTD_UBI_BEB_LIMIT=20 @@ -2359,6 +2366,7 @@ CONFIG_SENSORS_APDS990X=m # CONFIG_HMC6352 is not set # CONFIG_DS1682 is not set CONFIG_VMWARE_BALLOON=m +# CONFIG_LATTICE_ECP3_CONFIG is not set # CONFIG_SRAM is not set # CONFIG_DW_XDATA_PCIE is not set # CONFIG_PCI_ENDPOINT_TEST is not set @@ -2370,9 +2378,11 @@ CONFIG_MISC_RTSX=m # EEPROM support # # CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set # CONFIG_EEPROM_IDT_89HPESX is not set # CONFIG_EEPROM_EE1004 is not set # end of EEPROM support @@ -2768,6 +2778,7 @@ CONFIG_NET_VENDOR_AQUANTIA=y CONFIG_AQTION=m # CONFIG_NET_VENDOR_ARC is not set CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set CONFIG_NET_VENDOR_ATHEROS=y CONFIG_ATL2=m CONFIG_ATL1=m @@ -2814,6 +2825,7 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m # CONFIG_NET_VENDOR_CORTINA is not set CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set CONFIG_DNET=m CONFIG_NET_VENDOR_DEC=y # CONFIG_NET_TULIP is not set @@ -2861,6 +2873,8 @@ CONFIG_ICE_HWTS=y CONFIG_FM10K=m CONFIG_IGC=m # CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set CONFIG_NET_VENDOR_LITEX=y # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y @@ -2968,6 +2982,7 @@ CONFIG_SFC_MCDI_LOGGING=y # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set # CONFIG_NET_VENDOR_VIA is not set CONFIG_NET_VENDOR_WANGXUN=y CONFIG_LIBWX=m @@ -3042,6 +3057,7 @@ CONFIG_DP83867_PHY=m # CONFIG_DP83TD510_PHY is not set CONFIG_VITESSE_PHY=m CONFIG_XILINX_GMII2RGMII=m +# CONFIG_MICREL_KS8995MA is not set # CONFIG_PSE_CONTROLLER is not set CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y @@ -3244,6 +3260,7 @@ CONFIG_MT76x2U=m # CONFIG_MT7996E is not set 
CONFIG_WLAN_VENDOR_MICROCHIP=y # CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set CONFIG_WLAN_VENDOR_PURELIFI=y # CONFIG_PLFXLC is not set CONFIG_WLAN_VENDOR_RALINK=y @@ -3346,7 +3363,13 @@ CONFIG_HDLC_PPP=m # CONFIG_FARSYNC is not set CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set # CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set # CONFIG_IEEE802154_HWSIM is not set # @@ -3412,6 +3435,7 @@ CONFIG_INPUT_KEYBOARD=y # CONFIG_KEYBOARD_ADC is not set # CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_APPLESPI is not set CONFIG_KEYBOARD_ATKBD=y # CONFIG_KEYBOARD_QT1050 is not set # CONFIG_KEYBOARD_QT1070 is not set @@ -3473,6 +3497,8 @@ CONFIG_TABLET_USB_KBTAB=m # CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_ADC is not set # CONFIG_TOUCHSCREEN_ATMEL_MXT is not set @@ -3523,12 +3549,14 @@ CONFIG_TOUCHSCREEN_WACOM_I2C=m # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set # CONFIG_TOUCHSCREEN_TSC_SERIO is not set # CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set # CONFIG_TOUCHSCREEN_TSC2007 is not set # CONFIG_TOUCHSCREEN_RM_TS is not set # CONFIG_TOUCHSCREEN_SILEAD is not set # CONFIG_TOUCHSCREEN_SIS_I2C is not set # CONFIG_TOUCHSCREEN_ST1232 is not set # CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set # CONFIG_TOUCHSCREEN_SX8654 is not set # CONFIG_TOUCHSCREEN_TPS6507X is not set # CONFIG_TOUCHSCREEN_ZET6223 is not set @@ -3575,6 +3603,7 @@ CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m # CONFIG_INPUT_DRV2667_HAPTICS is not set CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m +# CONFIG_RMI4_SPI is not set CONFIG_RMI4_SMB=m CONFIG_RMI4_F03=y CONFIG_RMI4_F03_SERIO=m @@ -3656,6 +3685,8 @@ CONFIG_SERIAL_8250_PERICOM=y # Non-8250 serial port support # # CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set # CONFIG_SERIAL_UARTLITE is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y @@ -3724,6 +3755,7 @@ CONFIG_TCG_TPM=y CONFIG_HW_RANDOM_TPM=y CONFIG_TCG_TIS_CORE=y CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_SPI is not set # CONFIG_TCG_TIS_I2C is not set # CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m @@ -3737,6 +3769,7 @@ CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set CONFIG_TELCLOCK=m # CONFIG_XILLYBUS is not set # CONFIG_XILLYUSB is not set @@ -3845,7 +3878,49 @@ CONFIG_I2C_STUB=m # end of I2C support # CONFIG_I3C is not set -# CONFIG_SPI is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_LANTIQ_SSC is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX 
is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y # CONFIG_SPMI is not set # CONFIG_HSI is not set CONFIG_PPS=y @@ -3982,6 +4057,16 @@ CONFIG_GPIO_ICH=m # CONFIG_GPIO_RDC321X is not set # end of PCI GPIO expanders +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + # # USB GPIO expanders # @@ -4047,6 +4132,7 @@ CONFIG_HWMON_VID=m # CONFIG_SENSORS_ABITUGURU=m CONFIG_SENSORS_ABITUGURU3=m +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m @@ -4056,6 +4142,7 @@ CONFIG_SENSORS_ADM1031=m # CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m @@ -4102,6 +4189,7 @@ CONFIG_SENSORS_JC42=m CONFIG_SENSORS_LINEAGE=m # CONFIG_SENSORS_LTC2945 is not set # CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set # CONFIG_SENSORS_LTC2990 is not set # CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m @@ -4110,11 +4198,13 @@ CONFIG_SENSORS_LTC4215=m CONFIG_SENSORS_LTC4245=m # CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set # CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set # CONFIG_SENSORS_MAX31730 is not set # CONFIG_SENSORS_MAX31760 is not set # CONFIG_MAX31827 is not set @@ -4130,7 +4220,9 @@ CONFIG_SENSORS_MCP3021=m # CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_TPS23861 is not set # CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -4230,6 +4322,7 @@ CONFIG_SENSORS_SCH5636=m # CONFIG_SENSORS_STTS751 is not set # CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m @@ -4410,10 +4503,12 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9052_SPI is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set # CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set # CONFIG_MFD_MC13XXX_I2C is not set # CONFIG_MFD_MP2629 is not set # CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set @@ -4435,6 +4530,8 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set # CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RETU is not set # CONFIG_MFD_PCF50633 is not set @@ -4457,14 +4554,19 @@ CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_TPS65086 is not set # CONFIG_MFD_TI_LP873X is not set # 
CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set # end of Multifunction device drivers # CONFIG_REGULATOR is not set @@ -4663,7 +4765,10 @@ CONFIG_DRM_PANEL=y # # Display Panels # +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set # CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set # end of Display Panels CONFIG_DRM_BRIDGE=y @@ -4680,7 +4785,17 @@ CONFIG_DRM_PANEL_BRIDGE=y CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set # CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set # CONFIG_DRM_XEN_FRONTEND is not set # CONFIG_DRM_VBOXVIDEO is not set # CONFIG_DRM_GUD is not set @@ -4766,7 +4881,18 @@ CONFIG_FB_TILEBLITTING=y # Backlight & LCD device support # CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_BACKLIGHT_KTD253 is not set # CONFIG_BACKLIGHT_KTZ8866 is not set @@ -5017,6 +5143,7 @@ CONFIG_USB_EHCI_PCI=y # CONFIG_USB_EHCI_HCD_PLATFORM is not set # CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PCI=y # CONFIG_USB_OHCI_HCD_PLATFORM is not set @@ -5234,6 +5361,7 @@ CONFIG_MMC_SDHCI_PLTFM=m # CONFIG_MMC_SDHCI_F_SDH30 is not set # CONFIG_MMC_WBSD is not set CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_VUB300=m @@ -5287,6 +5415,7 @@ CONFIG_LEDS_LP3944=m # CONFIG_LEDS_PCA955X is not set # CONFIG_LEDS_PCA963X is not set # CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set # CONFIG_LEDS_PWM is not set # CONFIG_LEDS_BD2606MVV is not set # CONFIG_LEDS_BD2802 is not set @@ -5409,7 +5538,8 @@ CONFIG_RTC_MC146818_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set CONFIG_RTC_NVMEM=y @@ -5460,6 +5590,20 @@ CONFIG_RTC_DRV_EM3027=m # # SPI RTC drivers # +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# 
CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set CONFIG_RTC_I2C_AND_SPI=m # @@ -5658,6 +5802,7 @@ CONFIG_MLXREG_HOTPLUG=m # CONFIG_MLXREG_LC is not set # CONFIG_NVSW_SN2201 is not set CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE3_WMI is not set # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set # CONFIG_SURFACE_HOTPLUG is not set @@ -5756,6 +5901,7 @@ CONFIG_SONY_LAPTOP=m CONFIG_SONYPI_COMPAT=y # CONFIG_SYSTEM76_ACPI is not set CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_SERIAL_MULTI_INSTANTIATE is not set CONFIG_MLX_PLATFORM=m CONFIG_INTEL_IPS=m # CONFIG_INTEL_SCU_PCI is not set @@ -5767,6 +5913,7 @@ CONFIG_P2SB=y CONFIG_HAVE_CLK=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set # CONFIG_COMMON_CLK_MAX9485 is not set # CONFIG_COMMON_CLK_SI5341 is not set # CONFIG_COMMON_CLK_SI5351 is not set @@ -5907,27 +6054,39 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 # # Accelerometers # +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set # CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set # CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set # CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set # CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set # CONFIG_ADXL372_I2C is not set # CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set # CONFIG_BMA400 is not set # CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set # CONFIG_DA280 is not set # CONFIG_DA311 is not set # CONFIG_DMARD06 is not set # CONFIG_DMARD09 is not set # CONFIG_DMARD10 is not set # CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set # CONFIG_IIO_KX022A_I2C is not set # CONFIG_KXSD9 is not set # CONFIG_KXCJK1013 is not set # CONFIG_MC3230 is not set # CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set # CONFIG_MMA7660 is not set # CONFIG_MMA8452 is not set # CONFIG_MMA9551 is not set @@ -5935,6 +6094,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_MSA311 is not set # CONFIG_MXC4005 is not set # CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set # CONFIG_STK8312 is not set # CONFIG_STK8BA50 is not set # end of Accelerometers @@ -5942,26 +6103,67 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital converters # +# CONFIG_AD4130 is not set # CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set # CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set # CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set # CONFIG_AD799X is not set # CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set # CONFIG_HX711 is not set # CONFIG_INA2XX_ADC is not set # CONFIG_LTC2471 is not set # CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set # CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is 
not set # CONFIG_MAX1363 is not set # CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set # CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set # CONFIG_NAU7802 is not set # CONFIG_RICHTEK_RTQ6056 is not set # CONFIG_SD_ADC_MODULATOR is not set # CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set # CONFIG_TI_ADS1015 is not set # CONFIG_TI_ADS7924 is not set # CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set # CONFIG_VF610_ADC is not set # CONFIG_VIPERBOARD_ADC is not set # CONFIG_XILINX_XADC is not set @@ -5970,6 +6172,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital and digital to analog converters # +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set # end of Analog to digital and digital to analog converters # @@ -5981,6 +6185,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Amplifiers # +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set # CONFIG_HMC425 is not set # end of Amplifiers @@ -6023,24 +6229,51 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # SSP Sensor Common # +# CONFIG_IIO_SSP_SENSORHUB is not set # end of SSP Sensor Common # # Digital to analog converters # +# CONFIG_AD3552R is not set # CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set # CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set # CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set # CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set # CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set # CONFIG_DPOT_DAC is not set # CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set # CONFIG_M62332 is not set # CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set # CONFIG_MAX5821 is not set # CONFIG_MCP4725 is not set # CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set # CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set # CONFIG_VF610_DAC is not set # end of Digital to analog converters @@ -6052,6 +6285,7 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Filters # +# CONFIG_ADMV8818 is not set # end of Filters # @@ -6061,17 +6295,31 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Clock Generator/Distribution # +# CONFIG_AD9523 is not set # end of Clock Generator/Distribution # # Phase-Locked Loop (PLL) frequency synthesizers # +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set # end of Phase-Locked Loop (PLL) frequency synthesizers # end of Frequency Synthesizers DDS/PLL # # Digital gyroscope sensors # +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not 
set # CONFIG_BMG160 is not set # CONFIG_FXAS21002C is not set CONFIG_HID_SENSOR_GYRO_3D=m @@ -6087,6 +6335,7 @@ CONFIG_HID_SENSOR_GYRO_3D=m # # Heart Rate Monitors # +# CONFIG_AFE4403 is not set # CONFIG_AFE4404 is not set # CONFIG_MAX30100 is not set # CONFIG_MAX30102 is not set @@ -6110,12 +6359,20 @@ CONFIG_HID_SENSOR_HUMIDITY=m # # Inertial measurement units # +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set # CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set # CONFIG_BOSCH_BNO055_I2C is not set # CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set # CONFIG_KMX61 is not set # CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set # CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set # CONFIG_IIO_ST_LSM6DSX is not set # CONFIG_IIO_ST_LSM9DS0 is not set # end of Inertial measurement units @@ -6185,12 +6442,15 @@ CONFIG_HID_SENSOR_PROX=m # CONFIG_AK8975 is not set # CONFIG_AK09911 is not set # CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set # CONFIG_MAG3110 is not set CONFIG_HID_SENSOR_MAGNETOMETER_3D=m # CONFIG_MMC35240 is not set # CONFIG_IIO_ST_MAGN_3AXIS is not set # CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set # CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set # CONFIG_TI_TMAG5273 is not set # CONFIG_YAMAHA_YAS530 is not set # end of Magnetometer sensors @@ -6228,9 +6488,14 @@ CONFIG_HID_SENSOR_DEVICE_ROTATION=m # CONFIG_AD5272 is not set # CONFIG_DS1803 is not set # CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set # CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set # CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set # CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set # end of Digital potentiometers # @@ -6250,6 +6515,7 @@ CONFIG_HID_SENSOR_PRESS=m # CONFIG_HP03 is not set # CONFIG_ICP10100 is not set # CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set # CONFIG_MPL3115 is not set # CONFIG_MPRLS0025PA is not set # CONFIG_MS5611 is not set @@ -6263,6 +6529,7 @@ CONFIG_HID_SENSOR_PRESS=m # # Lightning sensors # +# CONFIG_AS3935 is not set # end of Lightning sensors # @@ -6287,11 +6554,15 @@ CONFIG_HID_SENSOR_PRESS=m # # Resolver to digital converters # +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set # end of Resolver to digital converters # # Temperature sensors # +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set CONFIG_HID_SENSOR_TEMP=m # CONFIG_MLX90614 is not set # CONFIG_MLX90632 is not set @@ -6301,6 +6572,8 @@ CONFIG_HID_SENSOR_TEMP=m # CONFIG_TSYS01 is not set # CONFIG_TSYS02D is not set # CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set # end of Temperature sensors CONFIG_NTB=m @@ -6442,7 +6715,9 @@ CONFIG_FS_IOMAP=y CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=y CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -6507,7 +6782,7 @@ CONFIG_OVERLAY_FS_INDEX=y # # Caches # -CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_SUPPORT=m CONFIG_NETFS_STATS=y CONFIG_FSCACHE=m CONFIG_FSCACHE_STATS=y @@ -6538,7 +6813,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii" # CONFIG_FAT_DEFAULT_UTF8 is not set # CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set -# CONFIG_NTFS3_FS is not set 
+CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set # end of DOS/FAT/EXFAT/NT Filesystems # @@ -6678,7 +6956,7 @@ CONFIG_CEPH_FS=m # CONFIG_CEPH_FSCACHE is not set CONFIG_CEPH_FS_POSIX_ACL=y # CONFIG_CEPH_FS_SECURITY_LABEL is not set -CONFIG_CIFS=y +CONFIG_CIFS=m # CONFIG_CIFS_STATS2 is not set CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y CONFIG_CIFS_UPCALL=y @@ -6689,8 +6967,10 @@ CONFIG_CIFS_DEBUG=y # CONFIG_CIFS_DEBUG_DUMP_KEYS is not set CONFIG_CIFS_DFS_UPCALL=y # CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set # CONFIG_SMB_SERVER is not set -CONFIG_SMBFS=y +CONFIG_SMBFS=m # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set CONFIG_NLS=y @@ -6744,7 +7024,7 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=m -CONFIG_NLS_UCS2_UTILS=y +CONFIG_NLS_UCS2_UTILS=m CONFIG_DLM=m CONFIG_DLM_DEBUG=y # CONFIG_UNICODE is not set @@ -6949,8 +7229,8 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y -CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -7076,8 +7356,8 @@ CONFIG_CRYPTO_DES3_EDE_X86_64=m CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y -CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 128303b14bf0..981a32ab9cf6 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -2085,6 +2085,7 @@ CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set # CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set @@ -2219,6 +2220,10 @@ CONFIG_MTD_CFI_I2=y # Self-contained MTD device drivers # # CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -2235,6 +2240,7 @@ CONFIG_MTD_CFI_I2=y # # CONFIG_MTD_ONENAND is not set # CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set # # ECC engine support @@ -2251,6 +2257,7 @@ CONFIG_MTD_CFI_I2=y # CONFIG_MTD_LPDDR is not set # end of LPDDR & LPDDR2 PCM memory drivers +# CONFIG_MTD_SPI_NOR is not set CONFIG_MTD_UBI=m CONFIG_MTD_UBI_WL_THRESHOLD=4096 CONFIG_MTD_UBI_BEB_LIMIT=20 @@ -2354,6 +2361,7 @@ CONFIG_SENSORS_APDS990X=m # CONFIG_HMC6352 is not set # CONFIG_DS1682 is not set CONFIG_VMWARE_BALLOON=m +# CONFIG_LATTICE_ECP3_CONFIG is not set # CONFIG_SRAM is not set # CONFIG_DW_XDATA_PCIE is not set # CONFIG_PCI_ENDPOINT_TEST is not set @@ -2365,9 +2373,11 @@ CONFIG_MISC_RTSX=m # EEPROM support # # CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set # CONFIG_EEPROM_IDT_89HPESX is not set # CONFIG_EEPROM_EE1004 is not set # end of EEPROM support @@ -2763,6 +2773,7 @@ CONFIG_NET_VENDOR_AQUANTIA=y 
CONFIG_AQTION=m # CONFIG_NET_VENDOR_ARC is not set CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set CONFIG_NET_VENDOR_ATHEROS=y CONFIG_ATL2=m CONFIG_ATL1=m @@ -2809,6 +2820,7 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m # CONFIG_NET_VENDOR_CORTINA is not set CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set CONFIG_DNET=m CONFIG_NET_VENDOR_DEC=y # CONFIG_NET_TULIP is not set @@ -2856,6 +2868,8 @@ CONFIG_ICE_HWTS=y CONFIG_FM10K=m CONFIG_IGC=m # CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set CONFIG_NET_VENDOR_LITEX=y # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y @@ -2963,6 +2977,7 @@ CONFIG_SFC_MCDI_LOGGING=y # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set # CONFIG_NET_VENDOR_VIA is not set CONFIG_NET_VENDOR_WANGXUN=y CONFIG_LIBWX=m @@ -3037,6 +3052,7 @@ CONFIG_DP83867_PHY=m # CONFIG_DP83TD510_PHY is not set CONFIG_VITESSE_PHY=m CONFIG_XILINX_GMII2RGMII=m +# CONFIG_MICREL_KS8995MA is not set # CONFIG_PSE_CONTROLLER is not set CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y @@ -3238,6 +3254,7 @@ CONFIG_MT76x2U=m # CONFIG_MT7996E is not set CONFIG_WLAN_VENDOR_MICROCHIP=y # CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set CONFIG_WLAN_VENDOR_PURELIFI=y # CONFIG_PLFXLC is not set CONFIG_WLAN_VENDOR_RALINK=y @@ -3340,7 +3357,13 @@ CONFIG_HDLC_PPP=m # CONFIG_FARSYNC is not set CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set # CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set # CONFIG_IEEE802154_HWSIM is not set # @@ -3406,6 +3429,7 @@ CONFIG_INPUT_KEYBOARD=y # CONFIG_KEYBOARD_ADC is not set # CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_APPLESPI is not set CONFIG_KEYBOARD_ATKBD=y # CONFIG_KEYBOARD_QT1050 is not set # CONFIG_KEYBOARD_QT1070 is not set @@ -3467,6 +3491,8 @@ CONFIG_TABLET_USB_KBTAB=m # CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_ADC is not set # CONFIG_TOUCHSCREEN_ATMEL_MXT is not set @@ -3517,12 +3543,14 @@ CONFIG_TOUCHSCREEN_WACOM_I2C=m # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set # CONFIG_TOUCHSCREEN_TSC_SERIO is not set # CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set # CONFIG_TOUCHSCREEN_TSC2007 is not set # CONFIG_TOUCHSCREEN_RM_TS is not set # CONFIG_TOUCHSCREEN_SILEAD is not set # CONFIG_TOUCHSCREEN_SIS_I2C is not set # CONFIG_TOUCHSCREEN_ST1232 is not set # CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set # CONFIG_TOUCHSCREEN_SX8654 is not set # CONFIG_TOUCHSCREEN_TPS6507X is not set # CONFIG_TOUCHSCREEN_ZET6223 is not set @@ -3569,6 +3597,7 @@ CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m # CONFIG_INPUT_DRV2667_HAPTICS is not set CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m +# CONFIG_RMI4_SPI is not set CONFIG_RMI4_SMB=m CONFIG_RMI4_F03=y CONFIG_RMI4_F03_SERIO=m @@ -3650,6 +3679,8 @@ CONFIG_SERIAL_8250_PERICOM=y # Non-8250 serial port support # # CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set # CONFIG_SERIAL_UARTLITE is not set CONFIG_SERIAL_CORE=y 
CONFIG_SERIAL_CORE_CONSOLE=y @@ -3718,6 +3749,7 @@ CONFIG_TCG_TPM=y CONFIG_HW_RANDOM_TPM=y CONFIG_TCG_TIS_CORE=y CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_SPI is not set # CONFIG_TCG_TIS_I2C is not set # CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m @@ -3731,6 +3763,7 @@ CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set CONFIG_TELCLOCK=m # CONFIG_XILLYBUS is not set # CONFIG_XILLYUSB is not set @@ -3839,7 +3872,49 @@ CONFIG_I2C_STUB=m # end of I2C support # CONFIG_I3C is not set -# CONFIG_SPI is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_LANTIQ_SSC is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y # CONFIG_SPMI is not set # CONFIG_HSI is not set CONFIG_PPS=y @@ -3976,6 +4051,16 @@ CONFIG_GPIO_ICH=m # CONFIG_GPIO_RDC321X is not set # end of PCI GPIO expanders +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + # # USB GPIO expanders # @@ -4041,6 +4126,7 @@ CONFIG_HWMON_VID=m # CONFIG_SENSORS_ABITUGURU=m CONFIG_SENSORS_ABITUGURU3=m +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m @@ -4050,6 +4136,7 @@ CONFIG_SENSORS_ADM1031=m # CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m @@ -4096,6 +4183,7 @@ CONFIG_SENSORS_JC42=m CONFIG_SENSORS_LINEAGE=m # CONFIG_SENSORS_LTC2945 is not set # CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set # CONFIG_SENSORS_LTC2990 is not set # CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m @@ -4104,11 +4192,13 @@ CONFIG_SENSORS_LTC4215=m CONFIG_SENSORS_LTC4245=m # CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set # CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set # CONFIG_SENSORS_MAX31730 is not set # CONFIG_SENSORS_MAX31760 is not set # CONFIG_MAX31827 is not set @@ -4124,7 +4214,9 @@ CONFIG_SENSORS_MCP3021=m # CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_TPS23861 is not set # CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# 
CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -4224,6 +4316,7 @@ CONFIG_SENSORS_SCH5636=m # CONFIG_SENSORS_STTS751 is not set # CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m @@ -4404,10 +4497,12 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9052_SPI is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set # CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set # CONFIG_MFD_MC13XXX_I2C is not set # CONFIG_MFD_MP2629 is not set # CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set @@ -4429,6 +4524,8 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set # CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RETU is not set # CONFIG_MFD_PCF50633 is not set @@ -4451,14 +4548,19 @@ CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_TPS65086 is not set # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set # end of Multifunction device drivers # CONFIG_REGULATOR is not set @@ -4657,7 +4759,10 @@ CONFIG_DRM_PANEL=y # # Display Panels # +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set # CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set # end of Display Panels CONFIG_DRM_BRIDGE=y @@ -4674,7 +4779,17 @@ CONFIG_DRM_PANEL_BRIDGE=y CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set # CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set # CONFIG_DRM_XEN_FRONTEND is not set # CONFIG_DRM_VBOXVIDEO is not set # CONFIG_DRM_GUD is not set @@ -4760,7 +4875,18 @@ CONFIG_FB_TILEBLITTING=y # Backlight & LCD device support # CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_BACKLIGHT_KTD253 is not set # CONFIG_BACKLIGHT_KTZ8866 is not set @@ -5011,6 +5137,7 @@ CONFIG_USB_EHCI_PCI=y # CONFIG_USB_EHCI_HCD_PLATFORM is not set # CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y 
CONFIG_USB_OHCI_HCD_PCI=y # CONFIG_USB_OHCI_HCD_PLATFORM is not set @@ -5228,6 +5355,7 @@ CONFIG_MMC_SDHCI_PLTFM=m # CONFIG_MMC_SDHCI_F_SDH30 is not set # CONFIG_MMC_WBSD is not set CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_VUB300=m @@ -5281,6 +5409,7 @@ CONFIG_LEDS_LP3944=m # CONFIG_LEDS_PCA955X is not set # CONFIG_LEDS_PCA963X is not set # CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set # CONFIG_LEDS_PWM is not set # CONFIG_LEDS_BD2606MVV is not set # CONFIG_LEDS_BD2802 is not set @@ -5403,7 +5532,8 @@ CONFIG_RTC_MC146818_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set CONFIG_RTC_NVMEM=y @@ -5454,6 +5584,20 @@ CONFIG_RTC_DRV_EM3027=m # # SPI RTC drivers # +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set CONFIG_RTC_I2C_AND_SPI=m # @@ -5651,6 +5795,7 @@ CONFIG_MLXREG_HOTPLUG=m # CONFIG_MLXREG_LC is not set # CONFIG_NVSW_SN2201 is not set CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE3_WMI is not set # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set # CONFIG_SURFACE_HOTPLUG is not set @@ -5749,6 +5894,7 @@ CONFIG_SONY_LAPTOP=m CONFIG_SONYPI_COMPAT=y # CONFIG_SYSTEM76_ACPI is not set CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_SERIAL_MULTI_INSTANTIATE is not set CONFIG_MLX_PLATFORM=m CONFIG_INTEL_IPS=m # CONFIG_INTEL_SCU_PCI is not set @@ -5760,6 +5906,7 @@ CONFIG_P2SB=y CONFIG_HAVE_CLK=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set # CONFIG_COMMON_CLK_MAX9485 is not set # CONFIG_COMMON_CLK_SI5341 is not set # CONFIG_COMMON_CLK_SI5351 is not set @@ -5896,27 +6043,39 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 # # Accelerometers # +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set # CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set # CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set # CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set # CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set # CONFIG_ADXL372_I2C is not set # CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set # CONFIG_BMA400 is not set # CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set # CONFIG_DA280 is not set # CONFIG_DA311 is not set # CONFIG_DMARD06 is not set # CONFIG_DMARD09 is not set # CONFIG_DMARD10 is not set # CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set # CONFIG_IIO_KX022A_I2C is not set # CONFIG_KXSD9 is not set # CONFIG_KXCJK1013 is not set # CONFIG_MC3230 is not set # CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set # CONFIG_MMA7660 is not set # CONFIG_MMA8452 is not set # CONFIG_MMA9551 is not set @@ -5924,6 +6083,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_MSA311 is not set # CONFIG_MXC4005 is not set # CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is 
not set +# CONFIG_SCA3300 is not set # CONFIG_STK8312 is not set # CONFIG_STK8BA50 is not set # end of Accelerometers @@ -5931,26 +6092,67 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital converters # +# CONFIG_AD4130 is not set # CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set # CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set # CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set # CONFIG_AD799X is not set # CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set # CONFIG_HX711 is not set # CONFIG_INA2XX_ADC is not set # CONFIG_LTC2471 is not set # CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set # CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set # CONFIG_MAX1363 is not set # CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set # CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set # CONFIG_NAU7802 is not set # CONFIG_RICHTEK_RTQ6056 is not set # CONFIG_SD_ADC_MODULATOR is not set # CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set # CONFIG_TI_ADS1015 is not set # CONFIG_TI_ADS7924 is not set # CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set # CONFIG_VF610_ADC is not set # CONFIG_VIPERBOARD_ADC is not set # CONFIG_XILINX_XADC is not set @@ -5959,6 +6161,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital and digital to analog converters # +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set # end of Analog to digital and digital to analog converters # @@ -5970,6 +6174,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Amplifiers # +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set # CONFIG_HMC425 is not set # end of Amplifiers @@ -6012,24 +6218,51 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # SSP Sensor Common # +# CONFIG_IIO_SSP_SENSORHUB is not set # end of SSP Sensor Common # # Digital to analog converters # +# CONFIG_AD3552R is not set # CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set # CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set # CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set # CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set # CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set # CONFIG_DPOT_DAC is not set # CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set # 
CONFIG_M62332 is not set # CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set # CONFIG_MAX5821 is not set # CONFIG_MCP4725 is not set # CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set # CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set # CONFIG_VF610_DAC is not set # end of Digital to analog converters @@ -6041,6 +6274,7 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Filters # +# CONFIG_ADMV8818 is not set # end of Filters # @@ -6050,17 +6284,31 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Clock Generator/Distribution # +# CONFIG_AD9523 is not set # end of Clock Generator/Distribution # # Phase-Locked Loop (PLL) frequency synthesizers # +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set # end of Phase-Locked Loop (PLL) frequency synthesizers # end of Frequency Synthesizers DDS/PLL # # Digital gyroscope sensors # +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set # CONFIG_BMG160 is not set # CONFIG_FXAS21002C is not set CONFIG_HID_SENSOR_GYRO_3D=m @@ -6076,6 +6324,7 @@ CONFIG_HID_SENSOR_GYRO_3D=m # # Heart Rate Monitors # +# CONFIG_AFE4403 is not set # CONFIG_AFE4404 is not set # CONFIG_MAX30100 is not set # CONFIG_MAX30102 is not set @@ -6099,12 +6348,20 @@ CONFIG_HID_SENSOR_HUMIDITY=m # # Inertial measurement units # +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set # CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set # CONFIG_BOSCH_BNO055_I2C is not set # CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set # CONFIG_KMX61 is not set # CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set # CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set # CONFIG_IIO_ST_LSM6DSX is not set # CONFIG_IIO_ST_LSM9DS0 is not set # end of Inertial measurement units @@ -6174,12 +6431,15 @@ CONFIG_HID_SENSOR_PROX=m # CONFIG_AK8975 is not set # CONFIG_AK09911 is not set # CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set # CONFIG_MAG3110 is not set CONFIG_HID_SENSOR_MAGNETOMETER_3D=m # CONFIG_MMC35240 is not set # CONFIG_IIO_ST_MAGN_3AXIS is not set # CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set # CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set # CONFIG_TI_TMAG5273 is not set # CONFIG_YAMAHA_YAS530 is not set # end of Magnetometer sensors @@ -6217,9 +6477,14 @@ CONFIG_HID_SENSOR_DEVICE_ROTATION=m # CONFIG_AD5272 is not set # CONFIG_DS1803 is not set # CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set # CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set # CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set # CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set # end of Digital potentiometers # @@ -6239,6 +6504,7 @@ CONFIG_HID_SENSOR_PRESS=m # CONFIG_HP03 is not set # CONFIG_ICP10100 is not set # CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set # CONFIG_MPL3115 is not set # CONFIG_MPRLS0025PA is not set # CONFIG_MS5611 is not set @@ -6252,6 +6518,7 @@ CONFIG_HID_SENSOR_PRESS=m # # Lightning sensors # +# CONFIG_AS3935 is not set # end of Lightning sensors # @@ -6276,11 +6543,15 @@ 
CONFIG_HID_SENSOR_PRESS=m
#
# Resolver to digital converters
#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
# end of Resolver to digital converters
#
# Temperature sensors
#
+# CONFIG_LTC2983 is not set
+# CONFIG_MAXIM_THERMOCOUPLE is not set
CONFIG_HID_SENSOR_TEMP=m
# CONFIG_MLX90614 is not set
# CONFIG_MLX90632 is not set
@@ -6290,6 +6561,8 @@ CONFIG_HID_SENSOR_TEMP=m
# CONFIG_TSYS01 is not set
# CONFIG_TSYS02D is not set
# CONFIG_MAX30208 is not set
+# CONFIG_MAX31856 is not set
+# CONFIG_MAX31865 is not set
# end of Temperature sensors
CONFIG_NTB=m
@@ -6431,7 +6704,9 @@ CONFIG_FS_IOMAP=y
CONFIG_BUFFER_HEAD=y
CONFIG_LEGACY_DIRECT_IO=y
# CONFIG_EXT2_FS is not set
-# CONFIG_EXT3_FS is not set
+CONFIG_EXT3_FS=m
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_USE_FOR_EXT2=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -6527,7 +6802,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
# CONFIG_FAT_DEFAULT_UTF8 is not set
# CONFIG_EXFAT_FS is not set
# CONFIG_NTFS_FS is not set
-# CONFIG_NTFS3_FS is not set
+CONFIG_NTFS3_FS=m
+# CONFIG_NTFS3_64BIT_CLUSTER is not set
+# CONFIG_NTFS3_LZX_XPRESS is not set
+# CONFIG_NTFS3_FS_POSIX_ACL is not set
# end of DOS/FAT/EXFAT/NT Filesystems
#
@@ -6940,8 +7218,8 @@ CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=y
-CONFIG_CRYPTO_SM4_GENERIC=y
+CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_TWOFISH_COMMON=m
@@ -7067,8 +7345,8 @@ CONFIG_CRYPTO_DES3_EDE_X86_64=m
CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
-CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y
-CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y
+CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m
CONFIG_CRYPTO_TWOFISH_X86_64=m
CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
--
Gitee

From e7e94cf88d6ade7b19bbe3a4f55e667f15305036 Mon Sep 17 00:00:00 2001
From: Qiao Ma
Date: Thu, 21 Mar 2024 19:26:14 +0800
Subject: [PATCH 389/953] anolis: configs: adjust L1 level kconfigs

ANBZ: #8598

Adjust L1 kconfigs to further improve the compatibility of ANCK.
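As a quick sanity check, the adjusted states can be read back from the
refreshed defconfigs with the in-tree scripts/config helper. This is only
an illustrative sketch, not part of the change itself; the option names
and expected values below are taken from the arm64 hunks of this patch:

  ./scripts/config --file arch/arm64/configs/anolis-debug_defconfig \
          --state SCHED_CLUSTER   # expected: y
  ./scripts/config --file arch/arm64/configs/anolis-debug_defconfig \
          --state BTRFS_FS        # expected: m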
Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2938 --- arch/arm64/configs/anolis-debug_defconfig | 77 ++++++++++++++++------- arch/arm64/configs/anolis_defconfig | 77 ++++++++++++++++------- arch/x86/configs/anolis-debug_defconfig | 57 ++++++++++++----- arch/x86/configs/anolis_defconfig | 57 ++++++++++++----- 4 files changed, 198 insertions(+), 70 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index af35e1ac7f6a..b2a67eec8bf0 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -394,6 +394,7 @@ CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y CONFIG_ARM64_ERRATUM_2054223=y CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y CONFIG_ARM64_ERRATUM_2966298=y CONFIG_CAVIUM_ERRATUM_22375=y @@ -424,7 +425,7 @@ CONFIG_ARM64_PA_BITS=48 # CONFIG_CPU_BIG_ENDIAN is not set CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_SCHED_MC=y -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_NR_CPUS=1024 CONFIG_HOTPLUG_CPU=y @@ -495,7 +496,7 @@ CONFIG_AS_HAS_LDAPR=y # # ARMv8.4 architectural features # -# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_ARM64_AMU_EXTN=y CONFIG_AS_HAS_ARMV8_4=y CONFIG_ARM64_TLB_RANGE=y # end of ARMv8.4 architectural features @@ -615,7 +616,7 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y # # CONFIG_CPUFREQ_DT is not set # CONFIG_CPUFREQ_DT_PLATDEV is not set -CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_ACPI_CPPC_CPUFREQ_FIE=y CONFIG_ARM_SCPI_CPUFREQ=m # CONFIG_ARM_QCOM_CPUFREQ_HW is not set @@ -626,6 +627,7 @@ CONFIG_ARCH_SUPPORTS_ACPI=y CONFIG_ACPI=y CONFIG_ACPI_GENERIC_GSI=y CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_TABLE_LIB=y CONFIG_ACPI_DEBUGGER=y CONFIG_ACPI_DEBUGGER_USER=m CONFIG_ACPI_SPCR_TABLE=y @@ -861,6 +863,7 @@ CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1055,6 +1058,7 @@ CONFIG_ZONE_DMA=y CONFIG_ZONE_DMA32=y CONFIG_ZONE_DEVICE=y CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y # CONFIG_DEVICE_PRIVATE is not set CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y CONFIG_ARCH_USES_PG_ARCH_X=y @@ -1846,6 +1850,7 @@ CONFIG_PCI_QUIRKS=y CONFIG_PCI_STUB=y # CONFIG_PCI_PF_STUB is not set CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -1915,7 +1920,17 @@ CONFIG_PCI_HISI=y # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -1947,11 +1962,13 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_FW_LOADER=y CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_ALLOW_DEV_COREDUMP=y @@ -2387,7 +2404,7 @@ CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS is not set 
+CONFIG_SCSI_MPT2SAS=m # CONFIG_SCSI_MPI3MR is not set CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set @@ -2396,7 +2413,7 @@ CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_MYRS is not set CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FDOMAIN_PCI is not set @@ -2749,7 +2766,7 @@ CONFIG_HINIC=m # CONFIG_NET_VENDOR_I825XX is not set CONFIG_NET_VENDOR_INTEL=y # CONFIG_E100 is not set -# CONFIG_E1000 is not set +CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m CONFIG_IGB_HWMON=y @@ -3411,7 +3428,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=m # CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=m CONFIG_I2C_GPIO_FAULT_INJECTOR=y -# CONFIG_I2C_HISI is not set +CONFIG_I2C_HISI=m # CONFIG_I2C_NOMADIK is not set # CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m @@ -3612,7 +3629,7 @@ CONFIG_GPIO_DWAPB=m # CONFIG_GPIO_FTGPIO010 is not set CONFIG_GPIO_GENERIC_PLATFORM=m # CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HISI is not set +CONFIG_GPIO_HISI=m # CONFIG_GPIO_HLWD is not set # CONFIG_GPIO_LOGICVC is not set # CONFIG_GPIO_MB86S7X is not set @@ -5345,8 +5362,16 @@ CONFIG_VFIO_PCI=m # # VFIO support for platform devices # -# CONFIG_VFIO_PLATFORM is not set +CONFIG_VFIO_PLATFORM_BASE=m +CONFIG_VFIO_PLATFORM=m # CONFIG_VFIO_AMBA is not set + +# +# VFIO platform reset drivers +# +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# end of VFIO platform reset drivers # end of VFIO support for platform devices # CONFIG_VIRT_DRIVERS is not set @@ -5563,7 +5588,7 @@ CONFIG_ARM_SMMU_V3=y # # Hisilicon SoC drivers # -# CONFIG_KUNPENG_HCCS is not set +CONFIG_KUNPENG_HCCS=m # end of Hisilicon SoC drivers # @@ -5736,7 +5761,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m CONFIG_HISI_PMU=m -# CONFIG_HISI_PCIE_PMU is not set +CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set @@ -5766,6 +5791,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=m CONFIG_DEV_DAX_PMEM=m CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y # CONFIG_DEV_DAX_KMEM is not set CONFIG_NVMEM=y @@ -5844,7 +5870,13 @@ CONFIG_XFS_WARN=y # CONFIG_XFS_DEBUG is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -6302,11 +6334,12 @@ CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_RSA=y CONFIG_CRYPTO_DH=m # CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m # CONFIG_CRYPTO_ECDSA is not set # CONFIG_CRYPTO_ECRDSA is not set CONFIG_CRYPTO_SM2=y -# CONFIG_CRYPTO_CURVE25519 is not set +CONFIG_CRYPTO_CURVE25519=m # end of Public-key cryptography # @@ -6492,11 +6525,12 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set -# CONFIG_CRYPTO_DEV_HISI_SEC is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m # CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_QM=m # CONFIG_CRYPTO_DEV_HISI_ZIP is not set -# CONFIG_CRYPTO_DEV_HISI_HPRE is not 
set -# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y @@ -6691,6 +6725,7 @@ CONFIG_FONT_SUPPORT=y # CONFIG_FONTS is not set CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y @@ -6737,8 +6772,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 8e118362d2e0..092a98239318 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -392,6 +392,7 @@ CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y CONFIG_ARM64_ERRATUM_2054223=y CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y CONFIG_ARM64_ERRATUM_2966298=y CONFIG_CAVIUM_ERRATUM_22375=y @@ -422,7 +423,7 @@ CONFIG_ARM64_PA_BITS=48 # CONFIG_CPU_BIG_ENDIAN is not set CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_SCHED_MC=y -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_NR_CPUS=1024 CONFIG_HOTPLUG_CPU=y @@ -493,7 +494,7 @@ CONFIG_AS_HAS_LDAPR=y # # ARMv8.4 architectural features # -# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_ARM64_AMU_EXTN=y CONFIG_AS_HAS_ARMV8_4=y CONFIG_ARM64_TLB_RANGE=y # end of ARMv8.4 architectural features @@ -613,7 +614,7 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y # # CONFIG_CPUFREQ_DT is not set # CONFIG_CPUFREQ_DT_PLATDEV is not set -CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_ACPI_CPPC_CPUFREQ_FIE=y CONFIG_ARM_SCPI_CPUFREQ=m # CONFIG_ARM_QCOM_CPUFREQ_HW is not set @@ -624,6 +625,7 @@ CONFIG_ARCH_SUPPORTS_ACPI=y CONFIG_ACPI=y CONFIG_ACPI_GENERIC_GSI=y CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_TABLE_LIB=y # CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y # CONFIG_ACPI_FPDT is not set @@ -858,6 +860,7 @@ CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1052,6 +1055,7 @@ CONFIG_ZONE_DMA=y CONFIG_ZONE_DMA32=y CONFIG_ZONE_DEVICE=y CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y # CONFIG_DEVICE_PRIVATE is not set CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y CONFIG_ARCH_USES_PG_ARCH_X=y @@ -1843,6 +1847,7 @@ CONFIG_PCI_QUIRKS=y CONFIG_PCI_STUB=y # CONFIG_PCI_PF_STUB is not set CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -1912,7 +1917,17 @@ CONFIG_PCI_HISI=y # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -1944,11 +1959,13 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_FW_LOADER=y CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y 
CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_ALLOW_DEV_COREDUMP=y @@ -2384,7 +2401,7 @@ CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS is not set +CONFIG_SCSI_MPT2SAS=m # CONFIG_SCSI_MPI3MR is not set CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set @@ -2393,7 +2410,7 @@ CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_MYRS is not set CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FDOMAIN_PCI is not set @@ -2746,7 +2763,7 @@ CONFIG_HINIC=m # CONFIG_NET_VENDOR_I825XX is not set CONFIG_NET_VENDOR_INTEL=y # CONFIG_E100 is not set -# CONFIG_E1000 is not set +CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m CONFIG_IGB_HWMON=y @@ -3408,7 +3425,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=m # CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_GPIO_FAULT_INJECTOR is not set -# CONFIG_I2C_HISI is not set +CONFIG_I2C_HISI=m # CONFIG_I2C_NOMADIK is not set # CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m @@ -3609,7 +3626,7 @@ CONFIG_GPIO_DWAPB=m # CONFIG_GPIO_FTGPIO010 is not set CONFIG_GPIO_GENERIC_PLATFORM=m # CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HISI is not set +CONFIG_GPIO_HISI=m # CONFIG_GPIO_HLWD is not set # CONFIG_GPIO_LOGICVC is not set # CONFIG_GPIO_MB86S7X is not set @@ -5341,8 +5358,16 @@ CONFIG_VFIO_PCI=m # # VFIO support for platform devices # -# CONFIG_VFIO_PLATFORM is not set +CONFIG_VFIO_PLATFORM_BASE=m +CONFIG_VFIO_PLATFORM=m # CONFIG_VFIO_AMBA is not set + +# +# VFIO platform reset drivers +# +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# end of VFIO platform reset drivers # end of VFIO support for platform devices # CONFIG_VIRT_DRIVERS is not set @@ -5559,7 +5584,7 @@ CONFIG_ARM_SMMU_V3=y # # Hisilicon SoC drivers # -# CONFIG_KUNPENG_HCCS is not set +CONFIG_KUNPENG_HCCS=m # end of Hisilicon SoC drivers # @@ -5732,7 +5757,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m CONFIG_HISI_PMU=m -# CONFIG_HISI_PCIE_PMU is not set +CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set @@ -5762,6 +5787,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=m CONFIG_DEV_DAX_PMEM=m CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y # CONFIG_DEV_DAX_KMEM is not set CONFIG_NVMEM=y @@ -5840,7 +5866,13 @@ CONFIG_XFS_POSIX_ACL=y # CONFIG_XFS_DEBUG is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -6298,11 +6330,12 @@ CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_RSA=y CONFIG_CRYPTO_DH=m # CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m # CONFIG_CRYPTO_ECDSA is not set # CONFIG_CRYPTO_ECRDSA is not set CONFIG_CRYPTO_SM2=y -# CONFIG_CRYPTO_CURVE25519 is not set +CONFIG_CRYPTO_CURVE25519=m # end of 
Public-key cryptography # @@ -6488,11 +6521,12 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set -# CONFIG_CRYPTO_DEV_HISI_SEC is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m # CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_QM=m # CONFIG_CRYPTO_DEV_HISI_ZIP is not set -# CONFIG_CRYPTO_DEV_HISI_HPRE is not set -# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y @@ -6686,6 +6720,7 @@ CONFIG_FONT_SUPPORT=y # CONFIG_FONTS is not set CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y @@ -6731,8 +6766,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 2e13de357040..36a089094635 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -40,13 +40,13 @@ CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y CONFIG_HAVE_KERNEL_ZSTD=y -CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_BZIP2 is not set # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set # CONFIG_KERNEL_LZ4 is not set -# CONFIG_KERNEL_ZSTD is not set +CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_INIT="" CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y @@ -367,7 +367,7 @@ CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_HYPERVISOR_GUEST=y CONFIG_PARAVIRT=y # CONFIG_PARAVIRT_DEBUG is not set -# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_PARAVIRT_SPINLOCKS=y CONFIG_X86_HV_CALLBACK_VECTOR=y CONFIG_XEN=y # CONFIG_XEN_PV is not set @@ -414,7 +414,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -CONFIG_SCHED_CLUSTER=y +# CONFIG_SCHED_CLUSTER is not set CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -578,6 +578,7 @@ CONFIG_ACPI=y CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +CONFIG_ACPI_TABLE_LIB=y CONFIG_ACPI_DEBUGGER=y CONFIG_ACPI_DEBUGGER_USER=m CONFIG_ACPI_SPCR_TABLE=y @@ -824,7 +825,7 @@ CONFIG_SECCOMP_FILTER=y CONFIG_HAVE_ARCH_STACKLEAK=y CONFIG_HAVE_STACKPROTECTOR=y CONFIG_STACKPROTECTOR=y -# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_STACKPROTECTOR_STRONG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y CONFIG_LTO_NONE=y @@ -942,6 +943,7 @@ CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1984,6 +1986,7 @@ CONFIG_PCI_QUIRKS=y CONFIG_PCI_STUB=y CONFIG_PCI_PF_STUB=y CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_LOCKLESS_CONFIG=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -2035,7 +2038,17 @@ CONFIG_PCI_HYPERV_INTERFACE=m # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m 
+CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -2074,7 +2087,7 @@ CONFIG_FW_LOADER_USER_HELPER=y # CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_WANT_DEV_COREDUMP=y @@ -2502,7 +2515,7 @@ CONFIG_VMWARE_PVSCSI=m CONFIG_HYPERV_STORAGE=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m CONFIG_FCOE_FNIC=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set @@ -5855,10 +5868,14 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y CONFIG_INTEL_IFS=m # CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m +CONFIG_INTEL_PMT_CLASS=m +CONFIG_INTEL_PMT_TELEMETRY=m +CONFIG_INTEL_PMT_CRASHLOG=m # # Intel Speed Select Technology interface support # +CONFIG_INTEL_SPEED_SELECT_TPMI=m CONFIG_INTEL_SPEED_SELECT_INTERFACE=m # end of Intel Speed Select Technology interface support @@ -5879,9 +5896,11 @@ CONFIG_INTEL_OAKTRAIL=m # CONFIG_INTEL_ISHTP_ECLITE is not set # CONFIG_INTEL_PUNIT_IPC is not set CONFIG_INTEL_RST=m +# CONFIG_INTEL_SDSI is not set # CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TPMI=m CONFIG_INTEL_TURBO_MAX_3=y -# CONFIG_INTEL_VSEC is not set +CONFIG_INTEL_VSEC=y # CONFIG_MSI_EC is not set CONFIG_MSI_LAPTOP=m CONFIG_MSI_WMI=m @@ -6603,7 +6622,9 @@ CONFIG_PWM_LPSS_PLATFORM=m # end of IRQ chip support # CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set # # PHY Subsystem @@ -6627,6 +6648,7 @@ CONFIG_PWM_LPSS_PLATFORM=m CONFIG_POWERCAP=y CONFIG_INTEL_RAPL_CORE=m CONFIG_INTEL_RAPL=m +# CONFIG_INTEL_RAPL_TPMI is not set CONFIG_IDLE_INJECT=y # CONFIG_MCB is not set @@ -6660,6 +6682,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=y CONFIG_DEV_DAX_PMEM=y CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y CONFIG_DEV_DAX_KMEM=y CONFIG_NVMEM=y @@ -6740,7 +6763,13 @@ CONFIG_XFS_WARN=y CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y # CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -7374,7 +7403,7 @@ CONFIG_CRYPTO_POLY1305_X86_64=m CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_SM3_AVX_X86_64=y +CONFIG_CRYPTO_SM3_AVX_X86_64=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m @@ -7633,8 +7662,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 981a32ab9cf6..145ac54de178 100644 --- 
a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -39,13 +39,13 @@ CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y CONFIG_HAVE_KERNEL_ZSTD=y -CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_BZIP2 is not set # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set # CONFIG_KERNEL_LZ4 is not set -# CONFIG_KERNEL_ZSTD is not set +CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_INIT="" CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y @@ -364,7 +364,7 @@ CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_HYPERVISOR_GUEST=y CONFIG_PARAVIRT=y # CONFIG_PARAVIRT_DEBUG is not set -# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_PARAVIRT_SPINLOCKS=y CONFIG_X86_HV_CALLBACK_VECTOR=y CONFIG_XEN=y # CONFIG_XEN_PV is not set @@ -411,7 +411,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -CONFIG_SCHED_CLUSTER=y +# CONFIG_SCHED_CLUSTER is not set CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -574,6 +574,7 @@ CONFIG_ACPI=y CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +CONFIG_ACPI_TABLE_LIB=y # CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y # CONFIG_ACPI_FPDT is not set @@ -819,7 +820,7 @@ CONFIG_SECCOMP_FILTER=y CONFIG_HAVE_ARCH_STACKLEAK=y CONFIG_HAVE_STACKPROTECTOR=y CONFIG_STACKPROTECTOR=y -# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_STACKPROTECTOR_STRONG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y CONFIG_LTO_NONE=y @@ -937,6 +938,7 @@ CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1979,6 +1981,7 @@ CONFIG_PCI_QUIRKS=y CONFIG_PCI_STUB=y CONFIG_PCI_PF_STUB=y CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_LOCKLESS_CONFIG=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -2030,7 +2033,17 @@ CONFIG_PCI_HYPERV_INTERFACE=m # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -2069,7 +2082,7 @@ CONFIG_FW_LOADER_USER_HELPER=y # CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_WANT_DEV_COREDUMP=y @@ -2497,7 +2510,7 @@ CONFIG_VMWARE_PVSCSI=m CONFIG_HYPERV_STORAGE=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m CONFIG_FCOE_FNIC=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set @@ -5848,10 +5861,14 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y CONFIG_INTEL_IFS=m # CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m +CONFIG_INTEL_PMT_CLASS=m +CONFIG_INTEL_PMT_TELEMETRY=m +CONFIG_INTEL_PMT_CRASHLOG=m # # Intel Speed Select Technology interface support # +CONFIG_INTEL_SPEED_SELECT_TPMI=m CONFIG_INTEL_SPEED_SELECT_INTERFACE=m # end of Intel Speed Select Technology interface support @@ -5872,9 +5889,11 @@ CONFIG_INTEL_OAKTRAIL=m # CONFIG_INTEL_ISHTP_ECLITE is not set # CONFIG_INTEL_PUNIT_IPC is not set CONFIG_INTEL_RST=m +# CONFIG_INTEL_SDSI is not set # 
CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TPMI=m CONFIG_INTEL_TURBO_MAX_3=y -# CONFIG_INTEL_VSEC is not set +CONFIG_INTEL_VSEC=y # CONFIG_MSI_EC is not set CONFIG_MSI_LAPTOP=m CONFIG_MSI_WMI=m @@ -6592,7 +6611,9 @@ CONFIG_PWM_LPSS_PLATFORM=m # end of IRQ chip support # CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set # # PHY Subsystem @@ -6616,6 +6637,7 @@ CONFIG_PWM_LPSS_PLATFORM=m CONFIG_POWERCAP=y CONFIG_INTEL_RAPL_CORE=m CONFIG_INTEL_RAPL=m +# CONFIG_INTEL_RAPL_TPMI is not set CONFIG_IDLE_INJECT=y # CONFIG_MCB is not set @@ -6649,6 +6671,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=y CONFIG_DEV_DAX_PMEM=y CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y CONFIG_DEV_DAX_KMEM=y CONFIG_NVMEM=y @@ -6729,7 +6752,13 @@ CONFIG_XFS_POSIX_ACL=y CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y # CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -7363,7 +7392,7 @@ CONFIG_CRYPTO_POLY1305_X86_64=m CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_SM3_AVX_X86_64=y +CONFIG_CRYPTO_SM3_AVX_X86_64=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m @@ -7620,8 +7649,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y -- Gitee From ae9212d75e3d152e82600551988ec28cec530160 Mon Sep 17 00:00:00 2001 From: MinLi Date: Tue, 12 Mar 2024 20:07:33 +0800 Subject: [PATCH 390/953] anolis: loongarch: anck 6.6 support loongarch ANBZ: #7099 ANCK-6.6 adds initial support for the loongarch architecture: commit the spec file and the corresponding build scripts and configs. Signed-off-by: Gu Mi Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2935 --- anolis/genrpmtree.sh | 5 +- anolis/rpm/kernel.spec.template | 37 +- arch/loongarch/configs/anolis-debug_defconfig | 2203 +++++++++++++++++ arch/loongarch/configs/anolis_defconfig | 2203 +++++++++++++++++ 4 files changed, 4438 insertions(+), 10 deletions(-) create mode 100644 arch/loongarch/configs/anolis-debug_defconfig create mode 100644 arch/loongarch/configs/anolis_defconfig diff --git a/anolis/genrpmtree.sh b/anolis/genrpmtree.sh index dd19f111f649..7f1f4036f797 100644 --- a/anolis/genrpmtree.sh +++ b/anolis/genrpmtree.sh @@ -28,7 +28,10 @@ function do_prep() { cp ${DIST_SRCROOT}/arch/x86/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config cp ${DIST_SRCROOT}/arch/arm64/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config cp ${DIST_SRCROOT}/arch/arm64/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config - + cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis_defconfig \ + 
${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64.config + cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis-debug_defconfig \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64-debug.config } do_prep \ No newline at end of file diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index ff6c6285960b..109e09ae53fe 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -58,6 +58,12 @@ # should we do C=1 builds with sparse %define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} +# For loongarch, disable kernel-debug and with_bpftool since they are not yet supported. +%ifarch loongarch64 +%define with_debug 0 +%define with_bpftool 0 +%endif + %define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0} # turn off debug kernel for gcov builds @@ -133,6 +139,14 @@ %define kernel_image arch/arm64/boot/Image.gz %endif +%ifarch loongarch64 +%define all_arch_configs %{name}-%{version}-loongarch64*.config +%define asmarch loongarch +%define make_target vmlinux +%define hdrarch loongarch +%define kernel_image vmlinux +%endif + # To temporarily exclude an architecture from being built, add it to # %%nobuildarches. Do _NOT_ use the ExclusiveArch: line, because if we # don't build kernel-headers then the new build system will no longer let @@ -173,7 +187,7 @@ Release: %{pkg_release} Summary: The Linux kernel, based on version %{version}, heavily modified with backports # DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. # SET %%nobuildarches (ABOVE) INSTEAD -ExclusiveArch: noarch i686 x86_64 aarch64 +ExclusiveArch: noarch i686 x86_64 aarch64 loongarch64 ExclusiveOS: Linux @@ -252,7 +266,8 @@ Source21: kernel-%{version}-aarch64-debug.config Source39: kernel-%{version}-x86_64.config Source40: kernel-%{version}-x86_64-debug.config Source43: generate_bls_conf.sh - +Source45: kernel-%{version}-loongarch64.config +Source46: kernel-%{version}-loongarch64-debug.config # Sources for kernel-tools @@ -973,13 +988,17 @@ BuildKernel() { # Run depmod on the resulting module tree and make sure it isn't broken depmod -b . -aeF ./System.map $KernelVer &> depmod.out - if [ -s depmod.out ]; then - echo "Depmod failure" - cat depmod.out - exit 1 - else - rm depmod.out - fi + %ifnarch loongarch64 + if [ -s depmod.out ]; then + echo "Depmod failure" + cat depmod.out + exit 1 + else + rm depmod.out + fi + %else + rm -rf depmod.out + %endif else # Ensure important files/directories exist to let the packaging succeed mkdir -p lib/modules/$KernelVer/kernel diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig new file mode 100644 index 000000000000..db41cbf5efd4 --- /dev/null +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -0,0 +1,2203 @@ +# +## Automatically generated file; DO NOT EDIT.
+## Linux/loongarch 6.6.7 Kernel Configuration +## +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_NUMA_BALANCING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CPU_HAS_LASX=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y +CONFIG_HIBERNATION=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BINFMT_MISC=m +CONFIG_ZSWAP=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m 
+CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m 
+CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y 
+CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_CAN=m +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_IOV=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_YENTA=m +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y 
+CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_ATA_PIIX=m +CONFIG_PATA_ATIIXP=y +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NTB_NETDEV=m +CONFIG_RIONET=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_BNX2=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# 
CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_LED_TRIGGER_PHY=y +CONFIG_SFP=y +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m 
+CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m +CONFIG_RT2X00=m +CONFIG_RT2800PCI=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=m +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_ZD1211RW=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_XTKBD=m +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y 
+CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m +CONFIG_SPI=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINCTRL_LOONGSON2=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m 
+CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y +CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +CONFIG_USB_S2255=m +CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_AU0828=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m 
+CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_FIREDTV=m +CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_LOONGSON=y +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FB_RADEON=y +CONFIG_FB_LS2K500=m +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_HRTIMER=m +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MPU401=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m 
+CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y 
+CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m +CONFIG_USB_GADGET=y +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m 
+CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_LOONGSON=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_COMEDI=m +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m +CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m +CONFIG_COMEDI_NI_PCIMIO=m +CONFIG_STAGING=y +CONFIG_COMMON_CLK_LOONGSON2=y +CONFIG_LOONGSON2_GUTS=y +CONFIG_LOONGSON2_PM=y +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m +CONFIG_NTFS3_FS=m +CONFIG_NTFS3_64BIT_CLUSTER=y 
+CONFIG_NTFS3_LZX_XPRESS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_PSTORE=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFSD=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SM2=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_CRC32_LOONGARCH=m
+CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+CONFIG_CRYPTO_DEV_CHELSIO=m
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+CONFIG_SECONDARY_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_SYSTEM_REVOCATION_LIST=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=m
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_FRAME_WARN=4096
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_LIST=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig
new file mode 100644
index 000000000000..db41cbf5efd4
--- /dev/null
+++ b/arch/loongarch/configs/anolis_defconfig
@@ -0,0 +1,2203 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/loongarch 6.6.7 Kernel Configuration
+#
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_CPU_HAS_LSX=y
+CONFIG_CPU_HAS_LASX=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_LOONGSON3_ACPI_CPUFREQ=y
+CONFIG_HIBERNATION=y
+CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_TAD=y
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG_SHA256=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BINFMT_MISC=m
+CONFIG_ZSWAP=y
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+CONFIG_Z3FOLD=y
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_SLAB_FREELIST_RANDOM=y
+# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_USERFAULTFD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+CONFIG_TLS_TOE=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+CONFIG_INET_ESPINTCP=y
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m 
+CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y 
+CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_CAN=m +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_IOV=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_YENTA=m +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y 
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=y
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_SMARTPQI=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_SCSI_QLOGIC_1280=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_TCM_QLA2XXX=m
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_ATA_PIIX=m
+CONFIG_PATA_ATIIXP=y
+CONFIG_ATA_GENERIC=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BCACHE=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_WRITECACHE=m
+CONFIG_DM_ERA=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TARGET_CXGB4=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LOGGING=y
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_WIREGUARD=m
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NTB_NETDEV=m
+CONFIG_RIONET=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ATM_DRIVERS is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_BNX2=y
+CONFIG_TIGON3=m
+CONFIG_BNX2X=m
+CONFIG_BNXT=m
+CONFIG_BNXT_DCB=y
+# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_CHELSIO_T1=m
+CONFIG_CHELSIO_T1_1G=y
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_IPSEC_INLINE=m
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_DNET=m
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBEVF=m
+CONFIG_I40E=m
+CONFIG_I40E_DCB=y
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_FM10K=m
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_CORE_IPOIB=y
+CONFIG_MLXSW_CORE=m
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+CONFIG_ETHOC=m
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+CONFIG_8139TOO_8129=y
+CONFIG_R8169=m
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NGBE=m
+CONFIG_TXGBE=m
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_SFP=y
+CONFIG_AMD_PHY=m
+CONFIG_AQUANTIA_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_BCM7XXX_PHY=m
+CONFIG_BCM87XX_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_CORTINA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_INTEL_XWAY_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=y
+CONFIG_MICREL_PHY=m
+CONFIG_MICROCHIP_T1_PHY=m
+CONFIG_MICROSEMI_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_RENESAS_PHY=m
+CONFIG_ROCKCHIP_PHY=m
+CONFIG_STE10XP=m
+CONFIG_TERANETICS_PHY=m
+CONFIG_DP83822_PHY=m
+CONFIG_DP83TC811_PHY=m
+CONFIG_DP83848_PHY=m
+CONFIG_DP83867_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_XILINX_GMII2RGMII=m
+CONFIG_MICREL_KS8995MA=m
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_SLCAN=m
+CONFIG_CAN_C_CAN=m
+CONFIG_CAN_C_CAN_PLATFORM=m
+CONFIG_CAN_C_CAN_PCI=m
+CONFIG_CAN_CC770=m
+CONFIG_CAN_CC770_PLATFORM=m
+CONFIG_CAN_SJA1000=m
+CONFIG_CAN_EMS_PCI=m
+CONFIG_CAN_KVASER_PCI=m
+CONFIG_CAN_PEAK_PCI=m
+CONFIG_CAN_PLX_PCI=m
+CONFIG_CAN_SJA1000_PLATFORM=m
+CONFIG_CAN_SOFTING=m
+CONFIG_CAN_8DEV_USB=m
+CONFIG_CAN_EMS_USB=m
+CONFIG_CAN_KVASER_USB=m
+CONFIG_CAN_PEAK_USB=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_MSCC_MIIM=m
+CONFIG_MDIO_THUNDER=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+# CONFIG_USB_NET_NET1080 is not set
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m +CONFIG_RT2X00=m +CONFIG_RT2800PCI=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=m +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_ZD1211RW=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_XTKBD=m +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y 
+CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m +CONFIG_SPI=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINCTRL_LOONGSON2=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m 
+CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y +CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +CONFIG_USB_S2255=m +CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_AU0828=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m 
+CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_FIREDTV=m +CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_LOONGSON=y +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FB_RADEON=y +CONFIG_FB_LS2K500=m +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_HRTIMER=m +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MPU401=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m 
+CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y 
+CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m +CONFIG_USB_GADGET=y +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m 
+CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_LOONGSON=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_COMEDI=m +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m +CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m +CONFIG_COMEDI_NI_PCIMIO=m +CONFIG_STAGING=y +CONFIG_COMMON_CLK_LOONGSON2=y +CONFIG_LOONGSON2_GUTS=y +CONFIG_LOONGSON2_PM=y +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m +CONFIG_NTFS3_FS=m +CONFIG_NTFS3_64BIT_CLUSTER=y 
+CONFIG_NTFS3_LZX_XPRESS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_PSTORE=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFSD=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SM2=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_CRC32_LOONGARCH=m
+CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+CONFIG_CRYPTO_DEV_CHELSIO=m
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+CONFIG_SECONDARY_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_SYSTEM_REVOCATION_LIST=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=m
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_FRAME_WARN=4096
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_LIST=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
-- 
Gitee


From 3946408f090d5d839f55a57d52c3f768c4748d9a Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 3 Dec 2021 06:08:07 -0500
Subject: [PATCH 391/953] anolis: crypto: ccp: Support DOWNLOAD_FIRMWARE when
 CSV is detected

ANBZ: #8571

When the ccp driver detects CSV support on a Hygon CPU, it should try
to update to the latest CSV firmware found on the system firmware
paths.
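
As a usage note (illustrative sketch, not part of this patch):
firmware_request_nowarn() is the standard in-kernel firmware loader
API and searches the usual firmware paths, so the platform owner is
expected to ship the update as /lib/firmware/hygon/csv.fw. The Hygon
branch added below behaves roughly like:

    const struct firmware *firmware;

    /* On Hygon parts, only the generic image name is tried. */
    if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
        firmware_request_nowarn(&firmware, "hygon/csv.fw", dev) >= 0) {
            /* ... hand firmware->data to DOWNLOAD_FIRMWARE ... */
            release_firmware(firmware);
    }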
Signed-off-by: hanliyang
Reviewed-by: Xingrui Yi
Reviewed-by: Shirong Hao
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2912
---
 drivers/crypto/ccp/sev-dev.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 909cf8c964f9..9d6c407826bc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -35,6 +35,7 @@
 
 #define DEVICE_NAME	"sev"
 #define SEV_FW_FILE	"amd/sev.fw"
+#define CSV_FW_FILE	"hygon/csv.fw"
 #define SEV_FW_NAME_SIZE	64
 
 static DEFINE_MUTEX(sev_cmd_mutex);
@@ -100,6 +101,11 @@ static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
 	return false;
 }
 
+static inline bool csv_version_greater_or_equal(u32 build)
+{
+	return hygon_csv_build >= build;
+}
+
 static void sev_irq_handler(int irq, void *data, unsigned int status)
 {
 	struct sev_device *sev = data;
@@ -756,6 +762,14 @@ static int sev_get_firmware(struct device *dev,
 	char fw_name_specific[SEV_FW_NAME_SIZE];
 	char fw_name_subset[SEV_FW_NAME_SIZE];
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		/* Check for CSV FW using the generic name: csv.fw */
+		if (firmware_request_nowarn(firmware, CSV_FW_FILE, dev) >= 0)
+			return 0;
+		else
+			return -ENOENT;
+	}
+
 	snprintf(fw_name_specific, sizeof(fw_name_specific),
 		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
 		 boot_cpu_data.x86, boot_cpu_data.x86_model);
@@ -794,7 +808,9 @@ static int sev_update_firmware(struct device *dev)
 	struct page *p;
 	u64 data_size;
 
-	if (!sev_version_greater_or_equal(0, 15)) {
+	if (!sev_version_greater_or_equal(0, 15) &&
+	    (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON ||
+	     !csv_version_greater_or_equal(1667))) {
 		dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
 		return -1;
 	}
@@ -840,9 +856,14 @@ static int sev_update_firmware(struct device *dev)
 
 	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
 	if (ret)
-		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
+		dev_dbg(dev, "Failed to update %s firmware: %#x\n",
+			(boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			? "CSV" : "SEV",
+			error);
 	else
-		dev_info(dev, "SEV firmware update successful\n");
+		dev_info(dev, "%s firmware update successful\n",
+			 (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+			 ? "CSV" : "SEV");
 
 	__free_pages(p, order);
-- 
Gitee


From 55efb69cc133417f1aa8d7f960ab815e8103f92c Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 3 Dec 2021 05:31:27 -0500
Subject: [PATCH 392/953] anolis: crypto: ccp: Implement CSV_PLATFORM_INIT
 ioctl command

ANBZ: #8571

The CSV_PLATFORM_INIT command can be used by the platform owner to
switch the platform from PSTATE.UNINIT to PSTATE.INIT.

In the upcoming patches, we'll support DOWNLOAD_FIRMWARE from
userspace. Because DOWNLOAD_FIRMWARE can only be performed while the
platform is in PSTATE.UNINIT, we need to invoke PLATFORM_INIT after
DOWNLOAD_FIRMWARE to switch the platform back to PSTATE.INIT.
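
For illustration only (a hypothetical userspace sketch; SEV_ISSUE_CMD
and struct sev_issue_cmd are the existing /dev/sev uapi from
<linux/psp-sev.h>, and CSV_PLATFORM_INIT carries no data payload):

    int fd = open("/dev/sev", O_RDWR | O_CLOEXEC);
    struct sev_issue_cmd arg = { .cmd = CSV_PLATFORM_INIT };

    if (fd >= 0 && ioctl(fd, SEV_ISSUE_CMD, &arg) < 0)
            fprintf(stderr, "PLATFORM_INIT failed, fw error: %#x\n",
                    arg.error);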
Signed-off-by: hanliyang
Reviewed-by: Xingrui Yi
Reviewed-by: Shirong Hao
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2912
---
 drivers/crypto/ccp/sev-dev.c | 3 +++
 include/uapi/linux/psp-sev.h | 1 +
 2 files changed, 4 insertions(+)

diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 9d6c407826bc..74685778207d 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1187,6 +1187,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
 		switch (input.cmd) {
+		case CSV_PLATFORM_INIT:
+			ret = __sev_platform_init_locked(&input.error);
+			goto result_to_user;
 		case CSV_HGSC_CERT_IMPORT:
 			ret = csv_ioctl_do_hgsc_import(&input);
 			goto result_to_user;
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index ae76776c0b15..a82643c0d795 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -36,6 +36,7 @@ enum {
  * CSV platform commands
  */
 enum {
+	CSV_PLATFORM_INIT = 101,
 	CSV_HGSC_CERT_IMPORT = 201,
 
 	CSV_MAX,
-- 
Gitee


From 47fa79b39624520342cbf32440dca46256510a51 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 3 Dec 2021 05:33:25 -0500
Subject: [PATCH 393/953] anolis: crypto: ccp: Implement CSV_PLATFORM_SHUTDOWN
 ioctl command

ANBZ: #8571

The CSV_PLATFORM_SHUTDOWN command can be used by the platform owner to
switch the platform to PSTATE.UNINIT.

The DOWNLOAD_FIRMWARE API can only be performed while the platform is
in PSTATE.UNINIT. In order to support DOWNLOAD_FIRMWARE from
userspace, we need to invoke PLATFORM_SHUTDOWN before that.

Signed-off-by: hanliyang
Reviewed-by: Xingrui Yi
Reviewed-by: Shirong Hao
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2912
---
 drivers/crypto/ccp/sev-dev.c | 3 +++
 include/uapi/linux/psp-sev.h | 1 +
 2 files changed, 4 insertions(+)

diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 74685778207d..390c9151dba5 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1190,6 +1190,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 		case CSV_PLATFORM_INIT:
 			ret = __sev_platform_init_locked(&input.error);
 			goto result_to_user;
+		case CSV_PLATFORM_SHUTDOWN:
+			ret = __sev_platform_shutdown_locked(&input.error);
+			goto result_to_user;
 		case CSV_HGSC_CERT_IMPORT:
 			ret = csv_ioctl_do_hgsc_import(&input);
 			goto result_to_user;
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index a82643c0d795..8ea91a7f9521 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -37,6 +37,7 @@ enum {
  */
 enum {
 	CSV_PLATFORM_INIT = 101,
+	CSV_PLATFORM_SHUTDOWN = 102,
 	CSV_HGSC_CERT_IMPORT = 201,
 
 	CSV_MAX,
-- 
Gitee


From 2e35bcda6f1e3711a82349231032a076256743fe Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Fri, 3 Dec 2021 05:58:23 -0500
Subject: [PATCH 394/953] anolis: crypto: ccp: Implement CSV_DOWNLOAD_FIRMWARE
 ioctl command

ANBZ: #8571

The CSV_DOWNLOAD_FIRMWARE command can be used by the platform owner to
update the CSV firmware.
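
As an illustrative userspace sketch (hypothetical names; the
csv_user_data_download_firmware layout is the one added below, and the
expected sequence per the two previous patches is PLATFORM_SHUTDOWN,
then DOWNLOAD_FIRMWARE, then PLATFORM_INIT):

    /* fw_buf/fw_len: the csv.fw image already read into memory */
    struct csv_user_data_download_firmware dl = {
            .address = (__u64)(uintptr_t)fw_buf,
            .length  = fw_len,
    };
    struct sev_issue_cmd arg = {
            .cmd  = CSV_DOWNLOAD_FIRMWARE,
            .data = (__u64)(uintptr_t)&dl,
    };

    if (ioctl(sev_fd, SEV_ISSUE_CMD, &arg) < 0)
            fprintf(stderr, "DOWNLOAD_FIRMWARE failed: %#x\n", arg.error);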
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Shirong Hao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2912 --- drivers/crypto/ccp/sev-dev.c | 70 ++++++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 2 ++ include/uapi/linux/psp-sev.h | 12 +++++++ 3 files changed, 84 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 390c9151dba5..4556aa8b4e5e 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1115,6 +1115,73 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) return ret; } +static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) +{ + struct sev_data_download_firmware *data = NULL; + struct csv_user_data_download_firmware input; + int ret, order; + struct page *p; + u64 data_size; + + /* Only support DOWNLOAD_FIRMWARE if build greater or equal 1667 */ + if (!csv_version_greater_or_equal(1667)) { + pr_err("DOWNLOAD_FIRMWARE not supported\n"); + return -EIO; + } + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + if (!input.address) { + argp->error = SEV_RET_INVALID_ADDRESS; + return -EINVAL; + } + + if (!input.length || input.length > CSV_FW_MAX_SIZE) { + argp->error = SEV_RET_INVALID_LEN; + return -EINVAL; + } + + /* + * CSV FW expects the physical address given to it to be 32 + * byte aligned. Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the CSV + * FW. Allocate enough memory for data structure + alignment + * padding + CSV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(input.length + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) + return -ENOMEM; + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. 
+	 */
+	data = page_address(p);
+	if (copy_from_user((void *)(page_address(p) + data_size),
+			   (void *)input.address, input.length)) {
+		ret = -EFAULT;
+		goto err_free_page;
+	}
+
+	data->address = __psp_pa(page_address(p) + data_size);
+	data->len = input.length;
+
+	ret = __sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, data, &argp->error);
+	if (ret)
+		pr_err("Failed to update CSV firmware: %#x\n", argp->error);
+	else
+		pr_info("CSV firmware update successful\n");
+
+err_free_page:
+	__free_pages(p, order);
+
+	return ret;
+}
+
 static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp)
 {
 	struct csv_user_data_hgsc_cert_import input;
@@ -1193,6 +1260,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 		case CSV_PLATFORM_SHUTDOWN:
 			ret = __sev_platform_shutdown_locked(&input.error);
 			goto result_to_user;
+		case CSV_DOWNLOAD_FIRMWARE:
+			ret = csv_ioctl_do_download_firmware(&input);
+			goto result_to_user;
 		case CSV_HGSC_CERT_IMPORT:
 			ret = csv_ioctl_do_hgsc_import(&input);
 			goto result_to_user;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 2b40efb57274..4d34d0f3d019 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -16,6 +16,8 @@
 
 #define SEV_FW_BLOB_MAX_SIZE	0x4000	/* 16KB */
 
+#define CSV_FW_MAX_SIZE	0x80000	/* 512KB */
+
 /**
  * SEV platform state
  */
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 8ea91a7f9521..07db804852a2 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -38,6 +38,7 @@ enum {
 enum {
 	CSV_PLATFORM_INIT = 101,
 	CSV_PLATFORM_SHUTDOWN = 102,
+	CSV_DOWNLOAD_FIRMWARE = 128,
 	CSV_HGSC_CERT_IMPORT = 201,
 
 	CSV_MAX,
@@ -180,6 +181,17 @@ struct csv_user_data_hgsc_cert_import {
 	__u32 hgsc_cert_len;	/* In */
 } __packed;
 
+/**
+ * struct csv_user_data_download_firmware - DOWNLOAD_FIRMWARE command parameters
+ *
+ * @address: userspace address of the CSV firmware image
+ * @length: length of the CSV firmware image
+ */
+struct csv_user_data_download_firmware {
+	__u64 address;	/* In */
+	__u32 length;	/* In */
+} __packed;
+
 /**
  * struct sev_issue_cmd - SEV ioctl parameters
  *
-- 
Gitee


From 199b03de7bf1a79db73209c2039c675677c1e27d Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 14:42:49 +0800
Subject: [PATCH 395/953] anolis: crypto: ccp: Introduce init and free helpers
 to manage CSV RING_BUFFER queues

ANBZ: #8572

There are up to two queues created in RING_BUFFER mode, each with two
sub-queues. The sub-queues store the command pointer entries (written
only by the x86) and the status entries (written only by the CSV
firmware) respectively.

The two queues are the low priority queue (required) and the high
priority queue (optional).

In this change, we introduce csv_ring_buffer_queue_init() to
initialize the CSV RING_BUFFER queues, and csv_ring_buffer_queue_free()
to clean up the CSV RING_BUFFER queues.
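
A brief note on the sizing used by these helpers (sketch only, derived
from the constants added below): kzalloc() gives no 4K alignment
guarantee, so each sub-queue buffer is over-allocated by one alignment
unit and the queue works from the aligned address inside it:

    /*
     * data       = kzalloc(CSV_RING_BUFFER_LEN);      // SIZE + ALIGN
     * data_align = ALIGN(data, CSV_RING_BUFFER_ALIGN);
     *
     * data_align lies at most ALIGN bytes past data, so at least
     * CSV_RING_BUFFER_SIZE bytes remain usable at the aligned address.
     */
    if (csv_ring_buffer_queue_init())   /* both priorities, 4 buffers */
            return -ENOMEM;
    /* ... fill command queues, enter RING_BUFFER mode ... */
    csv_ring_buffer_queue_free();       /* paired teardown */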
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/psp-ringbuf.c | 29 +++++++++++ drivers/crypto/ccp/psp-ringbuf.h | 31 ++++++++++++ drivers/crypto/ccp/sev-dev.c | 87 ++++++++++++++++++++++++++++++++ drivers/crypto/ccp/sev-dev.h | 4 ++ include/linux/psp-sev.h | 38 ++++++++++++++ 6 files changed, 191 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/psp-ringbuf.c create mode 100644 drivers/crypto/ccp/psp-ringbuf.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index aa0ba2d17e1e..82be0ac4a0b6 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -12,7 +12,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ platform-access.o \ - dbc.o + dbc.o \ + psp-ringbuf.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c new file mode 100644 index 000000000000..485c6da91ca9 --- /dev/null +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "psp-ringbuf.h" + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize) +{ + size /= esize; + + queue->head = 0; + queue->tail = 0; + queue->esize = esize; + queue->data = (u64)buffer; + queue->mask = size - 1; + queue->data_align = ALIGN(queue->data, CSV_RING_BUFFER_ALIGN); + + return 0; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h new file mode 100644 index 000000000000..cb6f1f7b5736 --- /dev/null +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. 
+ * + * Author: Baoshun Fang + */ + +#ifndef __PSP_RINGBUF_H__ +#define __PSP_RINGBUF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize); + +#endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 4556aa8b4e5e..b8bf1d60bb54 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1350,6 +1350,93 @@ int sev_guest_df_flush(int *error) } EXPORT_SYMBOL_GPL(sev_guest_df_flush); +int csv_ring_buffer_queue_free(void); + +static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + int ret = 0; + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + + memset((void *)ring_buffer, 0, sizeof(struct csv_ringbuffer_queue)); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) { + ret = -ENOMEM; + goto free_cmdptr; + } + + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + return 0; + +free_cmdptr: + kfree(cmd_ptr_buffer); + + return ret; +} + +int csv_ring_buffer_queue_init(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int i, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); + if (ret) + goto e_free; + } + + return 0; + +e_free: + csv_ring_buffer_queue_free(); + return ret; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); + +int csv_ring_buffer_queue_free(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_ringbuffer_queue *ring_buffer; + int i; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ring_buffer = &sev->ring_buffer[i]; + + if (ring_buffer->cmd_ptr.data) { + kfree((void *)ring_buffer->cmd_ptr.data); + ring_buffer->cmd_ptr.data = 0; + } + + if (ring_buffer->stat_val.data) { + kfree((void *)ring_buffer->stat_val.data); + ring_buffer->stat_val.data = 0; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); + static void sev_exit(struct kref *ref) { misc_deregister(&misc_dev->misc); diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index 778c95155e74..372183b8c58f 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -25,6 +25,8 @@ #include #include +#include "psp-ringbuf.h" + #define SEV_CMDRESP_CMD GENMASK(26, 16) #define SEV_CMD_COMPLETE BIT(1) #define SEV_CMDRESP_IOC BIT(0) @@ -52,6 +54,8 @@ struct sev_device { u8 build; void *cmd_buf; + + struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM]; }; int sev_dev_init(struct psp_device *psp); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 4d34d0f3d019..9656c4179581 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -88,6 +88,18 @@ enum csv_cmd { CSV_CMD_MAX, }; +/** + * Ring Buffer Mode regions: + * There are 4 regions and every region is a 4K area that must be 4K aligned. 
+ * To accomplish this allocate an amount that is the size of area and the
+ * required alignment.
+ * The aligned address will be calculated from the returned address.
+ */
+#define CSV_RING_BUFFER_SIZE	(32 * 1024)
+#define CSV_RING_BUFFER_ALIGN	(4 * 1024)
+#define CSV_RING_BUFFER_LEN	(CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN)
+#define CSV_RING_BUFFER_ESIZE	16
+
 /**
  * struct sev_data_init - INIT command parameters
  *
@@ -546,6 +558,24 @@ struct csv_data_hgsc_cert_import {
 	u32 hgsc_cert_len;	/* In */
 } __packed;
 
+#define CSV_COMMAND_PRIORITY_HIGH	0
+#define CSV_COMMAND_PRIORITY_LOW	1
+#define CSV_COMMAND_PRIORITY_NUM	2
+
+struct csv_queue {
+	u32 head;
+	u32 tail;
+	u32 mask; /* mask = (size - 1), indicates the max element count */
+	u32 esize; /* size of an element */
+	u64 data;
+	u64 data_align;
+} __packed;
+
+struct csv_ringbuffer_queue {
+	struct csv_queue cmd_ptr;
+	struct csv_queue stat_val;
+} __packed;
+
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
 
 /**
@@ -662,6 +692,10 @@ int sev_guest_decommission(struct sev_data_decommission *data, int *error);
 
 void *psp_copy_user_blob(u64 uaddr, u32 len);
 
+int csv_ring_buffer_queue_init(void);
+
+int csv_ring_buffer_queue_free(void);
+
 #else	/* !CONFIG_CRYPTO_DEV_SP_PSP */
 
 static inline int
@@ -685,6 +719,10 @@ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int
 
 static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
 
+static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; }
+
+static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; }
+
 #endif	/* CONFIG_CRYPTO_DEV_SP_PSP */
 
 #endif	/* __PSP_SEV_H__ */
-- 
Gitee


From ce642a88b10c1496eb89dd2cad4e69a1ef622749 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 15:21:33 +0800
Subject: [PATCH 396/953] anolis: crypto: ccp: Add support for enqueueing
 command pointers in CSV RING_BUFFER mode

ANBZ: #8572

In CSV RING_BUFFER mode, X86 will enqueue command pointers into the
sub-queue that stores the command pointers. The priority is selected
through a function parameter.

Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2913
---
 drivers/crypto/ccp/psp-ringbuf.c | 51 ++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/psp-ringbuf.h | 2 ++
 drivers/crypto/ccp/sev-dev.c | 22 ++++++++++++++
 include/linux/psp-sev.h | 12 ++++++++
 4 files changed, 87 insertions(+)

diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c
index 485c6da91ca9..e2c236b71fec 100644
--- a/drivers/crypto/ccp/psp-ringbuf.c
+++ b/drivers/crypto/ccp/psp-ringbuf.c
@@ -13,6 +13,43 @@
 
 #include "psp-ringbuf.h"
 
+static void enqueue_data(struct csv_queue *queue,
+			 const void *src,
+			 unsigned int len, unsigned int off)
+{
+	unsigned int size = queue->mask + 1;
+	unsigned int esize = queue->esize;
+	unsigned int l;
+	void *data;
+
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	data = (void *)queue->data_align;
+	memcpy(data + off, src, l);
+	memcpy(data, src + l, len - l);
+
+	/*
+	 * Make sure that the data in the ring buffer is up to date before
+	 * incrementing the queue->tail index counter.
+	 */
+	smp_wmb();
+}
+
+static unsigned int queue_avail_size(struct csv_queue *queue)
+{
+	/*
+	 * According to the nature of unsigned numbers, this always works
+	 * even when tail < head. One element is reserved to distinguish
+	 * full and empty.
+ */ + return queue->mask - (queue->tail - queue->head); +} + int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize) { @@ -27,3 +64,17 @@ int csv_queue_init(struct csv_queue *queue, return 0; } + +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len) +{ + unsigned int size; + + size = queue_avail_size(queue); + if (len > size) + len = size; + + enqueue_data(queue, buf, len, queue->tail); + queue->tail += len; + return len; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h index cb6f1f7b5736..416caefb06a2 100644 --- a/drivers/crypto/ccp/psp-ringbuf.h +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -27,5 +27,7 @@ int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len); #endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index b8bf1d60bb54..7a5693eb9d2b 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1383,6 +1383,28 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer return ret; } +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_cmdptr_entry cmdptr = { }; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); + int csv_ring_buffer_queue_init(void) { struct psp_device *psp = psp_master; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 9656c4179581..6083a68dcdac 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -562,6 +562,13 @@ struct csv_data_hgsc_cert_import { #define CSV_COMMAND_PRIORITY_LOW 1 #define CSV_COMMAND_PRIORITY_NUM 2 +struct csv_cmdptr_entry { + u16 cmd_id; + u16 cmd_flags; + u32 sw_data; + u64 cmd_buf_ptr; +} __packed; + struct csv_queue { u32 head; u32 tail; @@ -696,6 +703,8 @@ int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -723,6 +732,9 @@ static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } +static inline +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 8e9c0e34360f9942c8fb5aced84d0eddee9a4d8c Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 15:35:47 +0800 Subject: [PATCH 397/953] anolis: crypto: ccp: Add support for dequeue status in CSV RING_BUFFER mode ANBZ: #8572 In CSV RING_BUFFER mode, X86 will dequeue status entries written by PSP after the corresponding command has been handled. 
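
For clarity (an illustrative sketch, not code from this patch): both
head and tail are free-running u32 counters, so occupancy is computed
as tail - head, and unsigned subtraction stays correct across
wraparound as long as the queue never holds more than mask elements:

    u32 head = 0xfffffffeu;   /* consumer index, about to wrap   */
    u32 tail = 0x00000001u;   /* producer index, already wrapped */
    u32 used = tail - head;   /* == 3 entries, as expected       */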
Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2913
---
 drivers/crypto/ccp/psp-ringbuf.c | 39 ++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/psp-ringbuf.h | 2 ++
 drivers/crypto/ccp/sev-dev.c | 32 ++++++++++++++++++++++++++
 include/linux/psp-sev.h | 11 +++++++++
 4 files changed, 84 insertions(+)

diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c
index e2c236b71fec..3b2f461b672c 100644
--- a/drivers/crypto/ccp/psp-ringbuf.c
+++ b/drivers/crypto/ccp/psp-ringbuf.c
@@ -78,3 +78,42 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue,
 	queue->tail += len;
 	return len;
 }
+
+static void dequeue_data(struct csv_queue *queue,
+			 void *dst, unsigned int len, unsigned int off)
+{
+	unsigned int size = queue->mask + 1;
+	unsigned int esize = queue->esize;
+	unsigned int l;
+
+	off &= queue->mask;
+	if (esize != 1) {
+		off *= esize;
+		size *= esize;
+		len *= esize;
+	}
+	l = min(len, size - off);
+
+	memcpy(dst, (void *)(queue->data + off), l);
+	memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l);
+
+	/*
+	 * Make sure that the data is copied before incrementing the
+	 * queue->head index counter.
+	 */
+	smp_wmb();
+}
+
+unsigned int csv_dequeue_stat(struct csv_queue *queue,
+			      void *buf, unsigned int len)
+{
+	unsigned int size;
+
+	size = queue->tail - queue->head;
+	if (len > size)
+		len = size;
+
+	dequeue_data(queue, buf, len, queue->head);
+	queue->head += len;
+	return len;
+}
diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h
index 416caefb06a2..50e014deb5ce 100644
--- a/drivers/crypto/ccp/psp-ringbuf.h
+++ b/drivers/crypto/ccp/psp-ringbuf.h
@@ -29,5 +29,7 @@ int csv_queue_init(struct csv_queue *queue,
 		   void *buffer, unsigned int size, size_t esize);
 unsigned int csv_enqueue_cmd(struct csv_queue *queue,
 			     const void *buf, unsigned int len);
+unsigned int csv_dequeue_stat(struct csv_queue *queue,
+			      void *buf, unsigned int len);
 
 #endif /* __PSP_RINGBUF_H__ */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 7a5693eb9d2b..0451609aad3a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1405,6 +1405,38 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags)
 }
 EXPORT_SYMBOL_GPL(csv_fill_cmd_queue);
 
+int csv_check_stat_queue_status(int *psp_ret)
+{
+	struct psp_device *psp = psp_master;
+	struct sev_device *sev;
+	unsigned int len;
+	int prio;
+
+	if (!psp || !psp->sev_data)
+		return -ENODEV;
+
+	sev = psp->sev_data;
+
+	for (prio = CSV_COMMAND_PRIORITY_HIGH;
+	     prio < CSV_COMMAND_PRIORITY_NUM; prio++) {
+		do {
+			struct csv_statval_entry statval;
+
+			len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val,
+					       &statval, 1);
+			if (len) {
+				if (statval.status != 0) {
+					*psp_ret = statval.status;
+					return -EFAULT;
+				}
+			}
+		} while (len);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(csv_check_stat_queue_status);
+
 int csv_ring_buffer_queue_init(void)
 {
 	struct psp_device *psp = psp_master;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 6083a68dcdac..84ae53a6f354 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -569,6 +569,13 @@ struct csv_cmdptr_entry {
 	u64 cmd_buf_ptr;
 } __packed;
 
+struct csv_statval_entry {
+	u16 status;
+	u16 reserved0;
+	u32 reserved1;
+	u64 reserved2;
+} __packed;
+
 struct csv_queue {
 	u32 head;
 	u32 tail;
@@ -705,6 +712,8 @@ int csv_ring_buffer_queue_free(void);
 
 int 
csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); +int csv_check_stat_queue_status(int *psp_ret); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -735,6 +744,8 @@ static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } static inline int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } +static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 6e746bae7b1ec729633892d7be47e43ba3bebda0 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:03:54 +0800 Subject: [PATCH 398/953] anolis: crypto: ccp: Add support to switch to CSV RING_BUFFER mode ANBZ: #8572 Invoke RING_BUFFER command will switch CSV firmware to RING_BUFFER mode. When CSV firmware stays in RING_BUFFER mode, it will fetch commands from CSV RING_BUFFER queues which are filled by X86. The CSV firmware will exit RING_BUFFER mode after SHUTDOWN command is completed. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- drivers/crypto/ccp/sev-dev.c | 51 ++++++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 40 ++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 0451609aad3a..64de04b2c698 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -65,6 +65,8 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ static bool psp_dead; static int psp_timeout; +static int csv_comm_mode = CSV_COMM_MAILBOX_ON; + /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. 
Use the page allocator * to allocate the memory, which will return aligned memory for the specified @@ -144,6 +146,8 @@ static int sev_cmd_buffer_len(int cmd) switch (cmd) { case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); + case CSV_CMD_RING_BUFFER: + return sizeof(struct csv_data_ring_buffer); default: break; } @@ -411,6 +415,48 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } +static int __csv_ring_buffer_enter_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + int ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; @@ -562,6 +608,11 @@ static int __sev_platform_shutdown_locked(int *error) if (ret) return ret; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + csv_comm_mode = CSV_COMM_MAILBOX_ON; + csv_ring_buffer_queue_free(); + } + sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 84ae53a6f354..f9827593d060 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -83,7 +83,18 @@ enum sev_cmd { SEV_CMD_MAX, }; +/** + * CSV communication state + */ +enum csv_comm_state { + CSV_COMM_MAILBOX_ON = 0x0, + CSV_COMM_RINGBUFFER_ON = 0x1, + + CSV_COMM_MAX +}; + enum csv_cmd { + CSV_CMD_RING_BUFFER = 0x00F, CSV_CMD_HGSC_CERT_IMPORT = 0x300, CSV_CMD_MAX, }; @@ -590,6 +601,35 @@ struct csv_ringbuffer_queue { struct csv_queue stat_val; } __packed; +/** + * struct csv_data_ring_buffer - RING_BUFFER command parameters + * + * @queue_lo_cmdptr_address: physical address of the region to be used for + * low priority queue's CmdPtr ring buffer + * @queue_lo_statval_address: physical address of the region to be used for + * low priority queue's StatVal ring buffer + * @queue_hi_cmdptr_address: physical address of the region to be used for + * high priority queue's CmdPtr ring buffer + * @queue_hi_statval_address: physical address of the region to be used for + * high priority queue's StatVal ring buffer + * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1 + * @queue_hi_size: size of the high priority queue in 4K pages. 
Must be 1
+ * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated
+ * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated
+ * @int_on_empty: unconditionally interrupt when both queues are found empty
+ */
+struct csv_data_ring_buffer {
+	u64 queue_lo_cmdptr_address;	/* In */
+	u64 queue_lo_statval_address;	/* In */
+	u64 queue_hi_cmdptr_address;	/* In */
+	u64 queue_hi_statval_address;	/* In */
+	u8 queue_lo_size;	/* In */
+	u8 queue_hi_size;	/* In */
+	u16 queue_lo_threshold;	/* In */
+	u16 queue_hi_threshold;	/* In */
+	u16 int_on_empty;	/* In */
+} __packed;
+
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
 
 /**
-- 
Gitee


From de5fb591ea47c92d2f8e2a996deabb28b27b85d0 Mon Sep 17 00:00:00 2001
From: fangbaoshun
Date: Fri, 30 Jul 2021 17:36:19 +0800
Subject: [PATCH 399/953] anolis: crypto: ccp: Add support for issuing
 commands in CSV RING_BUFFER mode

ANBZ: #8572

The CSV firmware stays in Mailbox mode by default. Once it has
successfully switched to CSV RING_BUFFER mode, the semantics of the 3
registers used for communication between x86 and the CSV firmware
change:
  - The CmdResp register becomes the RBCtl register. It is only ever
    written by X86.
  - The CmdBufAddr_Hi register becomes the RBTail register. It is only
    ever written by X86.
  - The CmdBufAddr_Lo register becomes the RBHead register. It should
    never be written by X86; the PSP will update it.

The CSV firmware will exit CSV RING_BUFFER mode when it reads an
invalid value from the RBCtl register.

Signed-off-by: fangbaoshun
Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2913
---
 drivers/crypto/ccp/psp-dev.h | 13 ++++
 drivers/crypto/ccp/sev-dev.c | 125 ++++++++++++++++++++++++++++++++++-
 include/linux/psp-sev.h | 9 +++
 3 files changed, 146 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 8a4de69399c5..45b6e17d5770 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -17,6 +17,19 @@
 
 #include "sp-dev.h"
 
+#define PSP_RBCTL_X86_WRITES	BIT(31)
+#define PSP_RBCTL_RBMODE_ACT	BIT(30)
+#define PSP_RBCTL_CLR_INTSTAT	BIT(29)
+#define PSP_RBTAIL_QHI_TAIL_SHIFT	16
+#define PSP_RBTAIL_QHI_TAIL_MASK	0x7FF0000
+#define PSP_RBTAIL_QLO_TAIL_MASK	0x7FF
+
+#define PSP_RBHEAD_QHI_HEAD_SHIFT	16
+#define PSP_RBHEAD_QHI_HEAD_MASK	0x7FF0000
+#define PSP_RBHEAD_QLO_HEAD_MASK	0x7FF
+
+#define PSP_RBHEAD_QPAUSE_INT_STAT	BIT(30)
+
 #define MAX_PSP_NAME_LEN	16
 
 extern struct psp_device *psp_master;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 64de04b2c698..49e8bfbcedaa 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -119,7 +119,9 @@ static void sev_irq_handler(int irq, void *data, unsigned int status)
 
 	/* Check if it is SEV command completion: */
 	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
-	if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
+	if (FIELD_GET(PSP_CMDRESP_RESP, reg) ||
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	     (csv_comm_mode == CSV_COMM_RINGBUFFER_ON))) {
 		sev->int_rcvd = 1;
 		wake_up(&sev->int_queue);
 	}
@@ -140,6 +142,22 @@ static int sev_wait_cmd_ioc(struct sev_device *sev,
 	return 0;
 }
 
+static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev,
+					unsigned int *reg,
+					unsigned int timeout)
+{
+	int ret;
+
+	ret = wait_event_timeout(sev->int_queue,
+				 sev->int_rcvd, timeout * HZ);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	*reg = ioread32(sev->io_regs + 
sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + static int sev_cmd_buffer_len(int cmd) { if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { @@ -457,6 +475,102 @@ static int __csv_ring_buffer_enter_locked(int *error) return ret; } +static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) +{ + struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + return statval[index].status; +} + +static int __csv_do_ringbuf_cmds_locked(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int rb_tail; + unsigned int rb_ctl; + int last_cmd_index; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + + /* PSP responds to x86 only when all queues are empty or an error happens */ + rb_ctl = PSP_RBCTL_X86_WRITES | + PSP_RBCTL_RBMODE_ACT | + PSP_RBCTL_CLR_INTSTAT; + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in the ring buffer to complete */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, &reg, psp_timeout * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); + psp_dead = true; + + return ret; + } + + /* cmd error happens */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + if (psp_ret) { + last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK) + >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, + last_cmd_index); + if (*psp_ret == 0) { + last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK; + *psp_ret = csv_get_cmd_status(sev, + CSV_COMMAND_PRIORITY_LOW, last_cmd_index); + } + } + + return ret; +} + +static int csv_do_ringbuf_cmds(int *psp_ret) +{ + struct sev_user_data_status data; + int rc; + + mutex_lock(&sev_cmd_mutex); + + rc = __csv_ring_buffer_enter_locked(psp_ret); + if (rc) + goto cmd_unlock; + + rc = __csv_do_ringbuf_cmds_locked(psp_ret); + + /* exit ringbuf mode by sending a CMD in mailbox mode */ + __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +cmd_unlock: + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; @@ -1691,6 +1805,15 @@ int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, } EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) +{ + if (!filep || filep->f_op != &sev_fops) + return -EBADF; + + return csv_do_ringbuf_cmds(psp_ret); +} +EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); + void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index f9827593d060..05e22a17fcb5 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -754,6 +754,12 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); int
csv_check_stat_queue_status(int *psp_ret); +/** + * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring + * buffer. + */ +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -786,6 +792,9 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return - static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } +static inline int +csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From fe268ff85c611fedfa7ae6ab9b6314fc45f3013e Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:51:55 +0800 Subject: [PATCH 400/953] anolis: KVM: SVM: Add KVM_CSV_COMMAND_BATCH command for applying CSV RING_BUFFER mode ANBZ: #8572 The API KVM_CSV_COMMAND_BATCH receives data of structure kvm_csv_command_batch, which embeds a linked list of CSV command requests from userspace. It will do some preparation work to ensure data is available for CSV RING_BUFFER mode, and then issue the RING_BUFFER command. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/include/asm/svm.h | 20 +++++ arch/x86/kvm/svm/sev.c | 176 +++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 14 +++ 3 files changed, 210 insertions(+) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 3ac0ffc4f3e2..24b6a7e60f33 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -680,4 +680,24 @@ DEFINE_GHCB_ACCESSORS(sw_exit_info_2) DEFINE_GHCB_ACCESSORS(sw_scratch) DEFINE_GHCB_ACCESSORS(xcr0) +/* same as the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + #endif diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 756df9e46e67..a310f0787867 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -75,6 +75,8 @@ static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + static const char sev_vm_mnonce[] = "VM_ATTESTATION"; struct enc_region { struct list_head list; unsigned long npages; @@ -320,6 +322,28 @@ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) return __sev_issue_cmd(sev->fd, id, data, error); } +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} + static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1854,6 +1878,8 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) return ret; } +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp); + int
sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1938,6 +1964,14 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; + case KVM_CSV_COMMAND_BATCH: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; + } + fallthrough; default: r = -EINVAL; goto out; @@ -3212,3 +3246,145 @@ int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) sev_unpin_memory(kvm, pages, n); return ret; } + +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + sev_unpin_memory(kvm, item->pages, item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + } + } + + return 0; +} + +typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos); +typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos); + +static int get_cmd_helpers(__u32 cmd, + csv_ringbuf_input_fn *to_ringbuf_fn, + csv_ringbuf_output_fn *to_user_fn) +{ + int ret = 0; + + /* copy commands to ring buffer */ + switch (cmd) { + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + int ret; + struct kvm_csv_command_batch params; + uintptr_t node_addr; + struct csv_ringbuf_infos *ringbuf_infos; + csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; + csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; + int prio = CSV_COMMAND_PRIORITY_HIGH; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_command_batch))) + return -EFAULT; + + /* return directly if node list is NULL */ + if (!params.csv_batch_list_uaddr) + return 0; + + /* ring buffer init */ + if (csv_ring_buffer_queue_init()) + return -EINVAL; + + if (get_cmd_helpers(params.command_id, + &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { + ret = -EINVAL; + goto err_free_ring_buffer; + } + + ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); + if (!ringbuf_infos) { + ret = -ENOMEM; + goto err_free_ring_buffer; + } + + node_addr = (uintptr_t)params.csv_batch_list_uaddr; + while (node_addr) { + struct kvm_csv_batch_list_node node; + + if (copy_from_user(&node, (void __user *)node_addr, + sizeof(struct kvm_csv_batch_list_node))) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { + pr_err("%s: ring num is too large:%d, cmd:0x%x\n", + __func__, ringbuf_infos->num, params.command_id); + + ret = -EINVAL; + goto err_free_ring_buffer_infos_items; + } + + if (csv_cmd_to_ringbuf_fn(kvm, prio, + (uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) + prio = CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto
err_free_ring_buffer_infos_items; + + ret = csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2eea9dd73c64..79a0007c6e0f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1932,6 +1932,9 @@ enum sev_cmd_id { /* Guest Migration Extension */ KVM_SEV_SEND_CANCEL, + /* Hygon CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + KVM_SEV_NR_MAX, }; @@ -2028,6 +2031,17 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From 56b26d3190cd7848d0bfc4aa605d92df56e015e5 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 18:22:04 +0800 Subject: [PATCH 401/953] anolis: KVM: SVM: Prepare memory pool to allocate buffers for KVM_CSV_COMMAND_BATCH ANBZ: #8572 In the upcoming patches, many buffers need to be allocated in KVM_CSV_COMMAND_BATCH code paths. To avoid memory allocation failures, directly allocate a memory pool in sev_hardware_setup() and free the memory pool in sev_hardware_unsetup(). When KVM_CSV_COMMAND_BATCH handles a batch of SEND_UPDATE_DATA/RECEIVE_UPDATE_DATA commands, it will allocate trans buffers from the memory pool. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 102 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index a310f0787867..b2cfc430209b 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -79,6 +79,9 @@ static DEFINE_MUTEX(csv_cmd_batch_mutex); static const char sev_vm_mnonce[] = "VM_ATTESTATION"; +static int alloc_trans_mempool(void); +static void free_trans_mempool(void); + struct enc_region { struct list_head list; unsigned long npages; @@ -2270,6 +2273,16 @@ void __init sev_hardware_setup(void) goto out; } + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (alloc_trans_mempool()) { + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + bitmap_free(sev_reclaim_asid_bitmap); + sev_reclaim_asid_bitmap = NULL; + goto out; + } + } + sev_asid_count = max_sev_asid - min_sev_asid + 1; WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); sev_supported = true; @@ -2327,6 +2340,9 @@ void sev_hardware_unsetup(void) /* No need to take sev_bitmap_lock, all VMs have been destroyed.
*/ sev_flush_asids(1, max_sev_asid); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + free_trans_mempool(); + bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -3247,6 +3263,91 @@ int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +/*--1024--1023--1024--1023--*/ +#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 +#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) +#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) +#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCK_NUM 4 +#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) + +static size_t g_mempool_offset; +void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; + +static void reset_mempool_offset(void) +{ + g_mempool_offset = 0; +} + +static int alloc_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + WARN_ONCE(g_trans_mempool[i], + "CSV: g_trans_mempool[%d] was tainted\n", i); + + g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); + if (!g_trans_mempool[i]) + goto free_trans_mempool; + } + + g_mempool_offset = 0; + return 0; + +free_trans_mempool: + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + return -ENOMEM; +} + +static void free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + g_mempool_offset = 0; +} + +static void __maybe_unused *get_trans_data_from_mempool(size_t size) +{ + void *trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("CSV: mempool is full (offset: %lu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -3382,6 +3483,7 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) err_free_ring_buffer_infos_items: csv_ringbuf_infos_free(kvm, ringbuf_infos); kfree(ringbuf_infos); + reset_mempool_offset(); err_free_ring_buffer: csv_ring_buffer_queue_free(); -- Gitee From 38daf643d8d39f07fff553b987ab13c1338ca538 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:38:41 +0800 Subject: [PATCH 402/953] anolis: KVM: SVM: Add SEND_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH ANBZ: #8572 When KVM_CSV_COMMAND_BATCH handles a batch of SEND_UPDATE_DATA commands, it needs to execute 3 steps: 1. Enqueue each SEND_UPDATE_DATA command's data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command 3.
Copy the output of the RING_BUFFER command to userspace In this change, we add sev_send_update_data_to_ringbuf() to prepare the input required by the RING_BUFFER command as dictated in step 1, and add sev_send_update_data_copy_to_user() to copy the output to userspace as dictated in step 3. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 143 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index b2cfc430209b..42bc79b9f7d7 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3375,6 +3375,145 @@ static int csv_ringbuf_infos_free(struct kvm *kvm, return 0; } +static int +sev_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. + */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kmalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); + if (ret) + goto e_free; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user.
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_free; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans_data; + item->data_vaddr = (uintptr_t)data; + item->hdr_uaddr = params.hdr_uaddr; + item->trans_uaddr = params.trans_uaddr; + item->hdr_len = params.hdr_len; + item->trans_len = params.trans_len; + + ringbuf_infos->item[ringbuf_infos->num] = item; + ringbuf_infos->num++; + + /* copy to ring buffer success, data freed after commands completed */ + goto finish; + +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); +e_unpin: + sev_unpin_memory(kvm, guest_page, n); + +finish: + return ret; +} + +static int +sev_send_update_data_copy_to_user(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i, ret = 0; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)item->trans_uaddr, + (void *)item->trans_vaddr, item->trans_len)) { + ret = -EFAULT; + break; + } + + /* Copy packet header to userspace. */ + if (copy_to_user((void __user *)item->hdr_uaddr, + (void *)item->hdr_vaddr, item->hdr_len)) { + ret = -EFAULT; + break; + } + } + + return ret; +} + typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, uintptr_t data_ptr, struct csv_ringbuf_infos *ringbuf_infos); typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos); @@ -3389,6 +3528,10 @@ static int get_cmd_helpers(__u32 cmd, /* copy commands to ring buffer */ switch (cmd) { + case KVM_SEV_SEND_UPDATE_DATA: + *to_ringbuf_fn = sev_send_update_data_to_ringbuf; + *to_user_fn = sev_send_update_data_copy_to_user; + break; default: ret = -EINVAL; break; -- Gitee From dd397654a9f01d06250727a1898882e7cb35db32 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:50:54 +0800 Subject: [PATCH 403/953] anolis: KVM: SVM: Add RECEIVE_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH ANBZ: #8572 When KVM_CSV_COMMAND_BATCH handles a batch of RECEIVE_UPDATE_DATA commands, it needs to execute 2 steps: 1. Enqueue each RECEIVE_UPDATE_DATA command's data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command In this change, we add sev_receive_update_data_to_ringbuf() to prepare the input required by the RING_BUFFER command as dictated in step 1.
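For illustration, a minimal userspace sketch of building such a batch (vm_fd, sev_fd and a populated array reqs[] of struct kvm_sev_receive_update_data are assumptions; error handling is omitted):

	struct kvm_csv_batch_list_node nodes[NPAGES] = {};
	struct kvm_csv_command_batch batch = {};
	struct kvm_sev_cmd cmd = {};
	int i;

	for (i = 0; i < NPAGES; i++) {
		/* each node points at one command's request data ... */
		nodes[i].cmd_data_addr = (__u64)(uintptr_t)&reqs[i];
		/* ... and at the next node; 0 terminates the list */
		nodes[i].next_cmd_addr = (i + 1 < NPAGES) ?
				(__u64)(uintptr_t)&nodes[i + 1] : 0;
	}

	batch.command_id = KVM_SEV_RECEIVE_UPDATE_DATA;
	batch.csv_batch_list_uaddr = (__u64)(uintptr_t)&nodes[0];

	cmd.id = KVM_CSV_COMMAND_BATCH;
	cmd.sev_fd = sev_fd;
	cmd.data = (__u64)(uintptr_t)&batch;

	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);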
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 121 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 42bc79b9f7d7..8d9411c6e051 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3514,6 +3514,123 @@ sev_send_update_data_copy_to_user(struct kvm *kvm, return ret; } +static int +sev_receive_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + struct csv_ringbuf_info_item *item; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(&params, (void __user *)data_ptr, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + ret = -ENOMEM; + trans = get_trans_data_from_mempool(params.trans_len); + if (!trans) + goto e_free_hdr; + + if (copy_from_user(trans, (void __user *)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans); + data->trans_len = params.trans_len; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 1); + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); + goto e_free; + } + + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the guest's key, and the cache may + * contain dirty, unencrypted data. + */ + sev_clflush_pages(guest_page, n); + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); + + if (ret) + goto e_unpin; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user.
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_unpin; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num] = item; + ringbuf_infos->num++; + + /* copy to ring buffer success, data freed after commands completed */ + goto finish; + +e_unpin: + sev_unpin_memory(kvm, guest_page, n); +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); + +finish: + return ret; +} + typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, uintptr_t data_ptr, struct csv_ringbuf_infos *ringbuf_infos); @@ -3532,6 +3649,10 @@ static int get_cmd_helpers(__u32 cmd, *to_ringbuf_fn = sev_send_update_data_to_ringbuf; *to_user_fn = sev_send_update_data_copy_to_user; break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + *to_ringbuf_fn = sev_receive_update_data_to_ringbuf; + *to_user_fn = NULL; + break; default: ret = -EINVAL; break; -- Gitee From 31f647a25a3405bc1ea7ff1d05eac62bce742b4a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 24 May 2022 22:03:04 +0800 Subject: [PATCH 404/953] anolis: crypto: ccp: Fix definition of struct sev_data_send_update_vmsa ANBZ: #8572 The current definition of struct sev_data_send_update_vmsa in include/linux/psp-sev.h does not comply with SEV API spec. Fix it here. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- include/linux/psp-sev.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 05e22a17fcb5..55dd35ce920f 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -417,6 +417,7 @@ struct sev_data_send_update_data { */ struct sev_data_send_update_vmsa { u32 handle; /* In */ + u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In/Out */ u32 reserved2; -- Gitee From f281db5fa6c37e5b465648a2f34e2c0d04d467f5 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:07:08 -0400 Subject: [PATCH 405/953] anolis: KVM: SVM: Add KVM_SEV_SEND_UPDATE_VMSA command ANBZ: #8572 The command is used for encrypting the VCPU register states of CSV2 guest using the encryption context created with KVM_SEV_SEND_START. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 115 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 123 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 8d9411c6e051..f5c0b3affa52 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1399,6 +1399,115 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +__sev_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_update_vmsa *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + int ret; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + return -ENOMEM; + + vmsa->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + params->hdr_len = vmsa->hdr_len; + params->trans_len = vmsa->trans_len; + + if (copy_to_user((void __user *)argp->data, params, + sizeof(struct kvm_sev_send_update_vmsa))) + ret = -EFAULT; + + kfree(vmsa); + return ret; +} + +static int sev_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + struct kvm_sev_send_update_vmsa params; + struct kvm_vcpu *vcpu; + void *hdr, *trans_data; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_update_vmsa))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __sev_send_update_vmsa_query_lengths(kvm, argp, &params); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + return ret; + + trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); + if (!vmsa) + goto e_free_trans_data; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans_data); + vmsa->trans_len = params.trans_len; + + /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace.
*/ ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len); +e_free: + kfree(vmsa); +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1952,6 +2061,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_UPDATE_DATA: r = sev_send_update_data(kvm, &sev_cmd); break; + case KVM_SEV_SEND_UPDATE_VMSA: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = sev_send_update_vmsa(kvm, &sev_cmd); + else + r = -EINVAL; + break; case KVM_SEV_SEND_FINISH: r = sev_send_finish(kvm, &sev_cmd); break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 79a0007c6e0f..1c4a04525823 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2013,6 +2013,14 @@ struct kvm_sev_send_update_data { __u32 trans_len; }; +struct kvm_sev_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + struct kvm_sev_receive_start { __u32 handle; __u32 policy; -- Gitee From f6e6948860252cd741d58bd2b6c5c99efcac0b62 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:39:49 -0400 Subject: [PATCH 406/953] anolis: KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_VMSA command ANBZ: #8572 The command is used for copying the incoming buffer into the VMSA memory regions of a CSV2 guest. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 81 ++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 ++++ 2 files changed, 89 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index f5c0b3affa52..2a2241dbbf16 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1683,6 +1683,81 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_vmsa params; + struct sev_data_receive_update_vmsa *vmsa; + struct kvm_vcpu *vcpu; + void *hdr = NULL, *trans = NULL; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_receive_update_vmsa))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + ret = -ENOMEM; + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); + if (!vmsa) + goto e_free_trans; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans); + vmsa->trans_len = params.trans_len; + + /* + * Flush before RECEIVE_UPDATE_VMSA, the PSP encrypts the + * written VMSA memory content with the guest's key, and + * the cache may contain dirty, unencrypted data.
+ */ + clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE); + + /* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, vmsa, &argp->error); + + if (!ret) + vcpu->arch.guest_state_protected = true; + + kfree(vmsa); +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -2079,6 +2154,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_UPDATE_DATA: r = sev_receive_update_data(kvm, &sev_cmd); break; + case KVM_SEV_RECEIVE_UPDATE_VMSA: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = sev_receive_update_vmsa(kvm, &sev_cmd); + else + r = -EINVAL; + break; case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 1c4a04525823..a12f1a4a2f32 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2039,6 +2039,14 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; +struct kvm_sev_receive_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + struct kvm_csv_batch_list_node { __u64 cmd_data_addr; __u64 addr; __u64 next_cmd_addr; }; -- Gitee From 8a0648368609b14723bd37c702c36a5c184e0ae2 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 7 Apr 2021 02:46:11 -0400 Subject: [PATCH 407/953] anolis: KVM: x86: Restore control registers in __set_sregs() to support CSV2 guest live migration ANBZ: #8572 When migrating a CSV2 guest to the recipient, the KVM on the recipient's side needs to update the guest context so that the guest can continue to run. The control register state is necessary for updating the guest context. Allow the control registers to be updated in __set_sregs() so that the CSV2 guest can continue running correctly after being migrated to the recipient.
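For illustration, the control registers arrive on the recipient through the standard KVM_SET_SREGS path; a minimal userspace sketch (vcpu_fd and the saved_sregs snapshot taken from the source are assumptions):

	struct kvm_sregs sregs = saved_sregs;	/* cr0/cr2/cr3/cr4/cr8, efer, ... */

	if (ioctl(vcpu_fd, KVM_SET_SREGS, &sregs) < 0)
		perror("KVM_SET_SREGS");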
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/x86.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5eeba2b810ca..5f24b02a1674 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11531,21 +11531,24 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, if (kvm_set_apic_base(vcpu, &apic_base_msr)) return -EINVAL; - if (vcpu->arch.guest_state_protected) + if (vcpu->arch.guest_state_protected && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return 0; - dt.size = sregs->idt.limit; - dt.address = sregs->idt.base; - static_call(kvm_x86_set_idt)(vcpu, &dt); - dt.size = sregs->gdt.limit; - dt.address = sregs->gdt.base; - static_call(kvm_x86_set_gdt)(vcpu, &dt); + if (!vcpu->arch.guest_state_protected) { + dt.size = sregs->idt.limit; + dt.address = sregs->idt.base; + static_call(kvm_x86_set_idt)(vcpu, &dt); + dt.size = sregs->gdt.limit; + dt.address = sregs->gdt.base; + static_call(kvm_x86_set_gdt)(vcpu, &dt); - vcpu->arch.cr2 = sregs->cr2; - *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; - vcpu->arch.cr3 = sregs->cr3; - kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); - static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + vcpu->arch.cr2 = sregs->cr2; + *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; + vcpu->arch.cr3 = sregs->cr3; + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); + static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + } kvm_set_cr8(vcpu, sregs->cr8); @@ -11559,6 +11562,9 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); + if (vcpu->arch.guest_state_protected) + return 0; + if (update_pdptrs) { idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { -- Gitee From 54378dcde7a51fe4965558395870ec5c390f1cf8 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 15 Jun 2021 11:29:13 +0800 Subject: [PATCH 408/953] anolis: KVM: SVM: Export MSR_AMD64_SEV_ES_GHCB to userspace for CSV2 guest ANBZ: #8572 VMCB.control.ghcb_gpa contains the info necessary to support a CSV2 guest at runtime. At present, it includes the following points: 1. For GHCB MSR protocol, ghcb_gpa stores the negotiation result 2. For GHCB page protocol, ghcb_gpa stores the GPA of GHCB page In addition, AP VCPU's SIPI state and GHCB page mapping state are temporarily stored in KVM. When a CSV2 guest is migrated to the recipient, KVM needs to restore VMCB.control.ghcb_gpa, the VCPU's SIPI state and the GHCB page mapping state from the source side. This patch adds support for exporting MSR_AMD64_SEV_ES_GHCB to userspace. KVM can collect all the info dictated above and return it to userspace if userspace requests to get MSR_AMD64_SEV_ES_GHCB, and KVM can restore all the info dictated above if userspace requests to set MSR_AMD64_SEV_ES_GHCB.
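For illustration, a minimal userspace sketch of fetching the emulated MSR (vcpu_fd is an assumption; error handling is omitted; 0xc0010130 is MSR_AMD64_SEV_ES_GHCB):

	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr.nmsrs = 1,
		.entry.index = 0xc0010130,	/* MSR_AMD64_SEV_ES_GHCB */
	};

	ioctl(vcpu_fd, KVM_GET_MSRS, &msrs.hdr);
	/* bits [52-63] of msrs.entry.data carry the KVM status bits */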
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 19 +++++++++ arch/x86/kvm/svm/svm.c | 88 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.h | 42 +++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/x86.c | 13 ++++++ include/uapi/linux/kvm.h | 2 + 6 files changed, 165 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2a2241dbbf16..a47e6c5aca51 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3950,3 +3950,22 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } + +int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) +{ + if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { + /* Unable to map GHCB from guest */ + vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", + ghcb_gpa); + + svm->sev_es.receiver_ghcb_map_fail = true; + return -EINVAL; + } + + svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; + svm->sev_es.receiver_ghcb_map_fail = false; + + pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); + + return 0; +} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 13cb7b2919c7..7353c45982f6 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2946,6 +2946,31 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break; + case MSR_AMD64_SEV_ES_GHCB: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Only support userspace get/set from/to + * vmcb.control.ghcb_gpa + */ + if (!msr_info->host_initiated || + !sev_es_guest(svm->vcpu.kvm)) + return 1; + + msr_info->data = svm->vmcb->control.ghcb_gpa; + + /* Only set status bits when using GHCB page protocol */ + if (msr_info->data && + !is_ghcb_msr_protocol(msr_info->data)) { + if (svm->sev_es.ghcb) + msr_info->data |= GHCB_MSR_MAPPED_MASK; + + if (svm->sev_es.received_first_sipi) + msr_info->data |= + GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; + } + break; + } + return 1; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3181,6 +3206,47 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->msr_decfg = data; break; } + case MSR_AMD64_SEV_ES_GHCB: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Only support userspace get/set from/to + * vmcb.control.ghcb_gpa + */ + if (!msr->host_initiated || + !sev_es_guest(svm->vcpu.kvm)) + return 1; + + /* + * Value 0 means uninitialized userspace MSR data, + * userspace need get the initial MSR data afterwards. + */ + if (!data) + return 0; + + /* Extract status info when using GHCB page protocol */ + if (!is_ghcb_msr_protocol(data)) { + if (!svm->sev_es.ghcb && + (data & GHCB_MSR_MAPPED_MASK)) { + /* + * This happened on recipient of migration, + * should return error if cannot map the + * ghcb page. 
+ */ + if (sev_es_ghcb_map(to_svm(vcpu), + data & ~GHCB_MSR_KVM_STATUS_MASK)) + return 1; + } + + if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) + svm->sev_es.received_first_sipi = true; + + data &= ~GHCB_MSR_KVM_STATUS_MASK; + } + + svm->vmcb->control.ghcb_gpa = data; + break; + } + return 1; default: return kvm_set_msr_common(vcpu, msr); } @@ -4136,6 +4202,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu); + /* + * For recipient side of CSV2 guest, fake the exit code as + * SVM_EXIT_ERR and return directly if we failed to map + * the necessary GHCB page. When handling the exit code + * afterwards, it can exit to userspace and stop the guest. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + sev_es_guest(vcpu->kvm) && + svm->sev_es.receiver_ghcb_map_fail) { + svm->vmcb->control.exit_code = SVM_EXIT_ERR; + return EXIT_FASTPATH_NONE; + } + svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; @@ -4310,6 +4389,15 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) if (kvm && sev_es_guest(kvm)) return false; break; + case MSR_AMD64_SEV_ES_GHCB: + /* + * Only CSV2 guests support exporting this MSR, this should + * be determined after KVM_CREATE_VM. + */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON || + (kvm && !sev_es_guest(kvm))) + return false; + break; default: break; } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index b2249897a7db..82ff3f931dcf 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -202,6 +202,9 @@ struct vcpu_sev_es_state { u32 ghcb_sa_len; bool ghcb_sa_sync; bool ghcb_sa_free; + + /* CSV2 migrated ghcb mapping state support */ + bool receiver_ghcb_map_fail; }; struct vcpu_svm { @@ -665,6 +668,44 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu); #define GHCB_VERSION_MAX 1ULL #define GHCB_VERSION_MIN 1ULL +/* + * CSV2 live migration support: + * If MSR_AMD64_SEV_ES_GHCB in migration didn't apply the GHCB MSR protocol, + * reuse bits [52-63] to indicate vcpu status. The following statuses are + * currently included: + * * ghcb_map: indicate whether GHCB page was mapped. The mapped GHCB + * page may be filled with GPRs before VMRUN, so we must + * remap GHCB page on the recipient's side. + * * received_first_sipi: indicate AP's INIT-SIPI-SIPI stage. Reusing + * these bits for received_first_sipi is acceptable because + * the runtime stage of the guest's Linux only applies the + * GHCB page protocol. + * It's unlikely that the migration encounters other stages + * of the guest's Linux. Once encountered, AP bringup may fail + * which will not impact user payload. + * Other bits keep their original meaning.
(See GHCB Spec 2.3.1 for detail) + */ +#define GHCB_MSR_KVM_STATUS_POS 52 +#define GHCB_MSR_KVM_STATUS_BITS 12 +#define GHCB_MSR_KVM_STATUS_MASK \ + ((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1) \ + << GHCB_MSR_KVM_STATUS_POS) +#define GHCB_MSR_MAPPED_POS 63 +#define GHCB_MSR_MAPPED_BITS 1 +#define GHCB_MSR_MAPPED_MASK \ + ((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1) \ + << GHCB_MSR_MAPPED_POS) +#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS 62 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS 1 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK \ + ((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1) \ + << GHCB_MSR_RECEIVED_FIRST_SIPI_POS) + + +static inline bool is_ghcb_msr_protocol(u64 ghcb_val) +{ + return ghcb_val & GHCB_MSR_INFO_MASK; +} extern unsigned int max_sev_asid; @@ -694,6 +735,7 @@ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa); void sev_es_unmap_ghcb(struct vcpu_svm *svm); int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len); +int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa); /* vmenter.S */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b2ed051611b0..0ca940187502 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7016,6 +7016,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) return nested; case MSR_AMD64_VIRT_SPEC_CTRL: case MSR_AMD64_TSC_RATIO: + case MSR_AMD64_SEV_ES_GHCB: /* This is AMD only. */ return false; default: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5f24b02a1674..a979be6b6d91 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1564,6 +1564,8 @@ static const u32 emulated_msrs_all[] = { MSR_K7_HWCR, MSR_KVM_POLL_CONTROL, + + MSR_AMD64_SEV_ES_GHCB, }; static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; @@ -4636,6 +4638,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_X86_NOTIFY_VMEXIT: r = kvm_caps.has_notify_vmexit; break; + case KVM_CAP_SEV_ES_GHCB: + r = 0; + + /* Both CSV2 and SEV-ES guests support MSR_AMD64_SEV_ES_GHCB, + * but only CSV2 guests support exporting the emulated + * MSR_AMD64_SEV_ES_GHCB. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = static_call(kvm_x86_has_emulated_msr)(kvm, + MSR_AMD64_SEV_ES_GHCB); + break; default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a12f1a4a2f32..11e57082efd7 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1201,6 +1201,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_SEV_ES_GHCB 500 + #ifdef KVM_CAP_IRQ_ROUTING struct kvm_irq_routing_irqchip { -- Gitee From 50ec94be7416a34163bdd1a80a41ad33c113af11 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 8 Aug 2023 23:47:22 +0800 Subject: [PATCH 409/953] anolis: KVM: x86: Introduce control_{pre,post}_system_reset ioctl interfaces ANBZ: #8572 In the upcoming patches, we will add support for rebooting CSV2 guests. In order to support rebooting a CSV2 guest, we will set vcpu->arch.guest_state_protected to false, before VMRUN, so that the VMM can initialize vCPU states and VMSA, and then set vcpu->arch.guest_state_protected back to true to bypass unexpected behaviour in KVM. Besides, cache flush is necessary during rebooting a memory encrypted guest. Introduce control_{pre,post}_system_reset ioctl interfaces to support rebooting memory encrypted guests correctly.
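For illustration, the intended VMM reboot sequence might look like this minimal sketch (vm_fd and reset_vcpu_state() are assumptions; error handling is omitted):

	ioctl(vm_fd, KVM_CONTROL_PRE_SYSTEM_RESET);	/* unprotect vCPU state */
	reset_vcpu_state();				/* ordinary reset path */
	ioctl(vm_fd, KVM_CONTROL_POST_SYSTEM_RESET);	/* flush caches, re-protect */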
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/include/asm/kvm-x86-ops.h | 2 ++ arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/svm/sev.c | 10 ++++++++++ arch/x86/kvm/svm/svm.c | 2 ++ arch/x86/kvm/svm/svm.h | 3 +++ arch/x86/kvm/x86.c | 14 ++++++++++++++ include/uapi/linux/kvm.h | 4 ++++ 7 files changed, 37 insertions(+) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 3ab3e361ea81..b54e72a0100b 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -136,6 +136,8 @@ KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(control_pre_system_reset) +KVM_X86_OP_OPTIONAL(control_post_system_reset) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0dbbe96afb8a..a46465695e0d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1752,6 +1752,8 @@ struct kvm_x86_ops { unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); + int (*control_pre_system_reset)(struct kvm *kvm); + int (*control_post_system_reset)(struct kvm *kvm); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index a47e6c5aca51..ba2627eab776 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3969,3 +3969,13 @@ int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) return 0; } + +int csv_control_pre_system_reset(struct kvm *kvm) +{ + return 0; +} + +int csv_control_post_system_reset(struct kvm *kvm) +{ + return 0; +} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7353c45982f6..e688fcb72181 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5132,6 +5132,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, .vm_attestation = sev_vm_attestation, + .control_pre_system_reset = csv_control_pre_system_reset, + .control_post_system_reset = csv_control_post_system_reset, }; /* diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 82ff3f931dcf..dc318ed71e84 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -737,6 +737,9 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm); int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len); int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa); +int csv_control_pre_system_reset(struct kvm *kvm); +int csv_control_post_system_reset(struct kvm *kvm); + /* vmenter.S */ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a979be6b6d91..547b5143fbba 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7118,6 +7118,20 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; } + case KVM_CONTROL_PRE_SYSTEM_RESET: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + kvm_x86_ops.control_pre_system_reset) + r = static_call(kvm_x86_control_pre_system_reset)(kvm); + else + r = -ENOTTY; + break; + case KVM_CONTROL_POST_SYSTEM_RESET: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + 
kvm_x86_ops.control_post_system_reset) + r = static_call(kvm_x86_control_post_system_reset)(kvm); + else + r = -ENOTTY; + break; default: r = -ENOTTY; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 11e57082efd7..5c0859e7597f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1587,6 +1587,10 @@ struct kvm_s390_ucas_mapping { #define KVM_GET_DEVICE_ATTR _IOW(KVMIO, 0xe2, struct kvm_device_attr) #define KVM_HAS_DEVICE_ATTR _IOW(KVMIO, 0xe3, struct kvm_device_attr) +/* ioctls to control the vm during system reset */ +#define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) +#define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) + /* * ioctls for vcpu fds */ -- Gitee From 18c0ae9447171b47aca264013913a7a23351c328 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 15 Apr 2021 07:56:55 -0400 Subject: [PATCH 410/953] anolis: KVM: SVM: Add support for rebooting CSV2 guest ANBZ: #8572 Currently, rebooting a CSV2 guest is unsupported because vCPU state is encrypted and can't be initialized when the guest reboots to execute OVMF code. In order to support rebooting a CSV2 guest, make a backup of the encrypted VMSA before booting the guest, and restore the VMSA from the backup before rebooting the guest. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 59 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.c | 10 +++++++ arch/x86/kvm/svm/svm.h | 2 ++ 3 files changed, 71 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index ba2627eab776..046bc7a4de3b 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -686,6 +686,18 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, return ret; vcpu->arch.guest_state_protected = true; + + /* + * Backup the encrypted vmsa to support rebooting CSV2 guest. The + * clflush_cache_range() is necessary to invalidate prefetched + * memory area pointed to by svm->sev_es.vmsa so that we can read + * fresh memory updated by PSP.
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); + } + return 0; } @@ -2622,6 +2634,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); + + __free_page(virt_to_page(svm->sev_es.reset_vmsa)); } static void dump_ghcb(struct vcpu_svm *svm) @@ -3972,10 +3986,55 @@ int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) int csv_control_pre_system_reset(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + vcpu->arch.guest_state_protected = false; + + mutex_unlock(&vcpu->mutex); + } + return 0; } int csv_control_post_system_reset(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + /* Flush both host and guest caches of VMSA */ + wbinvd_on_all_cpus(); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE); + + /* Flush encrypted vmsa to memory */ + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + + svm->vcpu.arch.guest_state_protected = true; + svm->sev_es.received_first_sipi = false; + + mutex_unlock(&vcpu->mutex); + } + return 0; } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e688fcb72181..cf497583b841 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1435,6 +1435,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) struct vcpu_svm *svm; struct page *vmcb01_page; struct page *vmsa_page = NULL; + struct page *reset_vmsa_page = NULL; int err; BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); @@ -1454,6 +1455,10 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (!vmsa_page) goto error_free_vmcb_page; + reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!reset_vmsa_page) + goto error_free_vmsa_page; + /* * SEV-ES guests maintain an encrypted version of their FPU * state which is restored and saved on VMRUN and VMEXIT. @@ -1482,6 +1487,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (vmsa_page) svm->sev_es.vmsa = page_address(vmsa_page); + if (reset_vmsa_page) + svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); + svm->guest_state_loaded = false; return 0; @@ -1489,6 +1497,8 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) error_free_vmsa_page: if (vmsa_page) __free_page(vmsa_page); + if (reset_vmsa_page) + __free_page(reset_vmsa_page); error_free_vmcb_page: __free_page(vmcb01_page); out: diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index dc318ed71e84..3be7e827f9b2 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -205,6 +205,8 @@ struct vcpu_sev_es_state { /* CSV2 migrated ghcb mapping state support */ bool receiver_ghcb_map_fail; + /* CSV2 reboot vmsa */ + struct vmcb_save_area *reset_vmsa; }; struct vcpu_svm { -- Gitee From 36ceeaff97075f914dc12633db3fe4466e06ba5f Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sat, 6 May 2023 16:01:25 +0800 Subject: [PATCH 411/953] anolis: KVM: SVM: Force flush caches before reboot CSV guest ANBZ: #8572 For a memory encrypted guest, its pages' encrypt status may change at runtime. When the user reboots the guest, the pages' encrypt status from the last boot is ignored.
So during the boot flow of a reboot, there may be two versions of the
memory data lying in the cache, as follows:

  (The original commit includes an ASCII diagram here, flattened in this
  copy. It shows a 4K guest page on the right and the cache on the left:
  the same 64-byte-aligned physical address pa1 is cached in two distinct
  cachelines, one fetched through the pa1(c=0) mapping and one through
  the pa1(c=1) mapping.)

If the older cached version is flushed after the newer one, and the
guest then reads the memory again, it will see corrupted data, which may
lead to a crash.

With this change, for any memory-encrypted guest, the cache is forcibly
flushed to memory before the next boot flow, which ensures that memory
accesses are up-to-date.

Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2913
---
 arch/x86/kvm/svm/sev.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 046bc7a4de3b..7e467e458992 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4012,12 +4012,12 @@ int csv_control_post_system_reset(struct kvm *kvm)
 	unsigned long i;
 	int ret;
 
+	/* Flush both host and guest caches before the next boot flow */
+	wbinvd_on_all_cpus();
+
 	if (!sev_es_guest(kvm))
 		return 0;
 
-	/* Flush both host and guest caches of the VMSA */
-	wbinvd_on_all_cpus();
-
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		struct vcpu_svm *svm = to_svm(vcpu);
 
--
Gitee

From 7e02036a12f5a3e23d8548debb0702d0a25f2302 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Sun, 10 Jan 2021 14:57:21 -0500
Subject: [PATCH 412/953] anolis: KVM: SVM: Fix the available ASID range for
 CSV2 guest

ANBZ: #8573

All the ASIDs in the range [1, max_sev_asid] are available for CSV2
guests, regardless of the value of min_sev_asid.

Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2914
---
 arch/x86/kvm/svm/sev.c | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 7e467e458992..7ec809349b9a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -170,6 +170,13 @@ static int sev_asid_new(struct kvm_sev_info *sev)
 	 */
 	min_asid = sev->es_active ? 1 : min_sev_asid;
 	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+
+	/*
+	 * No matter what the min_sev_asid is, all ASIDs in the range
+	 * [1, max_sev_asid] can be used for CSV2 guests on Hygon CPUs.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		max_asid = max_sev_asid;
 again:
 	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
 	if (asid > max_asid) {
@@ -2512,11 +2519,19 @@ void __init sev_hardware_setup(void)
 	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
 		goto out;
 
-	/* Has the system been allocated ASIDs for SEV-ES? */
-	if (min_sev_asid == 1)
-		goto out;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		/*
+		 * The ASIDs from 1 to max_sev_asid are available for Hygon
+		 * CSV2 guests.
+		 */
+		sev_es_asid_count = max_sev_asid;
+	} else {
+		/* Has the system been allocated ASIDs for SEV-ES? */
+		if (min_sev_asid == 1)
+			goto out;
 
-	sev_es_asid_count = min_sev_asid - 1;
+		sev_es_asid_count = min_sev_asid - 1;
+	}
 	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
 	sev_es_supported = true;
@@ -2530,7 +2545,10 @@ void __init sev_hardware_setup(void)
 	pr_info("%s %s (ASIDs %u - %u)\n",
 		boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV2" : "SEV-ES",
 		sev_es_supported ? "enabled" : "disabled",
-		min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
+		boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+		1 : (min_sev_asid > 1 ? 1 : 0),
+		boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+		max_sev_asid : min_sev_asid - 1);
 
 	sev_enabled = sev_supported;
 	sev_es_enabled = sev_es_supported;
--
Gitee

From 160fece72c3b8077b843f7ff7fce3c90922671ed Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Thu, 6 Apr 2023 09:03:58 +0800
Subject: [PATCH 413/953] anolis: x86/csv2: Keep in atomic context when
 holding ghcb page if the #VC comes from userspace

ANBZ: #8573

In vc_raw_handle_exception(), the handler holds the ghcb page while it
calls

    __sev_get_ghcb()      <- take the ghcb page to communicate with the host
    vc_init_em_ctxt()
    vc_handle_exitcode()
    __sev_put_ghcb()      <- the ghcb page is no longer held after the
                             communication

to emulate the instruction which caused the #VC.

When the #VC comes from userspace, the code path
user_exc_vmm_communication() -> vc_raw_handle_exception() does not keep
the memory accesses in atomic context, which may lead to direct page
fault handling if the emulation accesses a userspace address that is not
present in memory. When handling a page fault on a userspace address, if
the handler is neither in atomic context nor under pagefault_disable(),
IRQs may be enabled, and there is a risk of generating more #VCs. So
it's necessary to switch to atomic context before emulating the
instructions which cause #VC.

Add a __preempt_count_{add,sub}() pair to keep the code between
__sev_get_ghcb() and __sev_put_ghcb() in atomic context if the #VC comes
from userspace. If a memory access fails during emulation, the caller
will construct the page fault info and forward a page fault later.

Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2914
---
 arch/x86/kernel/sev.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index a8db68a063c4..561120fce3fa 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1850,6 +1850,15 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co
 	struct ghcb *ghcb;
 	bool ret = true;
 
+	/*
+	 * Make sure the code between __sev_get_ghcb() and __sev_put_ghcb()
+	 * runs in atomic context. If the #VC comes from kernel mode, the
+	 * code here is already in atomic context. If the #VC comes from
+	 * user mode, it's necessary to switch to atomic context manually.
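+	 * (Being in atomic context keeps the page fault path from
+	 * re-enabling IRQs; if a memory access fails while emulating, the
+	 * caller constructs the page fault info and forwards a page fault
+	 * later.)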
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + __preempt_count_add(HARDIRQ_OFFSET); + ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); @@ -1860,6 +1869,9 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co __sev_put_ghcb(&state); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + __preempt_count_sub(HARDIRQ_OFFSET); + /* Done - now check the result */ switch (result) { case ES_OK: -- Gitee From c802861aad2b41abe9853c6e5c66567a15e55785 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 6 Jul 2023 05:20:45 +0800 Subject: [PATCH 414/953] anolis: KVM: x86: Calls is_64_bit_hypercall() instead of is_64_bit_mode() in complete_hypercall_exit() ANBZ: #8573 In the commit b5aead0064f3 ("KVM: x86: Assume a 64-bit hypercall for guests with protected state"), is_64_bit_mode() will trigger warning, as the following messages, for SEV-ES or CSV2 guest. [85350.053201] ------------[ cut here ]------------ [85350.053206] WARNING: CPU: 2 PID: 68989 at arch/x86/kvm/x86.h:156 complete_hypercall_exit+0x6a/0x70 [kvm] [85350.053299] Modules linked in: kvm_amd(OE) kvm(OE) ccp(E) irqbypass(E) vhost_net(E) vhost(E) vhost_iotlb(E) tap(E) fuse(E) xt_CHECKSUM(E) xt_MASQUERADE(E) xt_conntrack(E) ipt_REJECT(E) nf_reject_ipv4(E) ip6table_mangle(E) ip6table_nat(E) iptable_mangle(E) iptable_nat(E) nf_nat(E) nf_conntrack(E) nf_defrag_ipv6(E) nf_defrag_ipv4(E) nf_tables(E) libcrc32c(E) nfnetlink(E) ip6table_filter(E) ip6_tables(E) iptable_filter(E) tun(E) bridge(E) stp(E) llc(E) rfkill(E) vfat(E) fat(E) binfmt_misc(E) intel_rapl_msr(E) intel_rapl_common(E) amd64_edac(E) edac_mce_amd(E) crct10dif_pclmul(E) crc32_pclmul(E) acpi_ipmi(E) ipmi_ssif(E) ipmi_si(E) ast(E) joydev(E) mousedev(E) ghash_clmulni_intel(E) rapl(E) ipmi_devintf(E) drm_shmem_helper(E) drm_kms_helper(E) ipmi_msghandler(E) sg(E) k10temp(E) acpi_cpufreq(E) squashfs(E) loop(E) parport_pc(E) ppdev(E) lp(E) parport(E) drm(E) ip_tables(E) sd_mod(E) t10_pi(E) crc64_rocksoft(E) crc64(E) ahci(E) igb(E) i2c_designware_platform(E) libahci(E) i2c_algo_bit(E) dca(E) i2c_piix4(E) [85350.053421] i2c_designware_core(E) crc32c_intel(E) libata(E) i2c_core(E) [last unloaded: kvm(OE)] [85350.053432] CPU: 2 PID: 68989 Comm: qemu-system-x86 Tainted: GF W OE 6.6.7-for-openanolis #5 [85350.053438] Hardware name: HYGON HongHaiA1b/HongHaiA1, BIOS A1633050 02/02/2023 [85350.053441] RIP: 0010:complete_hypercall_exit+0x6a/0x70 [kvm] [85350.053511] Code: e8 9b fb ff ff 48 83 c4 08 5b 5d e9 60 68 68 d8 48 8d 54 24 04 48 89 e6 48 89 ef e8 40 db 12 00 8b 44 24 04 85 c0 74 c4 eb c4 <0f> 0b eb b5 66 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 [85350.053514] RSP: 0018:ffffc90000ea3e28 EFLAGS: 00010202 [85350.053519] RAX: ffff8881419f0000 RBX: 0000000000000000 RCX: ffff8881003ad780 [85350.053522] RDX: 0000606fc0a29bc0 RSI: 00000000fffffe01 RDI: ffff888b5dc20000 [85350.053525] RBP: ffff888b5dc20000 R08: 0000000000000001 R09: 0000000000000000 [85350.053527] R10: ffffc90000ea3ee8 R11: 0000000000000000 R12: ffff88810fe1ea00 [85350.053530] R13: ffff888b5dc20000 R14: ffff888b5dc20048 R15: 0000000000000000 [85350.053532] FS: 00007eff45528700(0000) GS:ffff88903f080000(0000) knlGS:0000000000000000 [85350.053536] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [85350.053539] CR2: 0000000000000000 CR3: 00000001415d2000 CR4: 00000000003506e0 [85350.053541] Call Trace: [85350.053545] [85350.053550] ? __warn+0x84/0x140 [85350.053558] ? complete_hypercall_exit+0x6a/0x70 [kvm] [85350.053627] ? 
report_bug+0x1bd/0x1d0
[85350.053635]  ? handle_bug+0x3c/0x70
[85350.053640]  ? exc_invalid_op+0x18/0x70
[85350.053645]  ? asm_exc_invalid_op+0x1a/0x20
[85350.053655]  ? complete_hypercall_exit+0x6a/0x70 [kvm]
[85350.053724]  kvm_arch_vcpu_ioctl_run+0x3dd/0x410 [kvm]
[85350.053796]  kvm_vcpu_ioctl+0x277/0x6c0 [kvm]
[85350.053855]  __x64_sys_ioctl+0x92/0xd0
[85350.053864]  do_syscall_64+0x3f/0x90
[85350.053868]  entry_SYSCALL_64_after_hwframe+0x6e/0xd8
[85350.053874] RIP: 0033:0x7eff486c33ab
[85350.053878] Code: 0f 1e fa 48 8b 05 e5 7a 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d b5 7a 0d 00 f7 d8 64 89 01 48
[85350.053881] RSP: 002b:00007eff45527848 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[85350.053886] RAX: ffffffffffffffda RBX: 000000000000ae80 RCX: 00007eff486c33ab
[85350.053888] RDX: 0000000000000000 RSI: 000000000000ae80 RDI: 0000000000000010
[85350.053891] RBP: 0000563586e32430 R08: 0000563584ff1d30 R09: 00007eff455276a4
[85350.053893] R10: 00007eff4552769c R11: 0000000000000246 R12: 0000000000000000
[85350.053896] R13: 00005635856bcd60 R14: 0000000000000000 R15: 0000000000000000
[85350.053904]
[85350.053906] ---[ end trace 0000000000000000 ]---

Use is_64_bit_hypercall() instead of is_64_bit_mode() in
complete_hypercall_exit() to avoid the warning when an SEV-ES or CSV2
guest invokes the KVM_HC_MAP_GPA_RANGE hypercall.

Fixes: b5aead0064f3 ("KVM: x86: Assume a 64-bit hypercall for guests with protected state")
Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2914
---
 arch/x86/kvm/x86.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 547b5143fbba..ee8e77fa69c6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9850,7 +9850,7 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
 {
 	u64 ret = vcpu->run->hypercall.ret;
 
-	if (!is_64_bit_mode(vcpu))
+	if (!is_64_bit_hypercall(vcpu))
 		ret = (u32)ret;
 	kvm_rax_write(vcpu, ret);
 	++vcpu->stat.hypercalls;
--
Gitee

From 47f0cbb3c1702adeccd7306d3c76c6b755b4d2ff Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Sun, 7 Jan 2024 04:47:42 +0800
Subject: [PATCH 415/953] anolis: x86/head/64: Flush caches for .bss..decrypted
 section after CR3 switches to early_top_pgt

ANBZ: #8573

The memory region of the .bss..decrypted section may be mapped with
encryption before the early boot stage of Linux. If the corresponding
stale cachelines from that earlier stage are not flushed before we
access the memory region in later stages, Linux will crash because the
stale caches pollute the memory.

Fix this issue by flushing the caches through the encrypted mapping
before we access the .bss..decrypted section.
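
In outline, the fix walks the section one 64-byte cacheline at a time
through the encrypted early mapping; a minimal sketch of that loop shape
(the actual hunk below additionally brackets the loop with mfence for
ordering):

	char *cl  = (char *)(vaddr & ~63UL);		/* round down to a cacheline */
	char *end = (char *)((vaddr_end + 63) & ~63UL);	/* round up past the region  */

	for (; cl != end; cl += 64)
		clflush(cl);				/* write back and invalidate */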
Signed-off-by: hanliyang
Reviewed-by: Shirong Hao
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2914
---
 arch/x86/kernel/head64.c  | 48 +++++++++++++++++++++++++++++++++++++++
 arch/x86/kernel/head_64.S | 10 ++++++++
 2 files changed, 58 insertions(+)

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index bbc21798df10..08dd5d8932a9 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -319,6 +319,54 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	return sme_postprocess_startup(bp, pmd);
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+extern bool bsp_flush_bss_decrypted_section_done;
+
+void __ref early_clflush_bss_decrypted_section(void)
+{
+	/* Only the BSP may flush these caches, and only at the early boot stage */
+	if (bsp_flush_bss_decrypted_section_done)
+		return;
+
+	if (read_cr3_pa() != __pa_nodebug(early_top_pgt))
+		return;
+
+	if (sme_get_me_mask()) {
+		unsigned long vaddr, vaddr_end;
+		char *cl, *start, *end;
+
+		/*
+		 * The memory region of the .bss..decrypted section may be
+		 * mapped with encryption in an earlier stage. If the
+		 * corresponding stale cachelines from that earlier stage
+		 * were not flushed before we access the memory region,
+		 * Linux will crash later because the stale caches pollute
+		 * the memory. So we need to flush the caches through the
+		 * encrypted mapping before we access the .bss..decrypted
+		 * section.
+		 *
+		 * The function __startup_64() has already filled in the
+		 * encrypted mapping for the .bss..decrypted section; use
+		 * that mapping here.
+		 */
+		vaddr = (unsigned long)__start_bss_decrypted -
+			__START_KERNEL_map + phys_base;
+		vaddr_end = (unsigned long)__end_bss_decrypted -
+			__START_KERNEL_map + phys_base;
+
+		/* Hardcode the cacheline size to 64 at this stage. */
+		start = (char *)(vaddr & ~63);
+		end = (char *)((vaddr_end + 63) & ~63);
+
+		asm volatile("mfence" : : : "memory");
+		for (cl = start; cl != end; cl += 64)
+			clflush(cl);
+		asm volatile("mfence" : : : "memory");
+	}
+
+	bsp_flush_bss_decrypted_section_done = true;
+}
+#endif
+
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
 {
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e6eaee8509ce..9c2d7e2b5edb 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -375,6 +375,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	shrq	$32, %rdx
 	wrmsr
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * Ensure that the stale caches of .bss..decrypted memory which
+	 * lived in an earlier stage are flushed.
+	 */
+	call	early_clflush_bss_decrypted_section
+#endif
+
 	/* Setup and Load IDT */
 	call	early_setup_idt
 
@@ -511,6 +519,8 @@ SYM_CODE_END(vc_boot_ghcb)
 SYM_DATA(initial_code,	.quad x86_64_start_kernel)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
+SYM_DATA(bsp_flush_bss_decrypted_section_done, .byte 0x0)
+	.balign 8
 #endif
 
 SYM_DATA(trampoline_lock, .quad 0);
--
Gitee

From c7e1bc250f66553ada4a5a64d6d4162cc6a1ddc2 Mon Sep 17 00:00:00 2001
From: Michael Roth
Date: Mon, 16 Oct 2023 08:27:32 -0500
Subject: [PATCH 416/953] KVM: SEV: Do not intercept accesses to MSR_IA32_XSS
 for SEV-ES guests

ANBZ: #8573

commit a26b7cd2254695f8258cc370f33280db0a9a3813 upstream.

When intercepts are enabled for MSR_IA32_XSS, the host will swap in/out
the guest-defined values while context-switching to/from guest mode.
However, in the case of SEV-ES, vcpu->arch.guest_state_protected is set, so the guest-defined value is effectively ignored when switching to guest mode with the understanding that the VMSA will handle swapping in/out this register state. However, SVM is still configured to intercept these accesses for SEV-ES guests, so the values in the initial MSR_IA32_XSS are effectively read-only, and a guest will experience undefined behavior if it actually tries to write to this MSR. Fortunately, only CET/shadowstack makes use of this register on SEV-ES-capable systems currently, which isn't yet widely used, but this may become more of an issue in the future. Additionally, enabling intercepts of MSR_IA32_XSS results in #VC exceptions in the guest in certain paths that can lead to unexpected #VC nesting levels. One example is SEV-SNP guests when handling #VC exceptions for CPUID instructions involving leaf 0xD, subleaf 0x1, since they will access MSR_IA32_XSS as part of servicing the CPUID #VC, then generate another #VC when accessing MSR_IA32_XSS, which can lead to guest crashes if an NMI occurs at that point in time. Running perf on a guest while it is issuing such a sequence is one example where these can be problematic. Address this by disabling intercepts of MSR_IA32_XSS for SEV-ES guests if the host/guest configuration allows it. If the host/guest configuration doesn't allow for MSR_IA32_XSS, leave it intercepted so that it can be caught by the existing checks in kvm_{set,get}_msr_common() if the guest still attempts to access it. Fixes: 376c6d285017 ("KVM: SVM: Provide support for SEV-ES vCPU creation/loading") Cc: Alexey Kardashevskiy Suggested-by: Tom Lendacky Signed-off-by: Michael Roth Message-Id: <20231016132819.1002933-4-michael.roth@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2914 --- arch/x86/kvm/svm/sev.c | 19 +++++++++++++++++++ arch/x86/kvm/svm/svm.c | 1 + arch/x86/kvm/svm/svm.h | 2 +- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7ec809349b9a..457e0bf79b13 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3259,6 +3259,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm) set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux); } + + /* + * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if + * the host/guest supports its use. + * + * guest_can_use() checks a number of requirements on the host/guest to + * ensure that MSR_IA32_XSS is available, but it might report true even + * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host + * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better + * to further check that the guest CPUID actually supports + * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved + * guests will still get intercepted and caught in the normal + * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths. 
+ */ + if (guest_can_use(vcpu, X86_FEATURE_XSAVES) && + guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1); + else + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0); } void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index cf497583b841..820208772b4d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs { { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, { .index = MSR_IA32_LASTINTTOIP, .always = false }, + { .index = MSR_IA32_XSS, .always = false }, { .index = MSR_EFER, .always = false }, { .index = MSR_IA32_CR_PAT, .always = false }, { .index = MSR_AMD64_SEV_ES_GHCB, .always = true }, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 3be7e827f9b2..0461fbbe103d 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -30,7 +30,7 @@ #define IOPM_SIZE PAGE_SIZE * 3 #define MSRPM_SIZE PAGE_SIZE * 2 -#define MAX_DIRECT_ACCESS_MSRS 46 +#define MAX_DIRECT_ACCESS_MSRS 47 #define MSRPM_OFFSETS 32 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; extern bool npt_enabled; -- Gitee From 247a0f033be3352978c009a6e46e882c9d61b249 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Sat, 3 Feb 2024 13:45:20 +0100 Subject: [PATCH 417/953] KVM: x86: Fix KVM_GET_MSRS stack info leak ANBZ: #8573 commit 3376ca3f1a2075eaa23c5576c47d04d7e8a4adda upstream. Commit 6abe9c1386e5 ("KVM: X86: Move ignore_msrs handling upper the stack") changed the 'ignore_msrs' handling, including sanitizing return values to the caller. This was fine until commit 12bc2132b15e ("KVM: X86: Do the same ignore_msrs check for feature msrs") which allowed non-existing feature MSRs to be ignored, i.e. to not generate an error on the ioctl() level. It even tried to preserve the sanitization of the return value. However, the logic is flawed, as '*data' will be overwritten again with the uninitialized stack value of msr.data. Fix this by simplifying the logic and always initializing msr.data, vanishing the need for an additional error exit path. 
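
The leaking sequence for a non-existent feature MSR with ignore_msrs
enabled, sketched from the hunk below:

	r = kvm_get_msr_feature(&msr); /* KVM_MSR_RET_INVALID, msr.data untouched */
	*data = 0;                     /* sanitized here...                       */
	r = 0;                         /* ...error suppressed by the ignore check */
	*data = msr.data;              /* ...then overwritten with stack garbage  */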
Fixes: 12bc2132b15e ("KVM: X86: Do the same ignore_msrs check for feature msrs") Signed-off-by: Mathias Krause Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20240203124522.592778-2-minipli@grsecurity.net Signed-off-by: Sean Christopherson Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2914 --- arch/x86/kvm/x86.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ee8e77fa69c6..7d086e6a8033 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1706,22 +1706,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) struct kvm_msr_entry msr; int r; + /* Unconditionally clear the output for simplicity */ + msr.data = 0; msr.index = index; r = kvm_get_msr_feature(&msr); - if (r == KVM_MSR_RET_INVALID) { - /* Unconditionally clear the output for simplicity */ - *data = 0; - if (kvm_msr_ignored_check(index, 0, false)) - r = 0; - } - - if (r) - return r; + if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false)) + r = 0; *data = msr.data; - return 0; + return r; } static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) -- Gitee From fb16d21b5c136deda3a2bd93d4b5095b1103113a Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Fri, 8 Dec 2023 10:56:48 +0800 Subject: [PATCH 418/953] docs: perf: Add description for Synopsys DesignWare PCIe PMU driver ANBZ: #8565 commit cae40614cdd61a6601dc87c6e07c06bf642a125b upstream. Alibaba's T-Head Yitan 710 SoC includes Synopsys' DesignWare Core PCIe controller which implements PMU for performance and functional debugging to facilitate system maintenance. Document it to provide guidance on how to use it. Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Reviewed-by: Jonathan Cameron Reviewed-by: Yicong Yang Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20231208025652.87192-2-xueshuai@linux.alibaba.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/2900 --- .../admin-guide/perf/dwc_pcie_pmu.rst | 94 +++++++++++++++++++ Documentation/admin-guide/perf/index.rst | 1 + 2 files changed, 95 insertions(+) create mode 100644 Documentation/admin-guide/perf/dwc_pcie_pmu.rst diff --git a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst new file mode 100644 index 000000000000..d47cd229d710 --- /dev/null +++ b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst @@ -0,0 +1,94 @@ +====================================================================== +Synopsys DesignWare Cores (DWC) PCIe Performance Monitoring Unit (PMU) +====================================================================== + +DesignWare Cores (DWC) PCIe PMU +=============================== + +The PMU is a PCIe configuration space register block provided by each PCIe Root +Port in a Vendor-Specific Extended Capability named RAS D.E.S (Debug, Error +injection, and Statistics). + +As the name indicates, the RAS DES capability supports system level +debugging, AER error injection, and collection of statistics. 
To facilitate +collection of statistics, Synopsys DesignWare Cores PCIe controller +provides the following two features: + +- one 64-bit counter for Time Based Analysis (RX/TX data throughput and + time spent in each low-power LTSSM state) and +- one 32-bit counter for Event Counting (error and non-error events for + a specified lane) + +Note: There is no interrupt for counter overflow. + +Time Based Analysis +------------------- + +Using this feature you can obtain information regarding RX/TX data +throughput and time spent in each low-power LTSSM state by the controller. +The PMU measures data in two categories: + +- Group#0: Percentage of time the controller stays in LTSSM states. +- Group#1: Amount of data processed (Units of 16 bytes). + +Lane Event counters +------------------- + +Using this feature you can obtain Error and Non-Error information in +specific lane by the controller. The PMU event is selected by all of: + +- Group i +- Event j within the Group i +- Lane k + +Some of the events only exist for specific configurations. + +DesignWare Cores (DWC) PCIe PMU Driver +======================================= + +This driver adds PMU devices for each PCIe Root Port named based on the BDF of +the Root Port. For example, + + 30:03.0 PCI bridge: Device 1ded:8000 (rev 01) + +the PMU device name for this Root Port is dwc_rootport_3018. + +The DWC PCIe PMU driver registers a perf PMU driver, which provides +description of available events and configuration options in sysfs, see +/sys/bus/event_source/devices/dwc_rootport_{bdf}. + +The "format" directory describes format of the config fields of the +perf_event_attr structure. The "events" directory provides configuration +templates for all documented events. For example, +"Rx_PCIe_TLP_Data_Payload" is an equivalent of "eventid=0x22,type=0x1". + +The "perf list" command shall list the available events from sysfs, e.g.:: + + $# perf list | grep dwc_rootport + <...> + dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event] + <...> + dwc_rootport_3018/rx_memory_read,lane=?/ [Kernel PMU event] + +Time Based Analysis Event Usage +------------------------------- + +Example usage of counting PCIe RX TLP data payload (Units of bytes):: + + $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ + +The average RX/TX bandwidth can be calculated using the following formula: + + PCIe RX Bandwidth = Rx_PCIe_TLP_Data_Payload / Measure_Time_Window + PCIe TX Bandwidth = Tx_PCIe_TLP_Data_Payload / Measure_Time_Window + +Lane Event Usage +------------------------------- + +Each lane has the same event set and to avoid generating a list of hundreds +of events, the user need to specify the lane ID explicitly, e.g.:: + + $# perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=4/ + +The driver does not support sampling, therefore "perf record" will not +work. Per-task (without "-a") perf sessions are not supported. diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst index f60be04e4e33..6bc7739fddb5 100644 --- a/Documentation/admin-guide/perf/index.rst +++ b/Documentation/admin-guide/perf/index.rst @@ -19,6 +19,7 @@ Performance monitor support arm_dsu_pmu thunderx2-pmu alibaba_pmu + dwc_pcie_pmu nvidia-pmu meson-ddr-pmu cxl -- Gitee From 823a094840f397dece25414ebef29056c2f5a2ba Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Fri, 8 Dec 2023 10:56:49 +0800 Subject: [PATCH 419/953] PCI: Add Alibaba Vendor ID to linux/pci_ids.h ANBZ: #8565 commit ad6534c626fedd818718d76c36d69c7d8e7b61cc upstream. 
The Alibaba Vendor ID (0x1ded) is now used by Alibaba elasticRDMA
("erdma") and will be shared with the upcoming PCIe PMU
("dwc_pcie_pmu"). Move the Vendor ID to linux/pci_ids.h so that it can
be shared by several drivers later.

Signed-off-by: Shuai Xue
Acked-by: Bjorn Helgaas # pci_ids.h
Tested-by: Ilkka Koskinen
Link: https://lore.kernel.org/r/20231208025652.87192-3-xueshuai@linux.alibaba.com
Signed-off-by: Will Deacon
Signed-off-by: Jing Zhang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2900
---
 drivers/infiniband/hw/erdma/erdma_hw.h | 2 --
 include/linux/pci_ids.h                | 2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 9d316fdc6f9a..a155519a862f 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -11,8 +11,6 @@
 #include
 
 /* PCIe device related definition. */
-#define PCI_VENDOR_ID_ALIBABA 0x1ded
-
 #define ERDMA_PCI_WIDTH 64
 #define ERDMA_FUNC_BAR 0
 #define ERDMA_MISX_BAR 2
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 966f465f825f..0e90ad63dace 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2606,6 +2606,8 @@
 #define PCI_VENDOR_ID_TEKRAM	0x1de1
 #define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
 
+#define PCI_VENDOR_ID_ALIBABA	0x1ded
+
 #define PCI_VENDOR_ID_TEHUTI	0x1fc9
 #define PCI_DEVICE_ID_TEHUTI_3009	0x3009
 #define PCI_DEVICE_ID_TEHUTI_3010	0x3010
--
Gitee

From 823a094840f397dece25414ebef29056c2f5a2ba Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Fri, 8 Dec 2023 10:56:50 +0800
Subject: [PATCH 420/953] PCI: Move pci_clear_and_set_dword() helper to PCI
 header

ANBZ: #8565

commit ac16087134b837d42b75bb1c741070b6c142f258 upstream.

The clear and set pattern is commonly used for accessing PCI config
space; move the helper pci_clear_and_set_dword() from aspm.c into the
PCI header. In addition, rename it to pci_clear_and_set_config_dword()
to retain the "config" information and match the other accessors.

No functional change intended.
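
A caller can then do a read-modify-write of one config dword in a single
call. A usage sketch mirroring the converted ASPM code in the hunks
below:

	/* Clear the LTR L1.2 threshold fields, then set the new encoding. */
	pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);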
Signed-off-by: Shuai Xue Acked-by: Bjorn Helgaas Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20231208025652.87192-4-xueshuai@linux.alibaba.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2900 --- drivers/pci/access.c | 12 ++++++++ drivers/pci/pcie/aspm.c | 65 +++++++++++++++++++---------------------- include/linux/pci.h | 2 ++ 3 files changed, 44 insertions(+), 35 deletions(-) diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 6554a2e89d36..6449056b57dd 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); + +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(dev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(dev, pos, val); +} +EXPORT_SYMBOL(pci_clear_and_set_config_dword); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 7e3b342215e5..8272c7b24d0f 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -423,17 +423,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Calculate L1.2 PM substate timing parameters */ static void aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) @@ -494,10 +483,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); } /* Program T_POWER_ON times in both ports */ @@ -505,22 +496,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); /* Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, - pl1_2_enables); - pci_clear_and_set_dword(child, 
child->l1ss + PCI_L1SS_CTL1, 0, - cl1_2_enables); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); } } @@ -680,10 +675,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). @@ -706,10 +701,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) diff --git a/include/linux/pci.h b/include/linux/pci.h index b548d5646a86..f9a04587e4a9 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1213,6 +1213,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set); int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); -- Gitee From 6399c36ef447269aa4d538ddefab69dc8906a44a Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Fri, 8 Dec 2023 10:56:51 +0800 Subject: [PATCH 421/953] drivers/perf: add DesignWare PCIe PMU driver ANBZ: #8565 commit af9597adc2f1e3609c67c9792a2469bb64e43ae9 upstream. This commit adds the PCIe Performance Monitoring Unit (PMU) driver support for T-Head Yitian SoC chip. Yitian is based on the Synopsys PCI Express Core controller IP which provides statistics feature. The PMU is a PCIe configuration space register block provided by each PCIe Root Port in a Vendor-Specific Extended Capability named RAS D.E.S (Debug, Error injection, and Statistics). To facilitate collection of statistics the controller provides the following two features for each Root Port: - one 64-bit counter for Time Based Analysis (RX/TX data throughput and time spent in each low-power LTSSM state) and - one 32-bit counter for Event Counting (error and non-error events for a specified lane) Note: There is no interrupt for counter overflow. This driver adds PMU devices for each PCIe Root Port. And the PMU device is named based the BDF of Root Port. For example, 30:03.0 PCI bridge: Device 1ded:8000 (rev 01) the PMU device name for this Root Port is dwc_rootport_3018. 
Example usage of counting PCIe RX TLP data payload (Units of bytes):: $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ average RX bandwidth can be calculated like this: PCIe TX Bandwidth = Rx_PCIe_TLP_Data_Payload / Measure_Time_Window Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Reviewed-by: Jonathan Cameron Reviewed-by: Yicong Yang Reviewed-and-tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20231208025652.87192-5-xueshuai@linux.alibaba.com [will: Fix sparse error due to use of uninitialised 'vsec' symbol in dwc_pcie_match_des_cap()] Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/2900 --- drivers/perf/Kconfig | 7 + drivers/perf/Makefile | 1 + drivers/perf/dwc_pcie_pmu.c | 792 ++++++++++++++++++++++++++++++++++++ 3 files changed, 800 insertions(+) create mode 100644 drivers/perf/dwc_pcie_pmu.c diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 273d67ecf6d2..ec6e0d9194a1 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -217,6 +217,13 @@ config MARVELL_CN10K_DDR_PMU Enable perf support for Marvell DDR Performance monitoring event on CN10K platform. +config DWC_PCIE_PMU + tristate "Synopsys DesignWare PCIe PMU" + depends on PCI + help + Enable perf support for Synopsys DesignWare PCIe PMU Performance + monitoring event on platform including the Alibaba Yitian 710. + source "drivers/perf/arm_cspmu/Kconfig" source "drivers/perf/amlogic/Kconfig" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 16b3ec4db916..a06338e3401c 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o +obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/ obj-$(CONFIG_MESON_DDR_PMU) += amlogic/ obj-$(CONFIG_CXL_PMU) += cxl_pmu.o diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c new file mode 100644 index 000000000000..957058ad0099 --- /dev/null +++ b/drivers/perf/dwc_pcie_pmu.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe PMU driver + * + * Copyright (C) 2021-2023 Alibaba Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DWC_PCIE_VSEC_RAS_DES_ID 0x02 +#define DWC_PCIE_EVENT_CNT_CTL 0x8 + +/* + * Event Counter Data Select includes two parts: + * - 27-24: Group number(4-bit: 0..0x7) + * - 23-16: Event number(8-bit: 0..0x13) within the Group + * + * Put them together as in TRM. 
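+ * For example, eventid 0x602 selects Group #6, Event #0x02
+ * (rx_ack_dllp in the lane-event list below).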
+ */ +#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16) +#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8) +#define DWC_PCIE_CNT_STATUS BIT(7) +#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2) +#define DWC_PCIE_PER_EVENT_OFF 0x1 +#define DWC_PCIE_PER_EVENT_ON 0x3 +#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0) +#define DWC_PCIE_EVENT_PER_CLEAR 0x1 + +#define DWC_PCIE_EVENT_CNT_DATA 0xC + +#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10 +#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24) +#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8) +#define DWC_PCIE_DURATION_MANUAL_CTL 0x0 +#define DWC_PCIE_DURATION_1MS 0x1 +#define DWC_PCIE_DURATION_10MS 0x2 +#define DWC_PCIE_DURATION_100MS 0x3 +#define DWC_PCIE_DURATION_1S 0x4 +#define DWC_PCIE_DURATION_2S 0x5 +#define DWC_PCIE_DURATION_4S 0x6 +#define DWC_PCIE_DURATION_4US 0xFF +#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0) +#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1 + +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14 +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18 + +/* Event attributes */ +#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0) +#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16) +#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20) + +#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config) +#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config) +#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config) + +enum dwc_pcie_event_type { + DWC_PCIE_TIME_BASE_EVENT, + DWC_PCIE_LANE_EVENT, + DWC_PCIE_EVENT_TYPE_MAX, +}; + +#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0) +#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0) + +struct dwc_pcie_pmu { + struct pmu pmu; + struct pci_dev *pdev; /* Root Port device */ + u16 ras_des_offset; + u32 nr_lanes; + + struct list_head pmu_node; + struct hlist_node cpuhp_node; + struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX]; + int on_cpu; +}; + +#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu)) + +static int dwc_pcie_pmu_hp_state; +static struct list_head dwc_pcie_dev_info_head = + LIST_HEAD_INIT(dwc_pcie_dev_info_head); +static bool notify; + +struct dwc_pcie_dev_info { + struct platform_device *plat_dev; + struct pci_dev *pdev; + struct list_head dev_node; +}; + +struct dwc_pcie_vendor_id { + int vendor_id; +}; + +static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { + {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {} /* terminator */ +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group dwc_pcie_cpumask_attr_group = { + .attrs = dwc_pcie_pmu_cpumask_attrs, +}; + +struct dwc_pcie_format_attr { + struct device_attribute attr; + u64 field; + int config; +}; + +PMU_FORMAT_ATTR(eventid, "config:0-15"); +PMU_FORMAT_ATTR(type, "config:16-19"); +PMU_FORMAT_ATTR(lane, "config:20-27"); + +static struct attribute *dwc_pcie_format_attrs[] = { + &format_attr_type.attr, + &format_attr_eventid.attr, + &format_attr_lane.attr, + NULL, +}; + +static struct attribute_group dwc_pcie_format_attrs_group = { + .name = "format", + .attrs = dwc_pcie_format_attrs, +}; + +struct dwc_pcie_event_attr { + struct device_attribute attr; + enum dwc_pcie_event_type type; + 
u16 eventid; + u8 lane; +}; + +static ssize_t dwc_pcie_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dwc_pcie_event_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr); + + if (eattr->type == DWC_PCIE_LANE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n", + eattr->eventid, eattr->type); + else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n", + eattr->eventid, eattr->type); + + return 0; +} + +#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \ + (&((struct dwc_pcie_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \ + .type = _type, \ + .eventid = _eventid, \ + .lane = _lane, \ + }})[0].attr.attr) + +#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0) +#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0) + +static struct attribute *dwc_pcie_pmu_time_event_attrs[] = { + /* Group #0 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09), + + /* Group #1 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23), + + /* + * Leave it to the user to specify the lane ID to avoid generating + * a list of hundreds of events. 
+ */ + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717), + NULL +}; + +static const struct attribute_group dwc_pcie_event_attrs_group = { + .name = "events", + .attrs = dwc_pcie_pmu_time_event_attrs, +}; + +static const struct attribute_group *dwc_pcie_attr_groups[] = { + &dwc_pcie_event_attrs_group, + &dwc_pcie_format_attrs_group, + &dwc_pcie_cpumask_attr_group, + NULL +}; + +static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + if (enable) + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON); + else + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF); +} + +static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, + DWC_PCIE_TIME_BASED_TIMER_START, enable); +} + +static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 val; + + pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val); + + return val; +} + +static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + int event_id = DWC_PCIE_EVENT_ID(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 lo, hi, ss; + u64 val; + + /* + * The 64-bit value of the data counter is spread across two + * registers that are not synchronized. 
In order to read them + * atomically, ensure that the high 32 bits match before and after + * reading the low 32 bits. + */ + pci_read_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi); + do { + /* snapshot the high 32 bits */ + ss = hi; + + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW, + &lo); + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, + &hi); + } while (hi != ss); + + val = ((u64)hi << 32) | lo; + /* + * The Group#1 event measures the amount of data processed in 16-byte + * units. Simplify the end-user interface by multiplying the counter + * at the point of read. + */ + if (event_id >= 0x20 && event_id <= 0x23) + val *= 16; + + return val; +} + +static void dwc_pcie_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + u64 delta, prev, now = 0; + + do { + prev = local64_read(&hwc->prev_count); + + if (type == DWC_PCIE_LANE_EVENT) + now = dwc_pcie_pmu_read_lane_event_counter(event); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + now = dwc_pcie_pmu_read_time_based_counter(event); + + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + delta = (now - prev) & DWC_PCIE_MAX_PERIOD; + /* 32-bit counter for Lane Event Counting */ + if (type == DWC_PCIE_LANE_EVENT) + delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD; + + local64_add(delta, &event->count); +} + +static int dwc_pcie_pmu_event_init(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct perf_event *sibling; + u32 lane; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* We don't support sampling */ + if (is_sampling_event(event)) + return -EINVAL; + + /* We cannot support task bound events */ + if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->group_leader != event && + !is_software_event(event->group_leader)) + return -EINVAL; + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && !is_software_event(sibling)) + return -EINVAL; + } + + if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX) + return -EINVAL; + + if (type == DWC_PCIE_LANE_EVENT) { + lane = DWC_PCIE_EVENT_LANE(event); + if (lane < 0 || lane >= pcie_pmu->nr_lanes) + return -EINVAL; + } + + event->cpu = pcie_pmu->on_cpu; + + return 0; +} + +static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + hwc->state = 0; + local64_set(&hwc->prev_count, 0); + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, true); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true); +} + +static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct hw_perf_event *hwc = &event->hw; + + if (event->hw.state & PERF_HES_STOPPED) + return; + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, false); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false); + + dwc_pcie_pmu_event_update(event); + 
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + int event_id = DWC_PCIE_EVENT_ID(event); + int lane = DWC_PCIE_EVENT_LANE(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 ctrl; + + /* one counter for each type and it is in use */ + if (pcie_pmu->event[type]) + return -ENOSPC; + + pcie_pmu->event[type] = event; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + if (type == DWC_PCIE_LANE_EVENT) { + /* EVENT_COUNTER_DATA_REG needs clear manually */ + ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) | + FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) | + FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR); + pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + ctrl); + } else if (type == DWC_PCIE_TIME_BASE_EVENT) { + /* + * TIME_BASED_ANAL_DATA_REG is a 64 bit register, we can safely + * use it with any manually controlled duration. And it is + * cleared when next measurement starts. + */ + ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL, + DWC_PCIE_DURATION_MANUAL_CTL) | + DWC_PCIE_TIME_BASED_CNT_ENABLE; + pci_write_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl); + } + + if (flags & PERF_EF_START) + dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE); + perf_event_update_userpage(event); + pcie_pmu->event[type] = NULL; +} + +static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node) +{ + cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node); +} + +/* + * Find the binded DES capability device info of a PCI device. + * @pdev: The PCI device. 
+ */ +static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev) +{ + struct dwc_pcie_dev_info *dev_info; + + list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node) + if (dev_info->pdev == pdev) + return dev_info; + + return NULL; +} + +static void dwc_pcie_unregister_pmu(void *data) +{ + struct dwc_pcie_pmu *pcie_pmu = data; + + perf_pmu_unregister(&pcie_pmu->pmu); +} + +static bool dwc_pcie_match_des_cap(struct pci_dev *pdev) +{ + const struct dwc_pcie_vendor_id *vid; + u16 vsec = 0; + u32 val; + + if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)) + return false; + + for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) { + vsec = pci_find_vsec_capability(pdev, vid->vendor_id, + DWC_PCIE_VSEC_RAS_DES_ID); + if (vsec) + break; + } + if (!vsec) + return false; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x04) + return false; + + pci_dbg(pdev, + "Detected PCIe Vendor-Specific Extended Capability RAS DES\n"); + return true; +} + +static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info) +{ + platform_device_unregister(dev_info->plat_dev); + list_del(&dev_info->dev_node); + kfree(dev_info); +} + +static int dwc_pcie_register_dev(struct pci_dev *pdev) +{ + struct platform_device *plat_dev; + struct dwc_pcie_dev_info *dev_info; + u32 bdf; + + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf, + pdev, sizeof(*pdev)); + + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + + dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); + if (!dev_info) + return -ENOMEM; + + /* Cache platform device to handle pci device hotplug */ + dev_info->plat_dev = plat_dev; + dev_info->pdev = pdev; + list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head); + + return 0; +} + +static int dwc_pcie_pmu_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct dwc_pcie_dev_info *dev_info; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (!dwc_pcie_match_des_cap(pdev)) + return NOTIFY_DONE; + if (dwc_pcie_register_dev(pdev)) + return NOTIFY_BAD; + break; + case BUS_NOTIFY_DEL_DEVICE: + dev_info = dwc_pcie_find_dev_info(pdev); + if (!dev_info) + return NOTIFY_DONE; + dwc_pcie_unregister_dev(dev_info); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block dwc_pcie_pmu_nb = { + .notifier_call = dwc_pcie_pmu_notifier, +}; + +static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) +{ + struct pci_dev *pdev = plat_dev->dev.platform_data; + struct dwc_pcie_pmu *pcie_pmu; + char *name; + u32 bdf, val; + u16 vsec; + int ret; + + vsec = pci_find_vsec_capability(pdev, pdev->vendor, + DWC_PCIE_VSEC_RAS_DES_ID); + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf); + if (!name) + return -ENOMEM; + + pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + pcie_pmu->pdev = pdev; + pcie_pmu->ras_des_offset = vsec; + pcie_pmu->nr_lanes = pcie_get_width_cap(pdev); + pcie_pmu->on_cpu = -1; + pcie_pmu->pmu = (struct pmu){ + .name = name, + .parent = &pdev->dev, + .module = THIS_MODULE, + .attr_groups = dwc_pcie_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .event_init = dwc_pcie_pmu_event_init, 
+ .add = dwc_pcie_pmu_event_add, + .del = dwc_pcie_pmu_event_del, + .start = dwc_pcie_pmu_event_start, + .stop = dwc_pcie_pmu_event_stop, + .read = dwc_pcie_pmu_event_update, + }; + + /* Add this instance to the list used by the offline callback */ + ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, + &pcie_pmu->cpuhp_node); + if (ret) { + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf); + return ret; + } + + /* Unwind when platform driver removes */ + ret = devm_add_action_or_reset(&plat_dev->dev, + dwc_pcie_pmu_remove_cpuhp_instance, + &pcie_pmu->cpuhp_node); + if (ret) + return ret; + + ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); + if (ret) { + pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf); + return ret; + } + ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, + pcie_pmu); + if (ret) + return ret; + + return 0; +} + +static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + if (pcie_pmu->on_cpu == -1) + pcie_pmu->on_cpu = cpumask_local_spread( + 0, dev_to_node(&pcie_pmu->pdev->dev)); + + return 0; +} + +static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + struct pci_dev *pdev; + int node; + cpumask_t mask; + unsigned int target; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + /* Nothing to do if this CPU doesn't own the PMU */ + if (cpu != pcie_pmu->on_cpu) + return 0; + + pcie_pmu->on_cpu = -1; + pdev = pcie_pmu->pdev; + node = dev_to_node(&pdev->dev); + if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) && + cpumask_andnot(&mask, &mask, cpumask_of(cpu))) + target = cpumask_any(&mask); + else + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pci_err(pdev, "There is no CPU to set\n"); + return 0; + } + + /* This PMU does NOT support interrupt, just migrate context. 
*/ + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + pcie_pmu->on_cpu = target; + + return 0; +} + +static struct platform_driver dwc_pcie_pmu_driver = { + .probe = dwc_pcie_pmu_probe, + .driver = {.name = "dwc_pcie_pmu",}, +}; + +static int __init dwc_pcie_pmu_init(void) +{ + struct pci_dev *pdev = NULL; + bool found = false; + int ret; + + for_each_pci_dev(pdev) { + if (!dwc_pcie_match_des_cap(pdev)) + continue; + + ret = dwc_pcie_register_dev(pdev); + if (ret) { + pci_dev_put(pdev); + return ret; + } + + found = true; + } + if (!found) + return -ENODEV; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/dwc_pcie_pmu:online", + dwc_pcie_pmu_online_cpu, + dwc_pcie_pmu_offline_cpu); + if (ret < 0) + return ret; + + dwc_pcie_pmu_hp_state = ret; + + ret = platform_driver_register(&dwc_pcie_pmu_driver); + if (ret) + goto platform_driver_register_err; + + ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + if (ret) + goto platform_driver_register_err; + notify = true; + + return 0; + +platform_driver_register_err: + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); + + return ret; +} + +static void __exit dwc_pcie_pmu_exit(void) +{ + struct dwc_pcie_dev_info *dev_info, *tmp; + + if (notify) + bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) + dwc_pcie_unregister_dev(dev_info); + platform_driver_unregister(&dwc_pcie_pmu_driver); + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); +} + +module_init(dwc_pcie_pmu_init); +module_exit(dwc_pcie_pmu_exit); + +MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller"); +MODULE_AUTHOR("Shuai Xue "); +MODULE_LICENSE("GPL v2"); -- Gitee From e229fff7187e7b21e629cf820de98cd3f84ac468 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:45 +0800 Subject: [PATCH 422/953] perf pmu: "Compat" supports regular expression matching identifiers ANBZ: #8601 commit 2879ff36f5ed80deec5f9d82a7a4107f2347630e upstream. The jevent "Compat" is used for uncore PMU alias or metric definitions. The same PMU driver has different PMU identifiers due to different hardware versions and types, but they may have some common PMU event. Since a Compat value can only match one identifier, when adding the same event alias to PMUs with different identifiers, each identifier needs to be defined once, which is not streamlined enough. So let "Compat" support using regular expression to match identifiers for uncore PMU alias. For example, if the "Compat" value is set to "43401|43c01", it would be able to match PMU identifiers such as "43401" or "43c01", which correspond to CMN600_r0p0 or CMN700_r0p0. 
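As an illustration of the matching semantics (illustration only, not
part of this patch): the helper only accepts a full match, so a Compat
pattern must cover the whole identifier rather than a prefix or
substring of it. A minimal user-space sketch of the same anchored-match
rule, assuming nothing beyond POSIX regex(3):

  #include <regex.h>
  #include <stdio.h>
  #include <string.h>

  /* Return 1 iff @id is fully matched by the extended regex @compat. */
  static int compat_matches(const char *compat, const char *id)
  {
          regex_t re;
          regmatch_t m;
          int ok = 0;

          if (regcomp(&re, compat, REG_EXTENDED) != 0)
                  return 0;       /* invalid pattern: treat as no match */
          if (!regexec(&re, id, 1, &m, 0))
                  ok = m.rm_so == 0 && (size_t)m.rm_eo == strlen(id);
          regfree(&re);
          return ok;
  }

  int main(void)
  {
          printf("%d\n", compat_matches("43401|43c01", "43401"));   /* 1 */
          printf("%d\n", compat_matches("43401|43c01", "43c01"));   /* 1 */
          printf("%d\n", compat_matches("43401|43c01", "43c0100")); /* 0 */
          return 0;
  }

Without the full-match check, "43c01" would also be accepted for a
longer identifier such as "43c0100", silently attaching aliases to the
wrong hardware.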
Signed-off-by: Jing Zhang Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: John Garry Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-2-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- tools/perf/util/pmu.c | 27 +++++++++++++++++++++++++-- tools/perf/util/pmu.h | 1 + 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 86bfdf5db213..fd2986e1629d 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -28,6 +28,7 @@ #include "strbuf.h" #include "fncache.h" #include "util/evsel_config.h" +#include struct perf_pmu perf_pmu__fake = { .name = "fake", @@ -874,6 +875,28 @@ static bool pmu_uncore_alias_match(const char *pmu_name, const char *name) return res; } +bool pmu_uncore_identifier_match(const char *compat, const char *id) +{ + regex_t re; + regmatch_t pmatch[1]; + int match; + + if (regcomp(&re, compat, REG_EXTENDED) != 0) { + /* Warn unable to generate match particular string. */ + pr_info("Invalid regular expression %s\n", compat); + return false; + } + + match = !regexec(&re, id, 1, pmatch, 0); + if (match) { + /* Ensure a full match. */ + match = pmatch[0].rm_so == 0 && (size_t)pmatch[0].rm_eo == strlen(id); + } + regfree(&re); + + return match; +} + static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe, const struct pmu_events_table *table __maybe_unused, void *vdata) @@ -914,8 +937,8 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe, if (!pe->compat || !pe->pmu) return 0; - if (!strcmp(pmu->id, pe->compat) && - pmu_uncore_alias_match(pe->pmu, pmu->name)) { + if (pmu_uncore_alias_match(pe->pmu, pmu->name) && + pmu_uncore_identifier_match(pe->compat, pmu->id)) { perf_pmu__new_alias(pmu, pe->name, pe->desc, diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 6a4e170c61d6..fa54010eae3f 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -240,6 +240,7 @@ void pmu_add_cpu_aliases_table(struct perf_pmu *pmu, char *perf_pmu__getcpuid(struct perf_pmu *pmu); const struct pmu_events_table *pmu_events_table__find(void); const struct pmu_metrics_table *pmu_metrics_table__find(void); +bool pmu_uncore_identifier_match(const char *compat, const char *id); int perf_pmu__convert_scale(const char *scale, char **end, double *sval); -- Gitee From e7f0787a7ce6e298d2697dffbc9c0f68d088f087 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:46 +0800 Subject: [PATCH 423/953] perf metric: "Compat" supports regular expression matching identifiers ANBZ: #8601 commit 54409997d4b99ab63616bd431cf6244d58f8a597 upstream. The jevent "Compat" is used for uncore PMU alias or metric definitions. The same PMU driver has different PMU identifiers due to different hardware versions and types, but they may have some common PMU metric. Since a Compat value can only match one identifier, when adding the same metric to PMUs with different identifiers, each identifier needs to be defined once, which is not streamlined enough. So let "Compat" support using regular expression to match multiple identifiers for uncore PMU metric. 
Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-3-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- tools/perf/util/metricgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index bb5faaa25d51..ca3e0404f187 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -498,7 +498,7 @@ static int metricgroup__sys_event_iter(const struct pmu_metric *pm, while ((pmu = perf_pmus__scan(pmu))) { - if (!pmu->id || strcmp(pmu->id, pm->compat)) + if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id)) continue; return d->fn(pm, table, d->data); -- Gitee From bd28608902e28c7345467ae78a44906f4d0e1247 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:47 +0800 Subject: [PATCH 424/953] perf jevents: Support EventidCode and NodeType ANBZ: #8601 commit e3e42e23c0c6e791a00eb8331dc948f316e6de1f upstream. The previous code assumes an event has either an "event=" or "config" field at the beginning. For CMN neither of these may be present, as an event is typically "type=xx,eventid=xxx". So add EventidCode and NodeType to support CMN event description. I compared pmu_event.c before and after compiling with JEVENT_ARCH=all, they are consistent. Signed-off-by: Jing Zhang Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: John Garry Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-4-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- tools/perf/pmu-events/jevents.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index 72ba4a9239c6..f11c5c39d2c9 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -298,6 +298,7 @@ class JsonEvent: if 'ExtSel' in jd: eventcode |= int(jd['ExtSel']) << 8 configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None + eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None self.name = jd['EventName'].lower() if 'EventName' in jd else None self.topic = '' self.compat = jd.get('Compat') @@ -335,7 +336,13 @@ class JsonEvent: if precise and self.desc and '(Precise Event)' not in self.desc: extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise ' 'event)') - event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}' + event = None + if configcode is not None: + event = f'config={llx(configcode)}' + elif eventidcode is not None: + event = f'eventid={llx(eventidcode)}' + else: + event = f'event={llx(eventcode)}' event_fields = [ ('AnyThread', 'any='), ('PortMask', 'ch_mask='), @@ -345,6 +352,7 @@ class JsonEvent: ('Invert', 'inv='), ('SampleAfterValue', 'period='), ('UMask', 'umask='), + ('NodeType', 'type='), ] for key, value in event_fields: if key in jd and jd[key] != '0': -- Gitee From 
3d80215a15959ff328bfce413a13ffab74f940d3 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:48 +0800 Subject: [PATCH 425/953] perf test: Make matching_pmu effective ANBZ: #8601 commit 3bb59e759cbb357f8fb46cc5a48d2b0da09b37c4 upstream. The perf_pmu_test_event.matching_pmu didn't work. No matter what its value is, it does not affect the test results. So let matching_pmu be used for matching perf_pmu_test_pmu.pmu.name. Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-5-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- tools/perf/tests/pmu-events.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c index f5321fbdee79..0cf572f7c1e7 100644 --- a/tools/perf/tests/pmu-events.c +++ b/tools/perf/tests/pmu-events.c @@ -245,7 +245,7 @@ static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = { }, .alias_str = "event=0x2b", .alias_long_desc = "ddr write-cycles event", - .matching_pmu = "uncore_sys_ddr_pmu", + .matching_pmu = "uncore_sys_ddr_pmu0", }; static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { @@ -259,7 +259,7 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { }, .alias_str = "config=0x2c", .alias_long_desc = "ccn read-cycles event", - .matching_pmu = "uncore_sys_ccn_pmu", + .matching_pmu = "uncore_sys_ccn_pmu4", }; static const struct perf_pmu_test_event *sys_events[] = { @@ -615,6 +615,12 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu) .count = &matched_count, }; + if (strcmp(pmu_name, test_event.matching_pmu)) { + pr_debug("testing aliases uncore PMU %s: mismatched matching_pmu, %s vs %s\n", + pmu_name, test_event.matching_pmu, pmu_name); + return -1; + } + err = perf_pmu__find_event(pmu, event->name, &args, test_core_pmu_event_aliases_cb); if (err) { -- Gitee From 83d78bbe06a9d1ad8fe76c91ae3f20ef8c4047c6 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:49 +0800 Subject: [PATCH 426/953] perf test: Add pmu-event test for "Compat" and new event_field. ANBZ: #8601 commit 7fded33c6971b6c8e87cbbf48e74536aacca2991 upstream. Add new event test for uncore system event which is used to verify the functionality of "Compat" matching multiple identifiers and the new event fields "EventidCode" and "NodeType". 
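For reference, how the new test's Compat pattern resolves (worked
example derived from the test data below; the full-match rule from the
earlier patch applies):

  "(434|436|43c|43a).*" vs. PMU identifier:
    43401 -> match    (alternative "434", ".*" covers "01")
    43602 -> match    (alternative "436")
    43c03 -> match    (alternative "43c")
    43a01 -> match    (alternative "43a")
    44000 -> no match (no alternative matches at offset 0)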
Signed-off-by: Jing Zhang Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: John Garry Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-6-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- .../arch/test/test_soc/sys/uncore.json | 8 +++ tools/perf/pmu-events/empty-pmu-events.c | 8 +++ tools/perf/tests/pmu-events.c | 55 +++++++++++++++++++ 3 files changed, 71 insertions(+) diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json index c7e7528db315..4d423b149ad1 100644 --- a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json +++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json @@ -12,5 +12,13 @@ "EventName": "sys_ccn_pmu.read_cycles", "Unit": "sys_ccn_pmu", "Compat": "0x01" + }, + { + "BriefDescription": "Counts total cache misses in first lookup result (high priority)", + "EventidCode": "0x1", + "NodeType": "0x5", + "EventName": "sys_cmn_pmu.hnf_cache_miss", + "Unit": "sys_cmn_pmu", + "Compat": "(434|436|43c|43a).*" } ] diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c index 12bd043a05e3..13727421d424 100644 --- a/tools/perf/pmu-events/empty-pmu-events.c +++ b/tools/perf/pmu-events/empty-pmu-events.c @@ -244,6 +244,14 @@ static const struct pmu_event pmu_events__test_soc_sys[] = { .topic = "uncore", .pmu = "uncore_sys_ccn_pmu", }, + { + .name = "sys_cmn_pmu.hnf_cache_miss", + .event = "eventid=0x1,type=0x5", + .desc = "Counts total cache misses in first lookup result (high priority). 
Unit: uncore_sys_cmn_pmu ", + .compat = "(434|436|43c|43a).*", + .topic = "uncore", + .pmu = "uncore_sys_cmn_pmu", + }, { .name = 0, .event = 0, diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c index 0cf572f7c1e7..a56d32905743 100644 --- a/tools/perf/tests/pmu-events.c +++ b/tools/perf/tests/pmu-events.c @@ -262,9 +262,24 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { .matching_pmu = "uncore_sys_ccn_pmu4", }; +static const struct perf_pmu_test_event sys_cmn_pmu_hnf_cache_miss = { + .event = { + .name = "sys_cmn_pmu.hnf_cache_miss", + .event = "eventid=0x1,type=0x5", + .desc = "Counts total cache misses in first lookup result (high priority)", + .topic = "uncore", + .pmu = "uncore_sys_cmn_pmu", + .compat = "(434|436|43c|43a).*", + }, + .alias_str = "eventid=0x1,type=0x5", + .alias_long_desc = "Counts total cache misses in first lookup result (high priority)", + .matching_pmu = "uncore_sys_cmn_pmu0", +}; + static const struct perf_pmu_test_event *sys_events[] = { &sys_ddr_pmu_write_cycles, &sys_ccn_pmu_read_cycles, + &sys_cmn_pmu_hnf_cache_miss, NULL }; @@ -707,6 +722,46 @@ static struct perf_pmu_test_pmu test_pmus[] = { &sys_ccn_pmu_read_cycles, }, }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43401", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43602", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43c03", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43a01", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + } }; /* Test that aliases generated are as expected */ -- Gitee From 5cb03e7b1334b88e704e57f2fb8de344b0787efd Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:50 +0800 Subject: [PATCH 427/953] perf jevents: Add support for Arm CMN PMU aliasing ANBZ: #8601 commit 0b4de7bdf46c521518e38579d0ab5600a6949bec upstream. Currently just add aliases for part of Arm CMN PMU events which are general and compatible for any SoC and CMN-ANY. "Compat" value "(434|436|43c|43a).*" means it is compatible with all CMN600/CMN650/CMN700/Ci700, which can be obtained from commit 7819e05a0dce ("perf/arm-cmn: Revamp model detection"). 
The arm-cmn PMU events got from: [0] https://developer.arm.com/documentation/100180/0302/?lang=en [1] https://developer.arm.com/documentation/101408/0100/?lang=en [2] https://developer.arm.com/documentation/102308/0302/?lang=en [3] https://developer.arm.com/documentation/101569/0300/?lang=en Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-7-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- .../arch/arm64/arm/cmn/sys/cmn.json | 266 ++++++++++++++++++ tools/perf/pmu-events/jevents.py | 1 + 2 files changed, 267 insertions(+) create mode 100644 tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json new file mode 100644 index 000000000000..428605c37d10 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json @@ -0,0 +1,266 @@ +[ + { + "EventName": "hnf_cache_miss", + "EventidCode": "0x1", + "NodeType": "0x5", + "BriefDescription": "Counts total cache misses in first lookup result (high priority).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_sf_cache_access", + "EventidCode": "0x2", + "NodeType": "0x5", + "BriefDescription": "Counts number of cache accesses in first access (high priority).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_cache_fill", + "EventidCode": "0x3", + "NodeType": "0x5", + "BriefDescription": "Counts total allocations in HN SLC (all cache line allocations to SLC).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_pocq_retry", + "EventidCode": "0x4", + "NodeType": "0x5", + "BriefDescription": "Counts number of retried requests.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_pocq_reqs_recvd", + "EventidCode": "0x5", + "NodeType": "0x5", + "BriefDescription": "Counts number of requests that HN receives.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_sf_hit", + "EventidCode": "0x6", + "NodeType": "0x5", + "BriefDescription": "Counts number of SF hits.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_sf_evictions", + "EventidCode": "0x7", + "NodeType": "0x5", + "BriefDescription": "Counts number of SF eviction cache invalidations initiated.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_dir_snoops_sent", + "EventidCode": "0x8", + "NodeType": "0x5", + "BriefDescription": "Counts number of directed snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_brd_snoops_sent", + "EventidCode": "0x9", + "NodeType": "0x5", + "BriefDescription": "Counts number of multicast snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_eviction", + "EventidCode": "0xa", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC evictions (dirty only).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": 
"hnf_slc_fill_invalid_way", + "EventidCode": "0xb", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC fills to an invalid way.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_mc_retries", + "EventidCode": "0xc", + "NodeType": "0x5", + "BriefDescription": "Counts number of retried transactions by the MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_mc_reqs", + "EventidCode": "0xd", + "NodeType": "0x5", + "BriefDescription": "Counts number of requests that are sent to MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_qos_hh_retry", + "EventidCode": "0xe", + "NodeType": "0x5", + "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN‑F.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s0_rdata_beats", + "EventidCode": "0x1", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 0. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s1_rdata_beats", + "EventidCode": "0x2", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 1. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s2_rdata_beats", + "EventidCode": "0x3", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 2. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_rxdat_flits", + "EventidCode": "0x4", + "NodeType": "0xa", + "BriefDescription": "Number of RXDAT flits received. This event measures the true read data bandwidth, excluding CMOs.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txdat_flits", + "EventidCode": "0x5", + "NodeType": "0xa", + "BriefDescription": "Number of TXDAT flits dispatched. This event measures the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_total", + "EventidCode": "0x6", + "NodeType": "0xa", + "BriefDescription": "Number of TXREQ flits dispatched. This event measures the total request bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_retried", + "EventidCode": "0x7", + "NodeType": "0xa", + "BriefDescription": "Number of retried TXREQ flits dispatched. This event measures the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txrsp_retryack", + "EventidCode": "0x4", + "NodeType": "0x7", + "BriefDescription": "Number of RXREQ flits dispatched. This event is a measure of the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txdat_flitv", + "EventidCode": "0x5", + "NodeType": "0x7", + "BriefDescription": "Number of TXDAT flits dispatched from XP to SBSX. 
This event is a measure of the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_arvalid_no_arready", + "EventidCode": "0x21", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AR channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_awvalid_no_awready", + "EventidCode": "0x22", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AW channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_wvalid_no_wready", + "EventidCode": "0x23", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on W channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_txrsp_retryack", + "EventidCode": "0x2a", + "NodeType": "0x4", + "BriefDescription": "Number of RXREQ flits dispatched. This event is a measure of the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_arvalid_no_arready", + "EventidCode": "0x2b", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AR channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_arready_no_arvalid", + "EventidCode": "0x2c", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the AR channel is waiting for new requests from HN-I bridge.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_awvalid_no_awready", + "EventidCode": "0x2d", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AW channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_awready_no_awvalid", + "EventidCode": "0x2e", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the AW channel is waiting for new requests from HN-I bridge.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_wvalid_no_wready", + "EventidCode": "0x2f", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on W channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_txdat_stall", + "EventidCode": "0x30", + "NodeType": "0x4", + "BriefDescription": "TXDAT valid but no link credit available.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + } +] diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index f11c5c39d2c9..ae2bd49e8805 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -289,6 +289,7 @@ class JsonEvent: 'cpu_core': 'cpu_core', 'cpu_atom': 'cpu_atom', 'ali_drw': 'ali_drw', + 'arm_cmn': 'arm_cmn', } return table[unit] if unit in table else f'uncore_{unit.lower()}' -- Gitee From e5e1f364daae0c9cef67b3553e9c75fc0ee29529 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:51 +0800 Subject: [PATCH 428/953] perf vendor events: Add JSON metrics for Arm CMN ANBZ: #8601 commit 4f3ee7d1d5ced888e603c7fbe48e3468320745c1 upstream. Add JSON metrics for Arm CMN. Currently just add part of CMN PMU metrics which are general and compatible for any SoC with CMN-ANY. 
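As a quick sanity check on the bandwidth expressions (the counts below
are invented for illustration): each DAT flit carries 32 bytes, so
rnid_rxdat_flits = 2e9 flits over duration_time = 1 s evaluates to
2e9 * 32 / 1e6 = 64000, which the "1MB/s" ScaleUnit reports as
64000 MB/s, i.e. 64 GB/s of read bandwidth. The same arithmetic as a
standalone C sketch:

  #include <stdio.h>

  int main(void)
  {
          /* Invented sample counts, not measured data. */
          double rnid_rxdat_flits = 2e9; /* flits in the sample window */
          double duration_time = 1.0;    /* seconds */

          /* MetricExpr: rnid_rxdat_flits * 32 / 1e6 / duration_time */
          double mb_per_s = rnid_rxdat_flits * 32 / 1e6 / duration_time;

          printf("rni_actual_read_bandwidth.all = %.0f MB/s\n", mb_per_s);
          return 0;
  }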
Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-8-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- .../arch/arm64/arm/cmn/sys/metric.json | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json new file mode 100644 index 000000000000..f7823bd265db --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json @@ -0,0 +1,74 @@ +[ + { + "MetricName": "slc_miss_rate", + "BriefDescription": "The system level cache miss rate.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_cache_miss / hnf_slc_sf_cache_access", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "hnf_message_retry_rate", + "BriefDescription": "HN-F message retry rate indicates whether a lack of credits is causing the bottlenecks.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_pocq_retry / hnf_pocq_reqs_recvd", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "sf_hit_rate", + "BriefDescription": "Snoop filter hit rate can be used to measure the snoop filter efficiency.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_sf_hit / hnf_slc_sf_cache_access", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "mc_message_retry_rate", + "BriefDescription": "The memory controller request retries rate indicates whether the memory controller is the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_mc_retries / hnf_mc_reqs", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_actual_read_bandwidth.all", + "BriefDescription": "This event measure the actual bandwidth that RN-I bridge sends to the interconnect.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_rxdat_flits * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_actual_write_bandwidth.all", + "BriefDescription": "This event measures the actual write bandwidth at RN-I bridges.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txdat_flits * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_retry_rate", + "BriefDescription": "RN-I bridge retry rate indicates whether the memory controller is the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txreq_flits_retried / rnid_txreq_flits_total", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "sbsx_actual_write_bandwidth.all", + "BriefDescription": "sbsx actual write bandwidth.", + "MetricGroup": "cmn", + "MetricExpr": "sbsx_txdat_flitv * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + } +] -- Gitee From 6eec80257cb6b60350e0e2f0628460d1b7e8ead4 Mon Sep 17 00:00:00 2001 From: Huaixin Chang Date: Fri, 6 Mar 2020 11:20:49 +0800 Subject: [PATCH 
429/953] anolis: sched/fair: Introduce primitives for CFS bandwidth
 burst

ANBZ: #8586

In this patch, we introduce the notion of CFS bandwidth burst. Unused
"quota" from previous "periods" might be accumulated and used in the
following "periods". The maximum amount of accumulated bandwidth is
bounded by "burst". And the maximum amount of CPU a group can consume in
a given period is "buffer", which is equivalent to "quota" + "burst" in
case that this group has done enough accumulation.

Signed-off-by: Huaixin Chang
Acked-by: Shanpei Chen
Signed-off-by: Tianchen Ding
Reviewed-by: Cruz Zhao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2923
---
 kernel/sched/core.c  | 36 +++++++++++++++++++++++++++---------
 kernel/sched/fair.c  |  1 +
 kernel/sched/sched.h |  1 +
 3 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 46e13ea43fe4..e0ca336c9382 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10981,6 +10981,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
 {
 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+	u64 buffer;
 
 	if (tg == &root_task_group)
 		return -EINVAL;
@@ -11011,6 +11012,16 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
 			burst + quota > max_cfs_runtime))
 		return -EINVAL;
 
+	/*
+	 * Bound burst to defend burst against overflow during bandwidth shift.
+	 */
+	if (burst > max_cfs_runtime)
+		return -EINVAL;
+
+	if (quota == RUNTIME_INF)
+		buffer = RUNTIME_INF;
+	else
+		buffer = min(max_cfs_runtime, quota + burst);
 	/*
 	 * Prevent race between setting of cfs_rq->runtime_enabled and
 	 * unthrottle_offline_cfs_rqs().
@@ -11035,6 +11046,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
 		cfs_b->period = ns_to_ktime(period);
 		cfs_b->quota = quota;
 		cfs_b->burst = burst;
+		cfs_b->buffer = buffer;
 
 		__refill_cfs_bandwidth_runtime(cfs_b);
 
@@ -11121,7 +11133,11 @@ static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
 {
 	u64 quota, period, burst;
 
-	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
+	if (cfs_burst_us < 0)
+		burst = RUNTIME_INF;
+	else if ((u64)cfs_burst_us <= U64_MAX / NSEC_PER_USEC)
+		burst = (u64)cfs_burst_us * NSEC_PER_USEC;
+	else
 		return -EINVAL;
 
 	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
@@ -11135,6 +11151,9 @@ static long tg_get_cfs_burst(struct task_group *tg)
 {
 	u64 burst_us;
 
+	if (tg->cfs_bandwidth.burst == RUNTIME_INF)
+		return -1;
+
 	burst_us = tg->cfs_bandwidth.burst;
 	do_div(burst_us, NSEC_PER_USEC);
 
@@ -11165,18 +11184,17 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
 	return tg_set_cfs_period(css_tg(css), cfs_period_us);
 }
 
-static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
+static s64 cpu_cfs_burst_read_s64(struct cgroup_subsys_state *css,
 				  struct cftype *cft)
 {
 	return tg_get_cfs_burst(css_tg(css));
 }
 
-static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
-				   struct cftype *cftype, u64 cfs_burst_us)
+static int cpu_cfs_burst_write_s64(struct cgroup_subsys_state *css,
+				   struct cftype *cftype, s64 cfs_burst_us)
 {
 	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
 }
-
 struct cfs_schedulable_data {
 	struct task_group *tg;
 	u64 period, quota;
@@ -11382,8 +11400,8 @@ static struct cftype cpu_legacy_files[] = {
 	},
 	{
 		.name = "cfs_burst_us",
-		.read_u64 = cpu_cfs_burst_read_u64,
-		.write_u64 = cpu_cfs_burst_write_u64,
+		.read_s64 = cpu_cfs_burst_read_s64,
+		.write_s64 = cpu_cfs_burst_write_s64,
 	},
 	{
 		.name = "stat",
@@ -11618,8 +11636,8 @@ static struct cftype cpu_files[] = {
 	{
 		.name = "max.burst",
 		.flags = CFTYPE_NOT_ON_ROOT,
-		.read_u64 = cpu_cfs_burst_read_u64,
-		.write_u64 = cpu_cfs_burst_write_u64,
+		.read_s64 = cpu_cfs_burst_read_s64,
+		.write_s64 = cpu_cfs_burst_write_s64,
 	},
 #endif
 #ifdef CONFIG_UCLAMP_TASK_GROUP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9773437c4f2f..755220c80eaa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6275,6 +6275,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *paren
 	cfs_b->quota = RUNTIME_INF;
 	cfs_b->period = ns_to_ktime(default_cfs_period());
 	cfs_b->burst = 0;
+	cfs_b->buffer = RUNTIME_INF;
 	cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF;
 
 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b99bad980d68..503819ce5908 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -340,6 +340,7 @@ struct cfs_bandwidth {
 	u64			quota;
 	u64			runtime;
 	u64			burst;
+	u64			buffer;
 	u64			runtime_snap;
 	s64			hierarchical_quota;
 
-- 
Gitee

From 15f004b644a1f9be3c86da91d49d4bbc72c081b9 Mon Sep 17 00:00:00 2001
From: Huaixin Chang
Date: Fri, 6 Mar 2020 18:34:40 +0800
Subject: [PATCH 430/953] anolis: sched/fair: Make CFS bandwidth controller
 burstable

ANBZ: #8586

Accumulate unused quota from previous periods, so that accumulated
bandwidth runtime can be used in the following periods. During
accumulation, take care of runtime overflow. The previous non-burstable
CFS bandwidth controller only assigns quota to runtime, which saves a
lot of accounting work.

A sysctl parameter sysctl_sched_cfs_bw_burst_onset_percent is
introduced to denote what percentage of burst is given on setting cfs
bandwidth. By default it is 0, which means no burst is allowed unless
accumulated.

Signed-off-by: Huaixin Chang
Acked-by: Shanpei Chen
[dtcccc: remove sysctl_sched_cfs_bw_burst_enabled to make cpu burst
default on, which is the same as upstream.]
Signed-off-by: Tianchen Ding
Reviewed-by: Cruz Zhao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2923
---
 kernel/sched/core.c  | 32 ++++++++++++++++++------
 kernel/sched/fair.c  | 58 +++++++++++++++++++++++++++++++-------------
 kernel/sched/sched.h |  9 +++++--
 3 files changed, 72 insertions(+), 27 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e0ca336c9382..6105b188f982 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -175,6 +175,14 @@ static inline int __task_prio(const struct task_struct *p)
 	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
 }
 
+#ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth,
+ * 0 by default.
+ */
+unsigned int sysctl_sched_cfs_bw_burst_onset_percent;
+#endif
+
 /*
  * l(a,b)
  * le(a,b) := !l(b,a)
@@ -10972,7 +10980,7 @@ static DEFINE_MUTEX(cfs_constraints_mutex);
 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
 /* More than 203 days if BW_SHIFT equals 20.
*/ -static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; +const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); @@ -10981,7 +10989,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 buffer; + u64 buffer, burst_onset; if (tg == &root_task_group) return -EINVAL; @@ -11048,14 +11056,22 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->burst = burst; cfs_b->buffer = buffer; - __refill_cfs_bandwidth_runtime(cfs_b); + cfs_b->max_overrun = DIV_ROUND_UP_ULL(max_cfs_runtime, quota); + cfs_b->runtime = cfs_b->quota; - /* - * Restart the period timer (if active) to handle new - * period expiry: - */ + /* burst_onset needed */ + if (cfs_b->quota != RUNTIME_INF && sysctl_sched_cfs_bw_burst_onset_percent > 0) { + + burst_onset = div_u64(burst, 100) * + sysctl_sched_cfs_bw_burst_onset_percent; + + cfs_b->runtime += burst_onset; + cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); + } + + /* Restart the period timer (if active) to handle new period expiry: */ if (runtime_enabled) - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 1); } for_each_online_cpu(i) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 755220c80eaa..dddd8f7c6d8b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -161,6 +161,15 @@ static struct ctl_table sched_fair_sysctls[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "sched_cfs_bw_burst_onset_percent", + .data = &sysctl_sched_cfs_bw_burst_onset_percent, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, #endif #ifdef CONFIG_NUMA_BALANCING { @@ -5479,22 +5488,18 @@ static inline u64 sched_cfs_bandwidth_slice(void) * * requires cfs_b->lock */ -void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) +static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, + u64 overrun) { - s64 runtime; + u64 refill; if (unlikely(cfs_b->quota == RUNTIME_INF)) return; - cfs_b->runtime += cfs_b->quota; - runtime = cfs_b->runtime_snap - cfs_b->runtime; - if (runtime > 0) { - cfs_b->burst_time += runtime; - cfs_b->nr_burst++; - } - - cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); - cfs_b->runtime_snap = cfs_b->runtime; + overrun = min(overrun, cfs_b->max_overrun); + refill = cfs_b->quota * overrun; + cfs_b->runtime += refill; + cfs_b->runtime = min(cfs_b->runtime, cfs_b->buffer); } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -5516,7 +5521,7 @@ static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 0); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); @@ -5986,7 +5991,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u cfs_b->nr_periods += overrun; /* Refill extra burst quota even if cfs_b->idle */ - __refill_cfs_bandwidth_runtime(cfs_b); + __refill_cfs_bandwidth_runtime(cfs_b, overrun); /* * idle depends on !throttled (for the case of a large deficit), and if @@ -6241,8 +6246,17 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) new = old * 2; if (new < max_cfs_quota_period) { cfs_b->period = 
ns_to_ktime(new);
-			cfs_b->quota *= 2;
-			cfs_b->burst *= 2;
+			cfs_b->quota = min(cfs_b->quota * 2,
+					   max_cfs_runtime);
+
+			cfs_b->burst = min(cfs_b->burst * 2,
+					   max_cfs_runtime);
+
+			cfs_b->buffer = min(max_cfs_runtime,
+					    cfs_b->quota + cfs_b->burst);
+			/* Add 1 in case max_overrun becomes 0. */
+			cfs_b->max_overrun >>= 1;
+			cfs_b->max_overrun++;
 
 			pr_warn_ratelimited(
 	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
@@ -6299,16 +6313,26 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 #endif
 }
 
-void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init)
 {
+	u64 overrun;
+
 	lockdep_assert_held(&cfs_b->lock);
 
 	if (cfs_b->period_active)
 		return;
 
 	cfs_b->period_active = 1;
-	hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+	overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
 	hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
+
+	/*
+	 * When period timer stops, quota for the following period is not
+	 * refilled, however period timer is already forwarded. We should
+	 * accumulate quota once more than overrun here.
+	 */
+	if (!init)
+		__refill_cfs_bandwidth_runtime(cfs_b, overrun + 1);
 }
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 503819ce5908..f30f31e21443 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -104,6 +104,11 @@ struct cpuidle_state;
 #define TASK_ON_RQ_QUEUED	1
 #define TASK_ON_RQ_MIGRATING	2
 
+#ifdef CONFIG_CFS_BANDWIDTH
+extern const u64 max_cfs_runtime;
+extern unsigned int sysctl_sched_cfs_bw_burst_onset_percent;
+#endif
+
 extern __read_mostly int scheduler_running;
 
 extern unsigned long calc_load_update;
@@ -341,6 +346,7 @@ struct cfs_bandwidth {
 	u64			runtime;
 	u64			burst;
 	u64			buffer;
+	u64			max_overrun;
 	u64			runtime_snap;
 	s64			hierarchical_quota;
 
@@ -457,8 +463,7 @@ extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 			      struct sched_entity *parent);
 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b,
 			       struct cfs_bandwidth *parent);
-extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init);
 
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 extern bool cfs_task_bw_constrained(struct task_struct *p);

-- 
Gitee

From abfcb9f44f4f1fb7006eca437968430f168a2ab9 Mon Sep 17 00:00:00 2001
From: Huaixin Chang
Date: Wed, 11 Mar 2020 11:07:54 +0800
Subject: [PATCH 431/953] anolis: sched/fair: Add cfs bandwidth burst
 statistics

ANBZ: #8586

Introduce statistics exports for the burstable cfs bandwidth
controller. The following exports are included:

current_bw: current runtime in the global pool
nr_burst: number of periods in which bandwidth burst occurs
burst_time: cumulative wall-time that any CPU has used above quota in
respective periods

Signed-off-by: Huaixin Chang
Acked-by: Shanpei Chen
[dtcccc: only add current_bw and turn it to usec in cgroup v2, keeping
the same unit as upstream.]
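Worked example of the accounting below (values invented, arbitrary time
units): with quota = 100 and runtime_snap - runtime = 130 consumed since
the last refill, 30 units were served from accumulated burst, so
burst_time grows by 30 and nr_burst by one. A self-contained sketch of
that bookkeeping:

  #include <stdio.h>

  int main(void)
  {
          unsigned long long quota = 100, runtime_snap = 150, runtime = 20;
          unsigned long long burst_time = 0;
          int nr_burst = 0;

          unsigned long long used = runtime_snap - runtime; /* 130 used */

          if (used > quota) {                 /* ran above quota */
                  burst_time += used - quota; /* += 30 */
                  nr_burst++;
          }
          printf("burst_time=%llu nr_burst=%d\n", burst_time, nr_burst);
          return 0;
  }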
Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2923 --- kernel/sched/core.c | 11 +++++++++-- kernel/sched/fair.c | 11 ++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6105b188f982..132747a09f89 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11069,6 +11069,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); } + cfs_b->runtime_snap = cfs_b->runtime; + /* Restart the period timer (if active) to handle new period expiry: */ if (runtime_enabled) start_cfs_bandwidth(cfs_b, 1); @@ -11321,6 +11323,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "wait_sum %llu\n", ws); } + seq_printf(sf, "current_bw %llu\n", cfs_b->runtime); seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); @@ -11464,20 +11467,24 @@ static int cpu_extra_stat_show(struct seq_file *sf, { struct task_group *tg = css_tg(css); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 throttled_usec, burst_usec; + u64 throttled_usec, current_bw_usec, burst_usec; throttled_usec = cfs_b->throttled_time; do_div(throttled_usec, NSEC_PER_USEC); + current_bw_usec = cfs_b->runtime; + do_div(current_bw_usec, NSEC_PER_USEC); burst_usec = cfs_b->burst_time; do_div(burst_usec, NSEC_PER_USEC); seq_printf(sf, "nr_periods %d\n" "nr_throttled %d\n" "throttled_usec %llu\n" + "current_bw_usec %llu\n" "nr_bursts %d\n" "burst_usec %llu\n", cfs_b->nr_periods, cfs_b->nr_throttled, - throttled_usec, cfs_b->nr_burst, burst_usec); + throttled_usec, current_bw_usec, cfs_b->nr_burst, + burst_usec); } #endif return 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index dddd8f7c6d8b..a9590366be9d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5491,15 +5491,24 @@ static inline u64 sched_cfs_bandwidth_slice(void) static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, u64 overrun) { - u64 refill; + u64 refill, runtime; if (unlikely(cfs_b->quota == RUNTIME_INF)) return; + if (cfs_b->runtime_snap > cfs_b->runtime) { + runtime = cfs_b->runtime_snap - cfs_b->runtime; + if (runtime > cfs_b->quota) { + cfs_b->burst_time += runtime - cfs_b->quota; + cfs_b->nr_burst++; + } + } + overrun = min(overrun, cfs_b->max_overrun); refill = cfs_b->quota * overrun; cfs_b->runtime += refill; cfs_b->runtime = min(cfs_b->runtime, cfs_b->buffer); + cfs_b->runtime_snap = cfs_b->runtime; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) -- Gitee From 7647c2fb166ca34a3169f9fc1126ffe6a818ea8a Mon Sep 17 00:00:00 2001 From: Huaixin Chang Date: Wed, 16 Dec 2020 18:43:19 +0800 Subject: [PATCH 432/953] anolis: sched/fair: Add document for burstable CFS bandwidth control ANBZ: #8586 Basic description of usage and effect for CFS Bandwidth Control Burst. Signed-off-by: Huaixin Chang Acked-by: Shanpei Chen [dtcccc: add documents about anolis own fields.] 
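A usage sketch of the interface documented here. The paths assume a
cgroup v1 cpu controller mounted at /sys/fs/cgroup/cpu and a pre-created
group named "demo" (both are assumptions; adjust to the local setup):
grant 100 ms quota per 100 ms period plus a 25 ms burst, then read the
statistics back.

  #include <stdio.h>

  /* Write one decimal value to a cgroup control file. */
  static int cg_write(const char *path, long long val)
  {
          FILE *f = fopen(path, "w");

          if (!f)
                  return -1;
          fprintf(f, "%lld\n", val);
          return fclose(f);
  }

  int main(void)
  {
          const char *dir = "/sys/fs/cgroup/cpu/demo"; /* assumed group */
          char path[256], line[128];
          FILE *f;

          snprintf(path, sizeof(path), "%s/cpu.cfs_period_us", dir);
          cg_write(path, 100000);
          snprintf(path, sizeof(path), "%s/cpu.cfs_quota_us", dir);
          cg_write(path, 100000);
          snprintf(path, sizeof(path), "%s/cpu.cfs_burst_us", dir);
          cg_write(path, 25000);

          snprintf(path, sizeof(path), "%s/cpu.stat", dir);
          f = fopen(path, "r");
          if (f) {
                  while (fgets(line, sizeof(line), f))
                          fputs(line, stdout); /* nr_bursts, burst_time */
                  fclose(f);
          }
          return 0;
  }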
Signed-off-by: Tianchen Ding
Reviewed-by: Cruz Zhao
Link: https://gitee.com/anolis/cloud-kernel/pulls/2923
---
 Documentation/admin-guide/cgroup-v2.rst | 3 ++-
 Documentation/scheduler/sched-bwc.rst   | 9 ++++++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 8238711ee842..aa0edf0d07f0 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1045,11 +1045,12 @@ All time durations are in microseconds.
 	  - user_usec
 	  - system_usec
 
-	and the following five when the controller is enabled:
+	and the following six when the controller is enabled:
 
 	  - nr_periods
 	  - nr_throttled
 	  - throttled_usec
+	  - current_bw
 	  - nr_bursts
 	  - burst_usec
 
diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst
index 41ed2ceafc92..329b00ba40f3 100644
--- a/Documentation/scheduler/sched-bwc.rst
+++ b/Documentation/scheduler/sched-bwc.rst
@@ -122,9 +122,15 @@ This is tunable via procfs::
 Larger slice values will reduce transfer overheads, while smaller values allow
 for more fine-grained consumption.
 
+Sometimes users might want a group to burst without accumulation. This is
+tunable via::
+	/proc/sys/kernel/sched_cfs_bw_burst_onset_percent (default=0)
+
+Up to 100% of cpu.cfs_burst_us may be granted as runtime when bandwidth is set.
+
 Statistics
 ----------
-A group's bandwidth statistics are exported via 5 fields in cpu.stat.
+A group's bandwidth statistics are exported via 6 fields in cpu.stat.
 
 cpu.stat:
 
@@ -132,6 +138,7 @@ cpu.stat:
 - nr_throttled: Number of times the group has been throttled/limited.
 - throttled_time: The total time duration (in nanoseconds) for which entities
   of the group have been throttled.
+- current_bw: Current runtime in global pool.
 - nr_bursts: Number of periods burst occurs.
 - burst_time: Cumulative wall-time (in nanoseconds) that any CPUs has used
   above quota in respective periods.

-- 
Gitee

From 95a883dfa76fd7ba012d382a607c91192208d88a Mon Sep 17 00:00:00 2001
From: Huaixin Chang
Date: Tue, 5 May 2020 18:29:41 +0800
Subject: [PATCH 433/953] anolis: sched/fair: Introduce init buffer into CFS
 burst

ANBZ: #8586

For CFS burst, cpu.cfs_burst_us is used to denote how much unused
cputime a group can accumulate. However, users may want a much bigger
buffer at first, and a smaller buffer at runtime. Thus,
cpu.cfs_init_buffer_us is introduced to denote how much init cputime a
group is granted at the very beginning. A group can consume cputime
from the init buffer without being throttled. When the cputime from the
init buffer drops below cpu.cfs_burst_us, the normal CPU burst
behaviour is restored, and the init buffer no longer has any effect on
the group's tasks.

Signed-off-by: Huaixin Chang
Acked-by: Shanpei Chen
[dtcccc: merge ("anolis: sched: Support cpu burst in cgroup v2").]
Signed-off-by: Yi Tao Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2923 --- kernel/sched/core.c | 79 +++++++++++++++++++++++++++++++++++++++----- kernel/sched/fair.c | 4 ++- kernel/sched/sched.h | 2 ++ 3 files changed, 75 insertions(+), 10 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 132747a09f89..59548e772e80 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10985,7 +10985,7 @@ const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, - u64 burst) + u64 burst, u64 init_buffer) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; @@ -11023,7 +11023,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, /* * Bound burst to defend burst against overflow during bandwidth shift. */ - if (burst > max_cfs_runtime) + if (burst > max_cfs_runtime || init_buffer > max_cfs_runtime) return -EINVAL; if (quota == RUNTIME_INF) @@ -11055,6 +11055,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->quota = quota; cfs_b->burst = burst; cfs_b->buffer = buffer; + cfs_b->init_buffer = init_buffer; cfs_b->max_overrun = DIV_ROUND_UP_ULL(max_cfs_runtime, quota); cfs_b->runtime = cfs_b->quota; @@ -11069,6 +11070,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); } + cfs_b->runtime = max(cfs_b->runtime, init_buffer); + cfs_b->current_buffer = max(cfs_b->buffer, init_buffer); cfs_b->runtime_snap = cfs_b->runtime; /* Restart the period timer (if active) to handle new period expiry: */ @@ -11096,10 +11099,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; period = ktime_to_ns(tg->cfs_bandwidth.period); burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -11107,7 +11111,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_quota(struct task_group *tg) @@ -11125,7 +11129,7 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; @@ -11133,8 +11137,9 @@ static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_period(struct task_group *tg) @@ -11149,7 +11154,7 @@ static long tg_get_cfs_period(struct task_group *tg) static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; if 
(cfs_burst_us < 0) burst = RUNTIME_INF; @@ -11161,8 +11166,9 @@ static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) burst = (u64)cfs_burst_us * NSEC_PER_USEC; period = ktime_to_ns(tg->cfs_bandwidth.period); quota = tg->cfs_bandwidth.quota; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_burst(struct task_group *tg) @@ -11178,6 +11184,36 @@ static long tg_get_cfs_burst(struct task_group *tg) return burst_us; } +static int tg_set_cfs_init_buffer(struct task_group *tg, long cfs_init_buffer_us) +{ + u64 quota, period, burst, init_buffer; + + period = ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; + if (cfs_init_buffer_us < 0) + init_buffer = RUNTIME_INF; + else if ((u64)cfs_init_buffer_us <= U64_MAX / NSEC_PER_USEC) + init_buffer = (u64)cfs_init_buffer_us * NSEC_PER_USEC; + else + return -EINVAL; + + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); +} + +static long tg_get_cfs_init_buffer(struct task_group *tg) +{ + u64 init_buffer_us; + + if (tg->cfs_bandwidth.init_buffer == RUNTIME_INF) + return -1; + + init_buffer_us = tg->cfs_bandwidth.init_buffer; + do_div(init_buffer_us, NSEC_PER_USEC); + + return init_buffer_us; +} + static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -11213,6 +11249,19 @@ static int cpu_cfs_burst_write_s64(struct cgroup_subsys_state *css, { return tg_set_cfs_burst(css_tg(css), cfs_burst_us); } + +static s64 cpu_cfs_init_buffer_read_s64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_init_buffer(css_tg(css)); +} + +static int cpu_cfs_init_buffer_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_init_buffer_us) +{ + return tg_set_cfs_init_buffer(css_tg(css), cfs_init_buffer_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -11422,6 +11471,11 @@ static struct cftype cpu_legacy_files[] = { .read_s64 = cpu_cfs_burst_read_s64, .write_s64 = cpu_cfs_burst_write_s64, }, + { + .name = "cfs_init_buffer_us", + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = cpu_cfs_init_buffer_write_s64, + }, { .name = "stat", .seq_show = cpu_cfs_stat_show, @@ -11616,6 +11670,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct task_group *tg = css_tg(of_css(of)); + u64 init_buffer = tg_get_cfs_init_buffer(tg); u64 period = tg_get_cfs_period(tg); u64 burst = tg_get_cfs_burst(tg); u64 quota; @@ -11623,7 +11678,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, ret = cpu_period_quota_parse(buf, &period, "a); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota, burst); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); return ret ?: nbytes; } #endif @@ -11662,6 +11717,12 @@ static struct cftype cpu_files[] = { .read_s64 = cpu_cfs_burst_read_s64, .write_s64 = cpu_cfs_burst_write_s64, }, + { + .name = "max.init_buffer", + .flags = CFTYPE_NOT_ON_ROOT, + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = cpu_cfs_init_buffer_write_s64, + }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a9590366be9d..a1f48b1cc568 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5504,10 +5504,11 @@ static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, } } + 
cfs_b->current_buffer = max(cfs_b->runtime, cfs_b->buffer); overrun = min(overrun, cfs_b->max_overrun); refill = cfs_b->quota * overrun; cfs_b->runtime += refill; - cfs_b->runtime = min(cfs_b->runtime, cfs_b->buffer); + cfs_b->runtime = min(cfs_b->runtime, cfs_b->current_buffer); cfs_b->runtime_snap = cfs_b->runtime; } @@ -6298,6 +6299,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *paren cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); cfs_b->burst = 0; + cfs_b->init_buffer = 0; cfs_b->buffer = RUNTIME_INF; cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index f30f31e21443..88dad12e4f67 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -345,6 +345,8 @@ struct cfs_bandwidth { u64 quota; u64 runtime; u64 burst; + u64 init_buffer; + u64 current_buffer; u64 buffer; u64 max_overrun; u64 runtime_snap; -- Gitee From 2b3f33c3d24e1f8feb0d59e64992ebbfcc6a2dbd Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Mon, 19 Feb 2024 15:57:42 +0800 Subject: [PATCH 434/953] anolis: sched/core: introduce CPUTIME_SIBIDLE_TASK ANBZ: #8547 As acpu uses rq_clock() as clock source to account sibidle time, irq time will be accounted into sibidle time. However, in some scenarios, sibidle sum will be much larger than exec runtime, e.g., we observed that sibidle time of task calling futex_wake() is 50% larger than exec runtime, which is confusing. We introduce cpustat[CPUTIME_SIBIDLE_TASK] to account the time that a task is actually running while the SMT siblings are idle, using rq_clock_task() as clock source. Similarly, we introduce cpustat[CPUTIME_FORCEIDLE_TASK] to account the time that a task is actually running while the SMT siblings are forced idle, using rq_clock_task() as clock source. |<----------------------sibidle time---------------------->| |<---sibidle task time--->| |<- -sibidle task time--->| |<------exec runtime----->| |<-----exec runtime------>| ht0 | A running | irq | A running | ht1 | idle | And for ht aware quota, the sibidle_delta_task * ratio will be accounted to the task's cfs_rq_runtime, rather than sibidle_delta. Interfaces: - task level: /proc/$pid/sched, row core_forceidle_task_sum, row core_sibidle_task_sum. - cgroup level: /sys/fs/cgroup/$cg/cpu.stat, row core_sched.force_idle_task_usec row sibidle_task_usec. 
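For illustration only (not part of this patch), a minimal user-space sketch
that reads the two new cgroup rows; the cgroup2 mount point /sys/fs/cgroup
and the cgroup name "mycg" are assumptions:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* assumed cgroup2 mount and cgroup name; adjust for the cgroup under test */
	FILE *f = fopen("/sys/fs/cgroup/mycg/cpu.stat", "r");
	char key[64];
	unsigned long long val;

	if (!f)
		return 1;
	/* cpu.stat is "<key> <value>" per row */
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		/* the two rows introduced by this patch */
		if (!strcmp(key, "core_sched.force_idle_task_usec") ||
		    !strcmp(key, "sibidle_task_usec"))
			printf("%s = %llu\n", key, val);
	}
	fclose(f);
	return 0;
}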
Signed-off-by: Cruz Zhao --- include/linux/cgroup-defs.h | 2 ++ include/linux/kernel_stat.h | 4 +++- include/linux/sched.h | 2 ++ kernel/cgroup/rstat.c | 22 ++++++++++++++++++++++ kernel/sched/core.c | 29 +++++++++++++++++++++++++---- kernel/sched/core_sched.c | 8 +++++++- kernel/sched/cputime.c | 8 +++++++- kernel/sched/debug.c | 2 ++ kernel/sched/sched.h | 3 +++ 9 files changed, 73 insertions(+), 7 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 95bc8b0a231f..13d7a1f91ede 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -304,9 +304,11 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; + u64 forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) u64 sibidle_sum; + u64 sibidle_task_sum; #endif }; diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 01f0c6391a98..9e86ee77d335 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -30,9 +30,11 @@ enum cpu_usage_stat { CPUTIME_GUEST_NICE, #ifdef CONFIG_SCHED_CORE CPUTIME_FORCEIDLE, + CPUTIME_FORCEIDLE_TASK, #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) CPUTIME_SIBIDLE, + CPUTIME_SIBIDLE_TASK, #endif NR_STATS, }; @@ -134,7 +136,7 @@ extern void account_process_tick(struct task_struct *, int user); extern void account_idle_ticks(unsigned long ticks); #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) -extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, bool fi); +extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, u64 delta_task, bool fi); #endif #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 3c39c38f78b9..44f5db175d2e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -541,9 +541,11 @@ struct sched_statistics { #ifdef CONFIG_SCHED_CORE u64 core_forceidle_sum; + u64 core_forceidle_task_sum; #endif #if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) u64 core_sibidle_sum; + u64 core_sibidle_task_sum; #endif #endif /* CONFIG_SCHEDSTATS */ diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index a29c5275c68e..2ac57d3760cf 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -326,9 +326,11 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum += src_bstat->forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) dst_bstat->sibidle_sum += src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum += src_bstat->sibidle_task_sum; #endif } @@ -340,9 +342,11 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum -= src_bstat->forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) dst_bstat->sibidle_sum -= src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum -= src_bstat->sibidle_task_sum; #endif } @@ -436,11 +440,17 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, case CPUTIME_FORCEIDLE: rstatc->bstat.forceidle_sum += delta_exec; break; + case CPUTIME_FORCEIDLE_TASK: + rstatc->bstat.forceidle_task_sum += delta_exec; + break; #endif #if 
defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) case CPUTIME_SIBIDLE: rstatc->bstat.sibidle_sum += delta_exec; break; + case CPUTIME_SIBIDLE_TASK: + rstatc->bstat.sibidle_task_sum += delta_exec; + break; #endif default: break; @@ -484,9 +494,11 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; + bstat->forceidle_task_sum += cpustat[CPUTIME_FORCEIDLE_TASK]; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) bstat->sibidle_sum += cpustat[CPUTIME_SIBIDLE]; + bstat->sibidle_task_sum += cpustat[CPUTIME_SIBIDLE_TASK]; #endif } } @@ -498,9 +510,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) struct cgroup_base_stat bstat; #ifdef CONFIG_SCHED_CORE u64 forceidle_time; + u64 forceidle_task_time; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) u64 sibidle_time; + u64 sibidle_task_time; #endif if (cgroup_parent(cgrp)) { @@ -510,9 +524,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) &utime, &stime); #ifdef CONFIG_SCHED_CORE forceidle_time = cgrp->bstat.forceidle_sum; + forceidle_task_time = cgrp->bstat.forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) sibidle_time = cgrp->bstat.sibidle_sum; + sibidle_task_time = cgrp->bstat.sibidle_task_sum; #endif cgroup_rstat_flush_release(); } else { @@ -522,9 +538,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) stime = bstat.cputime.stime; #ifdef CONFIG_SCHED_CORE forceidle_time = bstat.forceidle_sum; + forceidle_task_time = bstat.forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) sibidle_time = bstat.sibidle_sum; + sibidle_task_time = bstat.sibidle_task_sum; #endif } @@ -533,9 +551,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) do_div(stime, NSEC_PER_USEC); #ifdef CONFIG_SCHED_CORE do_div(forceidle_time, NSEC_PER_USEC); + do_div(forceidle_task_time, NSEC_PER_USEC); #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) do_div(sibidle_time, NSEC_PER_USEC); + do_div(sibidle_task_time, NSEC_PER_USEC); #endif seq_printf(seq, "usage_usec %llu\n" @@ -545,9 +565,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time); + seq_printf(seq, "core_sched.force_idle_task_usec %llu\n", forceidle_task_time); #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) seq_printf(seq, "sibidle_usec %llu\n", sibidle_time); + seq_printf(seq, "sibidle_task_usec %llu\n", sibidle_task_time); #endif } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 59548e772e80..b84f75281e62 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -382,6 +382,7 @@ static void __sched_core_flip(bool enabled) cpu_rq(t)->core_enabled = enabled; cpu_rq(cpu)->core->core_sibidle_start = 0; + cpu_rq(cpu)->core->core_sibidle_start_task = 0; sched_core_unlock(cpu, &flags); @@ -5050,8 +5051,9 @@ static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_str const int cpu = cpu_of(rq); const struct cpumask *smt_mask = cpu_smt_mask(cpu); u64 now = rq_clock(rq); - u64 sibidle_sum, last_update_time; - s64 delta, last; + u64 now_task = rq_clock_task(rq); + u64 sibidle_sum, sibidle_task_sum, last_update_time, last_update_time_task; + s64 delta, delta_task, last, last_task; int i; if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled()) @@ -5087,29 +5089,44 @@ static void 
update_acpu(struct rq *rq, struct task_struct *prev, struct task_str rq_i->last_acpu_update_time); last_update_time = last >= 0 ? rq->last_acpu_update_time : rq_i->last_acpu_update_time; + last_task = (s64)(rq->last_acpu_update_time_task - + rq_i->last_acpu_update_time_task); + last_update_time_task = last_task >= 0 ? + rq->last_acpu_update_time_task : + rq_i->last_acpu_update_time_task; /* * Sibling may update acpu at the same time, and it's * timestamp may be newer than this rq. */ delta = now - last_update_time; delta = delta > 0 ? delta : 0; + delta_task = now_task - last_update_time_task; + delta_task = delta_task > 0 ? delta_task : 0; /* Add the delta to improve accuracy. */ sibidle_sum = last >= 0 ? rq->sibidle_sum : rq_i->acpu_idle_sum; - if (curr_i == rq_i->idle) + sibidle_task_sum = last_task >= 0 ? rq->sibidle_task_sum : + rq_i->acpu_idle_sum; + if (curr_i == rq_i->idle) { sibidle_sum += delta; + sibidle_task_sum += delta_task; + } } } if (prev != rq->idle) { delta = sibidle_sum - rq->sibidle_sum; delta = delta > 0 ? delta : 0; - __account_sibidle_time(prev, delta, false); + delta_task = sibidle_task_sum - rq->sibidle_task_sum; + delta_task = delta_task > 0 ? delta_task : 0; + __account_sibidle_time(prev, delta, delta_task, false); } rq->sibidle_sum = sibidle_sum; + rq->sibidle_task_sum = sibidle_task_sum; out: rq->last_acpu_update_time = now; + rq->last_acpu_update_time_task = now_task; } #else static inline void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) @@ -6274,6 +6291,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) sched_core_account_sibidle(rq); /* reset after accounting force idle */ rq->core->core_sibidle_start = 0; + rq->core->core_sibidle_start_task = 0; rq->core->core_sibidle_count = 0; rq->core->core_sibidle_occupation = 0; if (rq->core->core_forceidle_count) { @@ -6369,6 +6387,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (schedstat_enabled() && rq->core->core_sibidle_count) { rq->core->core_sibidle_start = rq_clock(rq->core); + rq->core->core_sibidle_start_task = rq_clock_task(rq->core); rq->core->core_sibidle_occupation = occ; } @@ -6636,6 +6655,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu) * have a cookie. 
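 * Both the wall-clock and the task-clock sibidle start stamps are
 * reset below so that sibidle accounting restarts cleanly once the
 * new core leader is installed.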
*/ core_rq->core_sibidle_start = 0; + core_rq->core_sibidle_start_task = 0; /* install new leader */ for_each_cpu(t, smt_mask) { @@ -10208,6 +10228,7 @@ void __init sched_init(void) rq->core_sibidle_count = 0; rq->core_sibidle_occupation = 0; rq->core_sibidle_start = 0; + rq->core_sibidle_start_task = 0; rq->core_cookie = 0UL; #endif diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index 8db2999e51c8..f931992fc08e 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -241,6 +241,7 @@ void __sched_core_account_sibidle(struct rq *rq) { const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); u64 delta, now = rq_clock(rq->core); + u64 delta_task, now_task = rq_clock_task(rq->core); struct rq *rq_i; struct task_struct *p; int i; @@ -258,10 +259,12 @@ void __sched_core_account_sibidle(struct rq *rq) goto out; delta = now - rq->core->core_sibidle_start; + delta_task = now_task - rq->core->core_sibidle_start_task; if (unlikely((s64)delta <= 0)) goto out; rq->core->core_sibidle_start = now; + rq->core->core_sibidle_start_task = now_task; if (rq->core->core_sibidle_count > 1 || rq->core->core_sibidle_occupation > 1) { @@ -272,6 +275,8 @@ void __sched_core_account_sibidle(struct rq *rq) */ delta *= rq->core->core_sibidle_count; delta = div_u64(delta, rq->core->core_sibidle_occupation); + delta_task *= rq->core->core_sibidle_count; + delta_task = div_u64(delta_task, rq->core->core_sibidle_occupation); } for_each_cpu(i, smt_mask) { @@ -285,7 +290,8 @@ void __sched_core_account_sibidle(struct rq *rq) * Note: this will account sibidle to the current cpu, even * if it comes from our SMT sibling. */ - __account_sibidle_time(p, delta, !!rq->core->core_forceidle_count); + __account_sibidle_time(p, delta, delta_task, + !!rq->core->core_forceidle_count); } out: diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 6e995f4857be..2a6701040cf6 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -237,18 +237,24 @@ void account_idle_time(u64 cputime) * * REQUIRES: schedstat is enabled. 
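 * @delta is measured with rq_clock() and therefore includes irq time;
 * @delta_task is measured with rq_clock_task() and excludes it, feeding
 * the new CPUTIME_*_TASK statistics.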
 */
-void __account_sibidle_time(struct task_struct *tsk, u64 delta, bool fi)
+void __account_sibidle_time(struct task_struct *p, u64 delta, u64 delta_task, bool fi)
 {
 	unsigned int cpu = task_cpu(p);

 	__schedstat_add(p->stats.core_sibidle_sum, delta);
+	__schedstat_add(p->stats.core_sibidle_task_sum, delta_task);
 	kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE] += delta;
+	kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE_TASK] += delta_task;
 	cgroup_account_cputime_field(p, CPUTIME_SIBIDLE, delta);
+	cgroup_account_cputime_field(p, CPUTIME_SIBIDLE_TASK, delta_task);

 #ifdef CONFIG_SCHED_CORE
 	if (fi) {
 		__schedstat_add(p->stats.core_forceidle_sum, delta);
+		__schedstat_add(p->stats.core_forceidle_task_sum, delta_task);
 		kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE] += delta;
+		kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE_TASK] += delta_task;
 		cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE, delta);
+		cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE_TASK, delta_task);
 	}
 #endif
 }
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 464fa6b7c2a9..0baa877597df 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1059,9 +1059,11 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,

 #ifdef CONFIG_SCHED_CORE
 		PN_SCHEDSTAT(core_forceidle_sum);
+		PN_SCHEDSTAT(core_forceidle_task_sum);
 #endif
 #ifdef CONFIG_SCHED_ACPU
 		PN_SCHEDSTAT(core_sibidle_sum);
+		PN_SCHEDSTAT(core_sibidle_task_sum);
 #endif
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 88dad12e4f67..fc96d29e2c70 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1165,6 +1165,7 @@ struct rq {
 	unsigned int	core_forceidle_seq;
 	unsigned int	core_sibidle_occupation;
 	u64		core_sibidle_start;
+	u64		core_sibidle_start_task;
 	unsigned int	core_sibidle_count;
 #endif

@@ -1179,7 +1180,9 @@ struct rq {
 #ifdef CONFIG_SCHED_ACPU
 	u64		acpu_idle_sum;
 	u64		sibidle_sum;
+	u64		sibidle_task_sum;
 	u64		last_acpu_update_time;
+	u64		last_acpu_update_time_task;
 #endif
 };
-- 
Gitee


From 6bf4e4374ef61fe91395fee72a19987dab64ae51 Mon Sep 17 00:00:00 2001
From: Yabin Li
Date: Sat, 7 May 2022 18:13:46 +0800
Subject: [PATCH 435/953] anolis: newfeature: crypto: ccp: Support SM2 algorithm for hygon ccp.

ANBZ: #8582

Add an SM2 driver for the Hygon CCP, covering SM2_sign, SM2_verify,
SM2_encrypt and SM2_decrypt.

Signed-off-by: Yabin Li
Signed-off-by: yangdepei
Reviewed-by: Tianjia Zhang
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2924
---
 drivers/crypto/ccp/Kconfig                |    7 +
 drivers/crypto/ccp/Makefile               |    2 +
 drivers/crypto/ccp/ccp-crypto-main.c      |   15 +
 drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 1053 +++++++++++++++++++++
 drivers/crypto/ccp/ccp-crypto.h           |   39 +
 drivers/crypto/ccp/ccp-dev-v5.c           |   45 +
 drivers/crypto/ccp/ccp-dev.h              |    8 +
 drivers/crypto/ccp/ccp-ops.c              |   94 ++
 include/linux/ccp.h                       |   49 +
 9 files changed, 1312 insertions(+)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-sm2-hygon.c

diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 32268e239bf1..9d5d3312f8e3 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -46,6 +46,13 @@ config CRYPTO_DEV_SP_PSP
 	  along with software-based Trusted Execution Environment (TEE) to
 	  enable third-party trusted applications.
+config HYGON_GM + bool "Hygon GM (sm2/sm3/sm4) Interface" + default y + depends on CRYPTO_DEV_CCP_CRYPTO && X86_64 + help + Hygon GM ccp driver + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 82be0ac4a0b6..2f002be97210 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -24,3 +24,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o + +ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index ecd58b38c46e..c2ef834eb1fa 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -39,6 +39,10 @@ static unsigned int rsa_disable; module_param(rsa_disable, uint, 0444); MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); +static unsigned int sm_disable; +module_param(sm_disable, uint, 0444); +MODULE_PARM_DESC(sm_disable, "Disable use of SM2/SM3/SM4 - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(skcipher_algs); @@ -322,6 +326,17 @@ static int ccp_register_algs(void) { int ret; +#ifdef CONFIG_HYGON_GM + if (!sm_disable && boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = ccp_register_sm2_hygon_algs(&akcipher_algs); + if (ret) + return ret; + + /* Return on hygon platform */ + return 0; + } +#endif + if (!aes_disable) { ret = ccp_register_aes_algs(&skcipher_algs); if (ret) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c new file mode 100644 index 000000000000..fbf1c5e85fce --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -0,0 +1,1053 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM2 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +}; + +static const u8 sm2_ecc_a[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, +}; + +static const u8 sm2_ecc_b[CCP_SM2_OPERAND_LEN] = { + 0x28, 0xE9, 0xFA, 0x9E, 0x9D, 0x9F, 0x5E, 0x34, + 0x4D, 0x5A, 0x9E, 0x4B, 0xCF, 0x65, 0x09, 0xA7, + 0xF3, 0x97, 0x89, 0xF5, 0x15, 0xAB, 0x8F, 0x92, + 0xDD, 0xBC, 0xBD, 0x41, 0x4D, 0x94, 0x0E, 0x93, +}; + +static const u8 sm2_ecc_n_sub_1[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x72, 0x03, 0xDF, 0x6B, 0x21, 0xC6, 0x05, 0x2B, + 0x53, 0xBB, 0xF4, 0x09, 0x39, 0xD5, 0x41, 0x22, +}; + +static const u8 sm2_ecc_gx[CCP_SM2_OPERAND_LEN] = { + 0x32, 0xC4, 0xAE, 0x2C, 0x1F, 0x19, 0x81, 0x19, + 0x5F, 0x99, 0x04, 0x46, 0x6A, 0x39, 0xC9, 0x94, + 0x8F, 0xE3, 0x0B, 0xBF, 0xF2, 0x66, 0x0B, 0xE1, + 0x71, 0x5A, 0x45, 0x89, 0x33, 0x4C, 0x74, 0xC7, +}; + +static const u8 sm2_ecc_gy[CCP_SM2_OPERAND_LEN] = { + 0xBC, 0x37, 0x36, 0xA2, 0xF4, 0xF6, 0x77, 0x9C, + 0x59, 0xBD, 0xCE, 0xE3, 0x6B, 0x69, 0x21, 0x53, + 0xD0, 0xA9, 0x87, 0x7C, 0xC6, 0x2A, 0x47, 0x40, + 0x02, 0xDF, 0x32, 0xE5, 0x21, 0x39, 0xF0, 0xA0, +}; + +struct ccp_sm2_verify_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* input data r */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* input data s */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_lp_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_kg_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_sign_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* private key */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_mmul_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* mulplicand */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* mulplicator */ +}; + +struct ccp_sm2_dst { + union { + u8 result[CCP_SM2_OPERAND_LEN]; + u32 status; + } u; + u8 result_r[CCP_SM2_OPERAND_LEN]; + u8 result_s[CCP_SM2_OPERAND_LEN]; + u8 result_t[CCP_SM2_OPERAND_LEN]; +}; + +static bool ccp_sm2_is_zero(const u64 *data, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++) { + if (data[i]) + return false; + } + + return true; +} + +/* Return: + * 1: a > b + * -1: a < b + * 0: a = b + */ +static int ccp_sm2_fp_cmp(const u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu; + u32 i; + + for (i = 0; i < count; i++) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + if (a_cpu > b_cpu) + return 1; + else if (a_cpu < b_cpu) + return -1; + } + + return 0; +} + +/* a = a + b */ +static void ccp_sm2_fp_add(u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu, c_cpu, d_cpu; + u32 carry = 0; + s32 i; + + for (i = count - 1; i >= 0; i--) { + a_cpu = 
be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + c_cpu = a_cpu + b_cpu; + d_cpu = c_cpu + carry; + a[i] = cpu_to_be64(d_cpu); + + if (c_cpu < a_cpu) + carry = 1; + else if (carry && !d_cpu) + carry = 1; + else + carry = 0; + } +} + +/* a = -a */ +static void ccp_sm2_fp_neg(u64 *a, u32 count) +{ + u64 a_cpu, c_cpu; + s32 i; + + for (i = 0; i <= count - 1; i++) + a[i] = ~a[i]; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + c_cpu = a_cpu + 1; + a[i] = cpu_to_be64(c_cpu); + + if (a_cpu < c_cpu) + break; + } +} + +/* a = a - b */ +static void ccp_sm2_fp_sub(u64 *a, u64 *b, u32 count) +{ + ccp_sm2_fp_neg(b, count); + ccp_sm2_fp_add(a, b, count); +} + +/* a and tmp must be 64B, b and c must be 32B + * a = b * c + */ +static void ccp_sm2_fp_mmul32(u8 *a, const u32 *b, const u32 *c, u8 *tmp) +{ + u64 b_cpu, c_cpu, m_cpu; + u32 rem_cpu; + u32 *base, *m_cur; + int i, j, iter; + + memset(a, 0, CCP_SM2_MMUL_LEN); + + iter = 7; + base = (u32 *)(tmp + CCP_SM2_MMUL_LEN - sizeof(u32)); + for (i = iter; i >= 0; i--) { + b_cpu = be32_to_cpu(b[i]); + memset(tmp, 0, CCP_SM2_MMUL_LEN); + + rem_cpu = 0; + m_cur = base; + for (j = iter; j >= 0; j--) { + c_cpu = be32_to_cpu(c[j]); + + m_cpu = b_cpu * c_cpu + rem_cpu; + rem_cpu = (u32)(m_cpu >> 32); + *m_cur = cpu_to_be32((u32)(m_cpu)); + m_cur--; + } + *m_cur = cpu_to_be32(rem_cpu); + ccp_sm2_fp_add((u64 *)a, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + + base--; + } +} + +/* mmul, dst, tmp must be 64B, remainder in mmul[32-63] + * high:low mod p + * = high*2^256+low mod p + * = high*(p+h)+low mod p + * = high*h+low mod p + * = high*(2^224+2^96-2^64+1)+low mod p + * iterating 8 times + */ +static void ccp_sm2_fast_mod_p(u8 *mmul, u8 *dst, u8 *tmp) +{ + u8 *mmul_high, *mmul_low; + u32 count; + int i, iter, ret; + + mmul_high = mmul; + mmul_low = mmul + CCP_SM2_OPERAND_LEN; + count = CCP_SM2_MMUL_LEN / sizeof(u64); + + iter = 8; + for (i = 0; i < iter; i++) { + /* dst = high * 2^224 */ + memset(dst, 0, CCP_SM2_MMUL_LEN); + memcpy(dst + 4, mmul_high, CCP_SM2_OPERAND_LEN); + + /* dst += high * 2^96 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 20, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 2^64 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 24, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_sub((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 1 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += low */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_low, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* copy dst to mmul */ + memcpy(mmul, dst, CCP_SM2_MMUL_LEN); + } + + do { + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + ret = ccp_sm2_fp_cmp( + (u64 *)mmul, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + if (ret < 0) + break; + + ccp_sm2_fp_sub((u64 *)mmul, (u64 *)tmp, count); + } while (1); +} + +static int ccp_sm2_is_privkey_valid(const u8 *priv_key) +{ + u64 last, last_cpu; + bool zero; + int ret; + + /* private key is satisfied with(1, n-1) */ + zero = ccp_sm2_is_zero((const u64 *)priv_key, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64) - 1); + if (zero) { + last = *(const u64 *) + (priv_key + CCP_SM2_PRIVATE_KEY_LEN - sizeof(u64)); + last_cpu = be64_to_cpu(last); + if (last_cpu <= 1) + return -EINVAL; + } + + ret = ccp_sm2_fp_cmp((const u64 *)priv_key, + (const u64 *)sm2_ecc_n_sub_1, 
+ CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_setprivkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + int ret; + + if (!key || keylen != CCP_SM2_PRIVATE_KEY_LEN) + return -EINVAL; + + ret = ccp_sm2_is_privkey_valid(key); + if (ret < 0) + return ret; + + memcpy(sm2->pri_key, key, CCP_SM2_PRIVATE_KEY_LEN); + sm2->pri_key_len = CCP_SM2_PRIVATE_KEY_LEN; + + return 0; +} + +static int ccp_sm2_post_cmd(struct ccp_sm2_req_ctx *rctx, + u32 src_size, enum ccp_sm2_mode mode, u32 rand) +{ + struct akcipher_request *req = rctx->req; + struct ccp_sm2_engine *sm2 = NULL; + int ret; + + sg_init_one(&rctx->src_sg, rctx->src, src_size); + memset(rctx->dst, 0, CCP_SM2_DST_SIZE); + sg_init_one(&rctx->dst_sg, rctx->dst, CCP_SM2_DST_SIZE); + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM2; + + sm2 = &rctx->cmd.u.sm2; + sm2->mode = mode; + sm2->rand = rand; /* whether read operand_k from trng */ + sm2->src = &rctx->src_sg; + sm2->src_len = src_size; + sm2->dst = &rctx->dst_sg; + sm2->dst_len = CCP_SM2_DST_SIZE; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm2_pubkey_strict_valid(const u8 *px, const u8 *py) +{ + u64 buf[CCP_SM2_OPERAND_LEN / sizeof(u64)]; + int ret1, ret2; + + /* private key is 1, corresponding public key is invalid */ + ret1 = memcmp(px, sm2_ecc_gx, CCP_SM2_OPERAND_LEN); + ret2 = memcmp(py, sm2_ecc_gy, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + /* private key is n - 1, corresponding public key is invalid */ + memcpy(buf, py, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add(buf, (const u64 *)sm2_ecc_gy, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + ret2 = memcmp(buf, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_is_pubkey_valid(struct ccp_sm2_req_ctx *rctx, bool strict) +{ + const u8 *px, *py; + u8 *tmp; + bool zero; + int ret; + + px = rctx->src + CCP_SM2_LP_SRC_SIZE; + py = px + CCP_SM2_OPERAND_LEN; + + zero = ccp_sm2_is_zero((u64 *)px, CCP_SM2_PUBLIC_KEY_LEN / sizeof(u64)); + if (zero) + return -EINVAL; + + /* x < p */ + ret = ccp_sm2_fp_cmp((u64 *)px, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + /* y < p */ + ret = ccp_sm2_fp_cmp((u64 *)py, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + if (strict) { + ret = ccp_sm2_pubkey_strict_valid(px, py); + if (ret < 0) + return ret; + } + + /* check whether y^2 = x^3 + ax + b */ + tmp = rctx->dst + CCP_SM2_MMUL_LEN; + /* y * y */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)py, (u32 *)py, tmp); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* x * x + a */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)px, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_a, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src, rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* (x * x + a) * x + b */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)rctx->src, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + 
memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_b, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + + ret = memcmp(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + if (ret) + return -EINVAL; + + /* Because the cofactor of the ECC group is 1, + * the checking that [n]P=O is not required. + */ + + return 0; +} + +static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + struct ccp_sm2_req_ctx *rctx = NULL; + int ret; + + if (!key || keylen != CCP_SM2_PUBLIC_KEY_LEN) + return -EINVAL; + + /* check whether public key is valid */ + rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); + if (!rctx) + return -ENOMEM; + + memcpy(rctx->src + CCP_SM2_LP_SRC_SIZE, key, CCP_SM2_PUBLIC_KEY_LEN); + ret = ccp_sm2_is_pubkey_valid(rctx, true); + kfree(rctx); + if (ret < 0) + return ret; + + /* public key is valid */ + memcpy(sm2->pub_key, key, CCP_SM2_PUBLIC_KEY_LEN); + sm2->pub_key_len = CCP_SM2_PUBLIC_KEY_LEN; + + return 0; +} + +static unsigned int ccp_sm2_maxsize(struct crypto_akcipher *tfm) +{ + return CCP_SM2_DST_SIZE; +} + +static int ccp_sm2_compute_c3(struct crypto_shash *shash, + struct scatterlist *sg, u32 mlen, + u8 *c3, const u8 *x2, const u8 *y2) +{ + unsigned int len, remain; + int ret; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret < 0) + return ret; + + /* update X2 */ + ret = crypto_shash_update(sdesc, x2, CCP_SM2_OPERAND_LEN); + if (ret < 0) + return ret; + + /* update M */ + remain = mlen; + while (sg) { + len = sg->length; + if (len > remain) + len = remain; + ret = crypto_shash_update(sdesc, (u8 *)sg_virt(sg), len); + if (ret < 0) + return ret; + + remain -= len; + if (!remain) + break; + + sg = sg_next(sg); + } + + /* ccp_sm2_encrypt should have checked length */ + if (unlikely(!sg)) + return -EINVAL; + + /* update Y2 */ + ret = crypto_shash_finup(sdesc, y2, CCP_SM2_OPERAND_LEN, c3); + + return ret; +} + +static bool ccp_sm2_msg_xor_t(u8 *msg, const u8 *t, u32 len) +{ + u64 *msg_cur, *msg_last, *t_cur; + u32 zero_cnt = 0; + u32 rem; + int i; + + msg_cur = (u64 *)msg; + t_cur = (u64 *)t; + msg_last = msg_cur + (len / sizeof(u64)); + while (msg_cur != msg_last) { + if (likely(*t_cur)) + *msg_cur = *msg_cur ^ *t_cur; + else + zero_cnt += sizeof(u64); + + msg_cur++; + t_cur++; + } + + msg = (u8 *)msg_cur; + t = (const u8 *)t_cur; + rem = len % sizeof(u64); + for (i = 0; i < rem; i++) { + if (likely(t[i])) + msg[i] = msg[i] ^ t[i]; + else + zero_cnt++; + } + + return zero_cnt == len; +} + +static int ccp_sm2_kdf_xor(struct crypto_shash *shash, + struct scatterlist *src, u32 src_offset, u32 src_len, + struct scatterlist *dst, u32 dst_offset, + u8 *x2_y2_ct, bool *all_zero, struct ccp_sm2_req_ctx *rctx) +{ + u32 *be_ct = NULL; + u32 ct, len, remain; + bool zero; + int ret = 0; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + *all_zero = true; + ct = 1; + be_ct = (u32 *)(x2_y2_ct + CCP_SM2_PUBLIC_KEY_LEN); + remain = src_len; + while (remain) { + len = SM3_DIGEST_SIZE; + if (len > remain) + len = remain; + *be_ct = cpu_to_be32(ct); + ret = crypto_shash_digest(sdesc, x2_y2_ct, + CCP_SM2_PUBLIC_KEY_LEN + sizeof(*be_ct), rctx->src); + if (ret < 0) + break; + + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, src, + src_offset, len, 0); + zero = 
ccp_sm2_msg_xor_t(rctx->src + SM3_DIGEST_SIZE, + rctx->src, len); + if (zero == false) + *all_zero = false; + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, dst, + dst_offset, len, 1); + + remain -= len; + src_offset += len; + dst_offset += len; + ct++; + } + + return ret; +} + +static void ccp_sm2_enc_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + scatterwalk_map_and_copy(rctx->src, req->src, 0, req->src_len, 0); + + /* C2 = M ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, + req->dst, CCP_SM2_ENCRYPT_EXT_LEN, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (unlikely(all_zero)) { + ret = -EAGAIN; + goto e_hash; + } + + /* C3 */ + ret = ccp_sm2_compute_c3(shash, req->src, req->src_len, rctx->src, + dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* save C3 */ + scatterwalk_map_and_copy(rctx->src, req->dst, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 1); + +e_hash: + crypto_free_shash(shash); + +e_complete: + req->base.complete(&req->base, ret); +} + +static void ccp_sm2_enc_lp(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int ret; + + /* save C1 */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_PUBLIC_KEY_LEN, 1); + /* operand_k used by kg is placed in dst->result_t */ + memcpy(src->operand_k, dst->result_t, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + rctx->phase = CCP_SM2_ENC_PH_LP; + + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + if (ret != -EBUSY && ret != -EINPROGRESS) + req->base.complete(&req->base, ret); +} + +static int ccp_sm2_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (!req->src_len || + req->dst_len < CCP_SM2_ENCRYPT_EXT_LEN + req->src_len) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + rctx->req = req; + rctx->phase = CCP_SM2_ENC_PH_KG; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_KG_SRC_SIZE, CCP_SM2_MODE_KG, 1); + + return ret; +} + +static void ccp_sm2_dec_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* M' = C2 ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, 
CCP_SM2_ENCRYPT_EXT_LEN, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, req->dst, 0, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (all_zero) { + ret = -EBADMSG; + goto e_hash; + } + + /* u */ + ret = ccp_sm2_compute_c3(shash, req->dst, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, + rctx->src, dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* load and compare C3 */ + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, req->src, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 0); + ret = memcmp(rctx->src, rctx->src + SM3_DIGEST_SIZE, SM3_DIGEST_SIZE); + if (ret) + ret = -EBADMSG; + +e_hash: + crypto_free_shash(shash); + +e_complete: + /* clear private key, plain, and dC1 */ + memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); + memset(dst, 0, CCP_SM2_DST_SIZE); + req->base.complete(&req->base, ret); +} + +static int ccp_sm2_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len <= (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE)) + return -EINVAL; + + if (req->dst_len < req->src_len - CCP_SM2_ENCRYPT_EXT_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + /* load C1 */ + scatterwalk_map_and_copy(rctx->src + CCP_SM2_LP_SRC_SIZE, + req->src, 0, CCP_SM2_PUBLIC_KEY_LEN, 0); + ret = ccp_sm2_is_pubkey_valid(rctx, false); + if (ret < 0) + return -EBADMSG; + + /* do kP */ + memcpy(src->operand_k, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + memcpy(src->operand_px, rctx->src + CCP_SM2_LP_SRC_SIZE, + CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, rctx->src + CCP_SM2_LP_SRC_SIZE + + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + rctx->req = req; + rctx->phase = CCP_SM2_DEC_PH_LP; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + + return ret; +} + +static int ccp_sm2_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN, 0); + memcpy(src->operand_d, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_SIGN_PH_SIGN; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_SIGN_SRC_SIZE, + CCP_SM2_MODE_SIGN, 1); + + return ret; +} + +static int ccp_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN * 3) + return -EINVAL; + + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + 
CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; +} + +static int ccp_sm2_verify_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (dst->u.status) + return -EBADMSG; + + return 0; +} + +static int ccp_sm2_sign_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + struct akcipher_request *req = rctx->req; + + if (unlikely(dst->u.status)) + return -EAGAIN; + + /* save signature */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_OPERAND_LEN * 2, 1); + /* clear private key */ + memset(src->operand_d, 0, CCP_SM2_PRIVATE_KEY_LEN); + + return 0; +} + +static int ccp_sm2_enc_kg_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + /* random operand_k is not satisfied with[1, n-1], try again */ + if (unlikely(dst->u.status)) + return -EAGAIN; + + INIT_WORK(&rctx->work, ccp_sm2_enc_lp); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_enc_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_enc_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_dec_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_dec_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_complete(struct crypto_async_request *async_req, int ret) +{ + struct akcipher_request *req = + container_of(async_req, struct akcipher_request, base); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + + if (ret) + return ret; + + switch (rctx->phase) { + case CCP_SM2_SIGN_PH_SIGN: + ret = ccp_sm2_sign_handle(rctx); + break; + case CCP_SM2_VERIFY_PH_VERIFY: + ret = ccp_sm2_verify_handle(rctx); + break; + case CCP_SM2_ENC_PH_KG: + ret = ccp_sm2_enc_kg_handle(rctx); + break; + case CCP_SM2_ENC_PH_LP: + ret = ccp_sm2_enc_lp_handle(rctx); + break; + case CCP_SM2_DEC_PH_LP: + ret = ccp_sm2_dec_lp_handle(rctx); + break; + } + + return ret; +} + +static int ccp_sm2_init_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct ccp_sm2_req_ctx)); + ctx->complete = ccp_sm2_complete; + + return 0; +} + +static void ccp_sm2_exit_tfm(struct crypto_akcipher *tfm) +{ +} + +static struct akcipher_alg ccp_sm2_defaults = { + .sign = ccp_sm2_sign, + .verify = ccp_sm2_verify, + .encrypt = ccp_sm2_encrypt, + .decrypt = ccp_sm2_decrypt, + .set_pub_key = ccp_sm2_setpubkey, + .set_priv_key = ccp_sm2_setprivkey, + .max_size = ccp_sm2_maxsize, + .init = ccp_sm2_init_tfm, + .exit = ccp_sm2_exit_tfm, + .base = { + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_module = THIS_MODULE, + }, +}; + +struct ccp_sm2_def { + unsigned int version; + const char *name; + const char *driver_name; + 
struct akcipher_alg *alg_defaults; +}; + +static struct ccp_sm2_def sm2_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm2", + .driver_name = "sm2-ccp", + .alg_defaults = &ccp_sm2_defaults, + } +}; + +static int ccp_register_sm2_hygon_alg(struct list_head *head, + const struct ccp_sm2_def *def) +{ + struct ccp_crypto_akcipher_alg *ccp_alg; + struct akcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + + ret = crypto_register_akcipher(alg); + if (ret) { + pr_err("%s akcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm2_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm2_algs); i++) { + if (sm2_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm2_hygon_alg(head, &sm2_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index e42450d07168..5133b921a5f5 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -258,6 +258,43 @@ struct ccp_rsa_req_ctx { #define CCP_RSA_MAXMOD (4 * 1024 / 8) #define CCP5_RSA_MAXMOD (16 * 1024 / 8) +/***** SM2 related defines *****/ +#define CCP_SM2_OPERAND_LEN 32 +#define CCP_SM2_PRIVATE_KEY_LEN CCP_SM2_OPERAND_LEN +#define CCP_SM2_PUBLIC_KEY_LEN (CCP_SM2_OPERAND_LEN * 2) +#define CCP_SM2_ENCRYPT_EXT_LEN (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE) +#define CCP_SM2_MMUL_LEN (CCP_SM2_OPERAND_LEN * 2) + +struct ccp_sm2_ctx { + u32 pri_key_len; + u32 pub_key_len; + u8 pri_key[CCP_SM2_PRIVATE_KEY_LEN]; + u8 pub_key[CCP_SM2_PUBLIC_KEY_LEN]; +}; + +enum ccp_sm2_op_phase { + CCP_SM2_SIGN_PH_SIGN, + CCP_SM2_VERIFY_PH_VERIFY, + CCP_SM2_ENC_PH_KG, + CCP_SM2_ENC_PH_LP, + CCP_SM2_DEC_PH_LP +}; + +struct ccp_sm2_req_ctx { + enum ccp_sm2_op_phase phase; + struct akcipher_request *req; + + u8 src[CCP_SM2_VERIFY_SRC_SIZE]; + u8 dst[CCP_SM2_DST_SIZE]; + + struct scatterlist src_sg; + struct scatterlist dst_sg; + + struct work_struct work; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -267,6 +304,7 @@ struct ccp_ctx { struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; + struct ccp_sm2_ctx sm2; } u; }; @@ -282,5 +320,6 @@ int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); +int ccp_register_sm2_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7b73332d6aa1..2c144fa64e88 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,6 +131,11 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; u16 raw; }; @@ -151,6 +156,8 @@ union ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) +#define CCP_SM2_RAND(p) 
((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -584,6 +591,43 @@ static int ccp5_perform_ecc(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = saddr->length; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1103,6 +1147,7 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, + .sm2 = ccp5_perform_sm2, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 83350e2d9821..2b45309b78fa 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -334,6 +334,7 @@ struct ccp_cmd_queue { unsigned long total_rsa_ops; unsigned long total_pt_ops; unsigned long total_ecc_ops; + unsigned long total_sm2_ops; } ____cacheline_aligned; struct ccp_device { @@ -528,6 +529,11 @@ struct ccp_ecc_op { enum ccp_ecc_function function; }; +struct ccp_sm2_op { + u32 rand; + enum ccp_sm2_mode mode; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -551,6 +557,7 @@ struct ccp_op { struct ccp_rsa_op rsa; struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; + struct ccp_sm2_op sm2; } u; }; @@ -657,6 +664,7 @@ struct ccp_actions { int (*rsa)(struct ccp_op *); int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); + int (*sm2)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index cb8e99936abb..99b59dd296f0 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2463,6 +2463,97 @@ ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } } +static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm2_engine *sm2 = &cmd->u.sm2; + struct ccp_data src, dst; + struct ccp_op op; + int ret; + + if (!sm2->src || !sm2->dst) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.init = 1; + op.eom = 1; + op.u.sm2.rand = sm2->rand & 0x1; + op.u.sm2.mode = sm2->mode; + + memset(&src, 0, sizeof(src)); + ret = ccp_init_sg_workarea(&src.sg_wa, cmd_q->ccp->dev, + sm2->src, sm2->src_len, DMA_TO_DEVICE); + if (ret) + return ret; + + /* if src isn't 
contiguous, should copy to a contiguous buffer */ + if (src.sg_wa.dma_count == 1) { + op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); + } else { + ccp_sg_free(&src.sg_wa); + ret = ccp_init_dm_workarea(&src.dm_wa, cmd_q, sm2->src_len, + DMA_TO_DEVICE); + if (ret) + goto e_src; + + ccp_set_dm_area(&src.dm_wa, 0, sm2->src, 0, sm2->src_len); + op.src.u.dma.address = src.dm_wa.dma.address; + } + + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.offset = 0; + op.src.u.dma.length = sm2->src_len; + op.src.u.dma.dir = DMA_TO_DEVICE; + + memset(&dst, 0, sizeof(dst)); + ret = ccp_init_sg_workarea(&dst.sg_wa, cmd_q->ccp->dev, + sm2->dst, sm2->dst_len, DMA_FROM_DEVICE); + if (ret) + goto e_src; + + /* if dst isn't contiguous, should copy to a contiguous buffer */ + if (dst.sg_wa.dma_count == 1) { + op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); + } else { + ccp_sg_free(&dst.sg_wa); + ret = ccp_init_dm_workarea(&dst.dm_wa, cmd_q, sm2->dst_len, + DMA_FROM_DEVICE); + if (ret) + goto e_dst; + + op.dst.u.dma.address = dst.dm_wa.dma.address; + } + + op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = sm2->dst_len; + op.dst.u.dma.dir = DMA_FROM_DEVICE; + + ret = cmd_q->ccp->vdata->perform->sm2(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (dst.dm_wa.address) { + ccp_get_dm_area(&dst.dm_wa, 0, sm2->dst, 0, sm2->dst_len); + memset(dst.dm_wa.address, 0, sm2->dst_len); + } + +e_dst: + ccp_free_data(&dst, cmd_q); + +e_src: + if (src.dm_wa.address) + memset(src.dm_wa.address, 0, sm2->src_len); + + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2507,6 +2598,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM2: + ret = ccp_run_sm2_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 868924dec5a1..bd947cb8d41f 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -17,6 +17,7 @@ #include #include #include +#include struct ccp_device; struct ccp_cmd; @@ -587,6 +588,51 @@ struct ccp_ecc_engine { u16 ecc_result; }; +/***** SM2 engine *****/ +#define CCP_SM2_VERIFY_SRC_SIZE 160 +#define CCP_SM2_LP_SRC_SIZE 96 +#define CCP_SM2_KG_SRC_SIZE 32 +#define CCP_SM2_SIGN_SRC_SIZE 96 +#define CCP_SM2_MMUL_SRC_SIZE 64 +#define CCP_SM2_DST_SIZE 128 + +/** + * ccp_sm2_mode - SM2 operation mode + * + * @CCP_SM2_MODE_VERIFY: Verify mode + * @CCP_SM2_MODE_LP: LP mode + * @CCP_SM2_MODE_KG: KG mode + * @CCP_SM2_MODE_SIGN: SIGN mode + * @CCP_SM2_MODE_MMUL: MMUL mode + */ +enum ccp_sm2_mode { + CCP_SM2_MODE_VERIFY, + CCP_SM2_MODE_LP, + CCP_SM2_MODE_KG, + CCP_SM2_MODE_SIGN, + CCP_SM2_MODE_MMUL, + CCP_SM2_MODE__LAST, +}; + +/** + * struct ccp_sm2_engine - CCP SM2 operation + * @mode: SM2 operation mode + * @rand: indicateing that operand_k is from TRNG or not + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @dst_len: length in bytes of data produced by this operation + */ +struct ccp_sm2_engine { + enum ccp_sm2_mode mode; + u32 rand; + + struct scatterlist *src; + u32 src_len; + + struct scatterlist *dst; + u32 dst_len; +}; /** * ccp_engine - CCP operation identifiers @@ -599,6 +645,7 @@ struct ccp_ecc_engine { * @CCP_ENGINE_PASSTHRU: pass-through operation * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * 
@CCP_ENGINE_ECC: ECC operation
+ * @CCP_ENGINE_SM2: SM2 operation
 */
enum ccp_engine {
	CCP_ENGINE_AES = 0,
@@ -609,6 +656,7 @@ enum ccp_engine {
 	CCP_ENGINE_PASSTHRU,
 	CCP_ENGINE_ZLIB_DECOMPRESS,
 	CCP_ENGINE_ECC,
+	CCP_ENGINE_SM2 = 8,	/* fixed value */
 	CCP_ENGINE__LAST,
 };

@@ -657,6 +705,7 @@ struct ccp_cmd {
 		struct ccp_passthru_engine passthru;
 		struct ccp_passthru_nomap_engine passthru_nomap;
 		struct ccp_ecc_engine ecc;
+		struct ccp_sm2_engine sm2;
 	} u;

 	/* Completion callback support */
-- 
Gitee


From 67f3475a30304013be15bfd85f2ee75af5e473cb Mon Sep 17 00:00:00 2001
From: Yabin Li
Date: Sat, 7 May 2022 18:20:16 +0800
Subject: [PATCH 436/953] anolis: newfeature: crypto: ccp: Support SM3 algorithm for hygon ccp.

ANBZ: #8582

Add an SM3 driver for the Hygon CCP, including SM3-HMAC support.

Signed-off-by: Yabin Li
Signed-off-by: yangdepei
Reviewed-by: Tianjia Zhang
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2924
---
 drivers/crypto/ccp/Makefile               |   3 +-
 drivers/crypto/ccp/ccp-crypto-main.c      |   4 +
 drivers/crypto/ccp/ccp-crypto-sm3-hygon.c | 488 ++++++++++++++++++++++
 drivers/crypto/ccp/ccp-crypto.h           |  49 +++
 drivers/crypto/ccp/ccp-dev-v5.c           |  45 ++
 drivers/crypto/ccp/ccp-dev.h              |  10 +
 drivers/crypto/ccp/ccp-ops.c              | 157 +++++++
 include/linux/ccp.h                       |  44 ++
 8 files changed, 799 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-sm3-hygon.c

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 2f002be97210..32d93a9b1a6f 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -25,4 +25,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
 		   ccp-crypto-rsa.o \
 		   ccp-crypto-sha.o

-ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o
+ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \
+				 ccp-crypto-sm3-hygon.o
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index c2ef834eb1fa..a2444759687d 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -332,6 +332,10 @@ static int ccp_register_algs(void)
 		if (ret)
 			return ret;

+		ret = ccp_register_sm3_hygon_algs(&hash_algs);
+		if (ret)
+			return ret;
+
 		/* Return on hygon platform */
 		return 0;
 	}
diff --git a/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
new file mode 100644
index 000000000000..46ddbc1f14a8
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
@@ -0,0 +1,488 @@
+/*
+ * Hygon Cryptographic Coprocessor (CCP) SM3 crypto API support
+ *
+ * Copyright (C) 2022 Hygon Info Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +static int ccp_sm3_complete(struct crypto_async_request *async_req, int ret) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if (ret) + goto e_free; + + rctx->msg_bits += (rctx->hash_cnt << 3); + if (rctx->hash_rem) { + /* save remaining data to buffer */ + unsigned int offset = rctx->nbytes - rctx->hash_rem; + + scatterwalk_map_and_copy(rctx->buf, rctx->src, + offset, rctx->hash_rem, 0); + rctx->buf_count = rctx->hash_rem; + } else { + rctx->buf_count = 0; + } + + if (rctx->final) { + if (req->result) + memcpy(req->result, rctx->ctx, SM3_DIGEST_SIZE); + + memset(rctx->ctx, 0, SM3_DIGEST_SIZE); + } + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_do_sm3_update(struct ahash_request *req, unsigned int nbytes, + unsigned int final) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct scatterlist *sg = req->src; + struct ccp_sm3_engine *sm3 = NULL; + unsigned int sg_count; + gfp_t gfp; + u64 len, msg_bits = 0; + int nents; + int ret; + + /* must check length of src, + * otherwise will result in NullPointer exception in ccp_sm3_complete + */ + if (nbytes) { + nents = sg_nents_for_len(req->src, nbytes); + if (nents < 0) + return -EINVAL; + } + + len = (u64)rctx->buf_count + (u64)nbytes; + if (len <= SM3_BLOCK_SIZE) { + scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, + 0, nbytes, 0); + rctx->buf_count += nbytes; + if (!final) + return 0; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = &rctx->buf_sg; + } else { + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + if (rctx->buf_count) { + /* build the scatterlist table: (buffer and input data) */ + sg_count = sg_nents(req->src) + 1; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add( + &rctx->data_sg, &rctx->buf_sg); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg = ccp_crypto_sg_table_add(&rctx->data_sg, + req->src); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg_mark_end(sg); + + sg = rctx->data_sg.sgl; + } else { + sg = req->src; + } + } + + rctx->final = final; + if (final) { + rctx->hash_rem = 0; + rctx->hash_cnt = len; + msg_bits = rctx->msg_bits + (len << 3); + } else { + rctx->hash_rem = len & (SM3_BLOCK_SIZE - 1); + rctx->hash_cnt = len - rctx->hash_rem; + rctx->src = req->src; + rctx->nbytes = nbytes; + } + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM3; + + sm3 = &rctx->cmd.u.sm3; + sm3->type = CCP_SM3_TYPE_256; + sm3->ctx = &rctx->ctx_sg; + sm3->ctx_len = SM3_DIGEST_SIZE; + sm3->src = sg; + sm3->src_len = rctx->hash_cnt; + sm3->first = rctx->msg_bits ? 
0 : 1; + sm3->final = final; + sm3->msg_bits = msg_bits; + if (final && ctx->u.sm3.key_len) { + sm3->opad = &ctx->u.sm3.opad_sg; + sm3->opad_len = SM3_BLOCK_SIZE; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_sm3_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if ((crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) && + (!ctx->u.sm3.key_len)) + return -ENOKEY; + + memset(rctx, 0, sizeof(*rctx)); + if (ctx->u.sm3.key_len) { + /* buffer the HMAC key for first update */ + memcpy(rctx->buf, ctx->u.sm3.ipad, SM3_BLOCK_SIZE); + rctx->buf_count = SM3_BLOCK_SIZE; + } + + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + + return 0; +} + +static int ccp_sm3_update(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 0); +} + +static int ccp_sm3_final(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, 0, 1); +} + +static int ccp_sm3_finup(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 1); +} + +static int ccp_sm3_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_sm3_init(req); + if (unlikely(ret)) + return ret; + + return ccp_sm3_finup(req); +} + +static int ccp_sm3_export(struct ahash_request *req, void *out) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!out) + return -EINVAL; + + /* don't let anything leak to 'out' */ + memset(&state, 0, sizeof(state)); + + state.msg_bits = rctx->msg_bits; + memcpy(state.ctx, rctx->ctx, SM3_DIGEST_SIZE); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, SM3_BLOCK_SIZE); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_import(struct ahash_request *req, const void *in) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!in) + return -EINVAL; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->msg_bits = state.msg_bits; + memcpy(rctx->ctx, state.ctx, SM3_DIGEST_SIZE); + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, SM3_BLOCK_SIZE); + + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_shash *shash = ctx->u.sm3.hmac_tfm; + + SHASH_DESC_ON_STACK(sdesc, shash); + + int i, ret; + + /* set to zero until complete */ + ctx->u.sm3.key_len = 0; + if (!key) + return -EINVAL; + + if (!key_len) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + /* clear key area to provide zero padding for keys smaller + * than the block size + */ + memset(ctx->u.sm3.key, 0, SM3_BLOCK_SIZE); + + if (key_len > SM3_BLOCK_SIZE) { + /* must hash the input key */ + sdesc->tfm = shash; + ret = crypto_shash_digest(sdesc, key, key_len, + ctx->u.sm3.key); + if (ret) { + crypto_ahash_set_flags( + tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + key_len = SM3_DIGEST_SIZE; + } else { + memcpy(ctx->u.sm3.key, key, key_len); + } + + for (i = 0; i < 
SM3_BLOCK_SIZE; i++) { + ctx->u.sm3.ipad[i] = ctx->u.sm3.key[i] ^ HMAC_IPAD_VALUE; + ctx->u.sm3.opad[i] = ctx->u.sm3.key[i] ^ HMAC_OPAD_VALUE; + } + + sg_init_one(&ctx->u.sm3.opad_sg, ctx->u.sm3.opad, SM3_BLOCK_SIZE); + + ctx->u.sm3.key_len = key_len; + + return 0; +} + +static int ccp_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + + ctx->complete = ccp_sm3_complete; + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sm3_req_ctx)); + + return 0; +} + +static void ccp_sm3_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_hmac_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); + struct crypto_shash *hmac_tfm; + + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); + if (IS_ERR(hmac_tfm)) { + pr_warn("could not load driver %s need for HMAC support\n", + alg->child_alg); + return PTR_ERR(hmac_tfm); + } + + ctx->u.sm3.hmac_tfm = hmac_tfm; + + return ccp_sm3_cra_init(tfm); +} + +static void ccp_hmac_sm3_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.sm3.hmac_tfm) + crypto_free_shash(ctx->u.sm3.hmac_tfm); + + ccp_sm3_cra_exit(tfm); +} + +struct ccp_sm3_def { + unsigned int version; + const char *name; + const char *drv_name; + enum ccp_sm3_type type; + u32 digest_size; + u32 block_size; +}; + +static struct ccp_sm3_def sm3_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm3", + .drv_name = "sm3-ccp", + .type = CCP_SM3_TYPE_256, + .digest_size = SM3_DIGEST_SIZE, + .block_size = SM3_BLOCK_SIZE, + }, +}; + +static int ccp_register_hmac_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def, + const struct ccp_crypto_ahash_alg *base_alg) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + /* copy the base algorithm and only change what's necessary */ + *ccp_alg = *base_alg; + INIT_LIST_HEAD(&ccp_alg->entry); + + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + + alg = &ccp_alg->alg; + alg->setkey = ccp_sm3_setkey; + + base = &alg->halg.base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s", + def->drv_name); + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + base->cra_init = ccp_hmac_sm3_cra_init; + base->cra_exit = ccp_hmac_sm3_cra_exit; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return ret; +} + +static int ccp_register_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->type = def->type; + + alg = &ccp_alg->alg; + alg->init = ccp_sm3_init; + alg->update = ccp_sm3_update; + alg->final = ccp_sm3_final; + alg->finup = ccp_sm3_finup; + alg->digest = ccp_sm3_digest; + alg->export = ccp_sm3_export; + alg->import = ccp_sm3_import; + + halg = &alg->halg; + halg->digestsize = def->digest_size; + halg->statesize = sizeof(struct 
ccp_sm3_exp_ctx); + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = def->block_size; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_init = ccp_sm3_cra_init; + base->cra_exit = ccp_sm3_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + ret = ccp_register_hmac_sm3_hygon_alg(head, def, ccp_alg); + + return ret; +} + +int ccp_register_sm3_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm3_algs); i++) { + if (sm3_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm3_hygon_alg(head, &sm3_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 5133b921a5f5..33e54fcbca53 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -295,6 +295,53 @@ struct ccp_sm2_req_ctx { struct ccp_cmd cmd; }; +/***** SM3 related defines *****/ +struct ccp_sm3_ctx { + u32 key_len; + u8 key[SM3_BLOCK_SIZE]; + + u8 ipad[SM3_BLOCK_SIZE]; + + u8 opad[SM3_BLOCK_SIZE]; + struct scatterlist opad_sg; + + struct crypto_shash *hmac_tfm; +}; + +struct ccp_sm3_req_ctx { + u64 msg_bits; + + unsigned int first; + unsigned int final; + + struct scatterlist *src; + u32 nbytes; + + u64 hash_cnt; + u32 hash_rem; + + struct sg_table data_sg; + struct scatterlist *src_sg; + + struct scatterlist ctx_sg; + u8 ctx[SM3_DIGEST_SIZE]; + + struct scatterlist buf_sg; + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +struct ccp_sm3_exp_ctx { + u64 msg_bits; + + u8 ctx[SM3_DIGEST_SIZE]; + + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -305,6 +352,7 @@ struct ccp_ctx { struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; struct ccp_sm2_ctx sm2; + struct ccp_sm3_ctx sm3; } u; }; @@ -321,5 +369,6 @@ int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); +int ccp_register_sm3_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2c144fa64e88..7038be74bbb6 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -136,6 +136,11 @@ union ccp_function { u16 rsvd:11; u16 mode:3; } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; u16 raw; }; @@ -158,6 +163,7 @@ union ccp_function { #define CCP_ECC_AFFINE(p) ((p)->ecc.one) #define CCP_SM2_RAND(p) ((p)->sm2.rand) #define CCP_SM2_MODE(p) ((p)->sm2.mode) +#define CCP_SM3_TYPE(p) ((p)->sm3.type) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -193,6 +199,8 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) 
((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -628,6 +636,42 @@ static int ccp5_perform_sm2(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1148,6 +1192,7 @@ static const struct ccp_actions ccp5_actions = { .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, .sm2 = ccp5_perform_sm2, + .sm3 = ccp5_perform_sm3, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2b45309b78fa..2d6c4c404539 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -335,6 +335,7 @@ struct ccp_cmd_queue { unsigned long total_pt_ops; unsigned long total_ecc_ops; unsigned long total_sm2_ops; + unsigned long total_sm3_ops; } ____cacheline_aligned; struct ccp_device { @@ -534,6 +535,11 @@ struct ccp_sm2_op { enum ccp_sm2_mode mode; }; +struct ccp_sm3_op { + enum ccp_sm3_type type; + u64 msg_bits; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -558,6 +564,7 @@ struct ccp_op { struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; struct ccp_sm2_op sm2; + struct ccp_sm3_op sm3; } u; }; @@ -606,6 +613,7 @@ struct dword3 { union dword4 { u32 dst_lo; /* NON-SHA */ u32 sha_len_lo; /* SHA */ + __le32 sm3_len_lo; /* SM3 */ }; union dword5 { @@ -616,6 +624,7 @@ union dword5 { unsigned int fixed:1; } fields; u32 sha_len_hi; + __le32 sm3_len_hi; }; struct dword7 { @@ -665,6 +674,7 @@ struct ccp_actions { int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); int (*sm2)(struct ccp_op *op); + int (*sm3)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 99b59dd296f0..79aa8ff654dd 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2554,6 +2554,160 @@ static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm3_engine *sm3 = &cmd->u.sm3; + struct ccp_dm_workarea ctx; + struct ccp_data src; + struct ccp_op op; + int ret; + + u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 
0x82, 0xAA, 0x2B, + }; + + if ((sm3->ctx == NULL) || (sm3->ctx_len != SM3_DIGEST_SIZE)) + return -EINVAL; + + if (sg_nents_for_len(sm3->ctx, SM3_DIGEST_SIZE) < 0) + return -EINVAL; + + if (sm3->final && sm3->first) { + if (!sm3->src_len) { + scatterwalk_map_and_copy( + (void *)sm3_zero_message_hash, + sm3->ctx, 0, SM3_DIGEST_SIZE, 1); + return 0; + } + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.init = sm3->first & 0x1; + op.u.sm3.type = sm3->type; + op.u.sm3.msg_bits = sm3->msg_bits; + + memset(&ctx, 0, sizeof(ctx)); + ret = ccp_init_dm_workarea(&ctx, cmd_q, SM3_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + return ret; + + if (!sm3->first) { + /* load iv */ + ccp_set_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + + ret = ccp_init_data(&src, cmd_q, sm3->src, sm3->src_len, + SM3_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + /* send data to the CCP SM3 engine */ + if (sm3->src_len) { + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, SM3_BLOCK_SIZE, + false); + if (!src.sg_wa.bytes_left && sm3->final) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ccp_process_data(&src, NULL, &op); + } + } else { + /* do sm3 padding */ + src.dm_wa.address[0] = 0x80; + *(__be64 *)&src.dm_wa.address[56] = cpu_to_be64(sm3->msg_bits); + + op.soc = 0; + op.ioc = 1; + op.eom = 0; + op.src.u.dma.address = src.dm_wa.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = SM3_BLOCK_SIZE; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (sm3->final && sm3->opad) { + /* HMAC operation, recursively perform final SM3 */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u8 *hmac_buf = NULL; + + hmac_buf = kmalloc( + SM3_BLOCK_SIZE + SM3_DIGEST_SIZE, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + scatterwalk_map_and_copy(hmac_buf, sm3->opad, + 0, SM3_BLOCK_SIZE, 0); + memcpy(hmac_buf + SM3_BLOCK_SIZE, ctx.address, + SM3_DIGEST_SIZE); + sg_init_one(&sg, hmac_buf, SM3_BLOCK_SIZE + SM3_DIGEST_SIZE); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SM3; + hmac_cmd.u.sm3.type = sm3->type; + hmac_cmd.u.sm3.ctx = sm3->ctx; + hmac_cmd.u.sm3.ctx_len = sm3->ctx_len; + hmac_cmd.u.sm3.src = &sg; + hmac_cmd.u.sm3.src_len = SM3_BLOCK_SIZE + SM3_DIGEST_SIZE; + hmac_cmd.u.sm3.opad = NULL; + hmac_cmd.u.sm3.opad_len = 0; + hmac_cmd.u.sm3.first = 1; + hmac_cmd.u.sm3.final = 1; + hmac_cmd.u.sm3.msg_bits = + (SM3_BLOCK_SIZE + SM3_DIGEST_SIZE) << 3; + + ret = ccp_run_sm3_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } else { + ccp_get_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + } + +e_data: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2601,6 +2755,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM2: ret = ccp_run_sm2_cmd(cmd_q, cmd); break; + case 
CCP_ENGINE_SM3: + ret = ccp_run_sm3_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index bd947cb8d41f..cda875cf3c71 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -634,6 +634,47 @@ struct ccp_sm2_engine { u32 dst_len; }; +/***** SM3 engine *****/ +/** + * ccp_sm3_type - type of SM3 operation + * + * @CCP_SM3_TYPE_256: SM3 operation + */ +enum ccp_sm3_type { + CCP_SM3_TYPE_256 = 2, + CCP_SM3_TYPE__LAST, +}; + +/** + * struct ccp_sm3_engine - CCP SM3 operation + * @type: Type of SM3 operation + * @ctx: current hash value + * @ctx_len: length in bytes of hash value + * @src: data to be used for this operation + * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SM3 operation + * @final: indicates final SM3 operation + * @msg_bits: total length of the message in bits used in final SM3 operation + */ +struct ccp_sm3_engine { + enum ccp_sm3_type type; + + struct scatterlist *ctx; + u32 ctx_len; + + struct scatterlist *src; + u64 src_len; + + struct scatterlist *opad; + u32 opad_len; + + u32 first; + u32 final; + u64 msg_bits; +}; + /** * ccp_engine - CCP operation identifiers * @@ -646,6 +687,7 @@ struct ccp_sm2_engine { * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation * @CCP_ENGINE_SM2: SM2 operation + * @CCP_ENGINE_SM3: SM3 operation */ enum ccp_engine { CCP_ENGINE_AES = 0, @@ -657,6 +699,7 @@ enum ccp_engine { CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, CCP_ENGINE_SM2 = 8, /* fixed value */ + CCP_ENGINE_SM3, CCP_ENGINE__LAST, }; @@ -706,6 +749,7 @@ struct ccp_cmd { struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; struct ccp_sm2_engine sm2; + struct ccp_sm3_engine sm3; } u; /* Completion callback support */ -- Gitee From 11c854693c3d0c1097ed8394b9ebb4b65e8dc285 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 18:25:26 +0800 Subject: [PATCH 437/953] anolis: newfeature: crypto: ccp: Support SM4 algorithm for hygon ccp. 
ANBZ: #8582 In order to add SM4 driver for hygon ccp, relating to sm4 mode of ecb/ecb_hs, cbc/cbc_hs, cfb, ofb and ctr Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-crypto-main.c | 4 + drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 324 ++++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 17 ++ drivers/crypto/ccp/ccp-dev-v5.c | 127 ++++++++- drivers/crypto/ccp/ccp-dev.h | 18 ++ drivers/crypto/ccp/ccp-ops.c | 230 +++++++++++++++ include/linux/ccp.h | 114 ++++++++ 8 files changed, 835 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm4-hygon.c diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 32d93a9b1a6f..79a764bb11e7 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -26,4 +26,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-sha.o ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ - ccp-crypto-sm3-hygon.o + ccp-crypto-sm3-hygon.o \ + ccp-crypto-sm4-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index a2444759687d..3d22fbabc815 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -336,6 +336,10 @@ static int ccp_register_algs(void) if (ret) return ret; + ret = ccp_register_sm4_hygon_algs(&skcipher_algs); + if (ret) + return ret; + /* Return on hygon platform */ return 0; } diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c new file mode 100644 index 000000000000..0d1c750ff118 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -0,0 +1,324 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM4 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +enum ccp_sm4_alg_mode { + CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB, + CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB, + CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR, + CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE__LAST, +}; + +static int ccp_sm4_complete(struct crypto_async_request *async_req, int ret) +{ + struct skcipher_request *req = skcipher_request_cast(async_req); + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + + if (ret) + return ret; + + if ((ctx->u.sm4.mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + memcpy(req->iv, rctx->iv, SM4_BLOCK_SIZE); + memset(rctx->iv, 0, SM4_BLOCK_SIZE); + } + + return 0; +} + +static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + /* key_len is checked by crypto_ablkcipher_type, + * but key isn't checked + */ + if (!key) + return -EINVAL; + + memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE); + + ctx->u.sm4.key_len = SM4_KEY_SIZE; + + return 0; +} + +static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + struct scatterlist *iv_sg = NULL; + struct ccp_cmd *cmd = NULL; + enum ccp_sm4_alg_mode mode; + enum ccp_sm4_action action; + int ret; + + if (!ctx->u.sm4.key_len) + return -ENOKEY; + + mode = ctx->u.sm4.mode; + if ((mode != CCP_SM4_ALG_MODE_CTR) && + (mode != CCP_SM4_ALG_MODE_OFB) && + (mode != CCP_SM4_ALG_MODE_CFB) && + (req->cryptlen & (SM4_BLOCK_SIZE - 1))) + return -EINVAL; + + if ((mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + if (!req->iv) + return -EINVAL; + + memcpy(rctx->iv, req->iv, SM4_BLOCK_SIZE); + iv_sg = &rctx->iv_sg; + sg_init_one(iv_sg, rctx->iv, SM4_BLOCK_SIZE); + } + + cmd = &rctx->cmd; + memset(cmd, 0, sizeof(*cmd)); + INIT_LIST_HEAD(&cmd->entry); + action = encrypt ? CCP_SM4_ACTION_ENCRYPT : CCP_SM4_ACTION_DECRYPT; + if (mode == CCP_SM4_ALG_MODE_CTR) { + cmd->engine = CCP_ENGINE_SM4_CTR; + cmd->u.sm4_ctr.action = action; + cmd->u.sm4_ctr.size = 63; + cmd->u.sm4_ctr.step = 1; + + cmd->u.sm4_ctr.key = &ctx->u.sm4.key_sg; + cmd->u.sm4_ctr.key_len = SM4_KEY_SIZE; + cmd->u.sm4_ctr.iv = iv_sg; + cmd->u.sm4_ctr.iv_len = SM4_BLOCK_SIZE; + + cmd->u.sm4_ctr.src = req->src; + cmd->u.sm4_ctr.dst = req->dst; + cmd->u.sm4_ctr.src_len = req->cryptlen; + + } else { + cmd->engine = CCP_ENGINE_SM4; + cmd->u.sm4.mode = mode & CCP_SM4_MODE_MASK; + cmd->u.sm4.action = action; + if (mode & CCP_SM4_MODE_HS_SEL) + cmd->u.sm4.select = 1; + + cmd->u.sm4.key = &ctx->u.sm4.key_sg; + cmd->u.sm4.key_len = SM4_KEY_SIZE; + cmd->u.sm4.iv = iv_sg; + cmd->u.sm4.iv_len = iv_sg ? 
SM4_BLOCK_SIZE : 0; + + cmd->u.sm4.src = req->src; + cmd->u.sm4.dst = req->dst; + cmd->u.sm4.src_len = req->cryptlen; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm4_encrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, true); +} + +static int ccp_sm4_decrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, false); +} + +static int ccp_sm4_init_tfm(struct crypto_skcipher *tfm) +{ + struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->complete = ccp_sm4_complete; + ctx->u.sm4.mode = alg->mode; + + crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx)); + + return 0; +} + +static const struct skcipher_alg ccp_sm4_defaults = { + .setkey = ccp_sm4_setkey, + .encrypt = ccp_sm4_encrypt, + .decrypt = ccp_sm4_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .init = ccp_sm4_init_tfm, + + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = SM4_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ccp_ctx), + .base.cra_priority = CCP_CRA_PRIORITY, + .base.cra_module = THIS_MODULE, +}; + +struct ccp_sm4_def { + enum ccp_sm4_alg_mode mode; + unsigned int version; + const char *name; + const char *driver_name; + unsigned int blocksize; + unsigned int ivsize; + const struct skcipher_alg *alg_defaults; +}; + +static struct ccp_sm4_def sm4_algs[] = { + { + .mode = CCP_SM4_ALG_MODE_ECB, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_ECB_HS, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_ECB_HS, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC_HS, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC_HS, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_OFB, + .version = CCP_VERSION(5, 0), + .name = "ofb(sm4)", + .driver_name = "ofb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CFB, + .version = CCP_VERSION(5, 0), + .name = "cfb(sm4)", + .driver_name = "cfb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CTR, + .version = CCP_VERSION(5, 0), + .name = "ctr(sm4)", + .driver_name = "ctr-sm4-ccp", + .blocksize = 1, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, +}; + +static int ccp_register_sm4_hygon_alg(struct list_head 
*head, + const struct ccp_sm4_def *def) +{ + struct ccp_crypto_skcipher_alg *ccp_alg; + struct skcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->mode = def->mode; + + /* copy the defaults and override as necessary */ + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + alg->ivsize = def->ivsize; + + ret = crypto_register_skcipher(alg); + if (ret) { + pr_err("%s skcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm4_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (sm4_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 33e54fcbca53..58b2950f9100 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -342,6 +342,21 @@ struct ccp_sm3_exp_ctx { u8 buf[SM3_BLOCK_SIZE]; }; +/***** SM4 related defines *****/ +struct ccp_sm4_ctx { + struct scatterlist key_sg; + u8 key[SM4_KEY_SIZE]; + u32 key_len; + u32 mode; +}; + +struct ccp_sm4_req_ctx { + struct scatterlist iv_sg; + u8 iv[SM4_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -353,6 +368,7 @@ struct ccp_ctx { struct ccp_des3_ctx des3; struct ccp_sm2_ctx sm2; struct ccp_sm3_ctx sm3; + struct ccp_sm4_ctx sm4; } u; }; @@ -370,5 +386,6 @@ int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); int ccp_register_sm3_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7038be74bbb6..08c8d72aaf79 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -141,6 +141,18 @@ union ccp_function { u16 type:4; u16 rsvd2:1; } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; u16 raw; }; @@ -164,6 +176,12 @@ union ccp_function { #define CCP_SM2_RAND(p) ((p)->sm2.rand) #define CCP_SM2_MODE(p) ((p)->sm2.mode) #define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -672,6 +690,90 @@ static int ccp5_perform_sm3(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + 
CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1150,6 +1252,26 @@ static void ccp5_destroy(struct ccp_device *ccp) } } +static int ccp5_get_trng_mask_param(void) +{ + /* According to spec description for SM4 high secure module, + * which need 64 bytes data, so the initialize times of writing + * mask register must be 16 or a multiple of 16. + * + * The AES algorithem need 48 bytes, so the initialize times will + * be 12 or a multiple of 12. 
+ */ + +#ifdef CONFIG_HYGON_GM + /* for sm4 HS */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return 16; +#endif + + /* for AES HS */ + return 12; +} + static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1160,12 +1282,13 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; + int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < 12; i++) { + for (i = 0; i < len; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1193,6 +1316,8 @@ static const struct ccp_actions ccp5_actions = { .ecc = ccp5_perform_ecc, .sm2 = ccp5_perform_sm2, .sm3 = ccp5_perform_sm3, + .sm4 = ccp5_perform_sm4, + .sm4_ctr = ccp5_perform_sm4_ctr, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2d6c4c404539..92b859dae7c6 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -336,6 +336,8 @@ struct ccp_cmd_queue { unsigned long total_ecc_ops; unsigned long total_sm2_ops; unsigned long total_sm3_ops; + unsigned long total_sm4_ops; + unsigned long total_sm4_ctr_ops; } ____cacheline_aligned; struct ccp_device { @@ -540,6 +542,18 @@ struct ccp_sm3_op { u64 msg_bits; }; +struct ccp_sm4_op { + enum ccp_sm4_action action; + enum ccp_sm4_mode mode; + u32 select; +}; + +struct ccp_sm4_ctr_op { + u32 size; + enum ccp_sm4_action action; + u32 step; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -565,6 +579,8 @@ struct ccp_op { struct ccp_ecc_op ecc; struct ccp_sm2_op sm2; struct ccp_sm3_op sm3; + struct ccp_sm4_op sm4; + struct ccp_sm4_ctr_op sm4_ctr; } u; }; @@ -675,6 +691,8 @@ struct ccp_actions { int (*ecc)(struct ccp_op *); int (*sm2)(struct ccp_op *op); int (*sm3)(struct ccp_op *op); + int (*sm4)(struct ccp_op *op); + int (*sm4_ctr)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 79aa8ff654dd..7495e233446f 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2708,6 +2708,230 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_engine *sm4 = &cmd->u.sm4; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4->src == NULL || sm4->dst == NULL) + return -EINVAL; + + if (sm4->key == NULL || sm4->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4->mode != CCP_SM4_MODE_ECB) { + if (sm4->iv == NULL || sm4->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4.action = sm4->action; + op.u.sm4.mode = sm4->mode; + op.u.sm4.select = sm4->select; + + /* Prepare the input and output data workareas. 
For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4->src) == sg_virt(sm4->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4->dst, sm4->src_len, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + if (sm4->mode != CCP_SM4_MODE_ECB) + ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4 engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm4(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_process_data(&src, &dst, &op); + } + + if (sm4->mode != CCP_SM4_MODE_ECB) { + /* retrieve the SM4 iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + } + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_ctr_engine *sm4_ctr = &cmd->u.sm4_ctr; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4_ctr->src == NULL || sm4_ctr->dst == NULL) + return -EINVAL; + + if (sm4_ctr->key == NULL || sm4_ctr->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4_ctr->iv == NULL || sm4_ctr->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4_ctr.size = sm4_ctr->size; + op.u.sm4_ctr.action = sm4_ctr->action; + op.u.sm4_ctr.step = sm4_ctr->step; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4_ctr->src) == sg_virt(sm4_ctr->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4_ctr->src, sm4_ctr->src_len, + SM4_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4_ctr->dst, + sm4_ctr->src_len, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + ccp_set_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4_ctr->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4_CTR engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, false); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_process_data(&src, &dst, &op); + } + + /* retrieve the SM4_CTR iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2758,6 +2982,12 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM3: ret = ccp_run_sm3_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM4: + ret = ccp_run_sm4_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4_CTR: + ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index cda875cf3c71..8e34f05bc6b1 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -675,6 +675,116 @@ struct ccp_sm3_engine { u64 msg_bits; }; +/***** SM4 engine *****/ +#define SM4_BLOCK_SIZE 16 +#define SM4_KEY_SIZE 16 +#define CCP_SM4_MODE_MASK 0x0F +#define CCP_SM4_MODE_HS_SEL 0x10 + +/** + * ccp_sm4_mode - SM4 operation mode + * + * @CCP_SM4_MODE_ECB: ECB mode + * @CCP_SM4_MODE_CBC: CBC mode + * @CCP_SM4_MODE_OFB: OFB mode + * @CCP_SM4_MODE_CFB: CFB mode + * @CCP_SM4_MODE_CTR: CTR mode + */ +enum ccp_sm4_mode { + CCP_SM4_MODE_ECB = 0, + CCP_SM4_MODE_CBC, + CCP_SM4_MODE_OFB, + CCP_SM4_MODE_CFB, + CCP_SM4_MODE_CTR, + CCP_SM4_MODE__LAST, +}; + +/** + * ccp_sm4_action - SM4 operation + * + * @CCP_SM4_ACTION_DECRYPT: SM4 decrypt operation + * @CCP_SM4_ACTION_ENCRYPT: SM4 encrypt operation + */ +enum ccp_sm4_action { + CCP_SM4_ACTION_DECRYPT = 0, + CCP_SM4_ACTION_ENCRYPT, + CCP_SM4_ACTION__LAST, +}; + +/** + * struct ccp_sm4_engine - CCP SM4 operation + * @mode: SM4 operation mode + * @action: SM4 operation (decrypt/encrypt) + * @select: Indicating that high-secure engine is selected + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - mode, action, select, key, key_len, src, dst, src_len + * - iv, iv_len for any mode 
other than ECB + * - key_len and iv_len must be 16B + * - src_len must be multiple of 16B + * - high-secure engine only for ECB and CBC mode + * + * The iv variable is used as both input and output. On completion of the + * SM4 operation the new IV overwrites the old IV. + */ +struct ccp_sm4_engine { + enum ccp_sm4_mode mode; + enum ccp_sm4_action action; + u32 select; /* Indicating that high-secure engine is selected */ + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + +/***** SM4_CTR engine *****/ +/** + * struct ccp_sm4_ctr_engine - CCP SM4_CTR operation + * @action: SM4_CTR operation (decrypt/encrypt) + * @size: counter bit size + * @step: counter increase step + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, size, step, key, key_len, iv, iv_len, src, dst, src_len + * - key_len and iv_len must be 16B + * + * The iv variable is used as both input and output. On completion of the + * SM4_CTR operation the new IV overwrites the old IV. + */ +struct ccp_sm4_ctr_engine { + enum ccp_sm4_action action; + u32 size; + u32 step; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + /** * ccp_engine - CCP operation identifiers * @@ -700,6 +810,8 @@ enum ccp_engine { CCP_ENGINE_ECC, CCP_ENGINE_SM2 = 8, /* fixed value */ CCP_ENGINE_SM3, + CCP_ENGINE_SM4, + CCP_ENGINE_SM4_CTR, CCP_ENGINE__LAST, }; @@ -750,6 +862,8 @@ struct ccp_cmd { struct ccp_ecc_engine ecc; struct ccp_sm2_engine sm2; struct ccp_sm3_engine sm3; + struct ccp_sm4_engine sm4; + struct ccp_sm4_ctr_engine sm4_ctr; } u; /* Completion callback support */ -- Gitee From 37942349f7ac0bf120c8c66fde12fb8b5439e63f Mon Sep 17 00:00:00 2001 From: yangdepei Date: Tue, 19 Mar 2024 20:13:22 +0800 Subject: [PATCH 438/953] anolis: bugfix: crypto: ccp: fix sm2 not return due to wrong complete callback parameter ANBZ: #8582 the complete callback 'crypto_req_done' has changed its input parameter, we need update input in ccp-crypto implement. 
Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index fbf1c5e85fce..5d39842de3fd 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -645,7 +645,7 @@ static void ccp_sm2_enc_compute(struct work_struct *work) crypto_free_shash(shash); e_complete: - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static void ccp_sm2_enc_lp(struct work_struct *work) @@ -671,7 +671,7 @@ static void ccp_sm2_enc_lp(struct work_struct *work) ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); if (ret != -EBUSY && ret != -EINPROGRESS) - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static int ccp_sm2_encrypt(struct akcipher_request *req) @@ -748,7 +748,7 @@ static void ccp_sm2_dec_compute(struct work_struct *work) /* clear private key, plain, and dC1 */ memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); memset(dst, 0, CCP_SM2_DST_SIZE); - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static int ccp_sm2_decrypt(struct akcipher_request *req) -- Gitee From 8b468e1aac423eeef39841bec10940761405d4d5 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 20:02:26 +0800 Subject: [PATCH 439/953] anolis: bugfix: crypto: ccp: It prompt ILLEGAL_MEM_ADDR when using PSPCCP. ANBZ: #8582 ccp_find_lsb_regions check from vq_1 but status value start from vq_0. Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 08c8d72aaf79..2fc4f08698df 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -783,6 +783,7 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. */ + status >>= LSB_REGION_WIDTH; for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); -- Gitee From a639541a6b0690a5f077a3c42e15e1d427a1c29e Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 20:40:53 +0800 Subject: [PATCH 440/953] anolis: bugfix: crypto: ccp: Only handle interrupts by completion, eliminating by empty queue. ANBZ: #8582 fix the repetitive interrupt (INT_COMPLETION & INT_EMPTY_QUEUE) in one cmd process. 
Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2fc4f08698df..2179da3c9483 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -935,7 +935,7 @@ static void ccp5_irq_bh(unsigned long data) status = ioread32(cmd_q->reg_interrupt_status); - if (status) { + if (status & SUPPORTED_INTERRUPTS) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); -- Gitee From 2daf0cb8aa02e04b1db1729b9180403b86165f7d Mon Sep 17 00:00:00 2001 From: Xiangyu Xu Date: Mon, 22 Aug 2022 10:47:25 +0800 Subject: [PATCH 441/953] anolis: bugfix: crypto: ccp: Fix a problem that vq thread may stuck when do multi process test. ANBZ: #8582 Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2179da3c9483..0a304b0ce99d 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -944,10 +944,9 @@ static void ccp5_irq_bh(unsigned long data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); - cmd_q->int_rcvd = 1; - /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); + cmd_q->int_rcvd = 1; wake_up_interruptible(&cmd_q->int_queue); } } -- Gitee From cf1b9779a038392a3cef9fdcf657a97d3ed8035e Mon Sep 17 00:00:00 2001 From: yangdepei Date: Fri, 17 Nov 2023 16:21:57 +0800 Subject: [PATCH 442/953] anolis: bugfix: fix sm2 test failed in testmgr because of missing DER coding ANBZ: #8582 Add DER coding support for ccp sm2 sign interface. 
Signed-off-by: liulanyi Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/Makefile | 6 +- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 120 ++++++++++++++++++++-- drivers/crypto/ccp/ccp_sm2_sign.asn1 | 4 + 3 files changed, 121 insertions(+), 9 deletions(-) create mode 100644 drivers/crypto/ccp/ccp_sm2_sign.asn1 diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 79a764bb11e7..94c673805325 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -25,6 +25,10 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +$(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h +$(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h + ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ ccp-crypto-sm3-hygon.o \ - ccp-crypto-sm4-hygon.o + ccp-crypto-sm4-hygon.o \ + ccp_sm2_sign.asn1.o diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index 5d39842de3fd..a83737f56d4e 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -18,6 +18,7 @@ #include #include "ccp-crypto.h" +#include "ccp_sm2_sign.asn1.h" static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, @@ -100,6 +101,47 @@ struct ccp_sm2_dst { u8 result_t[CCP_SM2_OPERAND_LEN]; }; +struct sm2_signature_ctx { + const u8 *sig_r; + const u8 *sig_s; + size_t r_len; + size_t s_len; +}; + +int ccp_sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_r = value; + sig->r_len = vlen; + + if (!sig->sig_r) + return -ENOMEM; + + return 0; +} + +int ccp_sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_s = value; + sig->s_len = vlen; + + if (!sig->sig_s) + return -ENOMEM; + + return 0; +} + static bool ccp_sm2_is_zero(const u64 *data, u32 count) { u32 i; @@ -449,11 +491,21 @@ static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; struct ccp_sm2_req_ctx *rctx = NULL; + const unsigned char *cflag = (const unsigned char *)key; int ret; - if (!key || keylen != CCP_SM2_PUBLIC_KEY_LEN) + if (!key || keylen < CCP_SM2_PUBLIC_KEY_LEN) return -EINVAL; + /* When the length of sm2 public key is 65, + * content of key should be 04 || X || Y, from GM/T0009-2012. 
+ */ + if (keylen > CCP_SM2_PUBLIC_KEY_LEN) { + if (*cflag != 0x04) + return -EINVAL; + key = key + 1; + } + /* check whether public key is valid */ rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); if (!rctx) @@ -830,21 +882,71 @@ static int ccp_sm2_verify(struct akcipher_request *req) struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int siglen; int nents; int ret; + struct sm2_signature_ctx sig; + unsigned char *buffer; if (!ctx->u.sm2.pub_key_len) return -ENOKEY; - if (req->src_len != CCP_SM2_OPERAND_LEN * 3) - return -EINVAL; + if (req->src_len == CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with non-encoded signature from user space */ + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; - nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); - if (nents < 0) - return -EINVAL; + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; + } else if (req->src_len < CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with usage like sm2 test of testmgr */ + siglen = req->src_len; + if (req->dst_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + } else { + /* deal with der encoding signature from user space */ + siglen = req->src_len - CCP_SM2_OPERAND_LEN; + } + + buffer = kmalloc(siglen + CCP_SM2_OPERAND_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, + sg_nents_for_len(req->src, siglen + CCP_SM2_OPERAND_LEN), + buffer, siglen + CCP_SM2_OPERAND_LEN, 0); + + sig.sig_r = NULL; + sig.sig_s = NULL; + ret = asn1_ber_decoder(&ccp_sm2_sign_decoder, &sig, + buffer, siglen); + + if (ret) + goto error; + + memcpy(src->operand_e, buffer + siglen, CCP_SM2_OPERAND_LEN); + + if (sig.r_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_d, sig.sig_r + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_d, sig.sig_r, CCP_SM2_OPERAND_LEN); + + if (sig.s_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_k, sig.sig_s + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_k, sig.sig_s, CCP_SM2_OPERAND_LEN); - scatterwalk_map_and_copy(src->operand_e, req->src, 0, - CCP_SM2_OPERAND_LEN * 3, 0); memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); @@ -854,6 +956,8 @@ static int ccp_sm2_verify(struct akcipher_request *req) ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, CCP_SM2_MODE_VERIFY, 0); +error: + kfree(buffer); return ret; } diff --git a/drivers/crypto/ccp/ccp_sm2_sign.asn1 b/drivers/crypto/ccp/ccp_sm2_sign.asn1 new file mode 100644 index 000000000000..7e83e6799cb4 --- /dev/null +++ b/drivers/crypto/ccp/ccp_sm2_sign.asn1 @@ -0,0 +1,4 @@ +Sm2Signature ::= SEQUENCE { + sig_r INTEGER ({ ccp_sm2_get_signature_r }), + sig_s INTEGER ({ ccp_sm2_get_signature_s }) +} -- Gitee From a12bbb26fe1fb745e642eefb03a224f1d4af5887 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Mon, 18 Mar 2024 14:53:46 +0800 Subject: [PATCH 443/953] anolis: bugfix: crypto: ccp: fix bug that SM2 encryption of long data causes kernel crash ANBZ: #8582 Signed-off-by: liulanyi Signed-off-by: yangdepei 
Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index a83737f56d4e..25c9a49f7d22 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -670,8 +670,6 @@ static void ccp_sm2_enc_compute(struct work_struct *work) goto e_complete; } - scatterwalk_map_and_copy(rctx->src, req->src, 0, req->src_len, 0); - /* C2 = M ^ t */ ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, req->dst, CCP_SM2_ENCRYPT_EXT_LEN, -- Gitee From 812958ca6cc8a3377450f0721e58a6476576bae0 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sun, 8 May 2022 14:19:29 +0800 Subject: [PATCH 444/953] anolis: newfeature: crypto: ccp: Modify value of COMMANDS_PER_QUEUE from 16 to 8192. ANBZ: #8582 Change the command queue size to 8192 to support multiple commands in the Hygon CCP. Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 32 ++++++++++++++++++++++++-------- drivers/crypto/ccp/ccp-dev.h | 11 +++++++---- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 0a304b0ce99d..62e07c9eb793 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -227,6 +227,17 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) +static inline unsigned int command_per_queue(void) +{ +#ifdef CONFIG_HYGON_GM + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+ HYGON_COMMANDS_PER_QUEUE : + COMMANDS_PER_QUEUE; +#else + return COMMANDS_PER_QUEUE; +#endif +} + static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; @@ -240,15 +251,16 @@ static inline u32 high_address(unsigned long addr) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; - u32 head_lo, queue_start; + u32 head_lo, queue_start, command_per_q; + command_per_q = command_per_queue(); queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); - n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + n = head_idx + command_per_q - cmd_q->qidx - 1; - return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ + return n % command_per_q; /* Always one unused spot */ } static int ccp5_do_cmd(struct ccp5_desc *desc, @@ -256,10 +268,11 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, { __le32 *mP; u32 *dP; - u32 tail; + u32 tail, command_per_q; int i; int ret = 0; + command_per_q = command_per_queue(); cmd_q->total_ops++; if (CCP5_CMD_SOC(desc)) { @@ -273,7 +286,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; /* The data used by this command must be flushed to memory */ wmb(); @@ -974,7 +987,7 @@ static int ccp5_init(struct ccp_device *ccp) char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; - u32 status_lo, status_hi; + u32 status_lo, status_hi, command_per_q, queue_size_val; int ret; /* Find available queues */ @@ -991,6 +1004,9 @@ static int ccp5_init(struct ccp_device *ccp) return 1; } + command_per_q = command_per_queue(); + queue_size_val = QUEUE_SIZE_VAL(command_per_q); + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; @@ -1017,7 +1033,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); - cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); + cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); @@ -1104,7 +1120,7 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); - cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 92b859dae7c6..5dec502f3c5d 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -99,12 +99,15 @@ #define CMD5_Q_MEM_LOCATION 0x4 #define CMD5_Q_SIZE 0x1F #define CMD5_Q_SHIFT 3 + #define COMMANDS_PER_QUEUE 16 -#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ - CMD5_Q_SIZE) -#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) +#define HYGON_COMMANDS_PER_QUEUE 8192 + #define Q_DESC_SIZE sizeof(struct ccp5_desc) -#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) +#define Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) +#define Q_SIZE(c, n) ((c)*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 -- Gitee From 2b0a8af73bcc68c5c39235b84a66aec9f5f53105 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sun, 8 May 2022 18:57:08 
+0800 Subject: [PATCH 445/953] anolis: newfeature: crypto: ccp: Process multiple VQ commands once for SM3 ccp. ANBZ: #8582 optimize sm3 processing performance, the physical page of each sg list corresponds to a CCP cmd, all cmd prepared, then start ccp. Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 73 ++++++++++++++++++++++++++++++++- drivers/crypto/ccp/ccp-dev.h | 1 + drivers/crypto/ccp/ccp-ops.c | 19 +++++++++ 3 files changed, 92 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 62e07c9eb793..b14d18162ebc 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -263,6 +263,76 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) return n % command_per_q; /* Always one unused spot */ } +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + int i; + u32 command_per_q; + + command_per_q = command_per_queue(); + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; + + mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + static int ccp5_do_cmd(struct ccp5_desc *desc, struct ccp_cmd_queue *cmd_q) { @@ -700,7 +770,7 @@ static int ccp5_perform_sm3(struct ccp_op *op) CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); } - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp5_perform_sm4(struct ccp_op *op) @@ -1334,6 +1404,7 @@ static const struct ccp_actions ccp5_actions = { .sm3 = ccp5_perform_sm3, .sm4 = ccp5_perform_sm4, .sm4_ctr = ccp5_perform_sm4_ctr, + .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 5dec502f3c5d..e1aa68f4044c 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -696,6 +696,7 @@ struct ccp_actions { int (*sm3)(struct ccp_op *op); int (*sm4)(struct ccp_op *op); int (*sm4_ctr)(struct ccp_op *op); + int (*run_cmd)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); 
void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 7495e233446f..774c15b1d81e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2624,12 +2624,25 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left && sm3->final) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm3(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + ccp_process_data(&src, NULL, &op); } } else { @@ -2649,6 +2662,12 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) cmd->engine_error = cmd_q->cmd_error; goto e_data; } + + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } } ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, -- Gitee From e7791ceb137c09a1c8158cd469a1a43ba3ff0418 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Mon, 9 May 2022 07:02:32 +0800 Subject: [PATCH 446/953] anolis: newfeature: crypto: ccp: Process multiple VQ commands once for SM4/SM4-CTR ccp. ANBZ: #8582 optimize sm4 processing performance by starting ccp after all cmd has been prepared Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 4 ++-- drivers/crypto/ccp/ccp-ops.c | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index b14d18162ebc..e5c129c3e049 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -812,7 +812,7 @@ static int ccp5_perform_sm4(struct ccp_op *op) CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp5_perform_sm4_ctr(struct ccp_op *op) @@ -854,7 +854,7 @@ static int ccp5_perform_sm4_ctr(struct ccp_op *op) CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 774c15b1d81e..794ad6d6eb5b 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2807,12 +2807,25 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm4(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_iv_key; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + ccp_process_data(&src, &dst, &op); } @@ -2918,12 +2931,25 @@ static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; 
+ else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_iv_key; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + ccp_process_data(&src, &dst, &op); } -- Gitee From 288f76905b40bacc998e81d04bd134ee44ca2703 Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Tue, 19 Mar 2024 16:22:11 +0800 Subject: [PATCH 447/953] anolis: perf mem/c2c: Add load store event mapping for Hygon ANBZ: #8643 Hygon support perf mem/c2c as AMD Zen CPU does, and use "ibs_op//" event as mem-ldst event. Signed-off-by: Qi Liu Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2950 --- tools/perf/Documentation/perf-c2c.txt | 8 ++++---- tools/perf/arch/x86/util/env.c | 15 +++++++++++++++ tools/perf/arch/x86/util/env.h | 1 + tools/perf/arch/x86/util/mem-events.c | 2 +- tools/perf/arch/x86/util/pmu.c | 2 +- 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt index 856f0dfb8e5a..192ab0415ee9 100644 --- a/tools/perf/Documentation/perf-c2c.txt +++ b/tools/perf/Documentation/perf-c2c.txt @@ -21,9 +21,9 @@ you to track down the cacheline contentions. On Intel, the tool is based on load latency and precise store facility events provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling -with thresholding feature. On AMD, the tool uses IBS op pmu (due to hardware -limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses SPE to -sample load and store operations, therefore hardware and kernel support is +with thresholding feature. On AMD and Hygon, the tool uses IBS op pmu (due to +hardware limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses +SPE to sample load and store operations, therefore hardware and kernel support is required. See linkperf:perf-arm-spe[1] for a setup guide. Due to the statistical nature of Arm SPE sampling, not every memory operation will be sampled. @@ -152,7 +152,7 @@ default on Intel: cpu/mem-loads,ldlat=30/P cpu/mem-stores/P -following on AMD: +following on AMD and Hygon: ibs_op// diff --git a/tools/perf/arch/x86/util/env.c b/tools/perf/arch/x86/util/env.c index 3e537ffb1353..f1de12d20b2a 100644 --- a/tools/perf/arch/x86/util/env.c +++ b/tools/perf/arch/x86/util/env.c @@ -17,3 +17,18 @@ bool x86__is_amd_cpu(void) ret: return is_amd >= 1 ? true : false; } + +bool x86__is_hygon_cpu(void) +{ + struct perf_env env = { .total_mem = 0, }; + static int is_hygon; /* 0: Uninitialized, 1: Yes, -1: No */ + + if (is_hygon) + goto ret; + + perf_env__cpuid(&env); + is_hygon = env.cpuid && strstarts(env.cpuid, "HygonGenuine") ? 1 : -1; + perf_env__exit(&env); +ret: + return is_hygon >= 1 ? 
true : false; +} diff --git a/tools/perf/arch/x86/util/env.h b/tools/perf/arch/x86/util/env.h index d78f080b6b3f..904d5e228360 100644 --- a/tools/perf/arch/x86/util/env.h +++ b/tools/perf/arch/x86/util/env.h @@ -3,5 +3,6 @@ #define _X86_ENV_H bool x86__is_amd_cpu(void); +bool x86__is_hygon_cpu(void); #endif /* _X86_ENV_H */ diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c index 191b372f9a2d..f8d9aecbf2f2 100644 --- a/tools/perf/arch/x86/util/mem-events.c +++ b/tools/perf/arch/x86/util/mem-events.c @@ -33,7 +33,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i) if (i >= PERF_MEM_EVENTS__MAX) return NULL; - if (x86__is_amd_cpu()) + if (x86__is_amd_cpu() || x86__is_hygon_cpu()) return &perf_mem_events_amd[i]; return &perf_mem_events_intel[i]; diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c index f428cffb0378..0af256236466 100644 --- a/tools/perf/arch/x86/util/pmu.c +++ b/tools/perf/arch/x86/util/pmu.c @@ -174,7 +174,7 @@ const char *pmu_find_alias_name(const char *name) int perf_pmus__num_mem_pmus(void) { /* AMD uses IBS OP pmu and not a core PMU for perf mem/c2c */ - if (x86__is_amd_cpu()) + if (x86__is_amd_cpu() || x86__is_hygon_cpu()) return 1; /* Intel uses core pmus for perf mem/c2c */ -- Gitee From a428fbb23eb64a83d454e239e697f0d371ae860b Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:21 +0800 Subject: [PATCH 448/953] firmware: arm_sdei: add interrupt binding api ANBZ: #8621 commit 394a8507f8556ca7f430007e3f20d15e19f7bdbc openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- This patch adds an interrupt binding API function which returns the bound event number.
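For illustration, a minimal usage sketch of the new call (MY_HWIRQ and my_handler are placeholders, and the error handling is illustrative; a negative return value is an SDEI error code from firmware):

static int my_bind_and_register(void)
{
	int event = sdei_api_event_interrupt_bind(MY_HWIRQ);

	if (event < 0)	/* firmware rejected the bind request */
		return event;

	return sdei_event_register(event, my_handler, NULL);
}

The SDEI watchdog patches later in this series follow exactly this pattern to bind the secure physical timer interrupt.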
Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Signed-off-by: Yang Yingliang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- drivers/firmware/arm_sdei.c | 10 ++++++++++ include/linux/arm_sdei.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 285fe7ad490d..9171a9d05140 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -188,6 +188,16 @@ int sdei_api_event_context(u32 query, u64 *result) } NOKPROBE_SYMBOL(sdei_api_event_context); +int sdei_api_event_interrupt_bind(int hwirq) +{ + u64 event_number; + + invoke_sdei_fn(SDEI_1_0_FN_SDEI_INTERRUPT_BIND, hwirq, 0, 0, 0, 0, + &event_number); + + return (int)event_number; +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 255701e1251b..bf92dc48fbea 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -36,6 +36,7 @@ int sdei_event_unregister(u32 event_num); int sdei_event_enable(u32 event_num); int sdei_event_disable(u32 event_num); +int sdei_api_event_interrupt_bind(int hwirq); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, -- Gitee From f5d4c9a89cc3839be0d28fd9e042f3069b6f34ee Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:22 +0800 Subject: [PATCH 449/953] firmware: arm_sdei: make 'sdei_api_event_disable/enable' public ANBZ: #8621 commit 2b61c8803b5ae428fa7408808c999d2a6e3f5c24 openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- The NMI watchdog needs to enable the event for each core individually, but the existing public API 'sdei_event_enable' enables the event on all cores when the event type is private.
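A private SDEI event is per-CPU state: enabling it on one core does not enable it on the others, which is why the raw per-call helper has to be exported. A minimal sketch of per-CPU enabling (my_enable_event is a placeholder; only sdei_api_event_enable comes from this patch):

static void my_enable_event(void *arg)
{
	u32 event_num = *(u32 *)arg;

	/* runs on the local CPU and enables the private event there */
	if (sdei_api_event_enable(event_num))
		pr_err("SDEI enable failed on cpu%d\n", smp_processor_id());
}

/* caller: on_each_cpu(my_enable_event, &event_num, true); */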
Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Reviewed-by: Hanjun Guo Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- drivers/firmware/arm_sdei.c | 4 ++-- include/linux/arm_sdei.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 9171a9d05140..36600024736d 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -389,7 +389,7 @@ static int sdei_platform_reset(void) return err; } -static int sdei_api_event_enable(u32 event_num) +int sdei_api_event_enable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0, 0, NULL); @@ -436,7 +436,7 @@ int sdei_event_enable(u32 event_num) return err; } -static int sdei_api_event_disable(u32 event_num) +int sdei_api_event_disable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0, 0, 0, NULL); diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index bf92dc48fbea..f5f6ba7a1d50 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -37,6 +37,8 @@ int sdei_event_unregister(u32 event_num); int sdei_event_enable(u32 event_num); int sdei_event_disable(u32 event_num); int sdei_api_event_interrupt_bind(int hwirq); +int sdei_api_event_disable(u32 event_num); +int sdei_api_event_enable(u32 event_num); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, -- Gitee From b9d7270f7d45c78937d8d788423c49c9abdf0de9 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:23 +0800 Subject: [PATCH 450/953] lockup_detector: init lockup detector after all the init_calls ANBZ: #8621 commit 078428464b38d44898d9aa09dd6b66ebc681ae36 openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- We call 'sdei_init' as 'subsys_initcall_sync', so the lockup detector needs to be initialised after sdei_init. The trade-off of this patch is that hard lockups occurring during the init calls can no longer be detected. Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Reviewed-by: Hanjun Guo Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- init/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/init/main.c b/init/main.c index 9e6ab6d593bd..d964bc856943 100644 --- a/init/main.c +++ b/init/main.c @@ -1540,7 +1540,6 @@ static noinline void __init kernel_init_freeable(void) rcu_init_tasks_generic(); do_pre_smp_initcalls(); - lockup_detector_init(); smp_init(); sched_init_smp(); @@ -1551,6 +1550,8 @@ static noinline void __init kernel_init_freeable(void) do_basic_setup(); + lockup_detector_init(); + kunit_run_all_tests(); wait_for_initramfs(); -- Gitee From 71f61a48e34c62f0cf2629fb25ea86515c92e240 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:24 +0800 Subject: [PATCH 451/953] watchdog: add nmi_watchdog support for arm64 based on SDEI ANBZ: #8621 commit 06f3c8d593243b14ddfd020fb835d8d1f196cccb openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- Add nmi_watchdog support for arm64 based on SDEI.
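One common way to sanity-check a hard-lockup detector such as this one is a throwaway test module that spins with interrupts disabled on one CPU: the watchdog hrtimer can no longer fire there, so only an NMI-class event such as the SDEI secure timer can still observe the CPU. An illustrative sketch only, never to be loaded on a production system:

#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/processor.h>

static int __init lockup_test_init(void)
{
	local_irq_disable();	/* nothing short of an NMI gets in now */
	for (;;)
		cpu_relax();
	return 0;		/* never reached */
}
module_init(lockup_test_init);
MODULE_LICENSE("GPL");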
Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Reviewed-by: Hanjun Guo Conflicts: lib/Kconfig.debug arch/arm64/kernel/watchdog_sdei.c Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/Makefile | 1 + arch/arm64/kernel/watchdog_sdei.c | 112 ++++++++++++++++++++++++++++++ lib/Kconfig.debug | 9 +++ 3 files changed, 122 insertions(+) create mode 100644 arch/arm64/kernel/watchdog_sdei.c diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d95b3d6b471a..d48aa807dcce 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -68,6 +68,7 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o +obj-$(CONFIG_SDEI_WATCHDOG) += watchdog_sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c new file mode 100644 index 000000000000..8f9eb838b969 --- /dev/null +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Detect hard lockups on a system + * + * Note: Most of this code is borrowed heavily from the perf hardlockup + * detector, so thanks to Don for the initial implementation. + */ + +#define pr_fmt(fmt) "SDEI NMI watchdog: " fmt + +#include +#include +#include +#include +#include +#include +#include + +/* We use the secure physical timer as SDEI NMI watchdog timer */ +#define SDEI_NMI_WATCHDOG_HWIRQ 29 + +static int sdei_watchdog_event_num; +static bool disable_sdei_nmi_watchdog; +static bool sdei_watchdog_registered; + +void watchdog_hardlockup_enable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return; + + /* Skip the first hardlockup check incase BIOS didn't init the + * secure timer correctly */ + watchdog_hardlockup_touch_cpu(cpu); + ret = sdei_api_event_enable(sdei_watchdog_event_num); + if (ret) { + pr_err("Enable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); + } +} + +void watchdog_hardlockup_disable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return; + + ret = sdei_api_event_disable(sdei_watchdog_event_num); + if (ret) + pr_err("Disable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); +} + +static int sdei_watchdog_callback(u32 event, + struct pt_regs *regs, void *arg) +{ + watchdog_hardlockup_check(smp_processor_id(), regs); + + return 0; +} + +static void sdei_nmi_watchdog_bind(void *data) +{ + int ret; + + ret = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (ret < 0) + pr_err("SDEI bind failed on cpu%d, return %d\n", + smp_processor_id(), ret); +} + +static int __init disable_sdei_nmi_watchdog_setup(char *str) +{ + disable_sdei_nmi_watchdog = true; + return 1; +} +__setup("disable_sdei_nmi_watchdog", disable_sdei_nmi_watchdog_setup); + +int __init watchdog_hardlockup_probe(void) +{ + int ret; + + if (disable_sdei_nmi_watchdog) + return -EINVAL; + + if (!is_hyp_mode_available()) { + pr_err("Disable SDEI NMI Watchdog in VM\n"); + return -EINVAL; + } + + sdei_watchdog_event_num = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (sdei_watchdog_event_num < 0) { + pr_err("Bind interrupt failed. 
Firmware may not support SDEI !\n"); + return sdei_watchdog_event_num; + } + + on_each_cpu(sdei_nmi_watchdog_bind, NULL, true); + + ret = sdei_event_register(sdei_watchdog_event_num, + sdei_watchdog_callback, NULL); + if (ret) { + pr_err("SDEI Watchdog register callback failed\n"); + return ret; + } + + sdei_watchdog_registered = true; + pr_info("SDEI Watchdog registered successfully\n"); + + return 0; +} diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1612d79eb33e..a6b4a18190da 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1045,6 +1045,12 @@ config HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on SMP default y +config SDEI_WATCHDOG + bool "SDEI NMI Watchdog support" + depends on ARM_SDE_INTERFACE + depends on HARDLOCKUP_DETECTOR + select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER + # # Global switch whether to build a hardlockup detector at all. It is available # only when the architecture supports at least one implementation. There are @@ -1061,6 +1067,7 @@ config HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_BUDDY || HAVE_HARDLOCKUP_DETECTOR_ARCH imply HARDLOCKUP_DETECTOR_PERF imply HARDLOCKUP_DETECTOR_BUDDY + imply SDEI_WATCHDOG imply HARDLOCKUP_DETECTOR_ARCH select LOCKUP_DETECTOR @@ -1097,6 +1104,7 @@ config HARDLOCKUP_DETECTOR_PERF depends on HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF && !HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH + depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_BUDDY @@ -1105,6 +1113,7 @@ config HARDLOCKUP_DETECTOR_BUDDY depends on HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_PERF || HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH + depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_ARCH -- Gitee From e92ad16b738dce945ddcf65c986fc4f4b2f70abc Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:25 +0800 Subject: [PATCH 452/953] sdei_watchdog: clear EOI of the secure timer before kdump ANBZ: #8621 commit 490288e8343f9914f850f13a00ff98f8b878fdcd openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- When we panic in a hardlockup, the secure timer interrupt remains active because firmware only clears its EOI after the dispatch is completed. This causes the arm_arch_timer interrupt to fail to trigger in the second kernel. This patch adds a new SMC helper to clear the EOI of a given interrupt and clears the EOI of the secure timer before booting the second kernel.
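Both SDEI_CLEAR_EOI here and the timer-period call in the next patch lie outside the calls defined by the SDEI 1.0 specification (the standard defines in the uapi header end at SDEI_1_0_FN(0x12)), so they depend on matching vendor firmware. Assuming the standard macro from include/uapi/linux/arm_sdei.h, where SDEI_1_0_FN(n) expands to the SMC64 standard secure-service base 0xC4000020 plus n, the new function ID works out to:

	SDEI_1_0_FN_SDEI_CLEAR_EOI = SDEI_1_0_FN(0x18)
	                           = 0xC4000020 + 0x18 = 0xC4000038

A BIOS without this extension will simply fail the SMC, which is why the helper is only invoked when the SDEI watchdog actually registered.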
Signed-off-by: Xiongfeng Wang Reviewed-by: Hanjun Guo Signed-off-by: Zheng Zengkai Conflicts: include/linux/nmi.h Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/machine_kexec.c | 10 ++++++++++ arch/arm64/kernel/watchdog_sdei.c | 6 ++++++ drivers/firmware/arm_sdei.c | 6 ++++++ include/linux/arm_sdei.h | 1 + include/linux/nmi.h | 6 ++++++ include/uapi/linux/arm_sdei.h | 1 + 6 files changed, 30 insertions(+) diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 078910db77a4..cfa6b0dafc88 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -262,6 +263,15 @@ void machine_crash_shutdown(struct pt_regs *regs) /* shutdown non-crashing cpus */ crash_smp_send_stop(); + /* + * when we panic in hardlockup detected by sdei_watchdog, the secure + * timer interrupt remains activate here because firmware clear eoi + * after dispatch is completed. This will cause arm_arch_timer + * interrupt failed to trigger in the second kernel. So we clear eoi + * of the secure timer before booting the second kernel. + */ + sdei_watchdog_clear_eoi(); + /* for crashing cpu */ crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 8f9eb838b969..7ebf6b5ab237 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -78,6 +78,12 @@ static int __init disable_sdei_nmi_watchdog_setup(char *str) } __setup("disable_sdei_nmi_watchdog", disable_sdei_nmi_watchdog_setup); +void sdei_watchdog_clear_eoi(void) +{ + if (sdei_watchdog_registered) + sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ); +} + int __init watchdog_hardlockup_probe(void) { int ret; diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 36600024736d..5e229d3eb552 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -198,6 +198,12 @@ int sdei_api_event_interrupt_bind(int hwirq) return (int)event_number; } +int sdei_api_clear_eoi(int hwirq) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SDEI_CLEAR_EOI, hwirq, 0, 0, 0, 0, + NULL); +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index f5f6ba7a1d50..6381537e7015 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -39,6 +39,7 @@ int sdei_event_disable(u32 event_num); int sdei_api_event_interrupt_bind(int hwirq); int sdei_api_event_disable(u32 event_num); int sdei_api_event_enable(u32 event_num); +int sdei_api_clear_eoi(int hwirq); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, diff --git a/include/linux/nmi.h b/include/linux/nmi.h index e92e378df000..404c78e04a05 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -235,4 +235,10 @@ static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {} static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} #endif +#ifdef CONFIG_SDEI_WATCHDOG +void sdei_watchdog_clear_eoi(void); +#else +static inline void sdei_watchdog_clear_eoi(void) { } +#endif + #endif diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h index af0630ba5437..1187b1b49c87 
100644 --- a/include/uapi/linux/arm_sdei.h +++ b/include/uapi/linux/arm_sdei.h @@ -24,6 +24,7 @@ #define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E) #define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11) #define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12) +#define SDEI_1_0_FN_SDEI_CLEAR_EOI SDEI_1_0_FN(0x18) #define SDEI_VERSION_MAJOR_SHIFT 48 #define SDEI_VERSION_MAJOR_MASK 0x7fff -- Gitee From b95656e0ad2caac85452530717fac355f238a832 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:26 +0800 Subject: [PATCH 453/953] sdei_watchdog: set secure timer period based on 'watchdog_thresh' ANBZ: #8621 commit 7fc503364616a91334aa623a97949e2573b87d5d openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- The period of the secure timer is set to 3s by the BIOS, which means the secure timer interrupt triggers every 3 seconds. To further decrease the NMI watchdog's effect on performance, this patch sets the period of the secure timer based on 'watchdog_thresh'. This variable is initialized to 10s. The period can also be set at runtime by modifying '/proc/sys/kernel/watchdog_thresh'. Signed-off-by: Xiongfeng Wang Reviewed-by: Hanjun Guo Conflicts: arch/arm64/kernel/watchdog_sdei.c (context conflict) Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 13 +++++++++++++ drivers/firmware/arm_sdei.c | 6 ++++++ include/linux/arm_sdei.h | 1 + include/uapi/linux/arm_sdei.h | 1 + 4 files changed, 21 insertions(+) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 7ebf6b5ab237..758e20eadc31 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -33,6 +33,8 @@ void watchdog_hardlockup_enable(unsigned int cpu) /* Skip the first hardlockup check incase BIOS didn't init the * secure timer correctly */ watchdog_hardlockup_touch_cpu(cpu); + sdei_api_set_secure_timer_period(watchdog_thresh); + ret = sdei_api_event_enable(sdei_watchdog_event_num); if (ret) { @@ -102,6 +104,17 @@ int __init watchdog_hardlockup_probe(void) return sdei_watchdog_event_num; } + /* + * After we introduced 'sdei_api_set_secure_timer_period', we disselect + * 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP'. So we need to make sure that + * firmware can set the period of the secure timer and the timer + * interrupt doesn't trigger too soon.
+ */ + if (sdei_api_set_secure_timer_period(watchdog_thresh)) { + pr_err("Firmware doesn't support setting the secure timer period, please update your BIOS !\n"); + return -EINVAL; + } + on_each_cpu(sdei_nmi_watchdog_bind, NULL, true); ret = sdei_event_register(sdei_watchdog_event_num, diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 5e229d3eb552..0f7ef69071c0 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -204,6 +204,12 @@ int sdei_api_clear_eoi(int hwirq) NULL); } +int sdei_api_set_secure_timer_period(int sec) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD, sec, 0, 0, 0, + 0, NULL); +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 6381537e7015..28e247dd5773 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -40,6 +40,7 @@ int sdei_api_event_interrupt_bind(int hwirq); int sdei_api_event_disable(u32 event_num); int sdei_api_event_enable(u32 event_num); int sdei_api_clear_eoi(int hwirq); +int sdei_api_set_secure_timer_period(int sec); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h index 1187b1b49c87..a5375679dd50 100644 --- a/include/uapi/linux/arm_sdei.h +++ b/include/uapi/linux/arm_sdei.h @@ -25,6 +25,7 @@ #define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11) #define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12) #define SDEI_1_0_FN_SDEI_CLEAR_EOI SDEI_1_0_FN(0x18) +#define SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD SDEI_1_0_FN(0x19) #define SDEI_VERSION_MAJOR_SHIFT 48 #define SDEI_VERSION_MAJOR_MASK 0x7fff -- Gitee From da25c8523cf7460e5712bd97d69e61f79c8548db Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:27 +0800 Subject: [PATCH 454/953] sdei_watchdog: avoid possible false hardlockup ANBZ: #8621 commit cac525db2b93ba824cd861b66ac7a374c3ebb069 openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- Firmware may not trigger the SDEI event at the required frequency; the event may be triggered too soon, which causes a false hardlockup in the kernel. Check the time stamp in sdei_watchdog_callback and skip the hardlockup check if it is invoked too soon.
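A worked example of the threshold added in the hunk below, assuming the default watchdog_thresh of 10 seconds:

	min_delta = watchdog_thresh * NSEC_PER_SEC * 4 / 5
	          = 10 * 1000000000 * 4 / 5
	          = 8000000000 ns = 8 s

An SDEI tick arriving less than 8 seconds after the previous one on that CPU is logged as a firmware bug and ignored rather than being allowed to declare a false hard lockup, while a tick at the nominal 10 s period passes the check with margin.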
Signed-off-by: Xiongfeng Wang Reviewed-by: Hanjun Guo Conflicts: arch/arm64/kernel/watchdog_sdei.c (context conflict) Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 758e20eadc31..4a143a598eef 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -22,6 +22,7 @@ static int sdei_watchdog_event_num; static bool disable_sdei_nmi_watchdog; static bool sdei_watchdog_registered; +static DEFINE_PER_CPU(ktime_t, last_check_time); void watchdog_hardlockup_enable(unsigned int cpu) { @@ -34,6 +35,7 @@ void watchdog_hardlockup_enable(unsigned int cpu) * secure timer correctly */ watchdog_hardlockup_touch_cpu(cpu); sdei_api_set_secure_timer_period(watchdog_thresh); + __this_cpu_write(last_check_time, ktime_get_mono_fast_ns()); ret = sdei_api_event_enable(sdei_watchdog_event_num); if (ret) { @@ -58,6 +60,22 @@ void watchdog_hardlockup_disable(unsigned int cpu) static int sdei_watchdog_callback(u32 event, struct pt_regs *regs, void *arg) { + ktime_t delta, now = ktime_get_mono_fast_ns(); + + delta = now - __this_cpu_read(last_check_time); + __this_cpu_write(last_check_time, now); + + /* + * Set delta to 4/5 of the actual watchdog threshold period so the + * hrtimer is guaranteed to fire at least once within the real + * watchdog threshold. + */ + if (delta < watchdog_thresh * (u64)NSEC_PER_SEC * 4 / 5) { + pr_err(FW_BUG "SDEI Watchdog event triggered too soon, " + "time to last check:%lld ns\n", delta); + return 0; + } + watchdog_hardlockup_check(smp_processor_id(), regs); return 0; -- Gitee From 298dc622e0994f9f9b18bbfdef6162fd6a0a2b79 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:28 +0800 Subject: [PATCH 455/953] init: only move down lockup_detector_init() when sdei_watchdog is enabled ANBZ: #8621 commit 1509d06c9c41985ee6b7dd6acbb08d9ee5dcf2b3 openeuler hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- When I enabled CONFIG_DEBUG_PREEMPT and CONFIG_PREEMPT on X86, I got the following call trace: [ 3.341853] BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1 [ 3.344392] caller is debug_smp_processor_id+0x17/0x20 [ 3.344395] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.10.0+ #398 [ 3.344397] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.2-0-g5f4c7b1-prebuilt.qemu-project.org 04/01/2014 [ 3.344399] Call Trace: [ 3.344410] dump_stack+0x60/0x76 [ 3.344412] check_preemption_disabled+0xba/0xc0 [ 3.344415] debug_smp_processor_id+0x17/0x20 [ 3.344422] hardlockup_detector_event_create+0xf/0x60 [ 3.344427] hardlockup_detector_perf_init+0xf/0x41 [ 3.344430] watchdog_nmi_probe+0xe/0x10 [ 3.344432] lockup_detector_init+0x22/0x5b [ 3.344437] kernel_init_freeable+0x20c/0x245 [ 3.344439] ? rest_init+0xd0/0xd0 [ 3.344441] kernel_init+0xe/0x110 [ 3.344446] ret_from_fork+0x22/0x30 This is because sched_init_smp() sets 'current->nr_cpus_allowed' to the number of possible CPUs, so check_preemption_disabled() fails. The issue was introduced by commit a79050434b45, which moved lockup_detector_init() down after do_basic_setup(). Fix it by moving lockup_detector_init() back to its original place when sdei_watchdog is disabled.
There is no problem when sdei_watchdog is enabled because watchdog_nmi_probe() is overridden in 'arch/arm64/kernel/watchdog_sdei.c' in this case. Fixes: a79050434b45 ("lockup_detector: init lockup detector after all the init_calls") Signed-off-by: Xiongfeng Wang Reviewed-by: Wei Li Signed-off-by: Chen Jun Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 2 +- include/linux/nmi.h | 2 ++ init/main.c | 6 +++++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 4a143a598eef..155f36e24699 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -20,7 +20,7 @@ #define SDEI_NMI_WATCHDOG_HWIRQ 29 static int sdei_watchdog_event_num; -static bool disable_sdei_nmi_watchdog; +bool disable_sdei_nmi_watchdog; static bool sdei_watchdog_registered; static DEFINE_PER_CPU(ktime_t, last_check_time); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 404c78e04a05..7bd446acad24 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -237,8 +237,10 @@ static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} #ifdef CONFIG_SDEI_WATCHDOG void sdei_watchdog_clear_eoi(void); +extern bool disable_sdei_nmi_watchdog; #else static inline void sdei_watchdog_clear_eoi(void) { } +#define disable_sdei_nmi_watchdog 1 #endif #endif diff --git a/init/main.c b/init/main.c index d964bc856943..989d197a06ff 100644 --- a/init/main.c +++ b/init/main.c @@ -1540,6 +1540,8 @@ static noinline void __init kernel_init_freeable(void) rcu_init_tasks_generic(); do_pre_smp_initcalls(); + if (disable_sdei_nmi_watchdog) + lockup_detector_init(); smp_init(); sched_init_smp(); @@ -1550,7 +1552,9 @@ static noinline void __init kernel_init_freeable(void) do_basic_setup(); - lockup_detector_init(); + /* sdei_watchdog needs to be initialized after sdei_init */ + if (!disable_sdei_nmi_watchdog) + lockup_detector_init(); kunit_run_all_tests(); -- Gitee From e4f603cc3a25cfa8f03bd794f26eda558839f110 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:29 +0800 Subject: [PATCH 456/953] kprobes/arm64: Blacklist sdei watchdog callback functions ANBZ: #8621 commit 9c2c933d3e9ac36f9f77391fe63ffbb145a566d6 openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- Functions called in the sdei_handler are not allowed to be kprobed, so mark them as NOKPROBE_SYMBOL. There are too many such functions involved in 'watchdog_check_timestamp()'. Luckily, we don't need 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP' now, so just make CONFIG_SDEI_WATCHDOG depend on !CONFIG_HARDLOCKUP_CHECK_TIMESTAMP in case someone adds 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP' in the future.
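The pattern is mechanical: every function reachable from the SDEI event handler gets a NOKPROBE_SYMBOL() annotation so a kprobe breakpoint can never be planted on a path that runs in this NMI-like context. A minimal illustrative sketch (my_sdei_helper is a placeholder, not a function from this patch):

#include <linux/kprobes.h>

static int my_sdei_helper(u32 event)
{
	/* body elided; may run inside the SDEI handler */
	return 0;
}
NOKPROBE_SYMBOL(my_sdei_helper);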
Signed-off-by: Xiongfeng Wang Reviewed-by: Yang Yingliang Reviewed-by: Hanjun Guo Conflicts: kernel/watchdog.c kernel/watchdog_hld.c Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 2 ++ kernel/watchdog.c | 3 +++ 2 files changed, 5 insertions(+) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 155f36e24699..6f43496de56e 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -14,6 +14,7 @@ #include #include #include +#include #include /* We use the secure physical timer as SDEI NMI watchdog timer */ @@ -80,6 +81,7 @@ static int sdei_watchdog_callback(u32 event, return 0; } +NOKPROBE_SYMBOL(sdei_watchdog_callback); static void sdei_nmi_watchdog_bind(void *data) { diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5cd6d4e26915..9e349a943cdd 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -127,6 +128,7 @@ static bool is_hardlockup(unsigned int cpu) return false; } +NOKPROBE_SYMBOL(is_hardlockup); static void watchdog_hardlockup_kick(void) { @@ -184,6 +186,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) per_cpu(watchdog_hardlockup_warned, cpu) = false; } } +NOKPROBE_SYMBOL(watchdog_hardlockup_check); #else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */ -- Gitee From af5784bae7443d99014d782ecc8087552e395cfb Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 1 Feb 2024 15:51:37 +0800 Subject: [PATCH 457/953] watchdog: Support watchdog_sdei coexistence with existing watchdogs ANBZ: #8621 commit f61b11535a0bcb5c0a90f626a757cb710d71409c openeuler kunpeng inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I90N2C CVE: NA ---------------------------------------------------------------------- Currently we cannot use watchdog_{perf, buddy} if CONFIG_SDEI_WATCHDOG=y. Not all platforms have watchdog_sdei, so this patch makes watchdog_sdei coexist with the other watchdogs. Only one watchdog is active in the end: by default watchdog_sdei is used, and when booting with "disable_sdei_nmi_watchdog" the other watchdogs are used if they probe successfully.
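A condensed sketch of the resulting selection logic (illustrative only; the real change below folds this into watchdog_enable(), watchdog_disable() and lockup_detector_init()):

/* exactly one hard-lockup backend ends up active */
static void pick_hardlockup_backend(unsigned int cpu)
{
	if (disable_sdei_nmi_watchdog)
		watchdog_hardlockup_enable(cpu);	/* perf or buddy */
	else
		sdei_watchdog_hardlockup_enable(cpu);	/* SDEI */
}

So the SDEI backend wins by default when its probe succeeds, and booting with disable_sdei_nmi_watchdog falls back to whichever of the perf or buddy detectors probes successfully.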
Signed-off-by: Yicong Yang Signed-off-by: Jie Liu Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 6 +++--- include/linux/nmi.h | 6 ++++++ kernel/watchdog.c | 16 ++++++++++++---- lib/Kconfig.debug | 2 -- 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 6f43496de56e..c7b12806364e 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -25,7 +25,7 @@ bool disable_sdei_nmi_watchdog; static bool sdei_watchdog_registered; static DEFINE_PER_CPU(ktime_t, last_check_time); -void watchdog_hardlockup_enable(unsigned int cpu) +void sdei_watchdog_hardlockup_enable(unsigned int cpu) { int ret; @@ -45,7 +45,7 @@ void watchdog_hardlockup_enable(unsigned int cpu) } } -void watchdog_hardlockup_disable(unsigned int cpu) +void sdei_watchdog_hardlockup_disable(unsigned int cpu) { int ret; @@ -106,7 +106,7 @@ void sdei_watchdog_clear_eoi(void) sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ); } -int __init watchdog_hardlockup_probe(void) +int __init sdei_watchdog_hardlockup_probe(void) { int ret; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 7bd446acad24..43dd3a79fdf2 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -236,10 +236,16 @@ static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} #endif #ifdef CONFIG_SDEI_WATCHDOG +void sdei_watchdog_hardlockup_enable(unsigned int cpu); +void sdei_watchdog_hardlockup_disable(unsigned int cpu); void sdei_watchdog_clear_eoi(void); +int sdei_watchdog_hardlockup_probe(void); extern bool disable_sdei_nmi_watchdog; #else +static inline void sdei_watchdog_hardlockup_enable(unsigned int cpu) { } +static inline void sdei_watchdog_hardlockup_disable(unsigned int cpu) { } static inline void sdei_watchdog_clear_eoi(void) { } +static inline int sdei_watchdog_hardlockup_probe(void) { return -ENODEV; } #define disable_sdei_nmi_watchdog 1 #endif diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 9e349a943cdd..11102420a2c7 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -562,8 +562,12 @@ static void watchdog_enable(unsigned int cpu) /* Initialize timestamp */ update_touch_ts(); /* Enable the hardlockup detector */ - if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) - watchdog_hardlockup_enable(cpu); + if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) { + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_enable(cpu); + else + sdei_watchdog_hardlockup_enable(cpu); + } } static void watchdog_disable(unsigned int cpu) @@ -577,7 +581,10 @@ static void watchdog_disable(unsigned int cpu) * delay between disabling the timer and disabling the hardlockup * detector causes a false positive. 
*/ - watchdog_hardlockup_disable(cpu); + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_disable(cpu); + else + sdei_watchdog_hardlockup_disable(cpu); hrtimer_cancel(hrtimer); wait_for_completion(this_cpu_ptr(&softlockup_completion)); } @@ -1022,7 +1029,8 @@ void __init lockup_detector_init(void) cpumask_copy(&watchdog_cpumask, housekeeping_cpumask(HK_TYPE_TIMER)); - if (!watchdog_hardlockup_probe()) + if ((!disable_sdei_nmi_watchdog && !sdei_watchdog_hardlockup_probe()) || + !watchdog_hardlockup_probe()) watchdog_hardlockup_available = true; else allow_lockup_detector_init_retry = true; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a6b4a18190da..c6e6b8e70b73 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1104,7 +1104,6 @@ config HARDLOCKUP_DETECTOR_PERF depends on HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF && !HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH - depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_BUDDY @@ -1113,7 +1112,6 @@ config HARDLOCKUP_DETECTOR_BUDDY depends on HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_PERF || HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH - depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_ARCH -- Gitee From 52e2ba36a38d605acb2fc687ceed8e3e2d2c6939 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 26 Mar 2024 15:58:31 +0800 Subject: [PATCH 458/953] anolis: config: arm64: Enable SDEI nmi_watchdog support ANBZ: #8621 Enable SDEI nmi_watchdog support on ARM64 platform. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index b2a67eec8bf0..e2d73d630bf0 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -6919,6 +6919,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_SDEI_WATCHDOG=y CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 092a98239318..e69182e8882f 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -6879,6 +6879,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_SDEI_WATCHDOG=y CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y -- Gitee From ca3fdc606ec37df2deaac856546cfc66e42d23ca Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 26 Mar 2024 15:09:33 +0800 Subject: [PATCH 459/953] anolis: configs: add CONFIG_DWC_PCIE_PMU config ANBZ: #8565 add CONFIG_DWC_PCIE_PMU config to anolis_defconfig and anolis-debug_defconfig Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2953 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig 
b/arch/arm64/configs/anolis-debug_defconfig index e2d73d630bf0..43c9990b1068 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -5760,6 +5760,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index e69182e8882f..0e3103e1b10c 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -5756,6 +5756,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set -- Gitee From babb4367fa8777b8e266a6dccb61dfb7403036c3 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 19 Mar 2024 16:41:00 +0800 Subject: [PATCH 460/953] anolis: Add MWAIT Cx support for Zhaoxin CPUs ANBZ: #7809 When the processor is idle, low-power idle states (C-states) can be used to save power. For Zhaoxin processors, there are two methods to enter idle states. One is HLT instruction and legacy method of I/O reads from the ACPI-defined register (known as P_LVLx), the other one is MWAIT instruction with idle states hints. Default for legacy operating system, HLT and P_LVLx I/O reads are used for Zhaoxin Processors to enter idle states, but we have checked on some Zhaoxin platform that MWAIT instruction is more efficient than P_LVLx I/O reads and HLT, so we add MWAIT Cx support for Zhaoxin Processors. Signed-off-by: leoliu-oc Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2699 --- arch/x86/include/asm/acpi.h | 3 ++- arch/x86/kernel/acpi/cstate.c | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index f896eed4516c..1f5fe16484fa 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -102,7 +102,8 @@ static inline bool arch_has_acpi_pdc(void) { struct cpuinfo_x86 *c = &cpu_data(0); return (c->x86_vendor == X86_VENDOR_INTEL || - c->x86_vendor == X86_VENDOR_CENTAUR); + c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN); } static inline void arch_acpi_set_proc_cap_bits(u32 *cap) diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 401808b47af3..90f22148acc7 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -221,7 +221,9 @@ static int __init ffh_cstate_init(void) if (c->x86_vendor != X86_VENDOR_INTEL && c->x86_vendor != X86_VENDOR_AMD && - c->x86_vendor != X86_VENDOR_HYGON) + c->x86_vendor != X86_VENDOR_HYGON && + c->x86_vendor != X86_VENDOR_CENTAUR && + c->x86_vendor != X86_VENDOR_ZHAOXIN) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); -- Gitee From bb362715741fc762e1f75cf8a4c0ad465f404222 Mon Sep 17 00:00:00 2001 From: liyuting Date: Tue, 12 Mar 2024 09:29:05 +0800 Subject: [PATCH 461/953] anolis: gic: Add gic support for Phytium S2500 ANBZ: #8558 phytium inclusion category: feature CVE: NA --------------------------------------------------------- Add gic support for Phytium S2500 Signed-off-by: cuifulong Signed-off-by: liyuting Reviewed-by: Guanghui Feng Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2872 --- arch/arm64/Kconfig.platforms | 6 
 drivers/irqchip/Kconfig                      |    8 +
 drivers/irqchip/Makefile                     |    1 +
 drivers/irqchip/irq-gic-phytium-2500-its.c   | 5734 ++++++++++++++++++
 drivers/irqchip/irq-gic-phytium-2500.c       | 2898 +++++++++
 include/acpi/actbl2.h                        |    3 +-
 include/linux/irqchip/arm-gic-phytium-2500.h |  661 ++
 7 files changed, 9310 insertions(+), 1 deletion(-)
 create mode 100644 drivers/irqchip/irq-gic-phytium-2500-its.c
 create mode 100644 drivers/irqchip/irq-gic-phytium-2500.c
 create mode 100644 include/linux/irqchip/arm-gic-phytium-2500.h

diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6069120199bb..62b813d80700 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -244,6 +244,12 @@ config ARCH_NPCM
 	  General support for NPCM8xx BMC (Arbel).
 	  Nuvoton NPCM8xx BMC based on the Cortex A35.
 
+config ARCH_PHYTIUM
+	bool "Phytium SoC Family"
+	select ARM_GIC_PHYTIUM_2500
+	help
+	  This enables support for Phytium ARMv8 SoC family.
+
 config ARCH_QCOM
 	bool "Qualcomm Platforms"
 	select GPIOLIB
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index a75378efc2c2..9944776010a0 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -89,6 +89,14 @@ config ARM_GIC_V3_ITS_FSL_MC
 	depends on FSL_MC_BUS
 	default ARM_GIC_V3_ITS
 
+config ARM_GIC_PHYTIUM_2500
+	bool
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	select PARTITION_PERCPU
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	select GENERIC_MSI_IRQ_DOMAIN
+
 config ARM_NVIC
 	bool
 	select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 466eb0bd2b52..246aa0603d6e 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
 obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
 obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
 obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
+obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o
 obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c
new file mode 100644
index 000000000000..d1ecf059a39f
--- /dev/null
+++ b/drivers/irqchip/irq-gic-phytium-2500-its.c
@@ -0,0 +1,5734 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Phytium Corporation.
+ * Author:
+ *	Wang Yinfeng
+ *	Chen Baozi
+ *	Chen Siyu
+ *	Cui Fulong
+ *	Li Yuting
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/cpu.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/efi.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-phytium-2500.h>
+#include <linux/irqchip/arm-gic-v4.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+
+#include "irq-gic-common.h"
+
+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
+#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3)
+
+#define RD_LOCAL_LPI_ENABLED BIT(0)
+#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
+#define RD_LOCAL_MEMRESERVE_DONE BIT(2)
+
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS lpi_id_bits
+#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+
+#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
+
+/*
+ * Collection structure - just an ID, and a redistributor address to
+ * ping. We use one per CPU as a bag of interrupts assigned to this
+ * CPU.
+ */
+struct its_collection {
+	u64 target_address;
+	u16 col_id;
+};
+
+/*
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
+ */
+struct its_baser {
+	void *base;
+	u64 val;
+	u32 order;
+	u32 psz;
+};
+
+struct its_device;
+
+/*
+ * The ITS structure - contains most of the infrastructure, with the
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
+ *
+ * dev_alloc_lock has to be taken for device allocations, while the
+ * spinlock must be taken to parse data structures such as the device
+ * list.
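+ * (The same raw spinlock also serializes emission into the command
+ * queue: cmd_write only advances while it is held, see
+ * its_allocate_entry() and its_post_commands() below.)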
+ */
+struct its_node {
+	raw_spinlock_t lock;
+	struct mutex dev_alloc_lock;
+	struct list_head entry;
+	void __iomem *base;
+	void __iomem *sgir_base;
+	phys_addr_t phys_base;
+	struct its_cmd_block *cmd_base;
+	struct its_cmd_block *cmd_write;
+	struct its_baser tables[GITS_BASER_NR_REGS];
+	struct its_collection *collections;
+	struct fwnode_handle *fwnode_handle;
+	u64 (*get_msi_base)(struct its_device *its_dev);
+	u64 typer;
+	u64 cbaser_save;
+	u32 ctlr_save;
+	u32 mpidr;
+	struct list_head its_device_list;
+	u64 flags;
+	unsigned long list_nr;
+	int numa_node;
+	unsigned int msi_domain_flags;
+	u32 pre_its_base; /* for Socionext Synquacer */
+	int vlpi_redist_offset;
+};
+
+#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
+#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
+
+#define ITS_ITT_ALIGN SZ_256
+
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS \
+	({ \
+		int nvpeid = 16; \
+		if (gic_rdists->has_rvpeid && \
+		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
+			nvpeid = 1 + (gic_rdists->gicd_typer2 & \
+				      GICD_TYPER2_VID); \
+		\
+		nvpeid; \
+	})
+#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
+
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
+
+struct event_lpi_map {
+	unsigned long *lpi_map;
+	u16 *col_map;
+	irq_hw_number_t lpi_base;
+	int nr_lpis;
+	raw_spinlock_t vlpi_lock;
+	struct its_vm *vm;
+	struct its_vlpi_map *vlpi_maps;
+	int nr_vlpis;
+};
+
+/*
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts. If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
+ */
+struct its_device {
+	struct list_head entry;
+	struct its_node *its;
+	struct event_lpi_map event_map;
+	void *itt;
+	u32 nr_ites;
+	u32 device_id;
+	bool shared;
+};
+
+static struct {
+	raw_spinlock_t lock;
+	struct its_device *dev;
+	struct its_vpe **vpes;
+	int next_victim;
+} vpe_proxy;
+
+struct cpu_lpi_count {
+	atomic_t managed;
+	atomic_t unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count_ft2500);
+
+static LIST_HEAD(its_nodes);
+static DEFINE_RAW_SPINLOCK(its_lock);
+static struct rdists *gic_rdists;
+static struct irq_domain *its_parent;
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
+#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+
+/*
+ * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
+ * always have vSGIs mapped.
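+ *
+ * e.g. on a GICv4.0 system with two v4 ITSs where only one has vLPIs
+ * mapped for a given VM, get_its_list() below returns a bitmap with
+ * only that ITS's list bit set, so VMOVP is only sent where needed.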
+ */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (require_its_list_vmovp(vm, its)) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + return d->hwirq - its_dev->event_map.lpi_base; +} + +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + +static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, + u32 event) +{ + if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) + return NULL; + + return &its_dev->event_map.vlpi_maps[event]; +} + +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static struct irq_chip its_vpe_irq_chip; + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) +{ + struct its_vpe *vpe = NULL; + int cpu; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) { + cpu = vpe_to_cpuid_lock(vpe, flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... */ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vpe *vpe = NULL; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) + vpe_to_cpuid_unlock(vpe, flags); +} + +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. 
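+ *
+ * Each descriptor ends up as one 32-byte command: four 64-bit words
+ * (struct its_cmd_block below), with the individual fields packed into
+ * those words by the its_encode_*() helpers via its_mask_encode().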
+ */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. + */ +struct its_cmd_block { + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); 
+} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + 
return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return desc->its_invall_cmd.col; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + 
its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr, vconf_addr; + u64 target; + bool alloc; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* + * GICv4.1 provides a way to get the VLPI state, which needs the vPE + * to be unmapped first, and in this case, we may remap the vPE + * back while the VPT is not empty. So we can't assume that the + * VPT is empty on map. This is why we never advertise PTZ. + */ + its_encode_ptz(cmd, false); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + + its_fixup_cmd(cmd); + + return valid_vpe(its, 
desc->its_vmovp_cmd.vpe); +} + +static struct its_vpe *its_build_vinv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vint_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vclear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! 
 */
+
+	while (its_queue_full(its)) {
+		count--;
+		if (!count) {
+			pr_err_ratelimited("ITS queue not draining\n");
+			return NULL;
+		}
+		cpu_relax();
+		udelay(1);
+	}
+
+	cmd = its->cmd_write++;
+
+	/* Handle queue wrapping */
+	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
+		its->cmd_write = its->cmd_base;
+
+	/* Clear command */
+	cmd->raw_cmd[0] = 0;
+	cmd->raw_cmd[1] = 0;
+	cmd->raw_cmd[2] = 0;
+	cmd->raw_cmd[3] = 0;
+
+	return cmd;
+}
+
+static struct its_cmd_block *its_post_commands(struct its_node *its)
+{
+	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
+
+	writel_relaxed(wr, its->base + GITS_CWRITER);
+
+	return its->cmd_write;
+}
+
+static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
+{
+	/*
+	 * Make sure the commands written to memory are observable by
+	 * the ITS.
+	 */
+	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
+		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
+	else
+		dsb(ishst);
+}
+
+static int its_wait_for_range_completion(struct its_node *its,
+					 u64 prev_idx,
+					 struct its_cmd_block *to)
+{
+	u64 rd_idx, to_idx, linear_idx;
+	u32 count = 1000000;	/* 1s! */
+
+	/* Linearize to_idx if the command set has wrapped around */
+	to_idx = its_cmd_ptr_to_offset(its, to);
+	if (to_idx < prev_idx)
+		to_idx += ITS_CMD_QUEUE_SZ;
+
+	linear_idx = prev_idx;
+
+	while (1) {
+		s64 delta;
+
+		rd_idx = readl_relaxed(its->base + GITS_CREADR);
+
+		/*
+		 * Compute the read pointer progress, taking the
+		 * potential wrap-around into account.
+		 */
+		delta = rd_idx - prev_idx;
+		if (rd_idx < prev_idx)
+			delta += ITS_CMD_QUEUE_SZ;
+
+		linear_idx += delta;
+		if (linear_idx >= to_idx)
+			break;
+
+		count--;
+		if (!count) {
+			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+					   to_idx, linear_idx);
+			return -1;
+		}
+		prev_idx = rd_idx;
+		cpu_relax();
+		udelay(1);
+	}
+
+	return 0;
+}
+
+/* Warning, macro hell follows */
+#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
+void name(struct its_node *its, \
+	  buildtype builder, \
+	  struct its_cmd_desc *desc) \
+{ \
+	struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
+	synctype *sync_obj; \
+	unsigned long flags; \
+	u64 rd_idx; \
+ \
+	raw_spin_lock_irqsave(&its->lock, flags); \
+ \
+	cmd = its_allocate_entry(its); \
+	if (!cmd) { /* We're soooooo screwed...
*/ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, 
its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. + */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (!require_its_list_vmovp(vpe->its_vm, its)) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +static void its_send_vinv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINV command. This is just a normal INV, + * with a VSYNC instead of a SYNC. + */ + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); +} + +static void its_send_vint(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINT command. This is just a normal INT, + * with a VSYNC instead of a SYNC. + */ + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); +} + +static void its_send_vclear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VCLEAR command. This is just a normal CLEAR, + * with a VSYNC instead of a SYNC. 
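+	 * (The trailing VSYNC is keyed on the vPE rather than on a
+	 * collection, which is why these helpers go through
+	 * its_send_single_vcommand().)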
+ */ + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); +} + +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ +static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + irq_hw_number_t hwirq; + void *va; + u8 *cfg; + + if (map) { + va = page_address(map->vm->vprop_page); + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { + va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + + cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. Single. Byte. + * Humpf... + */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); + else + dsb(ishst); +} + +static void wait_for_syncr(void __iomem *rdbase) +{ + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) + cpu_relax(); +} + +static void __direct_lpi_inv(struct irq_data *d, u64 val) +{ + void __iomem *rdbase; + unsigned long flags; + int cpu; + + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } + + __direct_lpi_inv(d, val); +} + +static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + lpi_write_config(d, clr, set); + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) + direct_lpi_inv(d); + else if (!irqd_is_forwarded_to_vcpu(d)) + its_send_inv(its_dev, its_get_event_id(d)); + else + its_send_vinv(its_dev, its_get_event_id(d)); +} + +static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. + */ + if (is_v4_1(its_dev->its)) + return; + + map = dev_event_to_vlpi_map(its_dev, event); + + if (map->db_enabled == enable) + return; + + map->db_enabled = enable; + + /* + * More fun with the architecture: + * + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI + * value or to 1023, depending on the enable bit. But that + * would be issuing a mapping for an /existing/ DevID+EventID + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI + * to the /same/ vPE, using this opportunity to adjust the + * doorbell. Mouahahahaha. We loves it, Precious. 
+	 */
+	its_send_vmovi(its_dev, event);
+}
+
+static void its_mask_irq(struct irq_data *d)
+{
+	if (irqd_is_forwarded_to_vcpu(d))
+		its_vlpi_set_doorbell(d, false);
+
+	lpi_update_config(d, LPI_PROP_ENABLED, 0);
+}
+
+static void its_unmask_irq(struct irq_data *d)
+{
+	if (irqd_is_forwarded_to_vcpu(d))
+		its_vlpi_set_doorbell(d, true);
+
+	lpi_update_config(d, 0, LPI_PROP_ENABLED);
+}
+
+static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed);
+
+	return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed);
+	else
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed);
+	else
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged);
+}
+
+static unsigned int cpumask_pick_least_loaded(struct irq_data *d,
+					      const struct cpumask *cpu_mask)
+{
+	unsigned int cpu = nr_cpu_ids, tmp;
+	int count = S32_MAX;
+
+	for_each_cpu(tmp, cpu_mask) {
+		int this_count = its_read_lpi_count(d, tmp);
+
+		if (this_count < count) {
+			cpu = tmp;
+			count = this_count;
+		}
+	}
+
+	return cpu;
+}
+
+/*
+ * As suggested by Thomas Gleixner in:
+ * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
+ */
+static int its_select_cpu(struct irq_data *d,
+			  const struct cpumask *aff_mask)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	static DEFINE_RAW_SPINLOCK(tmpmask_lock);
+	static struct cpumask __tmpmask;
+	struct cpumask *tmpmask;
+	unsigned long flags;
+	int cpu, node;
+
+	node = its_dev->its->numa_node;
+	tmpmask = &__tmpmask;
+
+	raw_spin_lock_irqsave(&tmpmask_lock, flags);
+
+	if (!irqd_affinity_is_managed(d)) {
+		/* First try the NUMA node */
+		if (node != NUMA_NO_NODE) {
+			/*
+			 * Try the intersection of the affinity mask and the
+			 * node mask (and the online mask, just to be safe).
+			 */
+			cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
+			cpumask_and(tmpmask, tmpmask, cpu_online_mask);
+
+			/*
+			 * Ideally, we would check if the mask is empty, and
+			 * try again on the full node here.
+			 *
+			 * But it turns out that the way ACPI describes the
+			 * affinity for ITSs only deals with memory, and
+			 * not target CPUs, so it cannot describe a single
+			 * ITS placed next to two NUMA nodes.
+			 *
+			 * Instead, just fall back on the online mask. This
+			 * diverges from Thomas' suggestion above.
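+			 *
+			 * On a multi-socket Phytium S2500 this fallback can
+			 * pick a CPU on a remote socket; its_set_affinity()
+			 * below compares the socket of the selected CPU with
+			 * the one derived from the ITS physical base address
+			 * and overrides the selection if they differ.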
+			 */
+			cpu = cpumask_pick_least_loaded(d, tmpmask);
+			if (cpu < nr_cpu_ids)
+				goto out;
+
+			/* If we can't cross sockets, give up */
+			if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
+				goto out;
+
+			/* If the above failed, expand the search */
+		}
+
+		/* Try the intersection of the affinity and online masks */
+		cpumask_and(tmpmask, aff_mask, cpu_online_mask);
+
+		/* If that doesn't fly, the online mask is the last resort */
+		if (cpumask_empty(tmpmask))
+			cpumask_copy(tmpmask, cpu_online_mask);
+
+		cpu = cpumask_pick_least_loaded(d, tmpmask);
+	} else {
+		cpumask_copy(tmpmask, aff_mask);
+
+		/* If we cannot cross sockets, limit the search to that node */
+		if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
+		    node != NUMA_NO_NODE)
+			cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
+
+		cpu = cpumask_pick_least_loaded(d, tmpmask);
+	}
+out:
+	raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
+
+	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
+	return cpu;
+}
+
+#define MAX_MARS3_SKT_COUNT 8
+
+static int its_cpumask_select(struct its_device *its_dev,
+			      const struct cpumask *mask_val,
+			      const struct cpumask *cpu_mask)
+{
+	unsigned int skt, skt_id, i;
+	phys_addr_t its_phys_base;
+	unsigned int cpu, cpus = 0;
+
+	unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0};
+
+	for (i = 0; i < nr_cpu_ids; i++) {
+		skt = (cpu_logical_map(i) >> 16) & 0xff;
+		if (skt < MAX_MARS3_SKT_COUNT)
+			skt_cpu_cnt[skt]++;
+		else if (skt != 0xff)
+			pr_err("socket address: %u is out of range.", skt);
+	}
+
+	its_phys_base = its_dev->its->phys_base;
+	skt_id = (its_phys_base >> 41) & 0x7;
+
+	if (skt_id != 0) {
+		for (i = 0; i < skt_id; i++)
+			cpus += skt_cpu_cnt[i];
+	}
+
+	cpu = cpumask_any_and(mask_val, cpu_mask);
+	cpus = cpus + cpu % skt_cpu_cnt[skt_id];
+
+	return cpus;
+}
+
+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	int cpu;
+	const struct cpumask *cpu_mask = cpu_online_mask;
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_collection *target_col;
+	u32 id = its_get_event_id(d);
+	int prev_cpu;
+	unsigned int skt_t1, skt_t2, cpu_idx;
+
+	/* A forwarded interrupt should use irq_set_vcpu_affinity */
+	if (irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	prev_cpu = its_dev->event_map.col_map[id];
+	its_dec_lpi_count(d, prev_cpu);
+
+	cpu_idx = its_cpumask_select(its_dev, mask_val, cpu_mask);
+	skt_t1 = (cpu_logical_map(cpu_idx) >> 16) & 0xff;
+	if (!force)
+		cpu = its_select_cpu(d, mask_val);
+	else
+		cpu = cpumask_pick_least_loaded(d, mask_val);
+	skt_t2 = (cpu_logical_map(cpu) >> 16) & 0xff;
+	if (skt_t1 != skt_t2)
+		cpu = cpu_idx;
+
+	if (cpu < 0 || cpu >= nr_cpu_ids)
+		goto err;
+
+	/* don't set the affinity when the target cpu is same as current one */
+	if (cpu != prev_cpu) {
+		target_col = &its_dev->its->collections[cpu];
+		its_send_movi(its_dev, target_col, id);
+		its_dev->event_map.col_map[id] = cpu;
+		irq_data_update_effective_affinity(d, cpumask_of(cpu));
+	}
+
+	its_inc_lpi_count(d, cpu);
+
+	return IRQ_SET_MASK_OK_DONE;
+
+err:
+	its_inc_lpi_count(d, prev_cpu);
+	return -EINVAL;
+}
+
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+	struct its_node *its = its_dev->its;
+
+	return its->phys_base + GITS_TRANSLATER;
+}
+
+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_node *its;
+	u64 addr;
+
+	its =
its_dev->its; + addr = its->get_msi_base(its_dev); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = its_get_event_id(d); +} + +static int its_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (irqd_is_forwarded_to_vcpu(d)) { + if (state) + its_send_vint(its_dev, event); + else + its_send_vclear(its_dev, event); + } else { + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + } + + return 0; +} + +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +/* + * Two favourable cases: + * + * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times + * for vSGI delivery + * + * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough + * and we're better off mapping all VPEs always + * + * If neither (a) nor (b) is true, then we map vPEs on demand. + * + */ +static bool gic_requires_eager_mapping(void) +{ + if (!its_list_map || gic_rdists->has_rvpeid) + return true; + + return false; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. 
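+	 * (its_map_vm() above took the same early exit, so no per-ITS
+	 * refcount was ever taken.)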
*/ + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_ATOMIC); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. + */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + + /* Drop the physical mapping */ + its_send_discard(its_dev, event); + + /* and install the virtual one */ + its_send_vmapti(its_dev, event); + + /* Increment the number of VLPIs */ + its_dev->event_map.nr_vlpis++; + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map; + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + map = get_vlpi_map(d); + + if (!its_dev->event_map.vm || !map) { + ret = -EINVAL; + goto out; + } + + /* Copy our mapping information to the incoming request */ + *info->map = *map; + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_unmap(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { + ret = -EINVAL; + goto out; + } + + /* Drop the virtual mapping */ + its_send_discard(its_dev, event); + + /* and restore the physical one */ + irqd_clr_forwarded_to_vcpu(d); + its_send_mapti(its_dev, d->hwirq, event); + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + LPI_PROP_ENABLED | + LPI_PROP_GROUP1)); + + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + + /* + * Drop the refcount and make the device available again if + * this was the last VLPI. 
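+	 * (nr_vlpis was incremented by its_vlpi_map() for every event
+	 * that was given a virtual mapping.)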
+	 */
+	if (!--its_dev->event_map.nr_vlpis) {
+		its_dev->event_map.vm = NULL;
+		kfree(its_dev->event_map.vlpi_maps);
+	}
+
+out:
+	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+	return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+		return -EINVAL;
+
+	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+		lpi_update_config(d, 0xff, info->config);
+	else
+		lpi_write_config(d, 0xff, info->config);
+	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+	return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_cmd_info *info = vcpu_info;
+
+	/* Need a v4 ITS */
+	if (!is_v4(its_dev->its))
+		return -EINVAL;
+
+	/* Unmap request? */
+	if (!info)
+		return its_vlpi_unmap(d);
+
+	switch (info->cmd_type) {
+	case MAP_VLPI:
+		return its_vlpi_map(d, info);
+
+	case GET_VLPI:
+		return its_vlpi_get(d, info);
+
+	case PROP_UPDATE_VLPI:
+	case PROP_UPDATE_AND_INV_VLPI:
+		return its_vlpi_prop_update(d, info);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct irq_chip its_irq_chip = {
+	.name			= "ITS",
+	.irq_mask		= its_mask_irq,
+	.irq_unmask		= its_unmask_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= its_set_affinity,
+	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
+	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
+	.irq_retrigger		= its_irq_retrigger,
+	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
+};
+
+
+/*
+ * How we allocate LPIs:
+ *
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
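+ *
+ * For example: starting from a single free range [8192:65535],
+ * allocating 32 LPIs returns base 8192 and shrinks the range to
+ * [8224:65535]; freeing those 32 back merges them into one range again.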
+ */
+#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
+
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+	struct list_head entry;
+	u32 base_id;
+	u32 span;
+};
+
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
+{
+	struct lpi_range *range;
+
+	range = kmalloc(sizeof(*range), GFP_KERNEL);
+	if (range) {
+		range->base_id = base;
+		range->span = span;
+	}
+
+	return range;
+}
+
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+	struct lpi_range *range, *tmp;
+	int err = -ENOSPC;
+
+	mutex_lock(&lpi_range_lock);
+
+	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+		if (range->span >= nr_lpis) {
+			*base = range->base_id;
+			range->base_id += nr_lpis;
+			range->span -= nr_lpis;
+
+			if (range->span == 0) {
+				list_del(&range->entry);
+				kfree(range);
+			}
+
+			err = 0;
+			break;
+		}
+	}
+
+	mutex_unlock(&lpi_range_lock);
+
+	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+	return err;
+}
+
+static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
+{
+	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
+		return;
+	if (a->base_id + a->span != b->base_id)
+		return;
+	b->base_id = a->base_id;
+	b->span += a->span;
+	list_del(&a->entry);
+	kfree(a);
+}
+
+static int free_lpi_range(u32 base, u32 nr_lpis)
+{
+	struct lpi_range *new, *old;
+
+	new = mk_lpi_range(base, nr_lpis);
+	if (!new)
+		return -ENOMEM;
+
+	mutex_lock(&lpi_range_lock);
+
+	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
+		if (old->base_id < base)
+			break;
+	}
+	/*
+	 * old is the last element with ->base_id smaller than base,
+	 * so new goes right after it. If there are no elements with
+	 * ->base_id smaller than base, &old->entry ends up pointing
+	 * at the head of the list, and inserting new at the start of
+	 * the list is the right thing to do in that case as well.
+	 */
+	list_add(&new->entry, &old->entry);
+	/*
+	 * Now check if we can merge with the preceding and/or
+	 * following ranges.
+	 */
+	merge_lpi_ranges(old, new);
+	merge_lpi_ranges(new, list_next_entry(new, entry));
+
+	mutex_unlock(&lpi_range_lock);
+	return 0;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+	u32 lpis = (1UL << id_bits) - 8192;
+	u32 numlpis;
+	int err;
+
+	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+		lpis = numlpis;
+		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+			lpis);
+	}
+
+	/*
+	 * Initializing the allocator is just the same as freeing the
+	 * full range of LPIs.
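+	 * free_lpi_range(8192, lpis) below seeds lpi_range_list with a
+	 * single range covering every allocatable LPI.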
+ */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (!nr_irqs) + err = -ENOSPC; + + if (err) + goto out; + + bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + bitmap_free(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page *prop_page; + + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) +{ + phys_addr_t start, end, addr_end; + u64 i; + + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_range(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... 
*/ + pr_warn("GIC-2500: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GIC-2500: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); +} + +static const char * const its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) +{ + u32 idx = baser - its->tables; + + return gits_read_baser(its->base + GITS_BASER + (idx << 3)); +} + +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) +{ + u32 idx = baser - its->tables; + + gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 order, bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u64 baser_phys, tmp; + u32 alloc_pages, psz; + struct page *page; + void *base; + + psz = baser->psz; + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + base = (void *)page_address(page); + baser_phys = virt_to_phys(base); + + /* Check if the physical address of the memory is above 48bits */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { + + /* 52bit PA is supported only when PageSize=64K */ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + 
((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
+ cache |
+ shr |
+ GITS_BASER_VALID);
+
+ val |= indirect ? GITS_BASER_INDIRECT : 0x0;
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
+
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+
+ if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
+ tmp &= ~GITS_BASER_SHAREABILITY_MASK;
+
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is likely
+ * to be the only thing this redistributor
+ * supports. If that's zero, make it
+ * non-cacheable as well.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ if (!shr) {
+ cache = GITS_BASER_nC;
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+ }
+ goto retry_baser;
+ }
+
+ if (val != tmp) {
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
+ &its->phys_base, its_base_type_string[type],
+ val, tmp);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ baser->order = order;
+ baser->base = base;
+ baser->psz = psz;
+ tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+ pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ indirect ? "indirect" : "flat", (int)esz,
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+
+ return 0;
+}
+
+static bool its_parse_indirect_baser(struct its_node *its,
+ struct its_baser *baser,
+ u32 *order, u32 ids)
+{
+ u64 tmp = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(tmp);
+ u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
+ u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
+ u32 new_order = *order;
+ u32 psz = baser->psz;
+ bool indirect = false;
+
+ /* No need to enable Indirection if memory requirement < (psz*2) bytes */
+ if ((esz << ids) > (psz * 2)) {
+ /*
+ * Find out whether hw supports a single or two-level table
+ * by reading bit at offset '62' after writing '1' to it.
+ */
+ its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+ indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+ if (indirect) {
+ /*
+ * The size of a lvl2 table is equal to the ITS page size,
+ * 'psz'. To compute the lvl1 table size, subtract from
+ * 'ids' (as reported by the ITS hardware) the ID bits
+ * resolved by a single lvl2 table; the lvl1 table then
+ * uses GITS_LVL1_ENTRY_SIZE per entry.
+ */
+ ids -= ilog2(psz / (int)esz);
+ esz = GITS_LVL1_ENTRY_SIZE;
+ }
+ }
+
+ /*
+ * Allocate as many entries as required to fit the
+ * range of device IDs that the ITS can grok... The ID
+ * space being incredibly sparse, this results in a
+ * massive waste of memory if the two-level device table
+ * feature is not supported by hardware.
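+ *
+ * As a rough illustration (hypothetical numbers): with 8-byte
+ * entries and a 19-bit DeviceID space, a flat table costs 4MB up
+ * front, whereas an indirect table only pays for the level-2
+ * pages that actually get populated.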
+ */
+ new_order = max_t(u32, get_order(esz << ids), new_order);
+ if (new_order > MAX_ORDER) {
+ new_order = MAX_ORDER;
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
+ pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
+ &its->phys_base, its_base_type_string[type],
+ device_ids(its), ids);
+ }
+
+ *order = new_order;
+
+ return indirect;
+}
+
+static u32 compute_common_aff(u64 val)
+{
+ u32 aff, clpiaff;
+
+ aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
+ clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
+
+ return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
+}
+
+static u32 compute_its_aff(struct its_node *its)
+{
+ u64 val;
+ u32 svpet;
+
+ /*
+ * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
+ * the resulting affinity. We then use that to see if this matches
+ * our own affinity.
+ */
+ svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
+ val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
+ val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
+ return compute_common_aff(val);
+}
+
+static struct its_node *find_sibling_its(struct its_node *cur_its)
+{
+ struct its_node *its;
+ u32 aff;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
+ return NULL;
+
+ aff = compute_its_aff(cur_its);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ u64 baser;
+
+ if (!is_v4_1(its) || its == cur_its)
+ continue;
+
+ if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
+ continue;
+
+ if (aff != compute_its_aff(its))
+ continue;
+
+ /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
+ baser = its->tables[2].val;
+ if (!(baser & GITS_BASER_VALID))
+ continue;
+
+ return its;
+ }
+
+ return NULL;
+}
+
+static void its_free_tables(struct its_node *its)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (its->tables[i].base) {
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
+ its->tables[i].base = NULL;
+ }
+ }
+}
+
+static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
+{
+ u64 psz = SZ_64K;
+
+ while (psz) {
+ u64 val, gpsz;
+
+ val = its_read_baser(its, baser);
+ val &= ~GITS_BASER_PAGE_SIZE_MASK;
+
+ switch (psz) {
+ case SZ_64K:
+ gpsz = GITS_BASER_PAGE_SIZE_64K;
+ break;
+ case SZ_16K:
+ gpsz = GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_4K:
+ default:
+ gpsz = GITS_BASER_PAGE_SIZE_4K;
+ break;
+ }
+
+ gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
+
+ val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
+ its_write_baser(its, baser, val);
+
+ if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
+ break;
+
+ switch (psz) {
+ case SZ_64K:
+ psz = SZ_16K;
+ break;
+ case SZ_16K:
+ psz = SZ_4K;
+ break;
+ case SZ_4K:
+ default:
+ return -1;
+ }
+ }
+
+ baser->psz = psz;
+ return 0;
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+ u64 shr = GITS_BASER_InnerShareable;
+ u64 cache = GITS_BASER_RaWaWb;
+ int err, i;
+
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = its->tables + i;
+ u64 val = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(val);
+ bool indirect = false;
+ u32 order;
+
+ if (type == GITS_BASER_TYPE_NONE)
+ continue;
+
+ if (its_probe_baser_psz(its, baser)) {
+ its_free_tables(its);
+ return -ENXIO;
+ }
+
+ order = get_order(baser->psz);
+
+ switch (type) {
+ case GITS_BASER_TYPE_DEVICE:
+ indirect = its_parse_indirect_baser(its, baser, &order,
+ device_ids(its));
+ break;
+
+ case GITS_BASER_TYPE_VCPU:
+ if 
(is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + sibling = find_sibling_its(its); + if (sibling != NULL) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + + indirect = its_parse_indirect_baser(its, baser, &order, + ITS_MAX_VPEID_BITS); + break; + } + + err = its_setup_baser(its, baser, cache, shr, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* Update settings which will be used for next BASERn */ + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + gic_data_rdist()->vpe_l1_base = its->tables[2].base; + + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + if (aff != compute_common_aff(val)) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. 
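+ * (The Z bit asks the RD to zero the table as it is installed;
+ * doing that to a table the other CPU is already using would be
+ * destructive, hence it is cleared below.)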
+ */ + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static bool allocate_vpe_l2_table(int cpu, u32 id) +{ + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + unsigned int psz, esz, idx, npg, gpsz; + u64 val; + struct page *page; + __le64 *table; + + if (!gic_rdists->has_rvpeid) + return true; + + /* Skip non-present CPUs */ + if (!base) + return true; + + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; + + switch (gpsz) { + default: + WARN_ON(1); + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* Don't allow vpe_id that exceeds single, flat table limit */ + if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) + return (id < (npg * psz / (esz * SZ_8))); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(psz / (esz * SZ_8)); + if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = gic_data_rdist_cpu(cpu)->vpe_l1_base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to RD hardware */ + dsb(sy); + } + + return true; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. 
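+ * (Hence the lookup order below: another RD first, then a sibling
+ * ITS, and only then a fresh allocation.)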
+ */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. + */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_base = page_address(page); + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + +static int its_alloc_collections(struct its_node *its) +{ + int i; + + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + GFP_KERNEL); + if (!its->collections) + return -ENOMEM; + + for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + + return 0; +} + +static struct page *its_allocate_pending_table(gfp_t gfp_flags) +{ + struct page *pend_page; + + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + + /* Make sure the GIC will observe the zero-ed page */ + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); + + return pend_page; +} + +static void its_free_pending_table(struct page *pt) +{ + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. 
Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GIC-2500: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + val |= GICR_VPENDBASER_PendingLast; + } + + return val; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. 
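+ * All RDs are supposed to share a single property table, so a
+ * mismatch here means the firmware programmed them inconsistently.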
+ */
+ paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
+ paddr &= GENMASK_ULL(51, 12);
+ if (WARN_ON(gic_rdists->prop_table_pa != paddr))
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+ paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+ paddr &= GENMASK_ULL(51, 16);
+
+ WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
+ gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
+
+ goto out;
+ }
+
+ pend_page = gic_data_rdist()->pend_page;
+ paddr = page_to_phys(pend_page);
+
+ /* set PROPBASE */
+ val = (gic_rdists->prop_table_pa |
+ GICR_PROPBASER_InnerShareable |
+ GICR_PROPBASER_RaWaWb |
+ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
+
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
+
+ if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
+
+ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, we must
+ * remove the cacheability attributes as
+ * well.
+ */
+ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
+ GICR_PROPBASER_CACHEABILITY_MASK);
+ val |= GICR_PROPBASER_nC;
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ }
+ pr_info_once("GIC: using cache flushing for LPI property table\n");
+ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
+ }
+
+ /* set PENDBASE */
+ val = (page_to_phys(pend_page) |
+ GICR_PENDBASER_InnerShareable |
+ GICR_PENDBASER_RaWaWb);
+
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+
+ if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
+
+ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
+ /*
+ * The HW reports non-shareable, we must remove the
+ * cacheability attributes as well.
+ */
+ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
+ GICR_PENDBASER_CACHEABILITY_MASK);
+ val |= GICR_PENDBASER_nC;
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ }
+
+ /* Enable LPIs */
+ val = readl_relaxed(rbase + GICR_CTLR);
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
+ if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+ /*
+ * It's possible for a CPU to receive VLPIs before it is
+ * scheduled as a vPE, especially for the first CPU, and a
+ * VLPI with an INTID larger than 2^(IDbits+1) would be
+ * considered out of range and dropped by the GIC.
+ * So initialize IDbits to a known value to avoid losing
+ * VLPIs.
+ */
+ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+ smp_processor_id(), val);
+ gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ /*
+ * Also clear the Valid bit of GICR_VPENDBASER, in case
+ * stale programming was left behind and could corrupt
+ * memory.
+ */
+ val = its_clear_vpend_valid(vlpi_base, 0, 0);
+ }
+
+ if (allocate_vpe_l1_table()) {
+ /*
+ * If the allocation has failed, we're in massive trouble.
+ * Disable direct injection, and pray that no VM was
+ * already running...
+ */
+ gic_rdists->has_rvpeid = false;
+ gic_rdists->has_vlpis = false;
+ }
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+out:
+ gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
+ pr_info("GIC-2500: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+ gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
+ "reserved" : "allocated", + &paddr); +} + +static void its_cpu_init_collection(struct its_node *its) +{ + int cpu = smp_processor_id(); + u64 target; + unsigned long mpid; + phys_addr_t its_phys_base; + unsigned long skt_id; + + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + return; + } + + mpid = cpu_logical_map(cpu); + its_phys_base = its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* This ITS wants a linear CPU number. */ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + 
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < device_ids(its)); + + return its_alloc_table_entry(its, baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + int cpu; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). + */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!is_v4(its)) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + /* Non v4.1? No need to iterate RDs and go back early. */ + if (!gic_rdists->has_rvpeid) + return true; + + /* + * Make sure the L2 tables are allocated for all copies of + * the L1 table on *all* v4.1 RDs. + */ + for_each_possible_cpu(cpu) { + if (!allocate_vpe_l2_table(cpu, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). + */ + nr_ites = max(2, nvecs); + sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + bitmap_free(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + raw_spin_lock_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->event_map.col_map); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) +{ + int idx; + + /* Find a free LPI region in lpi_map and allocate them. 
*/ + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + int err = 0; + + /* + * We ignore "dev" entirely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. Get yourself a better implementation */ + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", + dev_id); + return -EINVAL; + } + + mutex_lock(&its->dev_alloc_lock); + its_dev = its_find_device(its, dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. + */ + its_dev->shared = true; + pr_debug("Reusing ITT for devID %x\n", dev_id); + goto out; + } + + its_dev = its_create_device(its, dev_id, nvec, true); + if (!its_dev) { + err = -ENOMEM; + goto out; + } + + if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE) + its_dev->shared = true; + + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); +out: + mutex_unlock(&its->dev_alloc_lock); + info->scratchpad[0].ptr = its_dev; + return err; +} + +static struct msi_domain_ops its_msi_domain_ops = { + .msi_prepare = its_msi_prepare, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + + if (irq_domain_get_of_node(domain->parent)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_IRQ_TYPE_LPI; + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } else { + return -EINVAL; + } + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + struct its_node *its = its_dev->its; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int err; + int i; + + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; + + err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); + if (err) + return err; + + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); + pr_debug("ID:%d pID:%d vID:%d\n", + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); + } + + return 0; +} + +static int its_cpumask_first(struct its_device *its_dev, + 
const struct cpumask *cpu_mask)
+{
+ unsigned int skt, skt_id, i;
+ phys_addr_t its_phys_base;
+ unsigned int cpu, cpus = 0;
+
+ unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0};
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ /* skt is unsigned, so only the upper bound needs checking */
+ skt = (cpu_logical_map(i) >> 16) & 0xff;
+ if (skt < MAX_MARS3_SKT_COUNT)
+ skt_cpu_cnt[skt]++;
+ else if (skt != 0xff)
+ pr_err("socket address: %u is out of range\n", skt);
+ }
+
+ its_phys_base = its_dev->its->phys_base;
+ skt_id = (its_phys_base >> 41) & 0x7;
+
+ if (skt_id != 0)
+ for (i = 0; i < skt_id; i++)
+ cpus += skt_cpu_cnt[i];
+
+ cpu = cpumask_first(cpu_mask);
+ if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id])))
+ cpus = cpu;
+
+ return cpus;
+}
+
+static int its_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool reserve)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+ int cpu;
+
+ cpu = its_cpumask_first(its_dev, cpu_mask);
+
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ its_inc_lpi_count(d, cpu);
+ its_dev->event_map.col_map[event] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ /* Map the GIC IRQ and event to the device */
+ its_send_mapti(its_dev, d->hwirq, event);
+ return 0;
+}
+
+static void its_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
+ /* Stop the delivery of interrupts */
+ its_send_discard(its_dev, event);
+}
+
+static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its = its_dev->its;
+ int i;
+
+ bitmap_release_region(its_dev->event_map.lpi_map,
+ its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+ get_count_order(nr_irqs));
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+ /* Nuke the entry in the domain */
+ irq_domain_reset_irq_data(data);
+ }
+
+ mutex_lock(&its->dev_alloc_lock);
+
+ /*
+ * If all interrupts have been freed, start mopping the
+ * floor. This is conditioned on the device not being shared.
+ */
+ if (!its_dev->shared &&
+ bitmap_empty(its_dev->event_map.lpi_map,
+ its_dev->event_map.nr_lpis)) {
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
+
+ /* Unmap device/itt */
+ its_send_mapd(its_dev, 0);
+ its_free_device(its_dev);
+ }
+
+ mutex_unlock(&its->dev_alloc_lock);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops its_domain_ops = {
+ .alloc = its_irq_domain_alloc,
+ .free = its_irq_domain_free,
+ .activate = its_irq_domain_activate,
+ .deactivate = its_irq_domain_deactivate,
+};
+
+/*
+ * This is insane.
+ *
+ * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
+ * likely), the only way to perform an invalidate is to use a fake
+ * device to issue an INV command, implying that the LPI has first
+ * been mapped to some event on that device. Since this is not exactly
+ * cheap, we try to keep that mapping around as long as possible, and
+ * only issue an UNMAP if we're short on available slots.
+ *
+ * Broken by design(tm).
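+ *
+ * (Concretely: each doorbell invalidate is proxied through a spare
+ * event on vpe_proxy.dev, and proxy slots are recycled in
+ * next_victim order when the device runs out of free events.)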
+ * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). + */ +static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already unmapped? */ + if (vpe->vpe_proxy_event == -1) + return; + + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; + + /* + * We don't track empty slots at all, so let's move the + * next_victim pointer if we can quickly reuse that slot + * instead of nuking an existing entry. Not clear that this is + * always a win though, and this might just generate a ripple + * effect... Let's just hope VPEs don't migrate too often. + */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + int from, cpu = cpumask_first(mask_val); + unsigned long flags; + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. 
Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. + */ + from = vpe_to_cpuid_lock(vpe, &flags); + if (from == cpu) + goto out; + + vpe->col_idx = cpu; + + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. + */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_wait_vpt_parse_complete(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (!gic_rdists->has_vpend_valid_dirty) + return; + + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 1, 500)); +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; + val |= GICR_VPENDBASER_Valid; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_deschedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + val = its_clear_vpend_valid(vlpi_base, 0, 0); + + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); +} + +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. 
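+ * Any v4 ITS that maps this VM will do, hence the return after
+ * the first match.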
+ */ + its_send_vinvall(its, vpe); + return; + } +} + +static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_schedule(vpe); + return 0; + + case DESCHEDULE_VPE: + its_vpe_deschedule(vpe); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static void its_vpe_send_cmd(struct its_vpe *vpe, + void (*cmd)(struct its_device *, u32)) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + cmd(vpe_proxy.dev, vpe->vpe_proxy_event); + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static void its_vpe_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (gic_rdists->has_direct_lpi) + __direct_lpi_inv(d, d->parent_data->hwirq); + else + its_vpe_send_cmd(vpe, its_send_inv); +} + +static void its_vpe_mask_irq(struct irq_data *d) +{ + /* + * We need to unmask the LPI, which is described by the parent + * irq_data. Instead of calling into the parent (which won't + * exactly do the right thing, let's simply use the + * parent_data pointer. Yes, I'm naughty. + */ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_send_inv(d); +} + +static void its_vpe_unmask_irq(struct irq_data *d) +{ + /* Same hack as above... */ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_send_inv(d); +} + +static int its_vpe_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + if (state) { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); + } else { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + } + } else { + if (state) + its_vpe_send_cmd(vpe, its_send_int); + else + its_vpe_send_cmd(vpe, its_send_clear); + } + + return 0; +} + +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, +}; + +static struct its_node *find_4_1_its(void) +{ + static struct its_node *its; + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops? */ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. 
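+ * (INVDB is a GICv4.1-only command, hence the find_4_1_its()
+ * lookup below.)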
+ */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + unsigned long flags; + + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + * + * Note the locking to deal with the concurrent update of + * pending_last from the doorbell interrupt handler that can + * run concurrently. + */ + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. 
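+ * (No doorbell was requested here, so there is no race with the
+ * doorbell handler to worry about either.)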
+ */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + unsigned long flags; + u64 val; + int cpu; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targeting a vPE). + * Tell the kernel we've done whatever it asked for. 
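+ * (A vSGI effectively follows its vPE around; retargeting happens
+ * through vPE affinity, not through this callback.)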
+ */ + irq_data_update_effective_affinity(d, mask_val); + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurrent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. + */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + 
return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. + */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_simple_remove(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpt_page); + return -ENOMEM; + } + + raw_spin_lock_init(&vpe->vpe_lock); + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ + its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct irq_chip *irqchip = &its_vpe_irq_chip; + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page = vprop_page; + + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + 
if (err)
+ break;
+ irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+ irqchip, vm->vpes[i]);
+ set_bit(i, bitmap);
+ irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
+ }
+
+ if (err) {
+ if (i > 0)
+ its_vpe_irq_domain_free(domain, virq, i);
+
+ its_lpi_free(bitmap, base, nr_ids);
+ its_free_prop_table(vprop_page);
+ }
+
+ return err;
+}
+
+static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool reserve)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * If we use the list map, we issue VMAPP on demand... Unless
+ * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
+ * so that VSGIs can work.
+ */
+ if (!gic_requires_eager_mapping())
+ return 0;
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
+ return 0;
+}
+
+static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * If we use the list map on GICv4.0, we unmap the VPE once no
+ * VLPIs are associated with the VM.
+ */
+ if (!gic_requires_eager_mapping())
+ return;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!is_v4(its))
+ continue;
+
+ its_send_vmapp(its, vpe, false);
+ }
+
+ /*
+ * There may be a direct read to the VPT after unmapping the
+ * vPE. To guarantee the validity of such a read, we make the
+ * VPT memory coherent with the CPU caches here.
+ */
+ if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
+ gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
+ LPI_PENDBASE_SZ);
+}
+
+static const struct irq_domain_ops its_vpe_domain_ops = {
+ .alloc = its_vpe_irq_domain_alloc,
+ .free = its_vpe_irq_domain_free,
+ .activate = its_vpe_irq_domain_activate,
+ .deactivate = its_vpe_irq_domain_deactivate,
+};
+
+static int its_force_quiescent(void __iomem *base)
+{
+ u32 count = 1000000; /* 1s */
+ u32 val;
+
+ val = readl_relaxed(base + GITS_CTLR);
+ /*
+ * GIC architecture specification requires the ITS to be both
+ * disabled and quiescent for writes to GITS_BASER or
+ * GITS_CBASER to not have UNPREDICTABLE results.
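+ * "Quiescent" means that the ITS has finished processing all
+ * previously issued commands, as reported by GITS_CTLR.Quiescent.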
+ */ + if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + /* erratum 22375: only alloc 8MB table size (20 bits) */ + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; +} + +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; +} + +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; + its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); + + return true; +} + +static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. + */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (device_ids(its) > ids) { + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); + } + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... 
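+ * (i.e. make the driver apply a fixed 128kB offset to every
+ * VLPI page access so that it lands where this implementation
+ * actually decodes it)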
+ */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static bool __maybe_unused its_enable_rk3588001(void *data) +{ + struct its_node *its = data; + + if (!of_machine_is_compatible("rockchip,rk3588") && + !of_machine_is_compatible("rockchip,rk3588s")) + return false; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + + return true; +} + +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. + */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif +#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001 + { + .desc = "ITS: Rockchip erratum RK3588001", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3588001, + }, +#endif + { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + base = its->base; + + /* + * Make sure that the ITS is disabled. If it fails to quiesce, + * don't restore it since writing to CBASER or BASER + * registers is undefined according to the GIC v3 ITS + * Specification. 
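+ * (the same constraint that its_force_quiescent() enforces at
+ * probe time)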
+ * + * Firmware resuming with the ITS enabled is terminally broken. + */ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. + * CID < HCC as specified in the GIC v3 Documentation. + */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static void __init __iomem *its_map_one(struct resource *res, int *err) +{ + void __iomem *its_base; + u32 val; + + its_base = ioremap(res->start, SZ_64K); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + *err = -ENOMEM; + return NULL; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + *err = -ENODEV; + goto out_unmap; + } + + *err = its_force_quiescent(its_base); + if (*err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + return its_base; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static int its_init_domain(struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->ops = &its_msi_domain_ops; + info->data = its; + + inner_domain = irq_domain_create_hierarchy(its_parent, + its->msi_domain_flags, 0, + its->fwnode_handle, &its_domain_ops, + info); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) + return -ENOMEM; + + /* Use the last possible DevID */ + devid = GENMASK(device_ids(its) - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init 
its_compute_its_list_map(struct its_node *its) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. + */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &its->phys_base); + return -EINVAL; + } + + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its->base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &its->phys_base, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct its_node *its) +{ + u64 baser, tmp; + struct page *page; + u32 ctlr; + int err; + + if (is_v4(its)) { + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); + if (err < 0) + goto out; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &its->phys_base, err); + } else { + pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); + } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out; + } + + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &its->phys_base, its->mpidr, svpet); + } + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!page) { + err = -ENOMEM; + goto out_unmap_sgir; + } + its->cmd_base = (void *)page_address(page); + its->cmd_write = its->cmd_base; + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GITS_CBASER_SHAREABILITY_MASK; + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
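+ * A non-shareable but cacheable command queue would be
+ * unreliable, so fall back to non-cacheable and flush each
+ * command by hand (see CMDQ_NEEDS_FLUSHING below).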
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (is_v4(its)) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + err = its_init_domain(its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GIC-2500: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. 
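+ * (a RES1 EnableLPIs leaves us no safe way to reprogram the
+ * LPI tables, hence the -EBUSY below)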
+ */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +/* Mark all the BASER registers as invalid before they get reprogrammed */ +static int __init its_reset_one(struct resource *res) +{ + void __iomem *its_base; + int err, i; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) + gits_write_baser(0, its_base + GITS_BASER + (i << 3)); + + iounmap(its_base); + return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-phytium-2500-its", }, + {}, +}; + +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + int err; + + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
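+ * (an un-reset ITS may still hold valid GITS_BASER pointers
+ * into memory that is about to be reused)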
+ */ + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np) || + !of_property_read_bool(np, "msi-controller") || + of_address_to_resource(np, 0, &res)) + continue; + + err = its_reset_one(&res); + if (err) + return err; + } + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + its_enable_quirks(its); + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } + } + return 0; +} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + its_affinity->header.length); + return -EINVAL; + } + + /* + * Note that in theory a new proximity node could be created by this + * entry as it is an SRAT resource allocation structure. + * We do not currently support doing so. 
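+ * (an unmapped proximity domain resolves to NUMA_NO_NODE
+ * below and the affinity entry is then rejected)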
+ */ + node = pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) + return; + + acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + +static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct its_node *its; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode(&res.start); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-phytium-2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-phytium-2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + err = its_probe_one(its); + if (!err) + return 0; + +node_err: + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static int __init its_acpi_reset(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct resource res; + + its_entry = (struct acpi_madt_generic_translator *)header; + res = (struct resource) { + .start = its_entry->base_address, + .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }; + + return its_reset_one(&res); +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + its_acpi_reset, 0) > 0) + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); +} +#else +static void __init its_acpi_probe(void) { } +#endif + +int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *parent_domain) +{ + struct device_node *of_node; + struct its_node *its; + bool has_v4 = false; + bool has_v4_1 = false; + int err; + + gic_rdists = rdists; + + its_parent = parent_domain; + of_node = to_of_node(handle); + if (of_node) + its_of_probe(of_node); + else + its_acpi_probe(); + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + err = allocate_lpi_tables(); + if (err) + return err; + + list_for_each_entry(its, &its_nodes, entry) { + has_v4 |= is_v4(its); + has_v4_1 |= is_v4_1(its); + } + + /* Don't bother with inconsistent systems */ + if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) + rdists->has_rvpeid = false; + + if (has_v4 & rdists->has_vlpis) { + const struct irq_domain_ops *sgi_ops; + + if (has_v4_1) + sgi_ops = &its_sgi_domain_ops; + else + sgi_ops = NULL; + + if (its_init_vpe_domain() || + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { + rdists->has_vlpis = false; + pr_err("ITS: Disabling GICv4 support\n"); + } + } + + register_syscore_ops(&its_syscore_ops); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c new file mode 100644 index 000000000000..f9f3b591be00 --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -0,0 +1,2898 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Phytium Corporation. + * Author: + * Wang Yinfeng + * Chen Baozi + * Chen Siyu + * Cui Fulong + * Li Yuting + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+
+#define pr_fmt(fmt) "GIC-2500: " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "irq-gic-common.h"
+
+#define MAX_MARS3_SOC_COUNT 8
+#define MARS3_ADDR_SKTID_SHIFT 41
+
+struct gic_dist_desc {
+ void __iomem *dist_base;
+ phys_addr_t phys_base;
+ unsigned long size;
+};
+
+static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly;
+
+static unsigned int mars3_sockets_bitmap = 0x1;
+
+#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8)
+
+#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+
+#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
+#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2)
+#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3)
+
+#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
+
+struct redist_region {
+ void __iomem *redist_base;
+ phys_addr_t phys_base;
+ bool single_redist;
+};
+
+struct gic_chip_data {
+ struct fwnode_handle *fwnode;
+ phys_addr_t dist_phys_base;
+ void __iomem *dist_base;
+ struct redist_region *redist_regions;
+ struct rdists rdists;
+ struct irq_domain *domain;
+ u64 redist_stride;
+ u32 nr_redist_regions;
+ u64 flags;
+ bool has_rss;
+ unsigned int ppi_nr;
+ struct partition_desc **ppi_descs;
+};
+
+#define T241_CHIPS_MAX 4
+static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
+
+static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);
+
+static struct gic_chip_data gic_data __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+
+/*
+ * The behaviours of RPR and PMR registers differ depending on the value of
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
+ * distributor and redistributors depends on whether security is enabled in the
+ * GIC.
+ *
+ * When security is enabled, non-secure priority values from the (re)distributor
+ * are presented to the GIC CPUIF as follows:
+ * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
+ *
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
+ * EL1 are subject to a similar operation, thus matching the priorities presented
+ * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
+ * these values are unchanged by the GIC.
+ *
+ * see GICv3/GICv4 Architecture Specification (IHI0069D):
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
+ * priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
+ * interrupt.
+ */
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis_ft2500);
+
+extern struct static_key_false gic_nonsecure_priorities;
+
+/*
+ * When the Non-secure world has access to group 0 interrupts (as a
+ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
+ * return the Distributor's view of the interrupt priority.
+ *
+ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
+ * written by software is moved to the Non-secure range by the Distributor.
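+ * (using the same (prio >> 1) | 0x80 transformation described
+ * above)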
+ *
+ * If both are true (which is when gic_nonsecure_priorities gets enabled),
+ * we need to shift down the priority programmed by software to match it
+ * against the value returned by ICC_RPR_EL1.
+ */
+#define GICD_INT_RPR_PRI(priority) \
+ ({ \
+ u32 __priority = (priority); \
+ if (static_branch_unlikely(&gic_nonsecure_priorities)) \
+ __priority = 0x80 | (__priority >> 1); \
+ \
+ __priority; \
+ })
+
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
+static refcount_t *ppi_nmi_refs;
+
+static struct gic_kvm_info gic_v3_kvm_info __initdata;
+static DEFINE_PER_CPU(bool, has_rss_ft2500);
+
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE 0xf0
+
+enum gic_intid_range {
+ SGI_RANGE,
+ PPI_RANGE,
+ SPI_RANGE,
+ EPPI_RANGE,
+ ESPI_RANGE,
+ LPI_RANGE,
+ __INVALID_RANGE__
+};
+
+static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
+{
+ switch (hwirq) {
+ case 0 ... 15:
+ return SGI_RANGE;
+ case 16 ... 31:
+ return PPI_RANGE;
+ case 32 ... 1019:
+ return SPI_RANGE;
+ case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
+ return EPPI_RANGE;
+ case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
+ return ESPI_RANGE;
+ case 8192 ... GENMASK(23, 0):
+ return LPI_RANGE;
+ default:
+ return __INVALID_RANGE__;
+ }
+}
+
+static enum gic_intid_range get_intid_range(struct irq_data *d)
+{
+ return __get_intid_range(d->hwirq);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+static inline bool gic_irq_in_rdist(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
+{
+ if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 chip;
+
+ /*
+ * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
+ * registers are directed to the chip that owns the SPI. The
+ * alias region can also be used for writes to the
+ * GICD_In{E} except GICD_ICENABLERn. Each chip has support
+ * for 320 {E}SPIs. Mappings for all 4 chips:
+ * Chip0 = 32-351
+ * Chip1 = 352-671
+ * Chip2 = 672-991
+ * Chip3 = 4096-4415
+ */
+ switch (__get_intid_range(hwirq)) {
+ case SPI_RANGE:
+ chip = (hwirq - 32) / 320;
+ break;
+ case ESPI_RANGE:
+ chip = 3;
+ break;
+ default:
+ unreachable();
+ }
+ return t241_dist_base_alias[chip];
+ }
+
+ return gic_data.dist_base;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ /* SGI+PPI -> SGI_base for this CPU */
+ return gic_data_rdist_sgi_base();
+
+ case SPI_RANGE:
+ case ESPI_RANGE:
+ /* SPI -> dist_base */
+ return gic_data.dist_base;
+
+ default:
+ return NULL;
+ }
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+ u32 count = 1000000; /* 1s! */
+
+ while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+ count--;
+ if (!count) {
+ pr_err_ratelimited("RWP timeout, gone fishing\n");
+ return;
+ }
+ cpu_relax();
+ udelay(1);
+ }
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp(gic_data.dist_base);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+ gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+#ifdef CONFIG_ARM64
+
+static u64 __maybe_unused gic_read_iar(void)
+{
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
+ return gic_read_iar_cavium_thunderx();
+ else
+ return gic_read_iar_common();
+}
+#endif
+
+static void gic_enable_redist(bool enable)
+{
+ void __iomem *rbase;
+ u32 count = 1000000; /* 1s! */
+ u32 val;
+ unsigned long mpidr;
+ int i;
+
+ if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
+ return;
+
+ rbase = gic_data_rdist_rd_base();
+
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable)
+ /* Wake up this CPU redistributor */
+ val &= ~GICR_WAKER_ProcessorSleep;
+ else
+ val |= GICR_WAKER_ProcessorSleep;
+ writel_relaxed(val, rbase + GICR_WAKER);
+
+ if (!enable) { /* Check that GICR_WAKER is writeable */
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (!(val & GICR_WAKER_ProcessorSleep))
+ return; /* No PM support in this redistributor */
+ }
+
+ while (--count) {
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+ break;
+ cpu_relax();
+ udelay(1);
+ }
+ if (!count)
+ pr_err_ratelimited("redistributor failed to %s...\n",
+ enable ? "wakeup" : "sleep");
+
+ mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+ if (mpidr & 0xFFFF) // either Aff1 or Aff0 is not zero
+ return;
+
+ rbase = rbase + 64 * SZ_128K; // skip 64 Redistributors
+
+ for (i = 0; i < 4; i++) {
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable)
+ /* Wake up this CPU redistributor */
+ val &= ~GICR_WAKER_ProcessorSleep;
+ else
+ val |= GICR_WAKER_ProcessorSleep;
+ writel_relaxed(val, rbase + GICR_WAKER);
+
+ if (!enable) { /* Check that GICR_WAKER is writeable */
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (!(val & GICR_WAKER_ProcessorSleep))
+ return; /* No PM support in this redistributor */
+ }
+
+ count = 1000000; /* 1s! */
+ while (--count) {
+ val = readl_relaxed(rbase + GICR_WAKER);
+ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
+ break;
+ cpu_relax();
+ udelay(1);
+ }
+ if (!count)
+ pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n",
+ mpidr, 64 + i, enable ? "wakeup" : "sleep");
+
+ rbase = rbase + SZ_128K; // next redistributor
+ }
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case SPI_RANGE:
+ *index = d->hwirq;
+ return offset;
+ case EPPI_RANGE:
+ /*
+ * Contrary to the ESPI range, the EPPI range is contiguous
+ * to the PPI range in the registers, so let's adjust the
+ * displacement accordingly. Consistency is overrated.
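+ * e.g. the first EPPI (EPPI_BASE_INTID) gets index 32,
+ * immediately after the last PPI.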
+ */
+ *index = d->hwirq - EPPI_BASE_INTID + 32;
+ return offset;
+ case ESPI_RANGE:
+ *index = d->hwirq - ESPI_BASE_INTID;
+ switch (offset) {
+ case GICD_ISENABLER:
+ return GICD_ISENABLERnE;
+ case GICD_ICENABLER:
+ return GICD_ICENABLERnE;
+ case GICD_ISPENDR:
+ return GICD_ISPENDRnE;
+ case GICD_ICPENDR:
+ return GICD_ICPENDRnE;
+ case GICD_ISACTIVER:
+ return GICD_ISACTIVERnE;
+ case GICD_ICACTIVER:
+ return GICD_ICACTIVERnE;
+ case GICD_IPRIORITYR:
+ return GICD_IPRIORITYRnE;
+ case GICD_ICFGR:
+ return GICD_ICFGRnE;
+ case GICD_IROUTER:
+ return GICD_IROUTERnE;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(1);
+ *index = d->hwirq;
+ return offset;
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+ void __iomem *base;
+ u32 index, mask;
+
+ offset = convert_offset_index(d, offset, &index);
+ mask = 1 << (index % 32);
+
+ if (gic_irq_in_rdist(d))
+ base = gic_data_rdist_sgi_base();
+ else {
+ unsigned int skt;
+
+ skt = mars3_irq_to_skt(gic_irq(d));
+ base = mars3_gic_dists[skt].dist_base;
+ }
+
+ return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
+}
+
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+ void __iomem *base;
+
+ unsigned long mpidr;
+ void __iomem *rbase;
+ int i;
+ unsigned int skt;
+ u32 index, mask;
+
+ offset = convert_offset_index(d, offset, &index);
+ mask = 1 << (index % 32);
+
+ if (gic_irq_in_rdist(d)) {
+ base = gic_data_rdist_sgi_base();
+
+ writel_relaxed(mask, base + offset + (index / 32) * 4);
+ gic_redist_wait_for_rwp();
+
+ mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+ if ((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 are zero
+ rbase = base + 64*SZ_128K; // skip 64 Redistributors
+
+ for (i = 0; i < 4; i++) {
+ writel_relaxed(mask, rbase + offset + (index / 32) * 4);
+ gic_do_wait_for_rwp(rbase - SZ_64K); // RD from SGI base
+ rbase = rbase + SZ_128K;
+ }
+ } // core 0 of each socket
+ } else {
+ skt = mars3_irq_to_skt(gic_irq(d));
+ base = mars3_gic_dists[skt].dist_base;
+ writel_relaxed(mask, base + offset + (index / 32) * 4);
+ gic_do_wait_for_rwp(base);
+ }
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ICENABLER);
+ if (gic_irq_in_rdist(d))
+ gic_redist_wait_for_rwp();
+ else
+ gic_dist_wait_for_rwp();
+}
+
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+ gic_mask_irq(d);
+ /*
+ * When masking a forwarded interrupt, make sure it is
+ * deactivated as well.
+ *
+ * This ensures that an interrupt that is getting
+ * disabled/masked will not get "stuck", because there is
+ * no one to deactivate it (guest is being terminated).
+ */
+ if (irqd_is_forwarded_to_vcpu(d))
+ gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static inline bool gic_supports_nmi_ft2500(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+ static_branch_likely(&supports_pseudo_nmis_ft2500);
+}
+
+static int gic_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool val)
+{
+ u32 reg;
+
+ if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
+ return -EINVAL;
+
+ switch (which) {
+ case IRQCHIP_STATE_PENDING:
+ reg = val ? GICD_ISPENDR : GICD_ICPENDR;
+ break;
+
+ case IRQCHIP_STATE_ACTIVE:
+ reg = val ?
GICD_ISACTIVER : GICD_ICACTIVER; + break; + + case IRQCHIP_STATE_MASKED: + if (val) { + gic_mask_irq(d); + return 0; + } + reg = GICD_ISENABLER; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + if (d->hwirq >= 8192) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GICD_ISPENDR); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GICD_ISACTIVER); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GICD_ISENABLER); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void gic_irq_set_prio(struct irq_data *d, u8 prio) +{ + void __iomem *base = gic_dist_base(d); + u32 offset, index; + + offset = convert_offset_index(d, GICD_IPRIORITYR, &index); + + writeb_relaxed(prio, base + offset + index); +} + +static u32 __gic_get_ppi_index(irq_hw_number_t hwirq) +{ + switch (__get_intid_range(hwirq)) { + case PPI_RANGE: + return hwirq - 16; + case EPPI_RANGE: + return hwirq - EPPI_BASE_INTID + 16; + default: + unreachable(); + } +} + +static u32 gic_get_ppi_index(struct irq_data *d) +{ + return __gic_get_ppi_index(d->hwirq); +} + +static int gic_irq_nmi_setup(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (!gic_supports_nmi_ft2500()) + return -EINVAL; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return -EINVAL; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return -EINVAL; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Setting up PPI as NMI, only switch handler for first NMI */ + if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { + refcount_set(&ppi_nmi_refs[idx], 1); + desc->handle_irq = handle_percpu_devid_fasteoi_nmi; + } + } else { + desc->handle_irq = handle_fasteoi_nmi; + } + + gic_irq_set_prio(d, GICD_INT_NMI_PRI); + + return 0; +} + +static void gic_irq_nmi_teardown(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (WARN_ON(!gic_supports_nmi_ft2500())) + return; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Tearing down NMI, only switch handler for last NMI */ + if (refcount_dec_and_test(&ppi_nmi_refs[idx])) + desc->handle_irq = handle_percpu_devid_irq; + } else { + desc->handle_irq = handle_fasteoi_irq; + } + + gic_irq_set_prio(d, GICD_INT_DEF_PRI); +} + +static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) +{ + enum gic_intid_range range; + + if (!static_branch_unlikely(&gic_arm64_2941627_erratum)) + return false; + + range = get_intid_range(d); + + /* + * The workaround is needed if the IRQ is an SPI and + * the target cpu is different from the one we are + * executing on. 
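+ * In that case the interrupt is deactivated via GICD_ICACTIVER
+ * rather than relying on the CPU interface, see gic_eoi_irq()
+ * and gic_eoimode1_eoi_irq() below.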
+ */
+ return (range == SPI_RANGE || range == ESPI_RANGE) &&
+ !cpumask_test_cpu(raw_smp_processor_id(),
+ irq_data_get_effective_affinity_mask(d));
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+ isb();
+
+ if (gic_arm64_erratum_2941627_needed(d)) {
+ /*
+ * Make sure the GIC stream deactivate packet
+ * issued by ICC_EOIR1_EL1 has completed before
+ * deactivating through GICD_ICACTIVER.
+ */
+ dsb(sy);
+ gic_poke_irq(d, GICD_ICACTIVER);
+ }
+}
+
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+ /*
+ * No need to deactivate an LPI, or an interrupt that
+ * is getting forwarded to a vcpu.
+ */
+ if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+ return;
+
+ if (!gic_arm64_erratum_2941627_needed(d))
+ gic_write_dir(gic_irq(d));
+ else
+ gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ enum gic_intid_range range;
+ unsigned int irq = gic_irq(d);
+ void __iomem *base, *rbase;
+ u32 offset, index, skt;
+ int ret, i;
+ unsigned long mpidr;
+
+ range = get_intid_range(d);
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (range == SGI_RANGE)
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
+ /* SPIs have restrictions on the supported types */
+ if ((range == SPI_RANGE || range == ESPI_RANGE) &&
+ type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ offset = convert_offset_index(d, GICD_ICFGR, &index);
+
+ if (gic_irq_in_rdist(d)) {
+ base = gic_data_rdist_sgi_base();
+ ret = gic_configure_irq(index, type, base + offset, gic_redist_wait_for_rwp);
+
+ mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+ if ((mpidr & 0xffff) == 0) {
+ rbase = base + 64*SZ_128K;
+
+ for (i = 0; i < 4; i++) {
+ ret = gic_configure_irq(index, type, rbase + offset, NULL);
+ gic_do_wait_for_rwp(rbase - SZ_64K);
+ rbase = rbase + SZ_128K;
+ }
+ }
+ } else {
+ skt = mars3_irq_to_skt(gic_irq(d));
+ base = mars3_gic_dists[skt].dist_base;
+ ret = gic_configure_irq(index, type, base + offset, NULL);
+ gic_do_wait_for_rwp(base);
+ }
+
+ if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+ if (get_intid_range(d) == SGI_RANGE)
+ return -EINVAL;
+
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
+ return 0;
+}
+
+static u64 gic_cpu_to_affinity(int cpu)
+{
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 aff;
+
+ /* ASR8601 needs to have its affinities shifted down... */
+ if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
+ mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+ (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
+
+ aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+ return aff;
+}
+
+static void gic_deactivate_unhandled(u32 irqnr)
+{
+ if (static_branch_likely(&supports_deactivate_key)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
+ write_gicreg(irqnr, ICC_EOIR1_EL1);
+ isb();
+ }
+}
+
+/*
+ * Follow a read of the IAR with any HW maintenance that needs to happen prior
+ * to invoking the relevant IRQ handler. We must do two things:
+ *
+ * (1) Ensure instruction ordering between a read of IAR and subsequent
+ * instructions in the IRQ handler using an ISB.
+ *
+ * It is possible for the IAR to report an IRQ which was signalled *after*
+ * the CPU took an IRQ exception as multiple interrupts can race to be
+ * recognized by the GIC, earlier interrupts could be withdrawn, and/or
+ * later interrupts could be prioritized by the GIC.
+ *
+ * For devices which are tightly coupled to the CPU, such as PMUs, a
+ * context synchronization event is necessary to ensure that system
+ * register state is not stale, as these may have been indirectly written
+ * *after* exception entry.
+ *
+ * (2) Deactivate the interrupt when EOI mode 1 is in use.
+ */
+static inline void gic_complete_ack(u32 irqnr)
+{
+ if (static_branch_likely(&supports_deactivate_key))
+ write_gicreg(irqnr, ICC_EOIR1_EL1);
+
+ isb();
+}
+
+static bool gic_rpr_is_nmi_prio(void)
+{
+ if (!gic_supports_nmi_ft2500())
+ return false;
+
+ return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
+}
+
+static bool gic_irqnr_is_special(u32 irqnr)
+{
+ return irqnr >= 1020 && irqnr <= 1023;
+}
+
+static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
+{
+ if (gic_irqnr_is_special(irqnr))
+ return;
+
+ gic_complete_ack(irqnr);
+
+ if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
+ WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
+ gic_deactivate_unhandled(irqnr);
+ }
+}
+
+static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+{
+ if (gic_irqnr_is_special(irqnr))
+ return;
+
+ gic_complete_ack(irqnr);
+
+ if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
+ WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
+ gic_deactivate_unhandled(irqnr);
+ }
+}
+
+/*
+ * An exception has been taken from a context with IRQs enabled, and this could
+ * be an IRQ or an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
+ * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
+ * after handling any NMI but before handling any IRQ.
+ *
+ * The entry code has performed IRQ entry, and if an NMI is detected we must
+ * perform NMI entry/exit around invoking the handler.
+ */
+static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
+{
+ bool is_nmi;
+ u32 irqnr;
+
+ irqnr = gic_read_iar();
+
+ is_nmi = gic_rpr_is_nmi_prio();
+
+ if (is_nmi) {
+ nmi_enter();
+ __gic_handle_nmi(irqnr, regs);
+ nmi_exit();
+ }
+
+ if (gic_prio_masking_enabled()) {
+ gic_pmr_mask_irqs();
+ gic_arch_enable_irqs();
+ }
+
+ if (!is_nmi)
+ __gic_handle_irq(irqnr, regs);
+}
+
+/*
+ * An exception has been taken from a context with IRQs disabled, which can only
+ * be an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
+ * DAIF.IF (and ICC_PMR_EL1) unchanged.
+ *
+ * The entry code has performed NMI entry.
+ */
+static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
+{
+ u64 pmr;
+ u32 irqnr;
+
+ /*
+ * We were in a context with IRQs disabled. However, the
+ * entry code has set PMR to a value that allows any
+ * interrupt to be acknowledged, and not just NMIs. This can
+ * lead to surprising effects if the NMI has been retired in
+ * the meantime, and an IRQ is pending. The IRQ
+ * would then be taken in NMI context, something that nobody
+ * wants to debug twice.
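+ * (NMI context imposes the usual restrictions on locking, RCU
+ * and instrumentation, so a plain IRQ handler is not safe to
+ * run there)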
+ *
+ * Until we sort this, drop PMR again to a level that will
+ * actually only allow NMIs before reading IAR, and then
+ * restore it to what it was.
+ */
+ pmr = gic_read_pmr();
+ gic_pmr_mask_irqs();
+ isb();
+ irqnr = gic_read_iar();
+ gic_write_pmr(pmr);
+
+ __gic_handle_nmi(irqnr, regs);
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ if (unlikely(gic_supports_nmi_ft2500() && !interrupts_enabled(regs)))
+ __gic_handle_irq_from_irqsoff(regs);
+ else
+ __gic_handle_irq_from_irqson(regs);
+}
+
+static u32 gic_get_pribits(void)
+{
+ u32 pribits;
+
+ pribits = gic_read_ctlr();
+ pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+ pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ pribits++;
+
+ return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+ u32 val;
+ u32 old_pmr;
+
+ old_pmr = gic_read_pmr();
+
+ /*
+ * Let's find out if Group0 is under control of EL3 or not by
+ * setting the highest possible, non-zero priority in PMR.
+ *
+ * If SCR_EL3.FIQ is set, the priority gets shifted down in
+ * order for the CPU interface to set bit 7, and keep the
+ * actual priority in the non-secure range. In the process, it
+ * loses the least significant bit and the actual priority
+ * becomes 0x80. Reading it back returns 0, indicating that
+ * we don't have access to Group0.
+ */
+ gic_write_pmr(BIT(8 - gic_get_pribits()));
+ val = gic_read_pmr();
+
+ gic_write_pmr(old_pmr);
+
+ return val != 0;
+}
+
+static void __init gic_dist_init(void)
+{
+ unsigned int i;
+ u64 affinity;
+ void __iomem *base = gic_data.dist_base;
+ u32 val, skt;
+
+ for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) {
+ if (((1U << skt) & mars3_sockets_bitmap) == 0)
+ continue;
+
+ base = mars3_gic_dists[skt].dist_base;
+
+ /* Disable the distributor */
+ writel_relaxed(0, base + GICD_CTLR);
+ gic_do_wait_for_rwp(base);
+
+ /*
+ * Configure SPIs as non-secure Group-1. This will only matter
+ * if the GIC only has a single security state. This will not
+ * do the right thing if the kernel is running in secure mode,
+ * but that's not the intended use case anyway.
+ */
+ for (i = 32; i < GIC_LINE_NR; i += 32)
+ writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
+ /* Extended SPI range, not handled by the GICv2/GICv3 common code */
+ for (i = 0; i < GIC_ESPI_NR; i += 32) {
+ writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
+ writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
+ }
+
+ for (i = 0; i < GIC_ESPI_NR; i += 32)
+ writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
+
+ for (i = 0; i < GIC_ESPI_NR; i += 16)
+ writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
+
+ for (i = 0; i < GIC_ESPI_NR; i += 4)
+ writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
+
+ /* Now do the common stuff */
+ gic_dist_config(base, GIC_LINE_NR, NULL);
+ gic_do_wait_for_rwp(base);
+
+ val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+ if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+ pr_info("Enabling SGIs without active state\n");
+ val |= GICD_CTLR_nASSGIreq;
+ }
+
+ /* Enable distributor with ARE, Group1, and wait for it to drain */
+ writel_relaxed(val, base + GICD_CTLR);
+ gic_do_wait_for_rwp(base);
+
+ /*
+ * Set all global interrupts to the boot CPU only. ARE must be
+ * enabled.
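+ * (GICD_IROUTERn only takes effect once affinity routing is
+ * enabled, which the GICD_CTLR write above has just done)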
+ */ + affinity = gic_cpu_to_affinity(smp_processor_id()); + for (i = 32; i < GIC_LINE_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + + for (i = 0; i < GIC_ESPI_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); + } +} + +static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) +{ + int ret = -ENODEV; + int i; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u64 typer; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (reg != GIC_PIDR2_ARCH_GICv3 && + reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ + pr_warn("No redistributor present @%p\n", ptr); + break; + } + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + ret = fn(gic_data.redist_regions + i, ptr); + if (!ret) + return 0; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ + } + } while (!(typer & GICR_TYPER_LAST)); + } + + return ret ? -ENODEV : 0; +} + +static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) +{ + unsigned long mpidr; + u64 typer; + u32 aff; + u32 aff2_skt; + u32 redist_skt; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + mpidr = gic_cpu_to_affinity(smp_processor_id()); + + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; + redist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); + + if (aff2_skt != redist_skt) + return 1; + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { + u64 offset = ptr - region->redist_base; + + raw_spin_lock_init(&gic_data_rdist()->rd_lock); + gic_data_rdist_rd_base() = ptr; + gic_data_rdist()->phys_base = region->phys_base + offset; + + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, + (int)(region - gic_data.redist_regions), + &gic_data_rdist()->phys_base); + return 0; + } + + /* Try next one */ + return 1; +} + +static int gic_populate_rdist(void) +{ + if (gic_iterate_rdists(__gic_populate_rdist) == 0) + return 0; + + /* We couldn't even deal with ourselves... */ + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), + (unsigned long)cpu_logical_map(smp_processor_id())); + return -ENODEV; +} + +static int __gic_update_rdist_properties(struct redist_region *region, + void __iomem *ptr) +{ + u64 typer = gic_read_typer(ptr + GICR_TYPER); + u32 ctlr = readl_relaxed(ptr + GICR_CTLR); + + /* Boot-time cleanup */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); + + /* + * TYPER.RVPEID implies some form of DirectLPI, no matter what the + * doc says... 
:-/ And CTLR.IR implies another subset of DirectLPI
+ * that the ITS driver can make use of for LPIs (and not VLPIs).
+ *
+ * These are 3 different ways to express the same thing, depending
+ * on the revision of the architecture and its relaxations over
+ * time. Just group them under the 'direct_lpi' banner.
+ */
+ gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+ gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+ !!(ctlr & GICR_CTLR_IR) |
+ gic_data.rdists.has_rvpeid);
+ gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
+
+ /* Detect nonsensical configurations */
+ if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+ gic_data.rdists.has_direct_lpi = false;
+ gic_data.rdists.has_vlpis = false;
+ gic_data.rdists.has_rvpeid = false;
+ }
+
+ gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
+
+ return 1;
+}
+
+static void gic_update_rdist_properties(void)
+{
+ gic_data.ppi_nr = UINT_MAX;
+ gic_iterate_rdists(__gic_update_rdist_properties);
+ if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
+ gic_data.ppi_nr = 0;
+ pr_info("GICv3 features: %d PPIs%s%s\n",
+ gic_data.ppi_nr,
+ gic_data.has_rss ? ", RSS" : "",
+ gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
+
+ if (gic_data.rdists.has_vlpis)
+ pr_info("GICv4 features: %s%s%s\n",
+ gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
+ gic_data.rdists.has_rvpeid ? "RVPEID " : "",
+ gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
+}
+
+/* Check whether it's single security state view */
+static inline bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = gic_cpu_to_affinity(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+ bool group0;
+ u32 pribits;
+
+ /*
+ * Need to check that the SRE bit has actually been set. If
+ * not, it means that SRE is disabled at EL2. We're going to
+ * die painfully, and there is nothing we can do about it.
+ *
+ * Kindly inform the luser.
+ */
+ if (!gic_enable_sre())
+ pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+
+ pribits = gic_get_pribits();
+
+ group0 = gic_has_group0();
+
+ /* Set priority mask register */
+ if (!gic_prio_masking_enabled()) {
+ write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+ } else if (gic_supports_nmi_ft2500()) {
+ /*
+ * A configuration mismatch with the boot CPU means the system
+ * is likely to die, as interrupt masking will not work
+ * properly on all CPUs.
+ *
+ * The boot CPU calls this function before enabling NMI support,
+ * and as a result we'll never see this warning in the boot path
+ * for that CPU.
+ */
+ if (static_branch_unlikely(&gic_nonsecure_priorities))
+ WARN_ON(!group0 || gic_dist_security_disabled());
+ else
+ WARN_ON(group0 && !gic_dist_security_disabled());
+ }
+
+ /*
+ * Some firmwares hand over to the kernel with the BPR changed from
+ * its reset value (and with a value large enough to prevent
+ * any pre-emptive interrupts from working at all). Writing a zero
+ * to BPR restores its reset value.
+ */
+ gic_write_bpr1(0);
+
+ if (static_branch_likely(&supports_deactivate_key)) {
+ /* EOI drops priority only (mode 1) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
+ } else {
+ /* EOI deactivates interrupt too (mode 0) */
+ gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+ }
+
+ /* Always whack Group0 before Group1 */
+ if (group0) {
+ switch (pribits) {
+ case 8:
+ case 7:
+ write_gicreg(0, ICC_AP0R3_EL1);
+ write_gicreg(0, ICC_AP0R2_EL1);
+ fallthrough;
+ case 6:
+ write_gicreg(0, ICC_AP0R1_EL1);
+ fallthrough;
+ case 5:
+ case 4:
+ write_gicreg(0, ICC_AP0R0_EL1);
+ }
+
+ isb();
+ }
+
+ switch (pribits) {
+ case 8:
+ case 7:
+ write_gicreg(0, ICC_AP1R3_EL1);
+ write_gicreg(0, ICC_AP1R2_EL1);
+ fallthrough;
+ case 6:
+ write_gicreg(0, ICC_AP1R1_EL1);
+ fallthrough;
+ case 5:
+ case 4:
+ write_gicreg(0, ICC_AP1R0_EL1);
+ }
+
+ isb();
+
+ /* ... and let's hit the road... */
+ gic_write_grpen1(1);
+
+ /* Keep the RSS capability status in per_cpu variable */
+ per_cpu(has_rss_ft2500, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
+
+ /* Check that all the CPUs are capable of sending SGIs to other CPUs */
+ for_each_online_cpu(i) {
+ bool have_rss = per_cpu(has_rss_ft2500, i) && per_cpu(has_rss_ft2500, cpu);
+
+ need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
+ if (need_rss && (!have_rss))
+ pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
+ cpu, (unsigned long)mpidr,
+ i, (unsigned long)gic_cpu_to_affinity(i));
+ }
+
+ /*
+ * The GIC spec says that when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
+ * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
+ * UNPREDICTABLE choice of:
+ * - The write is ignored.
+ * - The RS field is treated as 0.
+ */
+ if (need_rss && (!gic_data.has_rss))
+ pr_crit_once("RSS is required but GICD doesn't support it\n");
+}
+
+static bool gicv3_nolpi;
+
+static int __init gicv3_nolpi_cfg(char *buf)
+{
+ return kstrtobool(buf, &gicv3_nolpi);
+}
+early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
+
+static int gic_dist_supports_lpis(void)
+{
+ return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
+ !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
+ !gicv3_nolpi);
+}
+
+static void gic_cpu_init(void)
+{
+ void __iomem *rbase;
+ int i;
+ unsigned long mpidr;
+
+ /* Register ourselves with the rest of the world */
+ if (gic_populate_rdist())
+ return;
+
+ gic_enable_redist(true);
+
+ WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
+ !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
+ "Distributor has extended ranges, but CPU%d doesn't\n",
+ smp_processor_id());
+
+ rbase = gic_data_rdist_sgi_base();
+
+ /* Configure SGIs/PPIs as non-secure Group-1 */
+ for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
+ writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
+
+ gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
+
+ mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+ if ((mpidr & 0xFFFF) == 0) { /* both Aff1 and Aff0 are zero */
+ rbase = rbase + 64 * SZ_128K; /* skip 64 redistributors */
+
+ for (i = 0; i < 4; i++) {
+ /* Configure SGIs/PPIs as non-secure Group-1 */
+ writel_relaxed(~0, rbase + GICR_IGROUPR0);
+
+ gic_cpu_config(rbase, gic_data.ppi_nr + 16, NULL);
+ gic_do_wait_for_rwp(rbase - SZ_64K);
+
+ rbase = rbase + SZ_128K;
+ }
+ }
+
+ /* initialise system registers */
+ gic_cpu_sys_reg_init();
+}
+
+#ifdef CONFIG_SMP
+
+#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
+#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+ gic_cpu_init();
+
+ if
(gic_dist_supports_lpis()) + phytium_its_cpu_init(); + + return 0; +} + +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + unsigned long cluster_id) +{ + int next_cpu, cpu = *base_cpu; + unsigned long mpidr; + u16 tlist = 0; + + mpidr = gic_cpu_to_affinity(cpu); + + while (cpu < nr_cpu_ids) { + tlist |= 1 << (mpidr & 0xf); + + next_cpu = cpumask_next(cpu, mask); + if (next_cpu >= nr_cpu_ids) + goto out; + cpu = next_cpu; + + mpidr = gic_cpu_to_affinity(cpu); + + if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { + cpu--; + goto out; + } + } +out: + *base_cpu = cpu; + return tlist; +} + +#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ + (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ + << ICC_SGI1R_AFFINITY_## level ##_SHIFT) + +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) +{ + u64 val; + + val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | + MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | + irq << ICC_SGI1R_SGI_ID_SHIFT | + MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | + MPIDR_TO_SGI_RS(cluster_id) | + tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +} + +static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) +{ + int cpu; + + if (WARN_ON(d->hwirq >= 16)) + return; + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + dsb(ishst); + + for_each_cpu(cpu, mask) { + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu)); + u16 tlist; + + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + gic_send_sgi(cluster_id, tlist, d->hwirq); + } + + /* Force the above writes to ICC_SGI1R_EL1 to be executed */ + isb(); +} + +static void __init gic_smp_init(void) +{ + struct irq_fwspec sgi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 1, + }; + int base_sgi; + + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gicv3:starting", + gic_starting_cpu, NULL); + + /* Register all 8 non-secure SGIs */ + base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec); + if (WARN_ON(base_sgi <= 0)) + return; + + set_smp_ipi_range(base_sgi, 8); +} + +static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val) +{ + unsigned int skt, irq_skt, i; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + irq_skt = mars3_irq_to_skt(gic_irq(d)); + + if (irq_skt != 0) + for (i = 0; i < irq_skt; i++) + cpus += skt_cpu_cnt[i]; + + cpu = cpumask_any_and(mask_val, cpu_online_mask); + cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; + + return cpus; +} + +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu; + u32 offset, index; + void __iomem *reg; + int enabled; + u64 val; + unsigned int skt; + + if (force) + cpu = cpumask_first(mask_val); + else + cpu = gic_cpumask_select(d, mask_val); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + if (gic_irq_in_rdist(d)) + return -EINVAL; + + /* If interrupt was enabled, disable it first */ + enabled = gic_peek_irq(d, GICD_ISENABLER); + if (enabled) + gic_mask_irq(d); + + offset = convert_offset_index(d, GICD_IROUTER, &index); + + skt = mars3_irq_to_skt(gic_irq(d)); + reg = 
mars3_gic_dists[skt].dist_base + offset + (index * 8);
+ val = gic_cpu_to_affinity(cpu);
+
+ gic_write_irouter(val, reg);
+
+ /*
+ * If the interrupt was enabled, enable it again. Otherwise,
+ * just wait for the distributor to have digested our changes.
+ */
+ if (enabled)
+ gic_unmask_irq(d);
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#else
+#define gic_set_affinity NULL
+#define gic_ipi_send_mask NULL
+#define gic_smp_init() do { } while (0)
+#endif
+
+static int gic_retrigger(struct irq_data *data)
+{
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
+}
+
+#ifdef CONFIG_CPU_PM
+static int gic_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ if (cmd == CPU_PM_EXIT) {
+ if (gic_dist_security_disabled())
+ gic_enable_redist(true);
+ gic_cpu_sys_reg_init();
+ } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
+ gic_write_grpen1(0);
+ gic_enable_redist(false);
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_cpu_pm_notifier_block = {
+ .notifier_call = gic_cpu_pm_notifier,
+};
+
+static void gic_cpu_pm_init(void)
+{
+ cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
+}
+
+#else
+static inline void gic_cpu_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+static struct irq_chip gic_chip = {
+ .name = "GIC-phytium-2500",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static struct irq_chip gic_eoimode1_chip = {
+ .name = "GICv3-phytium-2500",
+ .irq_mask = gic_eoimode1_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoimode1_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
+ .irq_get_irqchip_state = gic_irq_get_irqchip_state,
+ .irq_set_irqchip_state = gic_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
+ .irq_nmi_setup = gic_irq_nmi_setup,
+ .irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip = &gic_chip;
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
+
+ if (static_branch_likely(&supports_deactivate_key))
+ chip = &gic_eoimode1_chip;
+
+ switch (__get_intid_range(hw)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+ break;
+
+ case SPI_RANGE:
+ case ESPI_RANGE:
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ irq_set_probe(irq);
+ irqd_set_single_target(irqd);
+ break;
+
+ case LPI_RANGE:
+ if (!gic_dist_supports_lpis())
+ return -EPERM;
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_fasteoi_irq, NULL, NULL);
+ break;
+
+ default:
+ return -EPERM;
+ }
+
+ /* Prevents SW
retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; +} + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (fwspec->param_count == 1 && fwspec->param[0] < 16) { + *hwirq = fwspec->param[0]; + *type = IRQ_TYPE_EDGE_RISING; + return 0; + } + + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1]; + if (fwspec->param[1] >= 16) + *hwirq += EPPI_BASE_INTID - 16; + else + *hwirq += 16; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitioned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, + irq_hw_number_t hwirq) +{ + enum gic_intid_range range; + + if (!gic_data.ppi_descs) + return false; + + if (!is_of_node(fwspec->fwnode)) + return false; + + if (fwspec->param_count < 4 || !fwspec->param[3]) + return false; + + range = __get_intid_range(hwirq); + if (range != PPI_RANGE && range != EPPI_RANGE) + return false; + + return true; +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + unsigned int type, ret, ppi_idx; + irq_hw_number_t hwirq; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if (!is_of_node(fwspec->fwnode)) + return 1; + + ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); + if (WARN_ON_ONCE(ret)) + return 0; + + if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) + return d == gic_data.domain; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. 
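+ * The 4th cell carries the phandle of the partition; it is resolved
+ * with of_find_node_by_phandle() in partition_domain_translate() below.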
+ */ + ppi_idx = __gic_get_ppi_index(hwirq); + return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + unsigned long ppi_intid; + struct device_node *np; + unsigned int ppi_idx; + int ret; + + if (!gic_data.ppi_descs) + return -ENOMEM; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); + if (WARN_ON_ONCE(ret)) + return 0; + + ppi_idx = __gic_get_ppi_index(ppi_intid); + ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static bool gic_enable_quirk_msm8996(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; + + return true; +} + +static bool gic_enable_quirk_mtk_gicr(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE; + + return true; +} + +static bool gic_enable_quirk_cavium_38539(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; + + return true; +} + +static bool gic_enable_quirk_hip06_07(void *data) +{ + struct gic_chip_data *d = data; + + /* + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite + * not being an actual ARM implementation). The saving grace is + * that GIC-600 doesn't have ESPI, so nothing to do in that case. + * HIP07 doesn't even have a proper IIDR, and still pretends to + * have ESPI. In both cases, put them right. + */ + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { + /* Zero both ESPI and the RES0 field next to it... 
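+ * That is GICD_TYPER bits [9:8], which is what the GENMASK(9, 8)
+ * below clears.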
*/ + d->rdists.gicd_typer &= ~GENMASK(9, 8); + return true; + } + + return false; +} + +#define T241_CHIPN_MASK GENMASK_ULL(45, 44) +#define T241_CHIP_GICDA_OFFSET 0x1580000 +#define SMCCC_SOC_ID_T241 0x036b0241 + +static bool gic_enable_quirk_nvidia_t241(void *data) +{ + s32 soc_id = arm_smccc_get_soc_id_version(); + unsigned long chip_bmask = 0; + phys_addr_t phys; + u32 i; + + /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */ + if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241)) + return false; + + /* Find the chips based on GICR regions PHYS addr */ + for (i = 0; i < gic_data.nr_redist_regions; i++) { + chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK, + (u64)gic_data.redist_regions[i].phys_base)); + } + + if (hweight32(chip_bmask) < 3) + return false; + + /* Setup GICD alias regions */ + for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) { + if (chip_bmask & BIT(i)) { + phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET; + phys |= FIELD_PREP(T241_CHIPN_MASK, i); + t241_dist_base_alias[i] = ioremap(phys, SZ_64K); + WARN_ON_ONCE(!t241_dist_base_alias[i]); + } + } + static_branch_enable(&gic_nvidia_t241_erratum); + return true; +} + +static bool gic_enable_quirk_asr8601(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; + + return true; +} + +static bool gic_enable_quirk_arm64_2941627(void *data) +{ + static_branch_enable(&gic_arm64_2941627_erratum); + return true; +} + +static bool rd_set_non_coherent(void *data) +{ + struct gic_chip_data *d = data; + + d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "GICv3: Qualcomm MSM8996 broken firmware", + .compatible = "qcom,msm8996-gic-v3", + .init = gic_enable_quirk_msm8996, + }, + { + .desc = "GICv3: ASR erratum 8601001", + .compatible = "asr,asr8601-gic-v3", + .init = gic_enable_quirk_asr8601, + }, + { + .desc = "GICv3: Mediatek Chromebook GICR save problem", + .property = "mediatek,broken-save-restore-fw", + .init = gic_enable_quirk_mtk_gicr, + }, + { + .desc = "GICv3: HIP06 erratum 161010803", + .iidr = 0x0204043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + .desc = "GICv3: HIP07 erratum 161010803", + .iidr = 0x00000000, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + /* + * Reserved register accesses generate a Synchronous + * External Abort. 
This erratum applies to: + * - ThunderX: CN88xx + * - OCTEON TX: CN83xx, CN81xx + * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* + */ + .desc = "GICv3: Cavium erratum 38539", + .iidr = 0xa000034c, + .mask = 0xe8f00fff, + .init = gic_enable_quirk_cavium_38539, + }, + { + .desc = "GICv3: NVIDIA erratum T241-FABRIC-4", + .iidr = 0x0402043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_nvidia_t241, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [0,1] + * + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0400043b, + .mask = 0xff0e0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [2] + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0402043b, + .mask = 0xff0f0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + .desc = "GICv3: non-coherent attribute", + .property = "dma-noncoherent", + .init = rd_set_non_coherent, + }, + { + } +}; + +static void gic_enable_nmi_support(void) +{ + int i; + + if (!gic_prio_masking_enabled()) + return; + + if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { + pr_warn("Skipping NMI enable due to firmware issues\n"); + return; + } + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); + + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTRL.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the + * ICC_PMR_EL1 register and the priority that software assigns to + * interrupts: + * + * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority + * ----------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ----------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ----------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * where non-secure means that the value is right-shifted by one and the + * MSB bit set, to make it fit in the non-secure priority range. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we use a different PMR value to mask IRQs + * and the rest of the values that we use remain unchanged. 
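+ *
+ * As a worked example of the "non-secure" transformation: a priority of
+ * 0xa0 written by software is seen by the GIC as (0xa0 >> 1) | 0x80 = 0xd0.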
+ */ + if (gic_has_group0() && !gic_dist_security_disabled()) + static_branch_enable(&gic_nonsecure_priorities); + + static_branch_enable(&supports_pseudo_nmis_ft2500); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static int __init gic_init_bases(phys_addr_t dist_phys_base, + void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_phys_base = dist_phys_base; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), + gic_quirks, &gic_data); + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + /* + * ThunderX1 explodes on reading GICD_TYPER2, in violation of the + * architecture spec (which says that reserved registers are RES0). + */ + if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) { + /* Disable GICv4.x features for the erratum T241-FABRIC-4 */ + gic_data.rdists.has_rvpeid = true; + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + gic_data.rdists.has_vpend_valid_dirty = true; + } + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + + if (typer & GICD_TYPER_MBIS) { + err = mbi_init(handle, gic_data.domain); + if (err) + pr_err("Failed to initialize MBIs\n"); + } + + set_handle_irq(gic_handle_irq); + + gic_update_rdist_properties(); + + gic_dist_init(); + gic_cpu_init(); + gic_smp_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + its_lpi_memreserve_init(); + } else { + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(handle, gic_data.domain); + } + + gic_enable_nmi_support(); + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int nr_parts; + struct partition_affinity *parts; + + parts_node 
= of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); + if (!gic_data.ppi_descs) + goto out_put_node; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %pOFn[%d] { ", + child_part, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); + continue; + } + + pr_info("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); + } + + pr_info("}\n"); + part_idx++; + } + + for (i = 0; i < gic_data.ppi_nr; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static void gic_request_region(resource_size_t base, resource_size_t size, + const char *name) +{ + if (!request_mem_region(base, size, name)) + pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", + name, &base); +} + +static void __iomem *gic_of_iomap(struct device_node *node, int idx, + const char *name, struct resource *res) +{ + void __iomem *base; + int ret; + + ret = of_address_to_resource(node, idx, res); + if (ret) + return IOMEM_ERR_PTR(ret); + + gic_request_region(res->start, resource_size(res), name); + base = of_iomap(node, idx); + + return base ?: IOMEM_ERR_PTR(-ENOMEM); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + phys_addr_t dist_phys_base; + void __iomem *dist_base; + struct redist_region *rdist_regs; + struct resource res; + u64 redist_stride; + u32 nr_redist_regions; + int err, i; + unsigned long skt; + + dist_base = gic_of_iomap(node, 0, "GICD", &res); + if (IS_ERR(dist_base)) { + pr_err("%pOF: unable to map gic dist 
registers\n", node); + return PTR_ERR(dist_base); + } + + dist_phys_base = res.start; + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("Error: No GIC Distributor in FDT\n"); + goto out_unmap_dist; + } + + mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3_soc_bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); + if (IS_ERR(rdist_regs[i].redist_base)) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + err = gic_init_bases(dist_phys_base, dist_base, rdist_regs, + nr_redist_regions, redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base)) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 nr_redist_regions; + bool single_redist; + int enabled_rdists; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor 
*)header;
+ void __iomem *redist_base;
+
+ redist_base = ioremap(redist->base_address, redist->length);
+ if (!redist_base) {
+ pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+ return -ENOMEM;
+ }
+ gic_request_region(redist->base_address, redist->length, "GICR");
+
+ gic_acpi_register_redist(redist->base_address, redist_base);
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+ u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+ void __iomem *redist_base;
+
+ /* A GICC entry without ACPI_MADT_ENABLED is unusable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ redist_base = ioremap(gicc->gicr_base_address, size);
+ if (!redist_base)
+ return -ENOMEM;
+ gic_request_region(gicc->gicr_base_address, size, "GICR");
+
+ gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+ return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+ acpi_tbl_entry_handler redist_parser;
+ enum acpi_madt_type type;
+
+ if (acpi_data.single_redist) {
+ type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+ redist_parser = gic_acpi_parse_madt_gicc;
+ } else {
+ type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+ redist_parser = gic_acpi_parse_madt_redist;
+ }
+
+ /* Collect redistributor base addresses in GICR entries */
+ if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+ return 0;
+
+ pr_info("No valid GICR entries exist\n");
+ return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ /* Subtable presence means that redist exists, that's it */
+ return 0;
+}
+
+static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+
+ /*
+ * If GICC is enabled and has a valid gicr base address, then it means
+ * the GICR base is presented via GICC.
+ */
+ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+ acpi_data.enabled_rdists++;
+ return 0;
+ }
+
+ /*
+ * It is perfectly valid for firmware to pass a disabled GICC entry;
+ * the driver should not treat that as an error, but skip the entry
+ * instead of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ return -ENODEV;
+}
+
+static int __init gic_acpi_count_gicr_regions(void)
+{
+ int count;
+
+ /*
+ * Count how many redistributor regions we have. It is not allowed
+ * to mix redistributor descriptions: GICR and GICC subtables have to
+ * be mutually exclusive.
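+ * In other words, firmware describes the redistributors either through
+ * GICR subtables or through per-CPU GICC entries, never both at once.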
+ */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) { + acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static int first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? + ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + +static int __init +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + size_t size; + int i, err; + int skt; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + 
gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + +#ifdef CONFIG_ACPI + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!!!"); + } else + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); +#endif + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(dist->base_address, acpi_data.dist_base, + acpi_data.redist_regs, acpi_data.nr_redist_regions, + 0, gsi_domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(gsi_domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 3751ae69432f..8104c262bbae 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -897,7 +897,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_APLIC = 26, ACPI_MADT_TYPE_PLIC = 27, ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */ - ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_OEM_RESERVED = 0x80, /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_PHYTIUM_2500 = 128 }; /* diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h new file mode 100644 index 000000000000..f212a29390bf --- /dev/null +++ b/include/linux/irqchip/arm-gic-phytium-2500.h @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H +#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. 
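+ * Offsets follow the GICv3 memory map: the GICD_* registers are
+ * relative to the distributor base, while the GICR_* blocks further
+ * down are relative to each redistributor's RD_base, SGI_base or
+ * VLPI_base frame, as noted per block.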
+ */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_TYPER2 0x000C +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 +#define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +#define ESPI_BASE_INTID 4096 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). + */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_nASSGIreq (1U << 8) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) + +#define GICD_TYPER2_nASSGIcap (1U << 8) +#define GICD_TYPER2_VIL (1U << 7) +#define GICD_TYPER2_VID GENMASK(4, 0) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +#define GIC_PAGE_SIZE_4K 0ULL +#define GIC_PAGE_SIZE_16K 1ULL +#define GIC_PAGE_SIZE_64K 2ULL +#define GIC_PAGE_SIZE_MASK 3ULL + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_CES (1UL << 1) +#define GICR_CTLR_IR (1UL << 2) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, 
INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DIRTY (1U << 2) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) +#define GICR_TYPER_RVPEID (1U << 7) +#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24) +#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32) + +#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0) +#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32) +#define GICR_INVLPIR_V GENMASK_ULL(63, 63) + +#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID +#define GICR_INVALLR_V GICR_INVLPIR_V + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + 
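+/*
+ * For example, GICR_VPROPBASER_nC below expands to
+ * GIC_BASER_CACHE_nC << GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT,
+ * i.e. 1ULL << 7.
+ */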
+#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) +#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +/* + * GICv4.1 VPROPBASER reinvention. A subtle mix between the old + * VPROPBASER and ITS_BASER. Just not quite any of the two. + */ +#define GICR_VPROPBASER_4_1_VALID (1ULL << 63) +#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59) +#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55) +#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53) +#define GICR_VPROPBASER_4_1_Z (1ULL << 52) +#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12) +#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields, + * also use the above Valid, PendingLast and Dirty. 
+ */ +#define GICR_VPENDBASER_4_1_DB (1ULL << 62) +#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59) +#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) +#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) + +#define GICR_VSGIR 0x0080 + +#define GICR_VSGIR_VPEID GENMASK(15, 0) + +#define GICR_VSGIPENDR 0x0088 + +#define GICR_VSGIPENDR_BUSY (1U << 31) +#define GICR_VSGIPENDR_PENDING GENMASK(15, 0) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_MPIDR 0x0018 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_SGIR 0x20020 + +#define GITS_SGIR_VPEID GENMASK_ULL(47, 32) +#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define GITS_TYPER_VMOVP (1ULL << 37) +#define GITS_TYPER_VMAPP (1ULL << 40) +#define GITS_TYPER_SVPET GENMASK_ULL(42, 41) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define 
GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) +#define GITS_BASER_PHYS_52_to_48(phys) \ + (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_ADDR_48_to_52(baser) \ + (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) + +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K) +#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K) +#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K) +#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK) +#define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 +#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +#define GITS_LVL1_ENTRY_SIZE (8UL) + +/* + * ITS commands + */ +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + +/* + * GICv4 ITS specific commands + */ +#define GITS_CMD_GICv4(x) ((x) | 0x20) +#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) +#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) +#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) +#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) +#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) +/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */ +#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) +#define GITS_CMD_VSGI GITS_CMD_GICv4(3) +#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) + +/* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 +#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 
+#define E_ITS_CLEAR_UNMAPPED_INTERRUPT		0x010507
+#define E_ITS_MAPD_DEVICE_OOR			0x010801
+#define E_ITS_MAPD_ITTSIZE_OOR			0x010802
+#define E_ITS_MAPC_PROCNUM_OOR			0x010902
+#define E_ITS_MAPC_COLLECTION_OOR		0x010903
+#define E_ITS_MAPTI_UNMAPPED_DEVICE		0x010a04
+#define E_ITS_MAPTI_ID_OOR			0x010a05
+#define E_ITS_MAPTI_PHYSICALID_OOR		0x010a06
+#define E_ITS_INV_UNMAPPED_INTERRUPT		0x010c07
+#define E_ITS_INVALL_UNMAPPED_COLLECTION	0x010d09
+#define E_ITS_MOVALL_PROCNUM_OOR		0x010e01
+#define E_ITS_DISCARD_UNMAPPED_INTERRUPT	0x010f07
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_SHIFT	(1)
+#define ICC_CTLR_EL1_EOImode_drop_dir	(0U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_EOImode_drop	(1U << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_EOImode_MASK	(1 << ICC_CTLR_EL1_EOImode_SHIFT)
+#define ICC_CTLR_EL1_CBPR_SHIFT		0
+#define ICC_CTLR_EL1_CBPR_MASK		(1 << ICC_CTLR_EL1_CBPR_SHIFT)
+#define ICC_CTLR_EL1_PMHE_SHIFT		6
+#define ICC_CTLR_EL1_PMHE_MASK		(1 << ICC_CTLR_EL1_PMHE_SHIFT)
+#define ICC_CTLR_EL1_PRI_BITS_SHIFT	8
+#define ICC_CTLR_EL1_PRI_BITS_MASK	(0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
+#define ICC_CTLR_EL1_ID_BITS_SHIFT	11
+#define ICC_CTLR_EL1_ID_BITS_MASK	(0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT)
+#define ICC_CTLR_EL1_SEIS_SHIFT		14
+#define ICC_CTLR_EL1_SEIS_MASK		(0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
+#define ICC_CTLR_EL1_A3V_SHIFT		15
+#define ICC_CTLR_EL1_A3V_MASK		(0x1 << ICC_CTLR_EL1_A3V_SHIFT)
+#define ICC_CTLR_EL1_RSS		(0x1 << 18)
+#define ICC_CTLR_EL1_ExtRange		(0x1 << 19)
+#define ICC_PMR_EL1_SHIFT		0
+#define ICC_PMR_EL1_MASK		(0xff << ICC_PMR_EL1_SHIFT)
+#define ICC_BPR0_EL1_SHIFT		0
+#define ICC_BPR0_EL1_MASK		(0x7 << ICC_BPR0_EL1_SHIFT)
+#define ICC_BPR1_EL1_SHIFT		0
+#define ICC_BPR1_EL1_MASK		(0x7 << ICC_BPR1_EL1_SHIFT)
+#define ICC_IGRPEN0_EL1_SHIFT		0
+#define ICC_IGRPEN0_EL1_MASK		(1 << ICC_IGRPEN0_EL1_SHIFT)
+#define ICC_IGRPEN1_EL1_SHIFT		0
+#define ICC_IGRPEN1_EL1_MASK		(1 << ICC_IGRPEN1_EL1_SHIFT)
+#define ICC_SRE_EL1_DIB			(1U << 2)
+#define ICC_SRE_EL1_DFB			(1U << 1)
+#define ICC_SRE_EL1_SRE			(1U << 0)
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID		(0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
+#define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+
+#define ICC_IAR1_EL1_SPURIOUS		0x3ff
+
+#define ICC_SRE_EL2_SRE			(1 << 0)
+#define ICC_SRE_EL2_ENABLE		(1 << 3)
+
+#define ICC_SGI1R_TARGET_LIST_SHIFT	0
+#define ICC_SGI1R_TARGET_LIST_MASK	(0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
+#define ICC_SGI1R_AFFINITY_1_SHIFT	16
+#define ICC_SGI1R_AFFINITY_1_MASK	(0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_SGI_ID_SHIFT		24
+#define ICC_SGI1R_SGI_ID_MASK		(0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_SHIFT	32
+#define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT	40
+#define ICC_SGI1R_RS_SHIFT		44
+#define ICC_SGI1R_RS_MASK		(0xfULL << ICC_SGI1R_RS_SHIFT)
+#define ICC_SGI1R_AFFINITY_3_SHIFT	48
+#define ICC_SGI1R_AFFINITY_3_MASK	(0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
+
+#include <asm/arch_gicv3.h>
+
+#ifndef __ASSEMBLY__
+
+/*
+ * We need a value to serve as an irq-type for LPIs. Choose one that will
+ * hopefully pique the interest of the reviewer.
+ */
+#define GIC_IRQ_TYPE_LPI	0xa110c8ed
+
+struct rdists {
+	struct {
+		raw_spinlock_t	rd_lock;
+		void __iomem	*rd_base;
+		struct page	*pend_page;
+		phys_addr_t	phys_base;
+		u64		flags;
+		cpumask_t	*vpe_table_mask;
+		void		*vpe_l1_base;
+	} __percpu		*rdist;
+	phys_addr_t		prop_table_pa;
+	void			*prop_table_va;
+	u64			flags;
+	u32			gicd_typer;
+	u32			gicd_typer2;
+	int			cpuhp_memreserve_state;
+	bool			has_vlpis;
+	bool			has_rvpeid;
+	bool			has_direct_lpi;
+	bool			has_vpend_valid_dirty;
+};
+
+struct irq_domain;
+struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
+int phytium_its_cpu_init(void);
+int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists,
+		     struct irq_domain *domain);
+int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
+
+static inline bool gic_enable_sre(void)
+{
+	u32 val;
+
+	val = gic_read_sre();
+	if (val & ICC_SRE_EL1_SRE)
+		return true;
+
+	val |= ICC_SRE_EL1_SRE;
+	gic_write_sre(val);
+	val = gic_read_sre();
+
+	return !!(val & ICC_SRE_EL1_SRE);
+}
+
+#endif
+
+#endif
-- 
Gitee

From dd1f8caea50ef94846bbcd9a575251e331738aab Mon Sep 17 00:00:00 2001
From: liyuting
Date: Thu, 21 Mar 2024 11:49:51 +0800
Subject: [PATCH 462/953] anolis: kdump: Add kdump support for Phytium S2500

ANBZ: #8558

phytium inclusion
category: feature
CVE: NA

---------------------------------------------------------

Add kdump support for Phytium S2500
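[Editor's note, not part of the original patch: the bypass added below
keys off MPIDR affinity fields. On the S2500 the socket number lives in
Aff2 (bits [23:16]) while the low 16 bits (Aff1/Aff0) identify a core's
position within its socket, so a CPU is kept only when those low bits
match the boot CPU's. A standalone sketch of that comparison, with
made-up hwid values:

  /*
   * Hypothetical demo of the MPIDR check used by the kdump bypass.
   * The mask 0xffff keeps Aff1/Aff0; a CPU passes only when it sits
   * in the same position within its socket as the boot CPU.
   */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  static bool kdump_bypass(uint64_t hwid, uint64_t boot_hwid)
  {
          return (hwid & 0xffff) != (boot_hwid & 0xffff);
  }

  int main(void)
  {
          uint64_t boot = 0x00000000;  /* socket0, core0 (Aff2 = 0)  */
          uint64_t c1   = 0x00000100;  /* socket0, core1 -> bypassed */
          uint64_t s1c0 = 0x00010000;  /* socket1, core0 -> kept     */

          printf("core1   bypassed: %d\n", kdump_bypass(c1, boot));
          printf("s1/cpu0 bypassed: %d\n", kdump_bypass(s1c0, boot));
          return 0;
  }

With the boot CPU on socket0/core0, socket1's first CPU is kept while
every other core on socket0 is skipped, which is exactly the behaviour
the comment in smp.c describes.]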
Signed-off-by: cuifulong
Signed-off-by: liyuting
Reviewed-by: Guanghui Feng
Reviewed-by: Xunlei Pang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2872
---
 arch/arm64/include/asm/cputype.h           |  4 +++
 arch/arm64/kernel/smp.c                    | 34 ++++++++++++++++++++++
 drivers/irqchip/irq-gic-phytium-2500-its.c | 32 ++++++++++++++++++++
 drivers/irqchip/irq-gic-phytium-2500.c     | 18 ++++++++++++
 4 files changed, 88 insertions(+)

diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 52f076afeb96..e88431d87fc3 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -54,6 +54,7 @@
 #define ARM_CPU_IMP_ARM			0x41
 #define ARM_CPU_IMP_APM			0x50
 #define ARM_CPU_IMP_CAVIUM		0x43
+#define ARM_CPU_IMP_PHYTIUM		0x70
 #define ARM_CPU_IMP_BRCM		0x42
 #define ARM_CPU_IMP_QCOM		0x51
 #define ARM_CPU_IMP_NVIDIA		0x4E
@@ -89,6 +90,7 @@
 #define APM_CPU_PART_XGENE		0x000
 #define APM_CPU_VAR_POTENZA		0x00
 
+#define PHYTIUM_CPU_PART_2500		0X663
 
 #define CAVIUM_CPU_PART_THUNDERX	0x0A1
 #define CAVIUM_CPU_PART_THUNDERX_81XX	0x0A2
@@ -143,6 +145,8 @@
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
 #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
 #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+#define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500)
+
 #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
 #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 960b98b43506..b8d2763c5dd7 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -501,6 +502,34 @@ static bool bootcpu_valid __initdata;
 static unsigned int cpu_count = 1;
 
 #ifdef CONFIG_ACPI
+
+#ifdef CONFIG_ARCH_PHYTIUM
+/*
+ * On a Phytium S2500 multi-socket server, for example a 2-socket (2P)
+ * system, there are socket0 and socket1 on the server:
+ * If the storage device (such as the SAS controller and the disks used
+ * to save the vmcore) is installed on socket1, and the second kernel
+ * brings up 2 CPUs that are both on socket0 (nr_cpus=2), then saving
+ * the vmcore to disk will fail, since interrupts like SPIs and LPIs
+ * (but not SGIs) cannot be delivered across CPU sockets on this
+ * platform.
+ * To avoid this issue, bypass every non-cpu0 CPU so that cpu0 of each
+ * socket can boot up and handle interrupts when booting the second
+ * kernel.
+ */
+static bool __init is_phytium_kdump_cpu_need_bypass(u64 hwid)
+{
+	if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_FT_2500)
+		return false;
+
+	/*
+	 * Bypass other non-cpu0 CPUs to ensure that the second kernel can
+	 * bring up cpu0 on each socket
+	 */
+	if (is_kdump_kernel() && (hwid & 0xffff) != (cpu_logical_map(0) & 0xffff))
+		return true;
+	return false;
+}
+#endif
+
 static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
 
 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
@@ -550,6 +579,11 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 	if (cpu_count >= NR_CPUS)
 		return;
 
+#ifdef CONFIG_ARCH_PHYTIUM
+	if (is_phytium_kdump_cpu_need_bypass(hwid))
+		return;
+#endif
+
 	/* map the logical cpu id to cpu MPIDR */
 	set_cpu_logical_map(cpu_count, hwid);
 
diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c
index d1ecf059a39f..5685f5f901a1 100644
--- a/drivers/irqchip/irq-gic-phytium-2500-its.c
+++ b/drivers/irqchip/irq-gic-phytium-2500-its.c
@@ -1719,6 +1719,21 @@ static int its_cpumask_select(struct its_device *its_dev,
 	cpu = cpumask_any_and(mask_val, cpu_mask);
 	cpus = cpus + cpu % skt_cpu_cnt[skt_id];
 
+	if (is_kdump_kernel()) {
+		skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+		if (skt_id == skt)
+			return cpu;
+
+		for (i = 0; i < nr_cpu_ids; i++) {
+			skt = (cpu_logical_map(i) >> 16) & 0xff;
+			if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) {
+				if (skt_id == skt)
+					return i;
+			} else if (skt != 0xff)
+				pr_err("socket address: %d is out of range.", skt);
+		}
+	}
+
 	return cpus;
 }
 
@@ -3056,6 +3071,9 @@ static bool enabled_lpis_allowed(void)
 	phys_addr_t addr;
 	u64 val;
 
+	if (is_kdump_kernel())
+		return true;
+
 	/* Check whether the property table is in a reserved region */
 	val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
 	addr = val & GENMASK_ULL(51, 12);
@@ -3704,6 +3722,20 @@ static int its_cpumask_first(struct its_device *its_dev,
 	if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id])))
 		cpus = cpu;
 
+	if (is_kdump_kernel()) {
+		skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+		if (skt_id == skt)
+			return cpu;
+		for (i = 0; i < nr_cpu_ids; i++) {
+			skt = (cpu_logical_map(i) >> 16) & 0xff;
+			if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) {
+				if (skt_id == skt)
+					return i;
+			} else if (skt != 0xff)
+				pr_err("socket address: %d is out of range.", skt);
+		}
+	}
+
 	return cpus;
 }
 
diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c
index f9f3b591be00..dbeeb795b581 100644
--- a/drivers/irqchip/irq-gic-phytium-2500.c
+++ b/drivers/irqchip/irq-gic-phytium-2500.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1566,6 +1567,20 @@ static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val
 	cpu = cpumask_any_and(mask_val, cpu_online_mask);
 	cpus = cpus + cpu % skt_cpu_cnt[irq_skt];
 
+	if (is_kdump_kernel()) {
+		skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+		if (irq_skt == skt)
+			return cpu;
+
+		for (i = 0; i < nr_cpu_ids; i++) {
+			skt = 
(cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) { + if (irq_skt == skt) + return i; + } else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + } return cpus; } @@ -2833,6 +2848,9 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) #ifdef CONFIG_ACPI mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (is_kdump_kernel()) + mars3_sockets_bitmap = 0x3; + if (mars3_sockets_bitmap == 0) { mars3_sockets_bitmap = 0x1; pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!!!"); -- Gitee From 40f858874079a1ff7fd5d98ffa6d22d701370092 Mon Sep 17 00:00:00 2001 From: liyuting Date: Thu, 21 Mar 2024 13:56:37 +0800 Subject: [PATCH 463/953] anolis: iommu: Add iommu support for Phytium S2500 ANBZ: #8558 phytium inclusion category: feature CVE: NA --------------------------------------------------------- Add iommu support for Phytium S2500 Signed-off-by: cuifulong Signed-off-by: liyuting Reviewed-by: Guanghui Feng Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2872 --- arch/arm64/include/asm/cputype.h | 9 +++++ .../arm64/include/asm/phytium_machine_types.h | 37 +++++++++++++++++++ drivers/iommu/arm/arm-smmu/arm-smmu.c | 25 ++++++++++++- drivers/irqchip/irq-gic-v3-its.c | 9 +++++ drivers/pci/quirks.c | 7 ++++ 5 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/include/asm/phytium_machine_types.h diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index e88431d87fc3..4fd36d930f2d 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -55,6 +55,7 @@ #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 #define ARM_CPU_IMP_PHYTIUM 0x70 + #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E @@ -90,6 +91,10 @@ #define APM_CPU_PART_XGENE 0x000 #define APM_CPU_VAR_POTENZA 0x00 +#define PHYTIUM_CPU_PART_1500A 0X660 +#define PHYTIUM_CPU_PART_2000AHK 0X661 +#define PHYTIUM_CPU_PART_2000PLUS 0X662 +#define PHYTIUM_CPU_PART_2004 0X663 #define PHYTIUM_CPU_PART_2500 0X663 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 @@ -145,6 +150,10 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_FT_1500A MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_1500A) +#define MIDR_FT_2000AHK MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000AHK) +#define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) +#define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) #define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) diff --git a/arch/arm64/include/asm/phytium_machine_types.h b/arch/arm64/include/asm/phytium_machine_types.h new file mode 100644 index 000000000000..8aed50daca4b --- /dev/null +++ b/arch/arm64/include/asm/phytium_machine_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Authors: Wang Yinfeng . 
+ */ + +#ifndef _MACHINE_TYPE_H_ +#define _MACHINE_TYPE_H_ + +#include +#include + +static inline bool phytium_part(u32 cpuid) +{ + return ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == cpuid); +} + +#define typeof_ft1500a() phytium_part(MIDR_FT_1500A) +#define typeof_ft2000ahk() phytium_part(MIDR_FT_2000AHK) +#define typeof_ft2000plus() phytium_part(MIDR_FT_2000PLUS) +#define typeof_ft2004() phytium_part(MIDR_FT_2004) +#define typeof_s2500() phytium_part(MIDR_FT_2500) + +#endif diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index d6d1a2a55cc0..8ce9d33e7840 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -35,6 +35,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include #include "arm-smmu.h" @@ -51,6 +55,7 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +#define SMR_MASK_SHIFT 16 static int force_stage; module_param(force_stage, int, S_IRUGO); @@ -1363,6 +1368,19 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) return ERR_PTR(-ENODEV); } +#ifdef CONFIG_ARCH_PHYTIUM +#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) + if (typeof_ft2000plus()) { + int num = fwspec->num_ids; + + for (i = 0; i < num; i++) { + u32 fwid = FWID_READ(fwspec->ids[i]); + + iommu_fwspec_add_ids(dev, &fwid, 1); + } + } +#endif + ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); @@ -1458,7 +1476,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) mutex_unlock(&smmu->stream_map_mutex); return ERR_PTR(-EINVAL); } - +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_s2500()) + break; + if (typeof_ft2000plus() && !smmu->s2crs[idx].group) + continue; +#endif group = smmu->s2crs[idx].group; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 676c9250d3f2..1328199e5029 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -37,6 +37,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include "irq-gic-common.h" #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) @@ -1730,6 +1734,11 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->data = its_get_event_id(d); +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_ft2000plus()) + return; +#endif + iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 352284a22e0d..cffdd0e0b90b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5089,6 +5089,13 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, +#ifdef CONFIG_ARCH_PHYTIUM + /* because PLX switch Vendor id is 0x10b5 on phytium cpu */ + { 0x10b5, PCI_ANY_ID, pci_quirk_xgene_acs }, + /* because rootcomplex Vendor id is 0x17cd on phytium cpu */ + { 0x17cd, PCI_ANY_ID, pci_quirk_xgene_acs }, +#endif + /* Broadcom multi-function device */ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, -- Gitee From 70120c0ae6705315ffb3f4cfd9e8b263cb65e4cf Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 12 Mar 2024 11:41:06 +0800 Subject: [PATCH 464/953] anolis: Add support for Zhaoxin GMI SM3 Secure Hash algorithm 
ANBZ: #7809

This SM3 algorithm driver is developed to support the SM3 instruction,
letting users develop applications with both high performance and high
security.

Throughput by input block size (units as in the author's benchmark):

Block-size       16      64     256    1024    2048    4096    8192
SM3-Generic  254.52  607.60 1055.30 1268.67 1314.55 1323.60 1379.98
SM3-GMI      505.99 1412.57 3191.53 4635.25 4993.05 5156.04 5250.53
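[Editor's note, not part of the original patch: the driver registers
"sm3-zhaoxin-gmi" at priority 300, so a plain "sm3" request through the
kernel crypto API will normally resolve to it once the module is
loaded. A minimal userspace sketch using the AF_ALG interface (error
handling omitted for brevity):

  /* Hash a buffer with whatever "sm3" implementation the kernel
   * selected (sm3-zhaoxin-gmi when loaded and highest priority).
   * Illustrative only. */
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/socket.h>
  #include <linux/if_alg.h>

  int main(void)
  {
          struct sockaddr_alg sa = {
                  .salg_family = AF_ALG,
                  .salg_type   = "hash",
                  .salg_name   = "sm3",
          };
          unsigned char digest[32];
          int tfm, op, i;

          tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
          bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
          op = accept(tfm, NULL, 0);

          write(op, "abc", 3);                /* message           */
          read(op, digest, sizeof(digest));   /* 32-byte SM3 digest */

          for (i = 0; i < 32; i++)
                  printf("%02x", digest[i]);
          printf("\n");
          return 0;
  }

In-kernel users would obtain the same digest through
crypto_alloc_shash("sm3", 0, 0).]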
Signed-off-by: leoliu-oc
Reviewed-by: Tianjia Zhang
Reviewed-by: Guanjun
Link: https://gitee.com/anolis/cloud-kernel/pulls/2703
---
 arch/x86/crypto/Kconfig            |  14 ++
 arch/x86/crypto/Makefile           |   2 +
 arch/x86/crypto/sm3-zhaoxin-gmi.c  | 198 +++++++++++++++++++++++++++++
 arch/x86/include/asm/cpufeatures.h |   2 +
 4 files changed, 216 insertions(+)
 create mode 100644 arch/x86/crypto/sm3-zhaoxin-gmi.c

diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 9bbfd01cfa2f..7b105b70c664 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -477,6 +477,20 @@ config CRYPTO_SM3_AVX_X86_64
 
 	  If unsure, say N.
 
+config CRYPTO_SM3_ZHAOXIN_GMI
+	tristate "Hash functions: SM3 (Zhaoxin GMI)"
+	depends on X86 && CRYPTO
+	default m
+	select CRYPTO_HASH
+	select CRYPTO_SM3
+	help
+	  SM3 cipher algorithms (Zhaoxin GMI Instruction).
+
+	  SM3 secure hash function as defined by OSCCA (GM/T 0004-2012).
+	  It is part of the Chinese Commercial Cryptography suite.
+
+	  If unsure, say N.
+
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "Hash functions: GHASH (CLMUL-NI)"
 	depends on X86 && 64BIT
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 9aa46093c91b..63b373b5ebe5 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -109,6 +109,8 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o
 obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o
 aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o
 
+obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 $(obj)/%.S: $(src)/%.pl FORCE
diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c
new file mode 100644
index 000000000000..e393133d572d
--- /dev/null
+++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sm3_zhaoxin_gmi.c - wrapper code for Zhaoxin GMI.
+ *
+ * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+const u8 zx_sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
+	0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+	0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+	0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+	0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
+};
+EXPORT_SYMBOL_GPL(zx_sm3_zero_message_hash);
+
+/*
+ * Load supported features of the CPU to see whether SM3/SM4 is available.
+ */
+static int gmi_available(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	u32 eax, edx;
+
+	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) ||
+	    ((c->x86 == 6) && (c->x86_model == 0x09)) ||
+	    (c->x86 > 6)) {
+		if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) {
+
+			eax = 0xC0000001;
+			/* CPUID also clobbers EBX/ECX, so list them. */
+			__asm__ __volatile__ ("cpuid"
+					      : "+a"(eax), "=d"(edx)
+					      : : "ebx", "ecx");
+
+			if ((edx & 0x0030) != 0x0030)
+				return -ENODEV;
+
+			pr_notice("GMI SM3 detected by CPUID\n");
+			return 0;
+		}
+		pr_notice("GMI SM3 is available\n");
+		return 0;
+	}
+	return -ENODEV;
+}
+
+void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt)
+{
+	unsigned long in, out, cnt;
+
+	if (!blockcnt)
+		return;
+
+	in  = (unsigned long)inp;
+	out = (unsigned long)(sst->state);
+	cnt = (unsigned long)blockcnt;
+
+	__asm__ __volatile__(
+	#ifdef __x86_64__
+		"pushq %%rbp\n"
+		"pushq %%rbx\n"
+		"pushq %%rsi\n"
+		"pushq %%rdi\n"
+		"movq $-1, %%rax\n"
+		"movq $0x20, %%rbx\n"
+	#else
+		"pushl %%ebp\n"
+		"pushl %%ebx\n"
+		"pushl %%esi\n"
+		"pushl %%edi\n"
+		"movl $-1, %%eax\n"
+		"movl $0x20, %%ebx\n"
+	#endif
+		".byte 0xf3,0x0f,0xa6,0xe8\n"
+	#ifdef __x86_64__
+		"popq %%rdi\n"
+		"popq %%rsi\n"
+		"popq %%rbx\n"
+		"popq %%rbp\n"
+	#else
+		"popl %%edi\n"
+		"popl %%esi\n"
+		"popl %%ebx\n"
+		"popl %%ebp\n"
+	#endif
+		:
+		: "S"(in), "D"(out), "c"(cnt)
+		:
+	);
+}
+
+static inline int zx_sm3_init(struct shash_desc *desc)
+{
+	struct sm3_state *sctx;
+
+	if (!desc)
+		return -EINVAL;
+
+	sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = 0x6f168073UL;
+	sctx->state[1] = 0xb9b21449UL;
+	sctx->state[2] = 0xd7422417UL;
+	sctx->state[3] = 0x00068adaUL;
+	sctx->state[4] = 0xbc306fa9UL;
+	sctx->state[5] = 0xaa383116UL;
+	sctx->state[6] = 0x4dee8de3UL;
+	sctx->state[7] = 0x4e0efbb0UL;
+
+	sctx->count = 0;
+
+	return 0;
+}
+
+static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out)
+{
+	struct sm3_state *sctx = shash_desc_ctx(desc);
+	__be32 *digest = (__be32 *)out;
+
+	memcpy(digest, sctx->state, SM3_DIGEST_SIZE);
+
+	*sctx = (struct sm3_state){};
+	return 0;
+}
+
+int zx_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+}
+EXPORT_SYMBOL(zx_sm3_update);
+
+static int zx_sm3_final(struct shash_desc *desc, u8 *out)
+{
+	sm3_base_do_finalize(desc, sm3_generic_block_fn);
+
+	return zx_sm3_base_finish(desc, out);
+}
+
+int zx_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash)
+{
+	sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+
+	return zx_sm3_final(desc, hash);
+}
+EXPORT_SYMBOL(zx_sm3_finup);
+
+static struct shash_alg zx_sm3_alg = {
+	.digestsize	= SM3_DIGEST_SIZE,
+	.init		= zx_sm3_init,
+	.update		= zx_sm3_update,
+	.final		= zx_sm3_final,
+	.finup		= zx_sm3_finup,
+	.descsize	= sizeof(struct sm3_state),
+	.base = {
+		.cra_name	 = "sm3",
+		.cra_driver_name = "sm3-zhaoxin-gmi",
+		.cra_priority	 = 300,
+		.cra_blocksize	 = SM3_BLOCK_SIZE,
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
+static int __init zx_sm3_generic_mod_init(void)
+{
+	if (gmi_available() == 0)
+		return crypto_register_shash(&zx_sm3_alg);
+
+	pr_warn("GMI is unavailable on this platform.\n");
+	return -ENODEV;
+}
+
+static void __exit zx_sm3_generic_mod_fini(void)
+{
+	crypto_unregister_shash(&zx_sm3_alg);
+}
+
+module_init(zx_sm3_generic_mod_init);
+module_exit(zx_sm3_generic_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
+
+MODULE_ALIAS_CRYPTO("sm3-zhaoxin");
+MODULE_ALIAS_CRYPTO("sm3-zhaoxin-gmi");
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 8262eafc95a1..64d8741c0657 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -148,6 +148,8 @@
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE		( 5*32+ 2) /* "rng" RNG present (xstore) */
 #define X86_FEATURE_XSTORE_EN		( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_CCS			(5*32 + 4) /* "sm3/4" SM3/4 present */
+#define X86_FEATURE_CCS_EN		(5*32 + 5) /* "sm3/4" SM3/4 enabled */
 #define X86_FEATURE_XCRYPT		( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
 #define X86_FEATURE_XCRYPT_EN		( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
 #define X86_FEATURE_ACE2		( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-- 
Gitee

From 8e298702e0d2abc2ea532b0198e6e8b7e4faab2e Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Tue, 12 Mar 2024 11:49:56 +0800
Subject: [PATCH 465/953] anolis: Add support for Zhaoxin GMI SM4 Block Cipher
 algorithm

ANBZ: #7809

This SM4 algorithm driver is developed to support the SM4 instruction,
letting users develop applications with both high performance and high
security.

Throughput by input block size (units as in the author's benchmark):

BlockSize            16      64     128     256    1024    1424    4096
SM4-Generic
  ECB Encryption  392.36  464.93  476.03  481.03  485.26  485.28  485.67
      Decryption  411.20  446.40  475.43  481.07  484.80  485.27  486.17
  CBC Encryption  349.39  424.60  439.53  446.25  451.78  452.87  453.20
      Decryption  339.45  423.40  439.27  450.06  457.53  458.02  459.02
  CFB Encryption  337.84  428.52  441.12  451.53  456.78  456.14  458.40
      Decryption  367.30  403.70  414.36  418.61  421.63  421.37  422.56
  CTR Encryption  366.54  448.51  439.18  465.56  472.31  471.36  474.39
      Decryption  384.44  448.35  458.30  466.45  472.40  471.36  474.31
SM4-GMI
  ECB Encryption  811.68 2694.82 4086.80 5870.03 7975.80 8168.89 8510.23
      Decryption  849.63 2823.88 4284.88 5833.04 7973.33 8168.45 8506.51
  CBC Encryption  701.68 1943.75 2706.77 3365.73 4117.24 4201.31 4299.83
      Decryption  709.10 1860.98 2621.00 3296.41 4102.92 4177.61 4298.36
  CFB Encryption  711.95 1915.53 2712.57 3371.73 4065.69 4007.26 4310.40
      Decryption  713.16 1952.34 2585.72 3363.57 4128.37 4204.84 4305.43
  CTR Encryption  633.33 2165.92 3221.97 4250.08 5714.66 5556.47 5956.50
      Decryption  662.45 2151.06 3221.11 4248.79 5677.76 5497.75 5968.06
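[Editor's note, not part of the original patch: the CTR path has one
subtlety worth illustrating. The GMI engine only increments the low
16 bits of the counter (IV bytes 14..15), so rep_xcrypt_ctr() in this
patch splits any request that would cross the 16-bit wrap-around and
re-increments the full IV in software. A standalone sketch of that
split computation, with made-up values:

  /* Demo of the 16-bit counter-window split done by rep_xcrypt_ctr():
   * the engine counts only within IV bytes 14..15, so a request that
   * crosses 0xffff must be issued in two parts. Illustrative only. */
  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint8_t  iv[16] = { [14] = 0xff, [15] = 0xfe }; /* ctr 0xfffe */
          uint64_t blocks = 5;

          uint16_t ctr  = (uint16_t)((iv[14] << 8) | iv[15]);
          uint32_t room = 0x10000 - ctr;  /* blocks before wrap-around */

          if (blocks > room)
                  printf("split: %u blocks, then %llu blocks\n",
                         room, (unsigned long long)(blocks - room));
          else
                  printf("single run of %llu blocks\n",
                         (unsigned long long)blocks);
          return 0;
  }

Here the counter starts at 0xfffe, leaving room for only 2 blocks, so
a 5-block request is processed as 2 blocks, an IV fix-up, then 3 more.]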
Signed-off-by: leoliu-oc
Reviewed-by: Tianjia Zhang
Reviewed-by: Guanjun
Link: https://gitee.com/anolis/cloud-kernel/pulls/2703
---
 arch/x86/crypto/Kconfig           |  20 ++
 arch/x86/crypto/Makefile          |   1 +
 arch/x86/crypto/sm4-zhaoxin-gmi.c | 858 ++++++++++++++++++++++++++++++
 3 files changed, 879 insertions(+)
 create mode 100644 arch/x86/crypto/sm4-zhaoxin-gmi.c

diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 7b105b70c664..cbe8eef473ec 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -231,6 +231,26 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64
 
 	  If unsure, say N.
 
+config CRYPTO_SM4_ZHAOXIN_GMI
+	tristate "Ciphers: SM4 with modes: ECB, CBC, CTR, CFB, OFB (Zhaoxin GMI)"
+	depends on X86 && CRYPTO
+	default m
+	select CRYPTO_SKCIPHER
+	select CRYPTO_SIMD
+	select CRYPTO_ALGAPI
+	select CRYPTO_SM4
+	help
+	  SM4 cipher algorithms (Zhaoxin GMI Instruction).
+
+	  SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+	  Organization of State Commercial Administration of China (OSCCA)
+	  as authorized cryptographic algorithms for use within China.
+
+	  This is an SM4 implementation optimized with the Zhaoxin GMI
+	  instruction set for block ciphers.
+
+	  If unsure, say N.
+
 config CRYPTO_TWOFISH_586
 	tristate "Ciphers: Twofish (32-bit)"
 	depends on (X86 || UML_X86) && !64BIT
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 63b373b5ebe5..4230829a6648 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -110,6 +110,7 @@ obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o
 aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o
 
 obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o
+obj-$(CONFIG_CRYPTO_SM4_ZHAOXIN_GMI) += sm4-zhaoxin-gmi.o
 
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c
new file mode 100644
index 000000000000..ec57b4ca4644
--- /dev/null
+++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zhaoxin-gmi-sm4.c - wrapper code for Zhaoxin GMI.
+ *
+ * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define SM4_ECB	(1<<6)
+#define SM4_CBC	(1<<7)
+#define SM4_CFB	(1<<8)
+#define SM4_OFB	(1<<9)
+#define SM4_CTR	(1<<10)
+
+#define ZX_GMI_ALIGNMENT 16
+
+#define GETU16(p) ((u16)(p)[0]<<8 | (u16)(p)[1])
+
+/* Control word. */
+struct sm4_cipher_data {
+	u8 iv[SM4_BLOCK_SIZE];		/* Initialization vector */
+	union {
+		u32 pad;
+		struct {
+			u32 encdec:1;
+			u32 func:5;
+			u32 mode:5;
+			u32 digest:1;
+		} b;
+	} cword;			/* Control word */
+	struct sm4_ctx keys;		/* Encryption key */
+};
+
+static u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv,
+		      struct sm4_cipher_data *sm4_data, u64 count)
+{
+	unsigned long rax = sm4_data->cword.pad;
+
+	// Set the flag for encryption or decryption
+	if (sm4_data->cword.b.encdec == 1)
+		rax &= ~0x01;
+	else
+		rax |= 0x01;
+
+	__asm__ __volatile__(
+	#ifdef __x86_64__
+		"pushq %%rbp\n\n"
+		"pushq %%rbx\n\n"
+		"pushq %%rcx\n\n"
+		"pushq %%rsi\n\n"
+		"pushq %%rdi\n\n"
+	#else
+		"pushl %%ebp\n\n"
+		"pushl %%ebx\n\n"
+		"pushl %%ecx\n\n"
+		"pushl %%esi\n\n"
+		"pushl %%edi\n\n"
+	#endif
+		".byte 0xf3,0x0f,0xa7,0xf0\n"
+	#ifdef __x86_64__
+		"popq %%rdi\n\n"
+		"popq %%rsi\n\n"
+		"popq %%rcx\n\n"
+		"popq %%rbx\n\n"
+		"popq %%rbp\n\n"
+	#else
+		"popl %%edi\n\n"
+		"popl %%esi\n\n"
+		"popl %%ecx\n\n"
+		"popl %%ebx\n\n"
+		"popl %%ebp\n\n"
+	#endif
+		:
+		: "S"(input), "D"(output), "a"(rax), "b"(key), "c"((unsigned long)count), "d"(iv));
+	return iv;
+}
+
+static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv,
+			  struct sm4_cipher_data *sm4_data, u64 count)
+{
+	u8 oiv[SM4_BLOCK_SIZE] = {0};
+	u16 cnt_tmp;
+	u32 i;
+	u8 *in_tmp = (u8 *)input, *out_tmp = output;
+
+	// Back up the original IV if it is not NULL.
+	if (iv)
+		memcpy(oiv, iv, SM4_BLOCK_SIZE);
+
+	// Get the current counter.
+	cnt_tmp = GETU16(&iv[14]);
+
+	// Get the available counter space before overflow.
+	cnt_tmp = 0x10000 - cnt_tmp;
+
+	//
+	// Check there is enough counter space for the required blocks.
+	//
+	if (cnt_tmp < count) {
+
+		// Process the first part of data blocks.
+		rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp);
+		// Only increase the counter in software when overflow occurs.
+ memcpy(iv, oiv, SM4_BLOCK_SIZE); + + for (i = 0; i < cnt_tmp; i++) + crypto_inc(iv, SM4_BLOCK_SIZE); + + out_tmp = output + cnt_tmp * SM4_BLOCK_SIZE; + in_tmp = (u8 *)(input + cnt_tmp * SM4_BLOCK_SIZE); + + // Get the number of data blocks that have not been encrypted. + cnt_tmp = count - cnt_tmp; + // Process the remaining part of data blocks. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp); + } else { + // Counter space is big enough, the counter will not overflow. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, count); + } + + // Restore the iv if not null + if (iv) + memcpy(iv, oiv, SM4_BLOCK_SIZE); + + return iv; +} + +static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, + u8 *iv, struct sm4_cipher_data *sm4_data, u64 count) +{ + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + return rep_xcrypt(input, output, key, iv, &cw, 1); +} + +/** + * gmi_sm4_set_key - Set the sm4 key. + * @tfm: The %crypto_skcipher that is used in the context. + * @in_key: The input key. + * @key_len:The size of the key. + */ +int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) { + pr_warn("The key_len must be 16 bytes. please check\n"); + return -EINVAL; + } + + memcpy(ctx->rkey_enc, in_key, key_len); + memcpy(ctx->rkey_dec, in_key, key_len); + + return 0; +} +EXPORT_SYMBOL_GPL(gmi_sm4_set_key); + + +static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + iv = rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + + +static int ecb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int ecb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + + +/* + * sm4_cipher_ctr is used for ZX-E and newer + */ +static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks, nbytes; + int err; + u8 *iv, *dst, *src; + u8 keystream[SM4_BLOCK_SIZE]; + u32 i; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) 
{ + blocks = nbytes/SM4_BLOCK_SIZE; + iv = rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + for (i = 0; i < blocks; i++) + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + dst += blocks * SM4_BLOCK_SIZE; + src += blocks * SM4_BLOCK_SIZE; + nbytes -= blocks * SM4_BLOCK_SIZE; + } + + if (walk.nbytes == walk.total && nbytes > 0) { + rep_xcrypt_ecb_ONE(walk.iv, keystream, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, keystream, src, nbytes); + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt is used for ZX-E and newer + */ +static int ctr_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * ctr_decrypt is used for ZX-E and newer + */ +static int ctr_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * sm4_ctr_zxc is used for ZXC+ + */ +static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + u8 *iv = NULL, *dst, *src; + u8 en_iv[SM4_BLOCK_SIZE] = {0}; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + + iv = rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + crypto_xor_cpy(dst, en_iv, src, SM4_BLOCK_SIZE); + + dst += SM4_BLOCK_SIZE; + src += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + + // tail + if (walk.nbytes == walk.total && nbytes > 0) { + + rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, en_iv, src, nbytes); + + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt_zxc is used for ZX-C+ + */ +static int ctr_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ctr_decrypt_zxc is used for ZX-C+ + */ +static int ctr_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ofb_encrypt is used for ZX-E and newer + */ +static int ofb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * ofb_decrypt is used for ZX-E and newer + */ +static int ofb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * sm4_ofb_zxc is used for ZX-C+ + */ +static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = 
crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + + u32 n; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ + *(size_t *)(walk.src.virt.addr + n); + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * ofb_encrypt_zxc is used for ZX-C+ + */ +static int ofb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + +/* + * ofb_decrypt_zxc is used for ZX-C+ + */ +static int ofb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + + +/* + * cfb_encrypt is used for ZX-E and newer. + */ +static int cfb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * cfb_decrypt is used for ZX-E and newer. + */ + +static int cfb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; + +} + +/* + * sm4_cfb_zxc is used for ZX-C+ + */ +static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u32 n; + size_t t; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + + if (cw->cword.b.encdec) + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^= + *(size_t *)(walk.src.virt.addr + n); + + else + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) { + t = *(size_t *)(walk.src.virt.addr + n); + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ t; + *(size_t *)(walk.iv + n) = t; + } + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * cfb_encrypt_zxc is used for ZX-C+ + */ +static int cfb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + +/* + * cfb_decrypt_zxc is used for ZX-C+ + */ +static int cfb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = 
"__ecb(sm4)", + .cra_driver_name = "__ecb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, + + { + .base = { + .cra_name = "__cbc(sm4)", + .cra_driver_name = "__cbc-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, + + { + .base = { + .cra_name = "__ctr(sm4)", + .cra_driver_name = "__ctr-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, //SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ctr_encrypt, + .decrypt = ctr_decrypt, + }, + + { + .base = { + .cra_name = "__ofb(sm4)", + .cra_driver_name = "__ofb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ofb_encrypt, + .decrypt = ofb_decrypt, + }, + + { + .base = { + .cra_name = "__cfb(sm4)", + .cra_driver_name = "__cfb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cfb_encrypt, + .decrypt = cfb_decrypt, + } +}; + +static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(sm4_algs)]; + +static int gmi_zxc_check(void) +{ + int f_zxc = 0; + + struct cpuinfo_x86 *c = &cpu_data(0); + + if ((c->x86 > 6)) { + f_zxc = 0; + } else if (((c->x86 == 6) && (c->x86_model >= 0x0f)) + || ((c->x86 == 6) && (c->x86_model == 0x09)) + ) { + f_zxc = 1; + } + + return f_zxc; +} + +/* + * Load supported features of the CPU to see if the SM4 is available. 
+ */ +static int gmi_ccs_available(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 eax, edx; + + if (((c->x86 == 6) && (c->x86_model >= 0x0f)) + || ((c->x86 == 6) && (c->x86_model == 0x09)) + || (c->x86 > 6)) { + if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { + + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + + if ((edx & 0x0030) != 0x0030) + return -ENODEV; + + pr_notice("GMI SM4 is detected by CPUID\n"); + return 0; + } + pr_notice("GMI SM4 is available\n"); + return 0; + + } + return -ENODEV; +} + + +static void gmi_sm4_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sm4_simd_algs) && sm4_simd_algs[i]; i++) + simd_skcipher_free(sm4_simd_algs[i]); + + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} +static int __init gmi_sm4_init(void) +{ + struct simd_skcipher_alg *simd; + const char *basename; + const char *algname; + const char *drvname; + int err; + int i; + + if (gmi_ccs_available() != 0) + return -ENODEV; + + if (gmi_zxc_check()) { + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (!strcmp(sm4_algs[i].base.cra_name, "__ctr(sm4)")) { + + sm4_algs[i].encrypt = ctr_encrypt_zxc; + sm4_algs[i].decrypt = ctr_decrypt_zxc; + } else if (!strcmp(sm4_algs[i].base.cra_name, "__cfb(sm4)")) { + + sm4_algs[i].encrypt = cfb_encrypt_zxc; + sm4_algs[i].decrypt = cfb_decrypt_zxc; + + } else if (!strcmp(sm4_algs[i].base.cra_name, "__ofb(sm4)")) { + + sm4_algs[i].encrypt = ofb_encrypt_zxc; + sm4_algs[i].decrypt = ofb_decrypt_zxc; + } + } + } + + err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + algname = sm4_algs[i].base.cra_name + 2; + drvname = sm4_algs[i].base.cra_driver_name + 2; + basename = sm4_algs[i].base.cra_driver_name; + simd = simd_skcipher_create_compat(algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto unregister_simds; + + sm4_simd_algs[i] = simd; + } + + return 0; + +unregister_simds: + gmi_sm4_exit(); + return err; +} + +late_initcall(gmi_sm4_init); +module_exit(gmi_sm4_exit); + +MODULE_DESCRIPTION("SM4-ECB/CBC/CTR/CFB/OFB using Zhaoxin GMI"); +MODULE_AUTHOR("GRX"); +MODULE_LICENSE("GPL"); -- Gitee From 753e98231ca8495543c81e3cf89b3fb9bef5758d Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 26 Mar 2024 15:17:11 +0800 Subject: [PATCH 466/953] anolis: configs: Add Zhaoxin SM3 and SM4 algorithm configs ANBZ: #7809 Add Zhaoxin SM3 and SM4 algorithm dirvers: CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2703 --- arch/x86/configs/anolis_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 145ac54de178..31d935f9b299 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7376,6 +7376,7 @@ CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m CONFIG_CRYPTO_TWOFISH_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m @@ -7393,6 +7394,7 @@ CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m -- 
Gitee
From 034cca95118a24e7d2ad5c7887eaaa7ceb20d535 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Tue, 12 Mar 2024 15:49:52 +0800
Subject: [PATCH 467/953] anolis: Add support for Zhaoxin AES algorithm

ANBZ: #7809

Some Zhaoxin processors come with an integrated crypto engine (so called
Zhaoxin ACE, Advanced Cryptography Engine) that provides instructions for
very fast cryptographic operations with the supported AES algorithms.

Signed-off-by: leoliu-oc
Reviewed-by: Tianjia Zhang
Reviewed-by: Guanjun
Link: https://gitee.com/anolis/cloud-kernel/pulls/2691
---
 drivers/crypto/Kconfig       |  25 ++
 drivers/crypto/Makefile      |   1 +
 drivers/crypto/padlock-aes.c |   2 +-
 drivers/crypto/zhaoxin-aes.c | 523 +++++++++++++++++++++++++++++++++++
 4 files changed, 550 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/zhaoxin-aes.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b03f7ed92793..960a4f0ebf77 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -52,6 +52,31 @@ config CRYPTO_DEV_PADLOCK_SHA
 	  If unsure say M. The compiled module will be
 	  called padlock-sha.
 
+config CRYPTO_DEV_ZHAOXIN
+	tristate "Support for Zhaoxin ACE"
+	depends on X86 && !UML
+	help
+	  Some Zhaoxin processors come with an integrated crypto engine
+	  (so called Zhaoxin ACE, Advanced Cryptography Engine)
+	  that provides instructions for very fast cryptographic
+	  operations with supported algorithms.
+
+	  The instructions are used only when the CPU supports them.
+	  Otherwise software encryption is used.
+
+config CRYPTO_DEV_ZHAOXIN_AES
+	tristate "Zhaoxin ACE driver for AES algorithm"
+	depends on CRYPTO_DEV_ZHAOXIN
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AES
+	help
+	  Use Zhaoxin ACE for AES algorithm.
+
+	  Available in Zhaoxin CPUs.
+
+	  If unsure say M. The compiled module will be
+	  called zhaoxin-aes.
+
 config CRYPTO_DEV_GEODE
 	tristate "Support for the Geode LX AES engine"
 	depends on X86_32 && PCI
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 94c8b187f739..dda3c310f065 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 1be549a07a21..f0c3127941ae 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -475,7 +475,7 @@ static struct skcipher_alg cbc_aes_alg = {
 };
 
 static const struct x86_cpu_id padlock_cpu_id[] = {
-	X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
+	{ X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT },
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
diff --git a/drivers/crypto/zhaoxin-aes.c b/drivers/crypto/zhaoxin-aes.c
new file mode 100644
index 000000000000..e1d029fa9d1a
--- /dev/null
+++ b/drivers/crypto/zhaoxin-aes.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for ACE hardware crypto engine.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DRIVER_VERSION "1.0.0"
+
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks. + */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + +/* Control word. */ +struct cword { + unsigned int __packed + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __aligned(PADLOCK_ALIGNMENT); + +/* + * Whenever making any changes to the following structure *make sure* you keep E, d_data and cword + * aligned on 16 Bytes boundaries and the Hardware can access 16 * 16 bytes of E and d_data (only + * the first 15 * 16 bytes matter but the HW reads more). + */ +struct aes_ctx { + u32 E[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + u32 d_data[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + struct { + struct cword encrypt; + struct cword decrypt; + } cword; + u32 *D; +}; + +static DEFINE_PER_CPU(struct cword *, zx_paes_last_cword); + +/* Tells whether the ACE is capable to generate the extended key for a given key_len. */ +static inline int aes_hw_extkey_available(uint8_t key_len) +{ + /* + * TODO: We should check the actual CPU model/stepping as it's possible that the + * capability will be added in the next CPU revisions. + */ + if (key_len == 16) + return 1; + return 0; +} + +static inline struct aes_ctx *aes_ctx_common(void *ctx) +{ + unsigned long addr = (unsigned long)ctx; + unsigned long align = PADLOCK_ALIGNMENT; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct aes_ctx *)ALIGN(addr, align); +} + +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm) +{ + return aes_ctx_common(crypto_skcipher_ctx(tfm)); +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + struct crypto_aes_ctx gen_aes; + int cpu; + + if (key_len % 8) + return -EINVAL; + + /* + * If the hardware is capable of generating the extended key itself we must supply the + * plain key for both encryption and decryption. + */ + ctx->D = ctx->E; + + ctx->E[0] = le32_to_cpu(key[0]); + ctx->E[1] = le32_to_cpu(key[1]); + ctx->E[2] = le32_to_cpu(key[2]); + ctx->E[3] = le32_to_cpu(key[3]); + + /* Prepare control words. */ + memset(&ctx->cword, 0, sizeof(ctx->cword)); + + ctx->cword.decrypt.encdec = 1; + ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; + ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; + ctx->cword.encrypt.ksize = (key_len - 16) / 8; + ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; + + /* Don't generate extended keys if the hardware can do it. 
*/ + if (aes_hw_extkey_available(key_len)) + goto ok; + + ctx->D = ctx->d_data; + ctx->cword.encrypt.keygen = 1; + ctx->cword.decrypt.keygen = 1; + + if (aes_expandkey(&gen_aes, in_key, key_len)) + return -EINVAL; + + memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); + memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(zx_paes_last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(zx_paes_last_cword, cpu)) + per_cpu(zx_paes_last_cword, cpu) = NULL; + + return 0; +} + +static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); +} + +/* ====== Encryption/decryption routines ====== */ + +/* These are the real call to PadLock. */ +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(zx_paes_last_cword, cpu)) +#ifndef CONFIG_X86_64 + asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif +} + +static inline void padlock_store_cword(struct cword *cword) +{ + per_cpu(zx_paes_last_cword, raw_smp_processor_id()) = cword; +} + +/* + * While the padlock instructions don't use FP/SSE registers, they generate a spurious DNA fault + * when CR0.TS is '1'. Fortunately, the kernel doesn't use CR0.TS. + */ +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); +} + +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. + */ + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); + return; + } + + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, + int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. 
*/ + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); +} + +static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, void *control_word, + u32 count) +{ + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); + return; + } + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + void *control_word, u32 count) +{ + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = padlock_aes_encrypt, + .cia_decrypt = padlock_aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static int ecb_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.decrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, 
walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg ecb_aes_alg = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, +}; + +static int cbc_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.decrypt); + + return err; +} + +static int cbc_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg cbc_aes_alg = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, +}; + +static const struct x86_cpu_id zhaoxin_cpu_id[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_cpu_id); + +static int __init padlock_init(void) +{ + int ret; + + if (!x86_match_cpu(zhaoxin_cpu_id)) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { + pr_notice("ACE detected, but not enabled. 
Hmm, strange...\n");
+		return -ENODEV;
+	}
+
+	ret = crypto_register_alg(&aes_alg);
+	if (!!ret)
+		goto aes_err;
+
+	ret = crypto_register_skcipher(&ecb_aes_alg);
+	if (!!ret)
+		goto ecb_aes_err;
+
+	ret = crypto_register_skcipher(&cbc_aes_alg);
+	if (!!ret)
+		goto cbc_aes_err;
+
+	pr_notice("Using ACE for AES algorithm.\n");
+
+out:
+	return ret;
+
+cbc_aes_err:
+	crypto_unregister_skcipher(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	pr_err("ACE AES initialization failed.\n");
+	goto out;
+}
+
+static void __exit padlock_fini(void)
+{
+	crypto_unregister_skcipher(&cbc_aes_alg);
+	crypto_unregister_skcipher(&ecb_aes_alg);
+	crypto_unregister_alg(&aes_alg);
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("ACE AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+MODULE_VERSION(DRIVER_VERSION);
+
+MODULE_ALIAS_CRYPTO("aes");
-- 
Gitee
From 74204ebb3cb9d11453575f0eb24440bef6fb57b5 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Tue, 2 Jan 2024 15:02:00 +0800
Subject: [PATCH 468/953] anolis: Add support for Zhaoxin SHA algorithm

ANBZ: #7809

Some Zhaoxin processors come with an integrated crypto engine (so called
Zhaoxin ACE, Advanced Cryptography Engine) that provides instructions for
very fast cryptographic operations with the supported SHA1/SHA256
algorithms.

Signed-off-by: leoliu-oc
Reviewed-by: Tianjia Zhang
Reviewed-by: Guanjun
Link: https://gitee.com/anolis/cloud-kernel/pulls/2691
---
 drivers/crypto/Kconfig       |  14 ++
 drivers/crypto/Makefile      |   1 +
 drivers/crypto/padlock-sha.c |   2 +-
 drivers/crypto/zhaoxin-sha.c | 304 +++++++++++++++++++++++++++++++++++
 4 files changed, 320 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/zhaoxin-sha.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 960a4f0ebf77..b84a921d293f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -77,6 +77,20 @@ config CRYPTO_DEV_ZHAOXIN_AES
 	  If unsure say M. The compiled module will be
 	  called zhaoxin-aes.
 
+config CRYPTO_DEV_ZHAOXIN_SHA
+	tristate "Zhaoxin ACE driver for SHA1 and SHA256 algorithms"
+	depends on CRYPTO_DEV_ZHAOXIN
+	select CRYPTO_HASH
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	help
+	  Use Zhaoxin ACE for SHA1/SHA256 algorithms.
+
+	  Available in Zhaoxin processors.
+
+	  If unsure say M. The compiled module will be
+	  called zhaoxin-sha.
+
 config CRYPTO_DEV_GEODE
 	tristate "Support for the Geode LX AES engine"
 	depends on X86_32 && PCI
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index dda3c310f065..5247d2bf09ce 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SHA) += zhaoxin-sha.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 6865c7f1fc1a..04858dc8b597 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -491,7 +491,7 @@ static struct shash_alg sha256_alg_nano = {
 };
 
 static const struct x86_cpu_id padlock_sha_ids[] = {
-	X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL),
+	{ X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE },
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);
diff --git a/drivers/crypto/zhaoxin-sha.c b/drivers/crypto/zhaoxin-sha.c
new file mode 100644
index 000000000000..840805f36838
--- /dev/null
+++ b/drivers/crypto/zhaoxin-sha.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for ACE hardware crypto engine.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DRIVER_VERSION "1.0.0"
+
+static inline void padlock_output_block(uint32_t *src, uint32_t *dst, size_t count)
+{
+	while (count--)
+		*dst++ = swab32(*src++);
+}
+
+/*
+ * Add two shash_alg instances for hardware-implemented multiple-part hashing
+ * supported by the Zhaoxin Processor.
+ */
+static int padlock_sha1_init_zhaoxin(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+
+	return 0;
+}
+
+static int padlock_sha1_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+
+	/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN);
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA1_BLOCK_SIZE) {
+		/* Append the bytes in state's buffer to a block to handle */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buffer + partial, data, done + SHA1_BLOCK_SIZE);
+			src = sctx->buffer;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"(1UL));
+			done += SHA1_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the remaining bytes from the input data */
+		if (len - done >= SHA1_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
+			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
+	memcpy(sctx->buffer + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha1_final_zhaoxin(struct shash_desc *desc, u8 *out)
+{
+	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80, };
+
+	bits = cpu_to_be64(state->count << 3);
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+	padlock_sha1_update_zhaoxin(desc, padding, padlen);
+
+	/* Append length field bytes */
+	padlock_sha1_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits));
+
+	/* Swap to output */
+	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
+
+	return 0;
+}
+
+static int padlock_sha256_init_zhaoxin(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha256_state) {
+		.state = {
+			SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+			SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+		},
+	};
+
+	return 0;
+}
+
+static int padlock_sha256_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+
+	/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN);
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA256_BLOCK_SIZE) {
+
+		/* Append the bytes in state's buffer to a block to handle */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buf + partial, data, done + SHA256_BLOCK_SIZE);
+			src = sctx->buf;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"(1UL));
+			done += SHA256_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the remaining bytes from the input data */
+		if (len - done >= SHA256_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)((len - done) / 64)));
+			done += ((len - done) - (len - done) % 64);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
+	memcpy(sctx->buf + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha256_final_zhaoxin(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *state = (struct sha256_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80, };
+
+	bits = cpu_to_be64(state->count << 3);
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+	padlock_sha256_update_zhaoxin(desc, padding, padlen);
+
+	/* Append length field bytes */
+	padlock_sha256_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits));
+
+	/* Swap to output */
+	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
+
+	return 0;
+}
+
+static int padlock_sha_export_zhaoxin(struct shash_desc *desc, void *out)
+{
+	int statesize = crypto_shash_statesize(desc->tfm);
+	void *sctx = shash_desc_ctx(desc);
+
+	memcpy(out, sctx, statesize);
+	return 0;
+}
+
+static int padlock_sha_import_zhaoxin(struct shash_desc *desc, const void *in)
+{
+	int statesize = crypto_shash_statesize(desc->tfm);
+	void *sctx = shash_desc_ctx(desc);
+
+	memcpy(sctx, in, statesize);
+	return 0;
+}
+
+static struct shash_alg sha1_alg_zhaoxin = {
+	.digestsize	=	SHA1_DIGEST_SIZE,
+	.init		=	padlock_sha1_init_zhaoxin,
+	.update		=	padlock_sha1_update_zhaoxin,
+	.final		=	padlock_sha1_final_zhaoxin,
+	.export		=	padlock_sha_export_zhaoxin,
+	.import		=	padlock_sha_import_zhaoxin,
+	.descsize	=	sizeof(struct sha1_state),
+	.statesize	=	sizeof(struct sha1_state),
+	.base		=	{
+		.cra_name		=	"sha1",
+		.cra_driver_name	=	"sha1-padlock-zhaoxin",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_blocksize		=	SHA1_BLOCK_SIZE,
+		.cra_module		=	THIS_MODULE,
+	}
+};
+
+static struct shash_alg sha256_alg_zhaoxin = {
+	.digestsize	=	SHA256_DIGEST_SIZE,
+	.init		=	padlock_sha256_init_zhaoxin,
+	.update		=	padlock_sha256_update_zhaoxin,
+	.final		=	padlock_sha256_final_zhaoxin,
+	.export		=	padlock_sha_export_zhaoxin,
+	.import		=	padlock_sha_import_zhaoxin,
+	.descsize	=	sizeof(struct sha256_state),
+	.statesize	=	sizeof(struct sha256_state),
+	.base		=	{
+		.cra_name		=	"sha256",
+		.cra_driver_name	=	"sha256-padlock-zhaoxin",
+		.cra_priority		=	PADLOCK_CRA_PRIORITY,
+		.cra_blocksize		=	SHA256_BLOCK_SIZE,
+		.cra_module		=	THIS_MODULE,
+	}
+};
+
+static const struct x86_cpu_id zhaoxin_sha_ids[] = {
+	{ X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE },
+	{ X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE },
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_ids);
+
+static int __init padlock_init(void)
+{
+	int rc = -ENODEV;
+	struct shash_alg *sha1;
+	struct shash_alg *sha256;
+
+	if (!x86_match_cpu(zhaoxin_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
+		return -ENODEV;
+
+	sha1 = &sha1_alg_zhaoxin;
+	sha256 = &sha256_alg_zhaoxin;
+
+	rc = crypto_register_shash(sha1);
+	if (rc)
+		goto out;
+
+	rc = crypto_register_shash(sha256);
+	if (rc)
+		goto out_unreg1;
+
+	pr_notice("Using ACE for SHA1/SHA256 algorithms.\n");
+
+	return 0;
+
+out_unreg1:
+	crypto_unregister_shash(sha1);
+
+out:
+	pr_err("ACE SHA1/SHA256 initialization failed.\n");
+	return rc;
+}
+
+static void __exit padlock_fini(void)
+{
+	crypto_unregister_shash(&sha1_alg_zhaoxin);
+	crypto_unregister_shash(&sha256_alg_zhaoxin);
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("ACE SHA1/SHA256 algorithms support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+MODULE_VERSION(DRIVER_VERSION);
+
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-padlock");
+MODULE_ALIAS_CRYPTO("sha256-padlock");
-- 
Gitee
From 05116f3631bec3881af452b53b9e34bf406949ad Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Fri, 26 Jan 2024 17:58:29 +0800
Subject: [PATCH 469/953] anolis: configs: add CONFIG_CRYPTO_DEV_ZHAOXIN series

ANBZ: #7809

To add support for the zhaoxin-aes/sha drivers, we need to add the
CONFIG_CRYPTO_DEV_ZHAOXIN series of configuration items:
CONFIG_CRYPTO_DEV_ZHAOXIN=m
CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m
CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m

Signed-off-by: leoliu-oc
Reviewed-by: Tianjia Zhang
Reviewed-by: Guanjun
Link: https://gitee.com/anolis/cloud-kernel/pulls/2691
---
 arch/x86/configs/anolis_defconfig | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index 31d935f9b299..23258f261dcb 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -7405,6 +7405,9 @@ CONFIG_CRYPTO_HW=y
 CONFIG_CRYPTO_DEV_PADLOCK=m
 CONFIG_CRYPTO_DEV_PADLOCK_AES=m
 CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
+CONFIG_CRYPTO_DEV_ZHAOXIN=m
+CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m
+CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m
 # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
 # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
 CONFIG_CRYPTO_DEV_CCP=y
-- 
Gitee
From 823d85e3ff3cd006083c1836dda9b233eee8342d Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Thu, 28 Mar 2024 14:52:07 +0800
Subject: [PATCH 470/953] anolis: kfence: handle kmemcg properly

ANBZ: #8499

When kmemcg is enabled, the object vectors are allocated statically.
See commit 8f0b364973034 ("mm: kfence: fix objcgs vector allocation")
for detail. However, when porting this to the anolis kfence, we missed
a "&". Fix it.

Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6")
Signed-off-by: Tianchen Ding
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2964
---
 mm/kfence/core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 571ef4dcccf8..d5329b1c560b 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -777,7 +777,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	__SetPageSlab(page);
 	slab->slab_cache = cache;
 #ifdef CONFIG_MEMCG
-	slab->memcg_data = (unsigned long)meta->objcg | MEMCG_DATA_OBJCGS;
+	slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
 #endif
 #if defined(CONFIG_SLUB)
 	slab->objects = 1;
-- 
Gitee
From 10623377c5bbef265cbafaed2ddb95114f14b213 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Thu, 28 Mar 2024 16:30:23 +0800
Subject: [PATCH 471/953] anolis: crypto: x86/sm2 - add Zhaoxin SM2 algorithm implementation

ANBZ: #7809

Add support for the SM2 (ShangMi 2) public key algorithm via the Zhaoxin
GMI instructions. The purpose of this driver is to ensure that the
application has both high performance and high security.

Signed-off-by: leoliu-oc
Reviewed-by: Tianjia Zhang
Reviewed-by: Guanjun
Link: https://gitee.com/anolis/cloud-kernel/pulls/2708
---
 arch/x86/crypto/Kconfig            |  11 ++
 arch/x86/crypto/Makefile           |   1 +
 arch/x86/crypto/sm2-zhaoxin-gmi.c  | 158 +++++++++++++++++++++++++++++
 arch/x86/include/asm/cpufeatures.h |   2 +
 4 files changed, 172 insertions(+)
 create mode 100644 arch/x86/crypto/sm2-zhaoxin-gmi.c

diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index cbe8eef473ec..2d2d807e3b00 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -553,4 +553,15 @@ config CRYPTO_CRCT10DIF_PCLMUL
 	  Architecture: x86_64 using:
 	  - PCLMULQDQ (carry-less multiplication)
 
+config CRYPTO_SM2_ZHAOXIN_GMI
+	tristate "SM2 Cipher algorithm (Zhaoxin GMI Instruction)"
+	depends on X86 && (CPU_SUP_CENTAUR || CPU_SUP_ZHAOXIN)
+	select CRYPTO_AKCIPHER
+	select CRYPTO_MANAGER
+	help
+	  SM2 (ShangMi 2) public key algorithm by Zhaoxin GMI Instruction
+
+	  Published by State Encryption Management Bureau, China,
+	  as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
+ endmenu diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 4230829a6648..e5480c50a8d9 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -109,6 +109,7 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o +obj-$(CONFIG_CRYPTO_SM2_ZHAOXIN_GMI) += sm2-zhaoxin-gmi.o obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o obj-$(CONFIG_CRYPTO_SM4_ZHAOXIN_GMI) += sm4-zhaoxin-gmi.o diff --git a/arch/x86/crypto/sm2-zhaoxin-gmi.c b/arch/x86/crypto/sm2-zhaoxin-gmi.c new file mode 100644 index 000000000000..a0430c6611fc --- /dev/null +++ b/arch/x86/crypto/sm2-zhaoxin-gmi.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SM2 asymmetric public-key algorithm + * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and + * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02 + * + * Copyright (c) 2023 Shanghai Zhaoxin Semiconductor LTD. + * Authors: YunShen + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define SCRATCH_SIZE (4 * 2048) + +#define SM2_CWORD_VERIFY 0x8 +#define SM2_VERIFY_PASS 1 + +struct sm2_cipher_data { + u8 pub_key[65]; /* public key */ +}; + +/* Load supported features of the CPU to see if the SM2 is available. */ +static int zhaoxin_gmi_available(void) +{ + if (!boot_cpu_has(X86_FEATURE_SM2_EN)) { + pr_err("can't enable hardware SM2 if Zhaoxin GMI SM2 is not enabled\n"); + return -ENODEV; + } + return 0; +} + +/* Zhaoxin sm2 verify function */ +static inline size_t zhaoxin_gmi_sm2_verify(unsigned char *key, unsigned char *hash, + unsigned char *sig, unsigned char *scratch) +{ + size_t result; + + asm volatile( + ".byte 0xf2, 0x0f, 0xa6, 0xc0" + : "=c"(result) + : "a"(hash), "b"(key), "d"(SM2_CWORD_VERIFY), "S"(scratch), "D"(sig) + : "memory"); + + return result; +} + +/* Zhaoxin sm2 verify function */ +static int _zhaoxin_sm2_verify(struct sm2_cipher_data *ec, unsigned char *hash, unsigned char *sig) +{ + unsigned char *scratch = kzalloc(SCRATCH_SIZE, GFP_KERNEL); + int ret = -EKEYREJECTED; + size_t result; + + result = zhaoxin_gmi_sm2_verify(ec->pub_key, hash, sig, scratch); + if (result == SM2_VERIFY_PASS) + ret = 0; + + kfree(scratch); + + return ret; +} + +static int zhaoxin_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm); + unsigned char *buffer; + int ret, buf_len; + + buf_len = req->src_len + req->dst_len; + buffer = kmalloc(buf_len, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, buf_len), buffer, buf_len, 0); + ret = _zhaoxin_sm2_verify(ec, buffer + req->src_len, buffer); + + kfree(buffer); + + return ret; +} + +static int zhaoxin_sm2_set_pub_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm); + + memcpy(ec->pub_key, key, keylen); + + return 0; +} + +static unsigned int zhaoxin_sm2_max_size(struct crypto_akcipher *tfm) +{ + /* Unlimited max size */ + return PAGE_SIZE; +} + +static int zhaoxin_sm2_init_tfm(struct crypto_akcipher *tfm) +{ + return zhaoxin_gmi_available(); +} + +static void zhaoxin_sm2_exit_tfm(struct crypto_akcipher *tfm) +{ + struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm); + + memset(ec, 0, sizeof(*ec)); +} + 
+static struct akcipher_alg zhaoxin_sm2 = {
+	.verify = zhaoxin_sm2_verify,
+	.set_pub_key = zhaoxin_sm2_set_pub_key,
+	.max_size = zhaoxin_sm2_max_size,
+	.init = zhaoxin_sm2_init_tfm,
+	.exit = zhaoxin_sm2_exit_tfm,
+	.base = {
+		.cra_name = "sm2",
+		.cra_driver_name = "zhaoxin-gmi-sm2",
+		.cra_priority = 150,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct sm2_cipher_data),
+	},
+};
+
+static const struct x86_cpu_id zhaoxin_sm2_cpu_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_SM2, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sm2_cpu_ids);
+
+static int __init zhaoxin_sm2_init(void)
+{
+	if (!x86_match_cpu(zhaoxin_sm2_cpu_ids))
+		return -ENODEV;
+
+	return crypto_register_akcipher(&zhaoxin_sm2);
+}
+
+static void __exit zhaoxin_sm2_exit(void)
+{
+	crypto_unregister_akcipher(&zhaoxin_sm2);
+}
+
+module_init(zhaoxin_sm2_init);
+module_exit(zhaoxin_sm2_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("YunShen ");
+MODULE_DESCRIPTION("SM2 Zhaoxin GMI Algorithm");
+MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm2");
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 64d8741c0657..b4c7433c7451 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -146,6 +146,8 @@
 #define X86_FEATURE_HYPERVISOR	( 4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_SM2		(5*32 + 0) /* SM2 ZhaoXin GMI present */
+#define X86_FEATURE_SM2_EN	(5*32 + 1) /* SM2 ZhaoXin GMI enabled */
 #define X86_FEATURE_XSTORE	( 5*32+ 2) /* "rng" RNG present (xstore) */
 #define X86_FEATURE_XSTORE_EN	( 5*32+ 3) /* "rng_en" RNG enabled */
 #define X86_FEATURE_CCS		(5*32 + 4) /* "sm3/4" SM3/4 present */
-- 
Gitee
From b8916829488eccd0a9f763671b1448a96cff09a8 Mon Sep 17 00:00:00 2001
From: Tianyu Yuan
Date: Tue, 21 Feb 2023 17:23:05 +0800
Subject: [PATCH 472/953] anolis: nsp: generate nsp command with variable nsp major version

ANBZ: #8563

The most significant 4 bits of the NSP command code should carry the ABI
major version so that the NSP command can be handled correctly by the
management firmware. This works today only because the current major
version is 0. However, the management firmware is going to bump the major
version to support the multi-PF feature, so change the code to explicitly
encode the major version into the NSP command code.
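For illustration only (not part of the change itself), the new layout packs
the fields roughly as follows; mask names are those from the hunk below and
FIELD_PREP() comes from <linux/bitfield.h>:

	/* bits 63:32 option, 31:28 ABI major, 27:16 code, 1 DMA, 0 start */
	u64 cmd = FIELD_PREP(NSP_COMMAND_CODE_MJ_VER, state->ver.major) |
		  FIELD_PREP(NSP_COMMAND_CODE, arg->code);

	/* e.g. major 1 with code 0x28 (NFP_CAP_CMD_DMA_SG) yields 0x1028 in
	 * bits 31:16, where the old layout would have produced 0x0028.
	 */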
Signed-off-by: Tianyu Yuan
Signed-off-by: Fei Qin
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2963
---
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 7136bc48530b..ee934663c6d9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -37,7 +37,8 @@
 
 #define NSP_COMMAND		0x08
 #define   NSP_COMMAND_OPTION	GENMASK_ULL(63, 32)
-#define   NSP_COMMAND_CODE	GENMASK_ULL(31, 16)
+#define   NSP_COMMAND_CODE_MJ_VER GENMASK_ULL(31, 28)
+#define   NSP_COMMAND_CODE	GENMASK_ULL(27, 16)
 #define   NSP_COMMAND_DMA_BUF	BIT_ULL(1)
 #define   NSP_COMMAND_START	BIT_ULL(0)
 
@@ -380,6 +381,7 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg)
 	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
 			     FIELD_PREP(NSP_COMMAND_OPTION, arg->option) |
+			     FIELD_PREP(NSP_COMMAND_CODE_MJ_VER, state->ver.major) |
 			     FIELD_PREP(NSP_COMMAND_CODE, arg->code) |
 			     FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) |
 			     FIELD_PREP(NSP_COMMAND_START, 1));
-- 
Gitee
From cc85bedc666ae9a371a6fd648cf283006df0b6fa Mon Sep 17 00:00:00 2001
From: Tianyu Yuan
Date: Fri, 24 Feb 2023 13:49:01 +0800
Subject: [PATCH 473/953] anolis: nfp: bump the nsp major version to support multi-PF

ANBZ: #8563

Currently NFP NICs implement a single PF with multiple ports instantiated.
Since NFP3800 can support multiple PFs, and one port per PF is the more
up-to-date model, the management firmware will start to support multi-PF.
Because this is incompatible with the current implementation, the ABI
major version is bumped. A new flag is also introduced to indicate whether
it's a multi-PF setup or a single-PF setup.

Signed-off-by: Tianyu Yuan
Signed-off-by: Fei Qin
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2963
---
 drivers/net/ethernet/netronome/nfp/nfp_main.c        | 3 +++
 drivers/net/ethernet/netronome/nfp/nfp_main.h        | 1 +
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 8 ++++----
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 71301dbd8fb5..274bdb8e62f2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -625,6 +625,9 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
 		return err;
 	}
 
+	pf->multi_pf_support = pdev->multifunction;
+	dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf_support ? "Multi" : "Single");
+
 	err = nfp_nsp_wait(nsp);
 	if (err < 0)
 		goto exit_close_nsp;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 14a751bfe1fe..d0bfde2a0b2c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -141,6 +141,7 @@ struct nfp_pf {
 
 	struct nfp_shared_buf	*shared_bufs;
 	unsigned int		num_shared_bufs;
+	bool multi_pf_support;
 };
 
 extern struct pci_driver nfp_netvf_pci_driver;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index ee934663c6d9..56682c530b26 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -59,7 +59,7 @@
 #define NFP_CAP_CMD_DMA_SG	0x28
 
 #define NSP_MAGIC		0xab10
-#define NSP_MAJOR		0
+#define NSP_MAJOR		1
 #define NSP_MINOR		8
 
 #define NSP_CODE_MAJOR	GENMASK(15, 12)
@@ -248,14 +248,14 @@ static int nfp_nsp_check(struct nfp_nsp *state)
 	state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
 	state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
 
-	if (state->ver.major != NSP_MAJOR) {
+	if (state->ver.major > NSP_MAJOR) {
 		nfp_err(cpp, "Unsupported ABI %hu.%hu\n",
 			state->ver.major, state->ver.minor);
 		return -EINVAL;
 	}
 	if (state->ver.minor < NSP_MINOR) {
-		nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n",
-			NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR);
+		nfp_err(cpp, "ABI too old to support NIC operation (x.%u < x.%u), please update the management FW on the flash\n",
+			state->ver.minor, NSP_MINOR);
 		return -EINVAL;
 	}
-- 
Gitee
From b49f15a9480fb381e9de6f609502f03e942fede7 Mon Sep 17 00:00:00 2001
From: Yinjun Zhang
Date: Mon, 27 Feb 2023 11:05:13 +0800
Subject: [PATCH 474/953] anolis: nfp: change application firmware loading flow in multi-PF setup

ANBZ: #8563

In the multi-PF setup, all PFs share a single application firmware. Each
PF is treated equally, first come, first served. So the first step is to
check whether the firmware is already loaded. Loading firmware from disk
and from flash is also treated consistently now: both propagate failures
and set the `fw_loaded` flag.

Finally, the firmware shouldn't be unloaded by the driver in this setup.
The following commit will introduce a keepalive mechanism to let the
management firmware manage unloading. The flow is not changed in the
non-multi-PF setup.

Signed-off-by: Yinjun Zhang
Signed-off-by: Fei Qin
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2963
---
 drivers/net/ethernet/netronome/nfp/nfp_main.c | 30 +++++++++++++++----
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 274bdb8e62f2..3ac39d3dbbeb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -528,6 +528,12 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
 	if (err)
 		return err;
 
+	/* Skip firmware loading in multi-PF setup if firmware is loaded. */
+	if (pf->multi_pf_support && nfp_nsp_fw_loaded(nsp)) {
+		fw_loaded = true;
+		goto end;
+	}
+
 	fw = nfp_net_fw_find(pdev, pf);
 	do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS ||
 		   (fw && reset == NFP_NSP_DRV_RESET_DISK);
@@ -556,16 +562,27 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
 		fw_loaded = true;
 	} else if (policy != NFP_NSP_APP_FW_LOAD_DISK &&
 		   nfp_nsp_has_stored_fw_load(nsp)) {
+		err = nfp_nsp_load_stored_fw(nsp);
 
-		/* Don't propagate this error to stick with legacy driver
+		/* Same logic as loading from disk when multi-PF. Otherwise:
+		 *
+		 * Don't propagate this error to stick with legacy driver
 		 * behavior, failure will be detected later during init.
+		 *
+		 * Don't flag the fw_loaded in this case since other devices
+		 * may reuse the firmware when configured this way.
 		 */
-		if (!nfp_nsp_load_stored_fw(nsp))
+		if (!err) {
 			dev_info(&pdev->dev, "Finished loading stored FW image\n");
 
-		/* Don't flag the fw_loaded in this case since other devices
-		 * may reuse the firmware when configured this way
-		 */
+			if (pf->multi_pf_support)
+				fw_loaded = true;
+		} else {
+			if (pf->multi_pf_support)
+				dev_err(&pdev->dev, "Stored FW loading failed: %d\n", err);
+			else
+				err = 0;
+		}
 	} else {
 		dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n");
 	}
@@ -577,9 +594,10 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
 	 * dependent on it, which could be the case if there are multiple
 	 * devices that could load firmware.
 	 */
-	if (fw_loaded && ifcs == 1)
+	if (fw_loaded && ifcs == 1 && !pf->multi_pf_support)
 		pf->unload_fw_on_remove = true;
 
+end:
 	return err < 0 ? err : fw_loaded;
 }
-- 
Gitee
From f1df49c4a42eeef6020d9deeb8e81a87e8ecaa4a Mon Sep 17 00:00:00 2001
From: Yinjun Zhang
Date: Fri, 10 Mar 2023 18:16:08 +0800
Subject: [PATCH 475/953] anolis: nfp: don't skip firmware loading when pxe firmware is running

ANBZ: #8563

In the PXE boot case, the PXE firmware is not unloaded on some systems
when booting completes. The driver needs to detect this so that it has a
chance to load the correct firmware.

Signed-off-by: Yinjun Zhang
Signed-off-by: Fei Qin
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2963
---
 drivers/net/ethernet/netronome/nfp/nfp_main.c | 30 +++++++++++++++++--
 1 file changed, 27 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 3ac39d3dbbeb..d0f6bf8383e4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -469,6 +469,30 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp,
 	return err;
 }
 
+static bool
+nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp)
+{
+	const struct nfp_mip *mip;
+
+	if (!pf->multi_pf_support || nfp_nsp_fw_loaded(nsp) <= 0)
+		return false;
+
+	mip = nfp_mip_open(pf->cpp);
+	if (!mip)
+		return false;
+
+	/* For the case that system boots from pxe, we need to
+	 * reload FW if pxe FW is running.
+	 */
+	if (!strncmp(nfp_mip_name(mip), "pxe", 3)) {
+		nfp_mip_close(mip);
+		return false;
+	}
+
+	pf->mip = mip;
+	return true;
+}
+
 /**
  * nfp_fw_load() - Load the firmware image
  * @pdev:	PCI Device structure
@@ -528,8 +552,7 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
 	if (err)
 		return err;
 
-	/* Skip firmware loading in multi-PF setup if firmware is loaded. */
-	if (pf->multi_pf_support && nfp_nsp_fw_loaded(nsp)) {
+	if (nfp_skip_fw_load(pf, nsp)) {
 		fw_loaded = true;
 		goto end;
 	}
@@ -851,7 +874,8 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_hwinfo_free;
 
-	pf->mip = nfp_mip_open(pf->cpp);
+	if (!pf->mip)
+		pf->mip = nfp_mip_open(pf->cpp);
 	pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
 
 	err = nfp_pf_find_rtsyms(pf);
-- 
Gitee
From cbad3efa2b90d007f39a0dcf6ad57355577da0d7 Mon Sep 17 00:00:00 2001
From: Yinjun Zhang
Date: Mon, 27 Feb 2023 15:21:34 +0800
Subject: [PATCH 476/953] anolis: nfp: introduce keepalive mechanism for multi-PF setup

ANBZ: #8563

In the multi-PF setup, the management firmware, rather than the driver,
is in charge of unloading the application firmware, based on a keepalive
mechanism. A new NSP resource area named "nfp.beat" is allocated for
keepalive use. The driver sets the magic number when keepalive is needed
and periodically updates the PF's corresponding qword in "nfp.beat". The
management firmware checks these qwords to learn whether and which PFs
are alive, and will unload the application firmware once no PF is
running. This only works when the magic number is correct.

Signed-off-by: Yinjun Zhang
Signed-off-by: Fei Qin
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2963
---
 drivers/net/ethernet/netronome/nfp/nfp_main.c | 109 ++++++++++++++++--
 drivers/net/ethernet/netronome/nfp/nfp_main.h |  15 ++-
 .../net/ethernet/netronome/nfp/nfpcore/nfp.h  |   4 +
 3 files changed, 115 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index d0f6bf8383e4..93e234575a76 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -469,12 +469,82 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp,
 	return err;
 }
 
+static void
+nfp_nsp_beat_timer(struct timer_list *t)
+{
+	struct nfp_pf *pf = from_timer(pf, t, multi_pf.beat_timer);
+	u8 __iomem *addr;
+
+	/* Each PF has corresponding qword to beat:
+	 * offset | usage
+	 *      0 | magic number
+	 *      8 | beat qword of pf0
+	 *     16 | beat qword of pf1
+	 */
+	addr = pf->multi_pf.beat_addr + ((pf->multi_pf.id + 1) << 3);
+	writeq(jiffies, addr);
+	/* Beat once per second. */
+	mod_timer(&pf->multi_pf.beat_timer, jiffies + HZ);
+}
+
+/**
+ * nfp_nsp_keepalive_start() - Start keepalive mechanism if needed
+ * @pf: NFP PF Device structure
+ *
+ * Return 0 if no error, errno otherwise
+ */
+static int
+nfp_nsp_keepalive_start(struct nfp_pf *pf)
+{
+	struct nfp_resource *res;
+	u8 __iomem *base;
+	int err = 0;
+	u64 addr;
+	u32 cpp;
+
+	if (!pf->multi_pf.en)
+		return 0;
+
+	res = nfp_resource_acquire(pf->cpp, NFP_KEEPALIVE);
+	if (IS_ERR(res))
+		return PTR_ERR(res);
+
+	cpp = nfp_resource_cpp_id(res);
+	addr = nfp_resource_address(res);
+
+	/* Allocate a fixed area for keepalive.
*/ + base = nfp_cpp_map_area(pf->cpp, "keepalive", cpp, addr, + nfp_resource_size(res), &pf->multi_pf.beat_area); + if (IS_ERR(base)) { + nfp_err(pf->cpp, "Failed to map area for keepalive\n"); + err = PTR_ERR(base); + goto res_release; + } + + pf->multi_pf.beat_addr = base; + timer_setup(&pf->multi_pf.beat_timer, nfp_nsp_beat_timer, 0); + mod_timer(&pf->multi_pf.beat_timer, jiffies); + +res_release: + nfp_resource_release(res); + return err; +} + +static void +nfp_nsp_keepalive_stop(struct nfp_pf *pf) +{ + if (pf->multi_pf.beat_area) { + del_timer_sync(&pf->multi_pf.beat_timer); + nfp_cpp_area_release_free(pf->multi_pf.beat_area); + } +} + static bool nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) { const struct nfp_mip *mip; - if (!pf->multi_pf_support || nfp_nsp_fw_loaded(nsp) <= 0) + if (!pf->multi_pf.en || nfp_nsp_fw_loaded(nsp) <= 0) return false; mip = nfp_mip_open(pf->cpp); @@ -504,7 +574,7 @@ nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) static int nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { - bool do_reset, fw_loaded = false; + bool do_reset, fw_loaded = false, fw_new = false; const struct firmware *fw = NULL; int err, reset, policy, ifcs = 0; char *token, *ptr; @@ -552,10 +622,12 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) if (err) return err; - if (nfp_skip_fw_load(pf, nsp)) { - fw_loaded = true; - goto end; - } + err = nfp_nsp_keepalive_start(pf); + if (err) + return err; + + if (nfp_skip_fw_load(pf, nsp)) + return true; fw = nfp_net_fw_find(pdev, pf); do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || @@ -583,6 +655,7 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } dev_info(&pdev->dev, "Finished loading FW image\n"); fw_loaded = true; + fw_new = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { err = nfp_nsp_load_stored_fw(nsp); @@ -598,10 +671,10 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) if (!err) { dev_info(&pdev->dev, "Finished loading stored FW image\n"); - if (pf->multi_pf_support) + if (pf->multi_pf.en) fw_loaded = true; } else { - if (pf->multi_pf_support) + if (pf->multi_pf.en) dev_err(&pdev->dev, "Stored FW loading failed: %d\n", err); else err = 0; @@ -617,10 +690,19 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) * dependent on it, which could be the case if there are multiple * devices that could load firmware. */ - if (fw_loaded && ifcs == 1 && !pf->multi_pf_support) + if (err < 0) + nfp_nsp_keepalive_stop(pf); + else if (fw_loaded && ifcs == 1 && !pf->multi_pf.en) pf->unload_fw_on_remove = true; -end: + /* Only setting magic number when fw is freshly loaded here. NSP + * won't unload fw when heartbeat stops if the magic number is not + * correct. It's used when firmware is preloaded and shouldn't be + * unloaded when driver exits. + */ + if (fw_new && pf->multi_pf.en) + writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); + return err < 0 ? err : fw_loaded; } @@ -666,8 +748,9 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) return err; } - pf->multi_pf_support = pdev->multifunction; - dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf_support ? "Multi" : "Single"); + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = PCI_FUNC(pdev->devfn); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? 
"Multi" : "Single"); err = nfp_nsp_wait(nsp); if (err < 0) @@ -915,6 +998,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); err_fw_unload: + nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (pf->unload_fw_on_remove) @@ -954,6 +1038,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) nfp_net_pci_remove(pf); vfree(pf->dumpspec); + nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (unload_fw && pf->unload_fw_on_remove) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index d0bfde2a0b2c..c071087c83cd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -84,6 +84,12 @@ struct nfp_dumpspec { * @port_refresh_work: Work entry for taking netdevs out * @shared_bufs: Array of shared buffer structures if FW has any SBs * @num_shared_bufs: Number of elements in @shared_bufs + * @multi_pf: Used in multi-PF setup + * @multi_pf.en: Is multi-PF setup? + * @multi_pf.id: PF index + * @multi_pf.beat_timer:Timer for beat to keepalive + * @multi_pf.beat_area: Pointer to CPP area for beat to keepalive + * @multi_pf.beat_addr: Pointer to mapped beat address used for keepalive * * Fields which may change after proble are protected by devlink instance lock. */ @@ -141,7 +147,14 @@ struct nfp_pf { struct nfp_shared_buf *shared_bufs; unsigned int num_shared_bufs; - bool multi_pf_support; + + struct { + bool en; + u8 id; + struct timer_list beat_timer; + struct nfp_cpp_area *beat_area; + u8 __iomem *beat_addr; + } multi_pf; }; extern struct pci_driver nfp_netvf_pci_driver; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index db94b0bddc92..89a131cffc48 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -64,6 +64,10 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, /* MAC Statistics Accumulator */ #define NFP_RESOURCE_MAC_STATISTICS "mac.stat" +/* Keepalive */ +#define NFP_KEEPALIVE "nfp.beat" +#define NFP_KEEPALIVE_MAGIC 0x6e66702e62656174ULL /* ASCII of "nfp.beat" */ + int nfp_resource_table_init(struct nfp_cpp *cpp); struct nfp_resource * -- Gitee From 9774750ba3d0ef274e3f6b6f98df5db0acb4e6a1 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Fri, 5 May 2023 19:15:17 +0800 Subject: [PATCH 477/953] anolis: nfp: avoid reclaiming resource mutex by mistake ANBZ: #8563 Multiple PFs of the same controller use the same interface id. So we shouldn't unconditionally reclaim resource mutex when probing, because the mutex may be held by another PF from the same controller. Now give it some time to release the mutex, and reclaim it if timeout. 
Signed-off-by: Yinjun Zhang
Signed-off-by: Fei Qin
Reviewed-by: Guixin Liu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2963
---
 .../ethernet/netronome/nfp/nfpcore/nfp_mutex.c    | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index 7bc17b94ac60..1fac6867922b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -341,6 +341,7 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
 int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
 			  unsigned long long address)
 {
+	unsigned long timeout = jiffies + 2 * HZ;
 	const u32 mur = NFP_CPP_ID(target, 3, 0);	/* atomic_read */
 	const u32 muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
 	u16 interface = nfp_cpp_interface(cpp);
@@ -352,12 +353,16 @@ int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target,
 		return err;
 
 	/* Check lock */
-	err = nfp_cpp_readl(cpp, mur, address, &tmp);
-	if (err < 0)
-		return err;
+	while (time_is_after_jiffies(timeout)) {
+		err = nfp_cpp_readl(cpp, mur, address, &tmp);
+		if (err < 0)
+			return err;
 
-	if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
-		return 0;
+		if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface)
+			return 0;
+
+		msleep_interruptible(10);
+	}
 
 	/* Bust the lock */
 	err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface));
-- 
Gitee
From 5aa1649e166718edfd0b26ffbe2b2a444f656b11 Mon Sep 17 00:00:00 2001
From: Yinjun Zhang
Date: Thu, 2 Mar 2023 18:28:06 +0800
Subject: [PATCH 478/953] anolis: nfp: redefine PF id used to format symbols

ANBZ: #8563

Taking into account that NFP3800 supports 4 physical functions per
controller, recalculate the PF id that is used to format symbols for
communicating with the application firmware.
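A sketch of the resulting calculation (pf_num_per_unit comes from nfp_dev.c
in this patch: 4 for NFP3800, 1 for NFP6000; multi_pf.id is the PCI function
number set in the earlier keepalive patch):

	pf_id = pcie_unit * pf_num_per_unit + multi_pf.id;

	/* NFP3800 example: PCIe unit 1, PCI function 2 -> pf_id = 1 * 4 + 2 = 6.
	 * With pf_num_per_unit == 1 and function 0, this degenerates to the old
	 * nfp_cppcore_pcie_unit() value, so single-PF symbol names are unchanged.
	 */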
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 2 +- .../net/ethernet/netronome/nfp/flower/main.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_main.c | 18 +++++++++++------- drivers/net/ethernet/netronome/nfp/nfp_main.h | 1 + .../ethernet/netronome/nfp/nfpcore/nfp_dev.c | 2 ++ .../ethernet/netronome/nfp/nfpcore/nfp_dev.h | 1 + 6 files changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 69e84ff7f2e5..41d18df97c85 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -362,7 +362,7 @@ int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) const struct nfp_rtsym *sym; int res; - abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp); + abm->pf_id = nfp_get_pf_id(pf); /* Check if Qdisc offloads are supported */ res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 83eaa5ae3cd4..565987f0a595 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -378,10 +378,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, enum nfp_flower_cmsg_port_vnic_type vnic_type, enum nfp_repr_type repr_type, unsigned int cnt) { - u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + u8 nfp_pcie = nfp_get_pf_id(app->pf); enum nfp_port_type port_type; struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 93e234575a76..8174a07b3cf7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -69,6 +69,13 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +u8 nfp_get_pf_id(struct nfp_pf *pf) +{ + return nfp_cppcore_pcie_unit(pf->cpp) * + pf->dev_info->pf_num_per_unit + + pf->multi_pf.id; +} + int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val) { @@ -76,7 +83,7 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, int err = 0; u64 val; - snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), format, nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { @@ -95,8 +102,7 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, { char pf_symbol[256]; - snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, - nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, nfp_get_pf_id(pf)); return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); } @@ -801,10 +807,8 @@ static void nfp_fw_unload(struct nfp_pf *pf) static int nfp_pf_find_rtsyms(struct nfp_pf *pf) { + unsigned int pf_id = nfp_get_pf_id(pf); char pf_symbol[256]; - unsigned int pf_id; - - pf_id = nfp_cppcore_pcie_unit(pf->cpp); /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); @@ -830,7 +834,7 @@ static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf) int err = 0; u64 val; - snprintf(name, sizeof(name), "_pf%u_net_app_cap", 
nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index c071087c83cd..66bc1f48fee0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -208,4 +208,5 @@ void nfp_devlink_params_unregister(struct nfp_pf *pf); unsigned int nfp_net_lr2speed(unsigned int linkrate); unsigned int nfp_net_speed2lr(unsigned int speed); +u8 nfp_get_pf_id(struct nfp_pf *pf); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c index 0725b51c2a95..8a7c5de0de77 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c @@ -19,6 +19,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0a00, .pcie_expl_offset = 0xd000, .qc_area_sz = 0x100000, + .pf_num_per_unit = 4, }, [NFP_DEV_NFP3800_VF] = { .dma_mask = DMA_BIT_MASK(48), @@ -38,6 +39,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0400, .pcie_expl_offset = 0x1000, .qc_area_sz = 0x80000, + .pf_num_per_unit = 1, }, [NFP_DEV_NFP6000_VF] = { .dma_mask = DMA_BIT_MASK(40), diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h index e4d38178de0f..d948c9c4a09a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h @@ -35,6 +35,7 @@ struct nfp_dev_info { u32 pcie_cfg_expbar_offset; u32 pcie_expl_offset; u32 qc_area_sz; + u8 pf_num_per_unit; }; extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT]; -- Gitee From 6699f8f32943591b037f4080a84f94424336ce00 Mon Sep 17 00:00:00 2001 From: Tianyu Yuan Date: Tue, 7 Mar 2023 17:02:29 +0800 Subject: [PATCH 479/953] anolis: nfp: apply one port per PF for multi-PF setup ANBZ: #8563 Only one port per PF is allowed in multi-PF setup. While eth_table still carries the total port info, each PF needs to bind itself to the correct port according to its PF id.
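The binding rule, condensed for illustration (a two-port NIC is assumed): eth_table still describes both ports, but with multi-PF enabled each PF only instantiates the entry matching its own id, so PF0 takes ports[0] and PF1 takes ports[1]:

    /* Condensed from the flower/phy-repr changes below. */
    int idx = pf->multi_pf.en ? pf->multi_pf.id : i;
    struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[idx];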
Signed-off-by: Tianyu Yuan Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/abm/main.c | 2 +- drivers/net/ethernet/netronome/nfp/bpf/main.c | 2 +- .../net/ethernet/netronome/nfp/flower/main.c | 17 ++++++++++------- .../net/ethernet/netronome/nfp/nfp_net_main.c | 8 ++++++-- drivers/net/ethernet/netronome/nfp/nfp_port.c | 3 ++- drivers/net/ethernet/netronome/nfp/nic/main.c | 3 ++- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 5d3df28c648f..d4acaa15629d 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -451,7 +451,7 @@ static int nfp_abm_init(struct nfp_app *app) nfp_err(pf->cpp, "ABM NIC requires ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index f469950c7265..3d928dfba114 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -70,7 +70,7 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) nfp_err(pf->cpp, "No ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 565987f0a595..88e8ae25f0cc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -428,10 +428,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, goto err_reprs_clean; } if (repr_type == NFP_REPR_TYPE_PF) { - port->pf_id = i; + port->pf_id = app->pf->multi_pf.id; port->vnic = priv->nn->dp.ctrl_bar; } else { - port->pf_id = 0; + port->pf_id = app->pf->multi_pf.id; port->vf_id = i; port->vnic = app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; @@ -496,24 +496,27 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + int err, reify_cnt, phy_reprs_num; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; - int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; + phy_reprs_num = app->pf->multi_pf.en ? app->pf->max_data_vnics : + eth_tbl->count; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } - for (i = 0; i < eth_tbl->count; i++) { - unsigned int phys_port = eth_tbl->ports[i].index; + for (i = 0; i < phy_reprs_num; i++) { + int idx = app->pf->multi_pf.en ? 
app->pf->multi_pf.id : i; + unsigned int phys_port = eth_tbl->ports[idx].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; @@ -542,7 +545,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) nfp_repr_free(repr); goto err_reprs_clean; } - err = nfp_port_init_phy_port(app->pf, app, port, i); + err = nfp_port_init_phy_port(app->pf, app, port, idx); if (err) { kfree(repr_priv); nfp_port_free(port); @@ -609,7 +612,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { - if (id > 0) { + if (id > 0 && !app->pf->multi_pf.en) { nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n"); goto err_invalid_port; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index cbe4972ba104..ad51fbfc152d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -141,7 +141,7 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) { int err; - nn->id = id; + nn->id = pf->multi_pf.en ? pf->multi_pf.id : id; if (nn->port) { err = nfp_devlink_port_register(pf->app, nn->port); @@ -184,7 +184,7 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, for (i = 0; i < pf->max_data_vnics; i++) { nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar, - stride, i); + stride, pf->multi_pf.en ? pf->multi_pf.id : i); if (IS_ERR(nn)) { err = PTR_ERR(nn); goto err_free_prev; @@ -706,6 +706,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) pf->max_data_vnics = nfp_net_pf_get_num_ports(pf); if ((int)pf->max_data_vnics < 0) return pf->max_data_vnics; + if (pf->multi_pf.en && pf->max_data_vnics != 1) { + nfp_err(pf->cpp, "Only one data_vnic per PF is supported in multiple PF setup, please update FW.\n"); + return -EPERM; + } err = nfp_net_pci_map_mem(pf); if (err) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 54640bcb70fb..dadd6844c385 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -189,7 +189,8 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, port->eth_port = &pf->eth_tbl->ports[id]; port->eth_id = pf->eth_tbl->ports[id].index; - port->netdev->dev_port = id; + if (!pf->multi_pf.en) + port->netdev->dev_port = id; if (pf->mac_stats_mem) port->eth_stats = pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE; diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index 9dd5afe37f6e..e7a2d01bcbff 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -12,7 +12,8 @@ static int nfp_nic_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; - if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count && + !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; -- Gitee From 410c725aa7794d530dc9453453ba4230ce044c3c Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Wed, 1 Mar 2023 19:23:49 +0800 Subject: [PATCH 480/953] anolis: nfp: enable multi-PF in application firmware if supported ANBZ: #8563 For backward compatibility concern, the new application firmware is designed to support 
both single-PF setup and multi-PF setup. Thus the driver should inform the application firmware which setup is currently in use. This should be done as early as possible since the setup may affect some configurations exposed by firmware. Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- .../net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1 + .../net/ethernet/netronome/nfp/nfp_net_main.c | 121 +++++++++++++----- 2 files changed, 92 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 3e63f6d6a563..d6b127f13ed3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -268,6 +268,7 @@ #define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */ #define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */ #define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */ +#define NFP_NET_CFG_CTRL_MULTI_PF (0x1 << 5) /* Multi PF */ #define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */ #define NFP_NET_CFG_CAP_WORD1 0x00a4 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index ad51fbfc152d..c06e1e9c9412 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -684,15 +684,100 @@ int nfp_net_refresh_eth_port(struct nfp_port *port) return ret; } +static int nfp_net_pre_init(struct nfp_pf *pf, int *stride) +{ + struct nfp_net_fw_version fw_ver; + struct nfp_cpp_area *area; + u8 __iomem *ctrl_bar; + int err = 0; + + ctrl_bar = nfp_pf_map_rtsym(pf, NULL, "_pf%d_net_bar0", NFP_PF_CSR_SLICE_SIZE, &area); + if (IS_ERR(ctrl_bar)) { + nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); + return pf->fw_loaded ? PTR_ERR(ctrl_bar) : 1; + } + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK || + fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { + *stride = 2; + nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 5: + *stride = 4; + break; + default: + nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + } + + if (!pf->multi_pf.en) + goto end; + + /* Enable multi-PF. */ + if (readl(ctrl_bar + NFP_NET_CFG_CAP_WORD1) & NFP_NET_CFG_CTRL_MULTI_PF) { + unsigned long long addr; + u32 cfg_q, cpp_id, ret; + unsigned long timeout; + + writel(NFP_NET_CFG_CTRL_MULTI_PF, ctrl_bar + NFP_NET_CFG_CTRL_WORD1); + writel(NFP_NET_CFG_UPDATE_GEN, ctrl_bar + NFP_NET_CFG_UPDATE); + + /* Config queue is next to txq.
*/ + cfg_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ) + 1; + addr = nfp_qcp_queue_offset(pf->dev_info, cfg_q) + NFP_QCP_QUEUE_ADD_WPTR; + cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); + err = nfp_cpp_writel(pf->cpp, cpp_id, addr, 1); + if (err) + goto end; + + timeout = jiffies + HZ * NFP_NET_POLL_TIMEOUT; + while ((ret = readl(ctrl_bar + NFP_NET_CFG_UPDATE))) { + if (ret & NFP_NET_CFG_UPDATE_ERR) { + nfp_err(pf->cpp, "Enable multi-PF failed\n"); + err = -EIO; + break; + } + + usleep_range(250, 500); + if (time_is_before_eq_jiffies(timeout)) { + nfp_err(pf->cpp, "Enable multi-PF timeout\n"); + err = -ETIMEDOUT; + break; + } + } + } else { + nfp_err(pf->cpp, "Loaded firmware doesn't support multi-PF\n"); + err = -EINVAL; + } + +end: + nfp_cpp_area_release_free(area); + return err; +} + /* * PCI device functions */ int nfp_net_pci_probe(struct nfp_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - struct nfp_net_fw_version fw_ver; u8 __iomem *ctrl_bar, *qc_bar; - int stride; + int stride = 0; int err; INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics); @@ -703,6 +788,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) return -EINVAL; } + err = nfp_net_pre_init(pf, &stride); + if (err) + return err; + pf->max_data_vnics = nfp_net_pf_get_num_ports(pf); if ((int)pf->max_data_vnics < 0) return pf->max_data_vnics; @@ -722,34 +811,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) goto err_unmap; } - nfp_net_get_fw_version(&fw_ver, ctrl_bar); - if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK || - fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { - nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", - fw_ver.extend, fw_ver.class, - fw_ver.major, fw_ver.minor); - err = -EINVAL; - goto err_unmap; - } - - /* Determine stride */ - if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { - stride = 2; - nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); - } else { - switch (fw_ver.major) { - case 1 ... 5: - stride = 4; - break; - default: - nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", - fw_ver.extend, fw_ver.class, - fw_ver.major, fw_ver.minor); - err = -EINVAL; - goto err_unmap; - } - } - err = nfp_net_pf_app_init(pf, qc_bar, stride); if (err) goto err_unmap; -- Gitee From a36c0923b8f074eb971e0875152ed7eef2e9befa Mon Sep 17 00:00:00 2001 From: Tianyu Yuan Date: Fri, 3 Mar 2023 10:31:05 +0800 Subject: [PATCH 481/953] anolis: nfp: configure VF total count for each PF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8563 By default, PFs share the total 64 VFs equally, i.e., 32 VFs for each PF on a two-port NIC, which is initialized in each PF’s SR-IOV capability register by management firmware. And a new hwinfo `abi_total_vf` is introduced to make each PF’s VF total count configurable. Management firmware reads the hwinfo and configures it in the SR-IOV capability register during the boot process, so a reboot is required to make the configuration take effect. This is not touched in driver code. The driver then modifies each PF’s `sriov_totalvf` according to the maximum VF count supported by the loaded application firmware. Here we apply the rule that the PF with the smaller id is satisfied first if the total configured count exceeds the limitation.
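A worked example of the "smaller id first" rule (all numbers assumed): the application firmware advertises nfd_vf_cfg_max_vfs == 48 on an NFP3800 (4 PFs per unit, so VF numbering starts at function 4), while management firmware gave each of two PFs total == 32 with first-VF routing-ID offsets of 4 and 35 respectively:

    PF0: offset = 4  + 0 - 4 = 0;  0  + 32 <= 48 -> limit_vfs = 32
    PF1: offset = 35 + 1 - 4 = 32; 32 + 32 >  48 -> limit_vfs = 48 - 32 = 16

PF0 is served in full, and PF1 is trimmed to whatever the application firmware can still back.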
Signed-off-by: Tianyu Yuan Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 49 +++++++++++++++++-- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 8174a07b3cf7..57432d5d1d00 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -224,11 +224,48 @@ static int nfp_pf_board_state_wait(struct nfp_pf *pf) return 0; } +static unsigned int nfp_pf_get_limit_vfs(struct nfp_pf *pf, + unsigned int limit_vfs_rtsym) +{ + u16 pos, offset, total; + + if (!pf->multi_pf.en || !limit_vfs_rtsym) + return limit_vfs_rtsym; + + pos = pci_find_ext_capability(pf->pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* Management firmware ensures that SR-IOV capability registers + * are initialized correctly. + */ + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_TOTAL_VF, &total); + if (!total) + return 0; + + /* Offset of first VF is relative to its PF. */ + offset += pf->multi_pf.id; + if (offset < pf->dev_info->pf_num_per_unit) + return 0; + + /* For 3800, VF is numbered from max PF count. */ + offset -= pf->dev_info->pf_num_per_unit; + if (offset >= limit_vfs_rtsym) + return 0; + + if (offset + total > limit_vfs_rtsym) + return limit_vfs_rtsym - offset; + + return total; +} + static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { + unsigned int limit_vfs_rtsym; int err; - pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); + limit_vfs_rtsym = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); if (err) { /* For backwards compatibility if symbol not found allow all */ pf->limit_vfs = ~0; @@ -239,9 +276,13 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) return err; } - err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); - if (err) - nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + pf->limit_vfs = nfp_pf_get_limit_vfs(pf, limit_vfs_rtsym); + if (pci_sriov_get_totalvfs(pf->pdev) != pf->limit_vfs) { + err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) + nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + } + return 0; } -- Gitee From e549150c8e40e08469fcfe45d139c378fc967b67 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Thu, 30 Mar 2023 10:24:21 +0800 Subject: [PATCH 482/953] anolis: nfp: configure VF split info into application firmware ANBZ: #8563 In multi-PF case, all PFs share total 64 VFs. To support the VF count of each PF configurable, driver needs to write the VF count and the first VF id into application firmware, so that firmware can initialize and allocate relevant resource accordingly. 
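A minimal sketch of the handshake added in this patch (assuming the firmware advertises NFP_NET_VF_CFG_MB_CAP_SPLIT): the PF writes its own VF count into the new VF_CNT mailbox byte, then reuses the VF_NUM field to pass its first absolute VF id:

    writeb(pf->limit_vfs, pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_CNT);
    err = nfp_net_sriov_update(pf->app, pf->multi_pf.vf_fid,
                               NFP_NET_VF_CFG_MB_UPD_SPLIT, "split");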
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_main.h | 2 ++ .../net/ethernet/netronome/nfp/nfp_net_main.c | 16 ++++++++++++ .../ethernet/netronome/nfp/nfp_net_sriov.c | 25 +++++++++++++++++++ .../ethernet/netronome/nfp/nfp_net_sriov.h | 5 ++++ 5 files changed, 49 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 57432d5d1d00..afb210943b78 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -254,6 +254,7 @@ static unsigned int nfp_pf_get_limit_vfs(struct nfp_pf *pf, if (offset >= limit_vfs_rtsym) return 0; + pf->multi_pf.vf_fid = offset; if (offset + total > limit_vfs_rtsym) return limit_vfs_rtsym - offset; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 66bc1f48fee0..750dfaf4ca82 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -87,6 +87,7 @@ struct nfp_dumpspec { * @multi_pf: Used in multi-PF setup * @multi_pf.en: Is multi-PF setup? * @multi_pf.id: PF index + * @multi_pf.vf_fid: Id of first VF that belongs to this PF * @multi_pf.beat_timer:Timer for beat to keepalive * @multi_pf.beat_area: Pointer to CPP area for beat to keepalive * @multi_pf.beat_addr: Pointer to mapped beat address used for keepalive @@ -151,6 +152,7 @@ struct nfp_pf { struct { bool en; u8 id; + u8 vf_fid; struct timer_list beat_timer; struct nfp_cpp_area *beat_area; u8 __iomem *beat_addr; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index c06e1e9c9412..4d4af60400af 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -293,6 +293,16 @@ static int nfp_net_pf_init_vnics(struct nfp_pf *pf) return err; } +static void nfp_net_pf_clean_vnics(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nfp_net_is_data_vnic(nn)) + nfp_net_pf_clean_vnic(pf, nn); + } +} + static int nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) { @@ -843,11 +853,17 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_stop_app; + err = nfp_net_pf_init_sriov(pf); + if (err) + goto err_clean_vnics; + devl_unlock(devlink); devlink_register(devlink); return 0; +err_clean_vnics: + nfp_net_pf_clean_vnics(pf); err_stop_app: nfp_net_pf_app_stop(pf); err_free_irqs: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 6eeeb0fda91f..f516ba7a429e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -14,6 +14,9 @@ #include "nfp_net.h" #include "nfp_net_sriov.h" +/* The configurations that precede VF creating. */ +#define NFP_NET_VF_PRE_CONFIG NFP_NET_VF_CFG_MB_CAP_SPLIT + static int nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { @@ -29,6 +32,10 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool return -EOPNOTSUPP; } + /* No need to check vf for the pre-configurations. 
*/ + if (cap & NFP_NET_VF_PRE_CONFIG) + return 0; + if (vf < 0 || vf >= app->pf->num_vfs) { if (warn) nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); @@ -309,3 +316,21 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, return 0; } + +int nfp_net_pf_init_sriov(struct nfp_pf *pf) +{ + int err; + + if (!pf->multi_pf.en || !pf->limit_vfs) + return 0; + + err = nfp_net_sriov_check(pf->app, 0, NFP_NET_VF_CFG_MB_CAP_SPLIT, "split", true); + if (err) + return err; + + writeb(pf->limit_vfs, pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_CNT); + + /* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_fid to FW. */ + return nfp_net_sriov_update(pf->app, pf->multi_pf.vf_fid, + NFP_NET_VF_CFG_MB_UPD_SPLIT, "split"); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index 2d445fa199dc..8de959018819 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -21,6 +21,7 @@ #define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_CAP_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_CAP_SPLIT (0x1 << 8) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) @@ -30,6 +31,8 @@ #define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_UPD_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_UPD_SPLIT (0x1 << 8) +#define NFP_NET_VF_CFG_MB_VF_CNT 0x6 #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -68,4 +71,6 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int nfp_app_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +int nfp_net_pf_init_sriov(struct nfp_pf *pf); + #endif /* _NFP_NET_SRIOV_H_ */ -- Gitee From 01b2f906d73200f1a69c08e1b00dd6e4a1d4fba2 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 4 Apr 2023 10:49:03 +0800 Subject: [PATCH 483/953] anolis: nfp: use absolute vf id for multi-PF case ANBZ: #8563 In multi-PF setup, absolute VF id is required to configure attributes for corresponding VF. Add helper function to map rtsym with specified offset. With PF's first VF as base offset, we can access `vf_cfg_mem` as before. 
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 14 +++++++++++--- drivers/net/ethernet/netronome/nfp/nfp_main.h | 4 ++++ .../net/ethernet/netronome/nfp/nfp_net_main.c | 10 ++++++---- .../net/ethernet/netronome/nfp/nfp_net_sriov.c | 14 ++++++++++---- .../ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 4 ++++ .../ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 16 ++++++++++++---- 6 files changed, 47 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index afb210943b78..21c1d4877a81 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -97,14 +97,22 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, } u8 __iomem * -nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { char pf_symbol[256]; snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, nfp_get_pf_id(pf)); - return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); + return nfp_rtsym_map_offset(pf->rtbl, pf_symbol, name, offset, min_size, area); +} + +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return nfp_pf_map_rtsym_offset(pf, name, sym_fmt, 0, min_size, area); } /* Callers should hold the devlink instance lock */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 750dfaf4ca82..5a01c66ddce9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -179,6 +179,10 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val); int nfp_net_pf_get_app_id(struct nfp_pf *pf); u8 __iomem * +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 4d4af60400af..5df99c60c3b2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -473,9 +473,10 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } } - pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar", - NFP_NET_CFG_BAR_SZ * pf->limit_vfs, - &pf->vf_cfg_bar); + pf->vf_cfg_mem = nfp_pf_map_rtsym_offset(pf, "net.vfcfg", "_pf%d_net_vf_bar", + NFP_NET_CFG_BAR_SZ * pf->multi_pf.vf_fid, + NFP_NET_CFG_BAR_SZ * pf->limit_vfs, + &pf->vf_cfg_bar); if (IS_ERR(pf->vf_cfg_mem)) { if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) { err = PTR_ERR(pf->vf_cfg_mem); @@ -484,7 +485,8 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vf_cfg_mem = NULL; } - min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ; + min_size = NFP_NET_VF_CFG_SZ * (pf->limit_vfs + pf->multi_pf.vf_fid) + + NFP_NET_VF_CFG_MB_SZ; 
pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2", "_pf%d_net_vf_cfg2", min_size, &pf->vfcfg_tbl2_area); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index f516ba7a429e..67aea9445aa2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -72,7 +72,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct nfp_app *app = nfp_app_from_netdev(netdev); unsigned int vf_offset; - int err; + int err, abs_vf; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) @@ -85,13 +85,14 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return -EINVAL; } + abs_vf = vf + app->pf->multi_pf.vf_fid; /* Write MAC to VF entry in VF config symbol */ - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + abs_vf * NFP_NET_VF_CFG_SZ; writel(get_unaligned_be32(mac), app->pf->vfcfg_tbl2 + vf_offset); writew(get_unaligned_be16(mac + 4), app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); - err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); + err = nfp_net_sriov_update(app, abs_vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); if (!err) nfp_info(app->pf->cpp, "MAC %pM set on VF %d, reload the VF driver to make this change effective.\n", @@ -145,6 +146,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, if (vlan_tag && is_proto_sup) vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto)); + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); @@ -169,6 +171,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, return -EINVAL; } + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; ratevalue = FIELD_PREP(NFP_NET_VF_CFG_MAX_RATE, max_tx_rate ? 
max_tx_rate : @@ -195,6 +198,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) return err; /* Write spoof check control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -219,6 +223,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) return err; /* Write trust control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -253,6 +258,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, } /* Write link state to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -278,7 +284,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, if (err) return err; - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + (vf + app->pf->multi_pf.vf_fid) * NFP_NET_VF_CFG_SZ; mac_hi = readl(app->pf->vfcfg_tbl2 + vf_offset); mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index 49a4d3f56b56..4042352f83b0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -101,6 +101,10 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, u64 value); u8 __iomem * +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 2260c2403a83..97a4417a1c1b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -520,8 +520,9 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, } u8 __iomem * -nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { const struct nfp_rtsym *sym; u8 __iomem *mem; @@ -540,12 +541,12 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return (u8 __iomem *)ERR_PTR(err); } - if (sym->size < min_size) { + if (sym->size < min_size + offset) { nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); return (u8 __iomem *)ERR_PTR(-EINVAL); } - mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr + offset, sym->size - offset, area); if (IS_ERR(mem)) { nfp_err(rtbl->cpp, "rtysm '%s': failed to map: %ld\n", name, PTR_ERR(mem)); @@ -554,3 +555,10 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return mem; } + 
+u8 __iomem * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return nfp_rtsym_map_offset(rtbl, name, id, 0, min_size, area); +} -- Gitee From f2e421e3fb8bd70b5cf7da29ca5ce2dc88a3c66b Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 19 Sep 2023 19:27:06 +0800 Subject: [PATCH 484/953] anolis: nfp: refine firmware loading and keepalive mechanism ANBZ: #8563 Currently we skip application firmware loading either because other PFs are running or because the firmware is preloaded. But sometimes the firmware is not preloaded intentionally but remains loaded unexpectedly, in which case we need a chance to reload the firmware. Now we only skip firmware loading when other PFs are running, and the firmware loading flow of the multi-PF setup is more consistent with that of the single-PF setup. The keepalive magic number is set when PFs are removed if the firmware needs to be unloaded. Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 104 +++++++++--------- 1 file changed, 53 insertions(+), 51 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 21c1d4877a81..e1ddc6e667fa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -525,20 +525,24 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, return err; } -static void -nfp_nsp_beat_timer(struct timer_list *t) +static u8 __iomem * +nfp_get_beat_addr(struct nfp_pf *pf, int pf_id) { - struct nfp_pf *pf = from_timer(pf, t, multi_pf.beat_timer); - u8 __iomem *addr; - /* Each PF has corresponding qword to beat: * offset | usage * 0 | magic number * 8 | beat qword of pf0 * 16 | beat qword of pf1 */ - addr = pf->multi_pf.beat_addr + ((pf->multi_pf.id + 1) << 3); - writeq(jiffies, addr); + return pf->multi_pf.beat_addr + ((pf_id + 1) << 3); +} + +static void +nfp_nsp_beat_timer(struct timer_list *t) +{ + struct nfp_pf *pf = from_timer(pf, t, multi_pf.beat_timer); + + writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); /* Beat once per second. */ mod_timer(&pf->multi_pf.beat_timer, jiffies + HZ); } @@ -595,28 +599,42 @@ nfp_nsp_keepalive_stop(struct nfp_pf *pf) } } +static u64 +nfp_get_sibling_beat(struct nfp_pf *pf) +{ + unsigned int i = 0; + u64 beat = 0; + + if (!pf->multi_pf.beat_addr) + return 0; + + for (; i < pf->dev_info->pf_num_per_unit; i++) { + if (i == pf->multi_pf.id) + continue; + + beat += readq(nfp_get_beat_addr(pf, i)); + } + + return beat; +} + static bool nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) { - const struct nfp_mip *mip; + unsigned long timeout = jiffies + HZ * 3; + u64 beat = nfp_get_sibling_beat(pf); if (!pf->multi_pf.en || nfp_nsp_fw_loaded(nsp) <= 0) return false; - mip = nfp_mip_open(pf->cpp); - if (!mip) - return false; + while (time_is_after_jiffies(timeout)) { + if (beat != nfp_get_sibling_beat(pf)) + return true; - /* For the case that system boots from pxe, we need - * reload FW if pxe FW is running.
- */ - if (!strncmp(nfp_mip_name(mip), "pxe", 3)) { - nfp_mip_close(mip); - return false; + msleep(500); } - pf->mip = mip; - return true; + return false; } /** @@ -630,7 +648,7 @@ nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) static int nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { - bool do_reset, fw_loaded = false, fw_new = false; + bool do_reset, fw_loaded = false; const struct firmware *fw = NULL; int err, reset, policy, ifcs = 0; char *token, *ptr; @@ -711,30 +729,17 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } dev_info(&pdev->dev, "Finished loading FW image\n"); fw_loaded = true; - fw_new = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { - err = nfp_nsp_load_stored_fw(nsp); - - /* Same logic with loading from disk when multi-PF. Othewise: - * - * Don't propagate this error to stick with legacy driver + /* Don't propagate this error to stick with legacy driver * behavior, failure will be detected later during init. - * - * Don't flag the fw_loaded in this case since other devices - * may reuse the firmware when configured this way. */ - if (!err) { + if (!nfp_nsp_load_stored_fw(nsp)) dev_info(&pdev->dev, "Finished loading stored FW image\n"); - if (pf->multi_pf.en) - fw_loaded = true; - } else { - if (pf->multi_pf.en) - dev_err(&pdev->dev, "Stored FW loading failed: %d\n", err); - else - err = 0; - } + /* Don't flag the fw_loaded in this case since other devices + * may reuse the firmware when configured this way + */ } else { dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n"); } @@ -748,17 +753,9 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) */ if (err < 0) nfp_nsp_keepalive_stop(pf); - else if (fw_loaded && ifcs == 1 && !pf->multi_pf.en) + else if (fw_loaded && ifcs == 1) pf->unload_fw_on_remove = true; - /* Only setting magic number when fw is freshly loaded here. NSP - * won't unload fw when heartbeat stops if the magic number is not - * correct. It's used when firmware is preloaded and shouldn't be - * unloaded when driver exits. - */ - if (fw_new && pf->multi_pf.en) - writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); - return err < 0 ? err : fw_loaded; } @@ -840,6 +837,12 @@ static void nfp_fw_unload(struct nfp_pf *pf) struct nfp_nsp *nsp; int err; + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { + /* NSP will unload firmware when no active PF exists. 
*/ + writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); + return; + } + nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { nfp_err(pf->cpp, "Reset failed, can't open NSP\n"); @@ -1011,8 +1014,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, if (err) goto err_hwinfo_free; - if (!pf->mip) - pf->mip = nfp_mip_open(pf->cpp); + pf->mip = nfp_mip_open(pf->cpp); pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip); err = nfp_pf_find_rtsyms(pf); @@ -1052,11 +1054,11 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); err_fw_unload: - nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); kfree(pf->eth_tbl); kfree(pf->nspi); vfree(pf->dumpspec); @@ -1092,12 +1094,12 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) nfp_net_pci_remove(pf); vfree(pf->dumpspec); - nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (unload_fw && pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); destroy_workqueue(pf->wq); pci_set_drvdata(pdev, NULL); kfree(pf->hwinfo); -- Gitee From 3de87b080ca5aaed6df5189eb8b0069e646b48f0 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 21 Nov 2023 15:43:34 +0800 Subject: [PATCH 485/953] anolis: nfp: grant the right of reclaiming resources to PF0 only ANBZ: #8563 In multi-PF setup, one PF may still bust the resource lock that is held by another. Since busting the lock is an error-handling path and a single way to recover suffices, there is no need for every PF to have the error handler. Now leave the right to the first PF only. Signed-off-by: Yinjun Zhang Reviewed-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 17 ++++++++++------- .../ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 15 +++++----------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index e1ddc6e667fa..bd3f681b6d18 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -801,10 +801,6 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) return err; } - pf->multi_pf.en = pdev->multifunction; - pf->multi_pf.id = PCI_FUNC(pdev->devfn); - dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); - err = nfp_nsp_wait(nsp); if (err < 0) goto exit_close_nsp; @@ -993,9 +989,16 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_disable_msix; } - err = nfp_resource_table_init(pf->cpp); - if (err) - goto err_cpp_free; + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = PCI_FUNC(pdev->devfn); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); + + /* Only PF0 has the right to reclaim locked resources.
*/ if (!pf->multi_pf.id) { err = nfp_resource_table_init(pf->cpp); if (err) goto err_cpp_free; } pf->hwinfo = nfp_hwinfo_read(pf->cpp); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index 1fac6867922b..7bc17b94ac60 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -341,7 +341,6 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, unsigned long long address) { - unsigned long timeout = jiffies + 2 * HZ; const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ u16 interface = nfp_cpp_interface(cpp); @@ -353,16 +352,12 @@ int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, return err; /* Check lock */ - while (time_is_after_jiffies(timeout)) { - err = nfp_cpp_readl(cpp, mur, address, &tmp); - if (err < 0) - return err; - - if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) - return 0; + err = nfp_cpp_readl(cpp, mur, address, &tmp); + if (err < 0) + return err; - msleep_interruptible(10); - } + if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) + return 0; /* Bust the lock */ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface)); -- Gitee From e8544eb68838292b1bd11014c302c3a4bcca151a Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Wed, 30 Aug 2023 16:35:08 +0200 Subject: [PATCH 486/953] anolis: nfp: add pci_error_handler callback ANBZ: #8563 Add callbacks to catch FLR prepare and done. Stop the heartbeat timer before the FLR to make sure it can't trigger during the reset. We do need to manually write the keepalive value just before this to make sure the firmware is kept alive, otherwise the firmware may be unloaded during frequent FLRs of both PFs. Resume the timer when the FLR is done. Signed-off-by: Louis Peens Signed-off-by: Baowen Zheng Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index bd3f681b6d18..9788f7958d92 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1125,12 +1125,43 @@ static void nfp_pci_shutdown(struct pci_dev *pdev) __nfp_pci_shutdown(pdev, false); } +void nfp_pci_error_reset_prepare(struct pci_dev *dev) +{ + struct nfp_pf *pf = pci_get_drvdata(dev); + + if (pf) { + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { + /* Pause heartbeat timer so it can't happen during FLR */ + del_timer_sync(&pf->multi_pf.beat_timer); + /* We need to write keepalive to keep firmware alive + * during frequent FLR.
+ */ + writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); + } + } +} + +void nfp_pci_error_reset_done(struct pci_dev *dev) +{ + struct nfp_pf *pf = pci_get_drvdata(dev); + + if (pf) + if (pf->multi_pf.en && pf->multi_pf.beat_addr) + add_timer(&pf->multi_pf.beat_timer); +} + +static const struct pci_error_handlers nfp_pci_err_handler = { + .reset_prepare = nfp_pci_error_reset_prepare, + .reset_done = nfp_pci_error_reset_done, +}; + static struct pci_driver nfp_pci_driver = { .name = nfp_driver_name, .id_table = nfp_pci_device_ids, .probe = nfp_pci_probe, .remove = nfp_pci_remove, .shutdown = nfp_pci_shutdown, + .err_handler = &nfp_pci_err_handler, .sriov_configure = nfp_pcie_sriov_configure, }; -- Gitee From 99363ba682a9c0dbef55e73cb002deb548645f92 Mon Sep 17 00:00:00 2001 From: Baowen Zheng Date: Fri, 1 Sep 2023 08:23:54 +0200 Subject: [PATCH 487/953] anolis: nfp: reset netdev state on FLR event. ANBZ: #8563 We need to bring down the netdev while the PF's FLR is in progress to prevent the driver and firmware from accessing chip memory; conversely, bring the netdev back up when the FLR is finished. Signed-off-by: Baowen Zheng Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 9788f7958d92..bedf4af5833d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1130,6 +1130,8 @@ void nfp_pci_error_reset_prepare(struct pci_dev *dev) struct nfp_pf *pf = pci_get_drvdata(dev); if (pf) { + struct nfp_net *nn; + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { /* Pause heartbeat timer so it can't happen during FLR */ del_timer_sync(&pf->multi_pf.beat_timer); @@ -1138,6 +1140,14 @@ */ writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); } + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) { + struct net_device *netdev = nn->dp.netdev; + + netdev->netdev_ops->ndo_stop(netdev); + } + } } } @@ -1145,9 +1155,21 @@ void nfp_pci_error_reset_done(struct pci_dev *dev) { struct nfp_pf *pf = pci_get_drvdata(dev); - if (pf) + if (pf) { + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) { + struct net_device *netdev = nn->dp.netdev; + + rtnl_lock(); + netdev->netdev_ops->ndo_open(netdev); + rtnl_unlock(); + } + } if (pf->multi_pf.en && pf->multi_pf.beat_addr) add_timer(&pf->multi_pf.beat_timer); + } } static const struct pci_error_handlers nfp_pci_err_handler = { -- Gitee From b75dd622020954c347ecf9c5f2f5a3dc36e56f99 Mon Sep 17 00:00:00 2001 From: Ryno Swart Date: Thu, 23 Nov 2023 16:40:02 +0200 Subject: [PATCH 488/953] anolis: nfp: preserve multi-pf control bit during initialisation ANBZ: #8563 Preserve the multi-PF control bit if the functionality is available. Multi-PF mode is configured before this, during pre-init. The old behaviour would reset the control bit to zero regardless of which mode is active. This had no effect on the card, as the firmware rejects all requests to return to single-PF mode. This change only preserves the control bit in the config BAR for verification.
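The preserved read-modify-write, condensed from the diff that follows (previously ctrl_w1 was rebuilt from zero, so the next reconfig asked the firmware to drop the bit, a request the firmware rejects anyway):

    if (nn->cap_w1 & NFP_NET_CFG_CTRL_MULTI_PF)
        nn->dp.ctrl_w1 |= nn_readl(nn, NFP_NET_CFG_CTRL_WORD1) &
                          NFP_NET_CFG_CTRL_MULTI_PF;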
Signed-off-by: Ryno Swart Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index f2085340a1cf..fb0dc1481307 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2705,6 +2705,11 @@ int nfp_net_init(struct nfp_net *nn) if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER; + /* Multi-PF is already enabled during pre-init, preserve control bit */ + if (nn->cap_w1 & NFP_NET_CFG_CTRL_MULTI_PF) + nn->dp.ctrl_w1 |= (nn_readl(nn, NFP_NET_CFG_CTRL_WORD1) & + NFP_NET_CFG_CTRL_MULTI_PF); + /* Stash the re-configuration queue away. First odd queue in TX Bar */ nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; -- Gitee From c9070c8de5e2f490483cd28562723cb43f1cd912 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Wed, 6 Mar 2024 17:28:31 +0800 Subject: [PATCH 489/953] anolis: nfp: fix initialization of incorrect PF id ANBZ: #8563 Using function id from PCI BDF as PF id is not reliable when PF is passed through to VM. Now we get PF id from vendor specific capability register which is filled by management firmware. Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 22 ++++++++++++++----- .../netronome/nfp/nfpcore/nfp6000_pcie.c | 15 ++++++++----- .../netronome/nfp/nfpcore/nfp6000_pcie.h | 9 +++++++- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index bedf4af5833d..22000315a79f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -933,6 +933,18 @@ static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf) nfp_nsp_close(nsp); } +static u8 nfp_init_pf_id(struct pci_dev *pdev) +{ + int vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR); + u8 id = 0; + + if (!vndr) + return PCI_FUNC(pdev->devfn); + + pci_read_config_byte(pdev, vndr + NFP_VNDR_PF_ID_OFFSET, &id); + return id; +} + static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { @@ -983,16 +995,16 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_pci_priv_unset; } - pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info); + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = nfp_init_pf_id(pdev); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); + + pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info, pf); if (IS_ERR(pf->cpp)) { err = PTR_ERR(pf->cpp); goto err_disable_msix; } - pf->multi_pf.en = pdev->multifunction; - pf->multi_pf.id = PCI_FUNC(pdev->devfn); - dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); - /* Only PF0 has the right to reclaim locked resources. 
*/ if (!pf->multi_pf.id) { err = nfp_resource_table_init(pf->cpp); if (err) goto err_cpp_free; } pf->hwinfo = nfp_hwinfo_read(pf->cpp); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 3f10c5365c80..8e60e20c4fee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -29,6 +29,7 @@ #include "nfp_cpp.h" #include "nfp_dev.h" +#include "../nfp_main.h" #include "nfp6000/nfp6000.h" @@ -532,7 +533,8 @@ static int bar_cmp(const void *aptr, const void *bptr) * BAR1.0-BAR1.7: -- * BAR2.0-BAR2.7: -- */ -static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) +static int enable_bars(struct nfp6000_pcie *nfp, u16 interface, + struct nfp_pf *pf) { const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( @@ -611,7 +613,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { - int pf; + int pf_id; msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); @@ -624,8 +626,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) switch (nfp->pdev->device) { case PCI_DEVICE_ID_NFP3800: - pf = nfp->pdev->devfn & 7; - nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + pf_id = pf->multi_pf.id; + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf_id); break; case PCI_DEVICE_ID_NFP4000: case PCI_DEVICE_ID_NFP5000: @@ -1309,7 +1311,8 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = { * Return: NFP CPP handle */ struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info) +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf) { struct nfp6000_pcie *nfp; u16 interface; @@ -1353,7 +1356,7 @@ nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_i goto err_free_nfp; } - err = enable_bars(nfp, interface); + err = enable_bars(nfp, interface, pf); if (err) goto err_free_nfp; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h index 097660b673db..e992f5c91013 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -11,7 +11,14 @@ #include "nfp_cpp.h" +/* Vendor specific register layout */ +#define NFP_VNDR_HEADER_OFFSET 0x0 +#define NFP_VNDR_PF_ID_OFFSET 0x4 + +struct nfp_pf; + struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info); +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf); #endif /* NFP6000_PCIE_H */ -- Gitee From 99f4bf29a98b13b64bac1c3572c8501c7954711d Mon Sep 17 00:00:00 2001 From: Baowen Zheng Date: Sun, 25 Feb 2024 21:31:00 -0500 Subject: [PATCH 490/953] anolis: nfp: add device activate command for nsp service ANBZ: #8563 Add a device activate command for the NSP service in the multiple-PF case. We need to activate the device if the probing PF is not PF0, so that VFs belonging to other PFs can send traffic normally. When removing the PCIe device, we need to keep the device active if the PF is PF0.
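Condensed from the diff below: the activation is only issued for non-zero PF ids, and -EOPNOTSUPP from NSP ABIs older than 38 is tolerated so probing on older management firmware is unchanged:

    if (pf->multi_pf.en && pf->multi_pf.id) {
        err = nfp_nsp_device_activate(nsp);   /* SPCODE_DEV_ACTIVATE == 29 */
        if (err < 0 && err != -EOPNOTSUPP)
            goto exit_close_nsp;   /* only a real failure aborts probe */
    }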
Signed-off-by: Baowen Zheng Reviewed-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 19 ++++++++++++++++++- .../ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 12 ++++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 1 + 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 22000315a79f..c74b314d035f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -805,6 +805,15 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) if (err < 0) goto exit_close_nsp; + if (pf->multi_pf.en && pf->multi_pf.id) { + err = nfp_nsp_device_activate(nsp); + if (err < 0 && err != -EOPNOTSUPP) { + dev_err(&pdev->dev, + "Failed to activate the NFP device: %d\n", err); + goto exit_close_nsp; + } + } + nfp_nsp_init_ports(pdev, pf, nsp); pf->nspi = __nfp_nsp_identify(nsp); @@ -1096,12 +1105,14 @@ static int nfp_pci_probe(struct pci_dev *pdev, static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) { + bool keep_device_active; struct nfp_pf *pf; pf = pci_get_drvdata(pdev); if (!pf) return; + keep_device_active = pf->multi_pf.en && !pf->multi_pf.id; nfp_hwmon_unregister(pf); nfp_pcie_sriov_disable(pdev); @@ -1124,7 +1135,13 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) kfree(pf->nspi); devlink_free(priv_to_devlink(pf)); pci_release_regions(pdev); - pci_disable_device(pdev); + + /* In multiple pfs case, we need to keep master flag of pf 0 + * to ensure vfs of other pfs work normally because of + * hardware limitation. + */ + if (!keep_device_active) + pci_disable_device(pdev); } static void nfp_pci_remove(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 56682c530b26..55d799d420aa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -102,6 +102,7 @@ enum nfp_nsp_cmd { SPCODE_VERSIONS = 21, /* Report FW versions */ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */ + SPCODE_DEV_ACTIVATE = 29, /* Activate hardware for multiple pfs case */ }; struct nfp_nsp_dma_buf { @@ -732,6 +733,17 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state) return nfp_nsp_command(state, SPCODE_SOFT_RESET); } +int nfp_nsp_device_activate(struct nfp_nsp *state) +{ + /* Older ABI versions did support this feature, however this has only + * been reliable since ABI 38. 
+ */ + if (nfp_nsp_get_abi_ver_minor(state) < 38) + return -EOPNOTSUPP; + + return nfp_nsp_command(state, SPCODE_DEV_ACTIVATE); +} + int nfp_nsp_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_command(state, SPCODE_MAC_INIT); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 6e044ac04917..f34b996b0749 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -17,6 +17,7 @@ u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_device_activate(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); -- Gitee From 2c23678ed043b25b40ab5c8c96b13b04d83904ec Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 20 Feb 2024 16:44:10 +0800 Subject: [PATCH 491/953] anolis: nfp: try firmware name of card type without media info ANBZ: #8563 Now all application firmware is indifferent of port speed, so do not bother to compose the firmware name with media info. This can reduce a number of symlinks for firmware files. For backward compatibility concern, the trial of firmware name with media info is still kept. Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index c74b314d035f..ddd2335013c2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -460,7 +460,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) if (fw) return fw; - /* Finally try the card type and media */ + /* Then try the card type */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; @@ -474,6 +474,12 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) return NULL; } + sprintf(fw_name, "netronome/%s.nffw", fw_model); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Finally try the card type and media */ spc = ARRAY_SIZE(fw_name); spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model); -- Gitee From db3d78d711438e622c6568fdc53af4450c9ac892 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 20 Feb 2024 16:52:48 +0800 Subject: [PATCH 492/953] anolis: nfp: update module firmware list ANBZ: #8563 Update the module firmware list to accommodate some new NFP products. 
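Taken together with the previous patch, the loader now also probes a media-less name before falling back to the media-specific one. For a hypothetical fw_model of "AMDA2000-1103" with a 2x25G port table, the last two lookups become (sketch; the earlier lookups are unchanged and elided):

    /*
     *   netronome/AMDA2000-1103.nffw           <- new, card type only
     *   netronome/nic_AMDA2000-1103_2x25.nffw  <- kept for backward
     *                                             compatibility
     */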
Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index ddd2335013c2..0fae86d8abe0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1271,6 +1271,13 @@ MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw"); +MODULE_FIRMWARE("netronome/AMDA0161-1001.nffw"); +MODULE_FIRMWARE("netronome/AMDA2000-1103.nffw"); +MODULE_FIRMWARE("netronome/AMDA2000-1104.nffw"); +MODULE_FIRMWARE("netronome/AMDA2001-1103.nffw"); +MODULE_FIRMWARE("netronome/AMDA2001-1104.nffw"); +MODULE_FIRMWARE("netronome/AMDA2002-1113.nffw"); +MODULE_FIRMWARE("netronome/AMDA2002-1114.nffw"); MODULE_AUTHOR("Corigine, Inc. "); MODULE_LICENSE("GPL"); -- Gitee From b8ee32c40d00e9add058e613f187a873713d56f1 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:05:04 +0800 Subject: [PATCH 493/953] anolis: x86/tsc: Make cur->adjusted values in package#1 to be the same ANBZ: #7809 When resume from S4 on Zhaoxin 2 packages platform that support X86_FEATURE_TSC_ADJUST, the following warning messages appear: [ 327.445302] [Firmware Bug]: TSC ADJUST differs: CPU15 45960750 --> 78394770. Restoring [ 329.209120] [Firmware Bug]: TSC ADJUST differs: CPU14 45960750 --> 78394770. Restoring [ 329.209128] [Firmware Bug]: TSC ADJUST differs: CPU13 45960750 --> 78394770. Restoring [ 329.209138] [Firmware Bug]: TSC ADJUST differs: CPU12 45960750 --> 78394770. Restoring [ 329.209151] [Firmware Bug]: TSC ADJUST differs: CPU11 45960750 --> 78394770. Restoring [ 329.209160] [Firmware Bug]: TSC ADJUST differs: CPU10 45960750 --> 78394770. Restoring [ 329.209169] [Firmware Bug]: TSC ADJUST differs: CPU9 45960750 --> 78394770. Restoring The reason is: Step 1: Bring up. TSC is sync after bring up with following settings: MSR 0x3b cur->adjusted Package#0 CPU 0-7 0 0 Package#1 first CPU value1 value1 Package#1 non-first CPU value1 value1 Step 2: Suspend to S4. Settings in Step 1 are not changed in this Step. Step 3: Bring up caused by S4 wake up event. TSC is sync when bring up with following settings: MSR 0x3b cur->adjusted Package#0 CPU 0-7 0 0 Package#1 first CPU value2 value2 Package#1 non-first CPU value2 value2 Step 4: Resume from S4. When resuming from S4, Current TSC synchronous mechanism cause following settings: MSR 0x3b cur->adjusted Package#0 CPU 0-7 0 0 Package#1 first CPU value2 value2 Package#1 non-first CPU value2 value1 In these Steps, value1 != 0 and value2 != value1. In Step4, as function tsc_store_and_check_tsc_adjust() do, when the value of MSR 0x3b on the non-first online CPU in package#1 is equal to the value of cur->adjusted on the first online CPU in the same package, the cur->adjusted value on this non-first online CPU will hold the old value1. This cause function tsc_verify_tsc_adjust() set the value of MSR 0x3b on the non-first online CPUs in the package#1 to the old value1 and print the beginning warning messages. Fix it by setting cur->adjusted value on the non-first online CPU in a package to the value of MSR 0x3b on the same CPU when they are not equal. 
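Condensed, the fix is a single extra branch in tsc_store_and_check_tsc_adjust() (sketch of the hunk that follows, not additional code):

    if (bootval != ref->adjusted) {
        /* unchanged path: force this CPU's MSR to the package ref */
        cur->adjusted = ref->adjusted;
        wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
    } else if (cur->adjusted != bootval &&
               (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
                boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)) {
        /* new path: MSR 0x3b already matches the reference written
         * at S4 bring-up, only the cached copy is stale -- adopt the
         * MSR value so tsc_verify_tsc_adjust() stops "restoring" the
         * pre-suspend value1.
         */
        cur->adjusted = bootval;
    }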
Signed-off-by: leoliu-oc Reviewed-by: Xingrui Yi Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2697 --- arch/x86/kernel/tsc_sync.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 1123ef3ccf90..d0369e9d6b8c 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -232,6 +232,11 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) if (bootval != ref->adjusted) { cur->adjusted = ref->adjusted; wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted); + } else if (cur->adjusted != bootval) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + cur->adjusted = bootval; + } } /* * We have the TSCs forced to be in sync on this package. Skip sync -- Gitee From c34b31ad5a8720734ca796f7b8353394be0943e9 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:04:56 +0800 Subject: [PATCH 494/953] anolis: x86/cpufeatures: Add low performance CRC32C instruction CPU feature ANBZ: #7809 SSE4.2 on Zhaoxin CPUs are compatible with Intel. The presence of CRC32C instruction is enumerated by CPUID.01H:ECX.SSE4_2[bit 20] = 1. Some Zhaoxin CPUs declare support SSE4.2 instruction sets but their CRC32C instruction are working with low performance. Add a synthetic CPU flag to indicates that the CRC32C instruction is not working as intended. This low performance CRC32C instruction flag is depend on X86_FEATURE_XMM4_2. Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2694 --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/cpuid-deps.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index b4c7433c7451..ca055890773b 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -333,6 +333,7 @@ #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ #define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */ +#define X86_FEATURE_CRC32C_LOW_PERF (11*32+27) /* "" Low performance */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index e462c1d3800a..4f559eb49525 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -82,6 +82,7 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_XFD, X86_FEATURE_XGETBV1 }, { X86_FEATURE_AMX_TILE, X86_FEATURE_XFD }, { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES }, + { X86_FEATURE_CRC32C_LOW_PERF, X86_FEATURE_XMM4_2 }, {} }; -- Gitee From 2f297fc9a2b5e9a74d033ff18a122c099653109c Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 6 Mar 2024 16:15:30 +0800 Subject: [PATCH 495/953] anolis: x86/cpu: Set low performance CRC32C flag on some Zhaoxin CPUs ANBZ: #7809 Some Zhaoxin CPUs declare support SSE4.2 instruction sets but having a CRC32C instruction implementation that not working as intended. Set low performance CRC32C flag on these CPUs for later use. 
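One caveat carried over from the previous patch: the cpufeatures.h hunk there defines X86_FEATURE_CRC32C_LOW_PERF as (11*32+27), the same bit already assigned to X86_FEATURE_APIC_MSRS_FENCE on the context line directly above it. Word 11 needs a free bit for the new synthetic flag; a corrected definition would look like the line below (bit 28 is assumed free here and must be verified against the tree before use):

    #define X86_FEATURE_CRC32C_LOW_PERF (11*32+28) /* "" CRC32C instruction is slow */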
Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2694 --- arch/x86/kernel/cpu/centaur.c | 7 +++++++ arch/x86/kernel/cpu/zhaoxin.c | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index a5c01c8f8824..ad6982391bc9 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -110,6 +110,13 @@ static void early_init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } + /* + * These CPUs declare support SSE4.2 instruction sets but + * having low performance CRC32C instruction implementation. + */ + if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) + set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 2126b10de796..f9a65b57a6bd 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -79,6 +79,13 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); } + /* + * These CPUs declare support SSE4.2 instruction sets but + * having low performance CRC32C instruction implementation. + */ + if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) + set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } -- Gitee From 83ea33eb9c42c2cdca1fad190af5900f96eb8dfc Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:04:57 +0800 Subject: [PATCH 496/953] anolis: crypto: x86/crc32c-intel Exclude low performance CRC32C instruction CPUs ANBZ: #7809 Low performance CRC32C instruction CPUs expect to use the driver crc32c-generic. So remove these CPUs support from crc32c-intel. Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2694 --- arch/x86/crypto/crc32c-intel_glue.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index feccb5254c7e..91d318b08fb7 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -224,6 +224,11 @@ static int __init crc32c_intel_mod_init(void) { if (!x86_match_cpu(crc32c_cpu_id)) return -ENODEV; + + /* Don't merit use low performance CRC32C instruction */ + if (boot_cpu_has(X86_FEATURE_CRC32C_LOW_PERF)) + return -ENODEV; + #ifdef CONFIG_X86_64 if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) { alg.update = crc32c_pcl_intel_update; -- Gitee From 9d075343aaedfa6fb124f95fc9123ea11cb710b7 Mon Sep 17 00:00:00 2001 From: Maciej Wieczor-Retman Date: Wed, 11 Oct 2023 08:48:42 +0200 Subject: [PATCH 497/953] x86/resctrl: Fix remaining kernel-doc warnings ANBZ: #8626 commit f05fd4ce99635975caa3e6a0eeb02118637f72a3 upstream. 
The kernel test robot reported kernel-doc warnings here: arch/x86/kernel/cpu/resctrl/rdtgroup.c:915: warning: Function parameter or member 'of' not described in 'rdt_bit_usage_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:915: warning: Function parameter or member 'seq' not described in 'rdt_bit_usage_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:915: warning: Function parameter or member 'v' not described in 'rdt_bit_usage_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1144: warning: Function parameter or member 'type' not described in '__rdtgroup_cbm_overlaps' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1224: warning: Function parameter or member 'rdtgrp' not described in 'rdtgroup_mode_test_exclusive' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'of' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'buf' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'nbytes' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'off' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1370: warning: Function parameter or member 'of' not described in 'rdtgroup_size_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1370: warning: Function parameter or member 's' not described in 'rdtgroup_size_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1370: warning: Function parameter or member 'v' not described in 'rdtgroup_size_show' The first two functions are missing an argument description while the other three are file callbacks and don't require a kernel-doc comment. Closes: https://lore.kernel.org/oe-kbuild-all/202310070434.mD8eRNAz-lkp@intel.com/ Reported-by: kernel test robot Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Ingo Molnar Cc: Peter Newman Cc: Borislav Petkov Cc: Reinette Chatre Link: https://lore.kernel.org/r/20231011064843.246592-1-maciej.wieczor-retman@intel.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1c0f00cd212d..f4e25fbdb1ab 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -895,7 +895,7 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of, return 0; } -/** +/* * rdt_bit_usage_show - Display current usage of resources * * A domain is a shared resource that can now be allocated differently. Here @@ -1134,6 +1134,7 @@ static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, * @d: The domain instance for which @closid is being tested. * @cbm: Capacity bitmask being tested. * @closid: Intended closid for @cbm. + * @type: CDP type of @r. * @exclusive: Only check if overlaps with exclusive resource groups * * Checks if provided @cbm intended to be used for @closid on domain @@ -1220,6 +1221,7 @@ bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, /** * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive + * @rdtgrp: Resource group identified through its closid. * * An exclusive resource group implies that there should be no sharing of * its allocated resources. 
At the time this group is considered to be @@ -1262,9 +1264,8 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) return true; } -/** +/* * rdtgroup_mode_write - Modify the resource group's mode - * */ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) @@ -1368,12 +1369,11 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, return size; } -/** +/* * rdtgroup_size_show - Display size in bytes of allocated regions * * The "size" file mirrors the layout of the "schemata" file, printing the * size in bytes of each region instead of the capacity bitmask. - * */ static int rdtgroup_size_show(struct kernfs_open_file *of, struct seq_file *s, void *v) -- Gitee From 50b339361c1d06de9d634f7fd5eb47e9e351daf1 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:00 -0500 Subject: [PATCH 498/953] x86/resctrl: Add multiple tasks to the resctrl group at once MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit fe2a20ea0b0953189e57740debc7dcc789d1ea55 upstream. The resctrl task assignment for monitor or control group needs to be done one at a time. For example: $mount -t resctrl resctrl /sys/fs/resctrl/ $mkdir /sys/fs/resctrl/ctrl_grp1 $echo 123 > /sys/fs/resctrl/ctrl_grp1/tasks $echo 456 > /sys/fs/resctrl/ctrl_grp1/tasks $echo 789 > /sys/fs/resctrl/ctrl_grp1/tasks This is not user-friendly when dealing with hundreds of tasks. Support multiple task assignment in one command with tasks ids separated by commas. For example: $echo 123,456,789 > /sys/fs/resctrl/ctrl_grp1/tasks Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-2-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 9 ++++++++- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 25 ++++++++++++++++++++++--- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 4c6421e2aa31..178ab1d8f747 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -306,7 +306,14 @@ All groups contain the following files: "tasks": Reading this file shows the list of all tasks that belong to this group. Writing a task id to the file will add a task to the - group. If the group is a CTRL_MON group the task is removed from + group. Multiple tasks can be added by separating the task ids + with commas. Tasks will be assigned sequentially. Multiple + failures are not supported. A single failure encountered while + attempting to assign a task will cause the operation to abort and + already added tasks before the failure will remain in the group. + Failures will be logged to /sys/fs/resctrl/info/last_cmd_status. + + If the group is a CTRL_MON group the task is removed from whichever previous CTRL_MON group owned the task and also from any MON group that owned the task. 
If the group is a MON group, then the task must already belong to the CTRL_MON parent of this diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f4e25fbdb1ab..161b63c86328 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -696,11 +696,10 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rdtgroup *rdtgrp; + char *pid_str; int ret = 0; pid_t pid; - if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) - return -EINVAL; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); @@ -715,7 +714,27 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, goto unlock; } - ret = rdtgroup_move_task(pid, rdtgrp, of); + while (buf && buf[0] != '\0' && buf[0] != '\n') { + pid_str = strim(strsep(&buf, ",")); + + if (kstrtoint(pid_str, 0, &pid)) { + rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); + ret = -EINVAL; + break; + } + + if (pid < 0) { + rdt_last_cmd_printf("Invalid pid %d\n", pid); + ret = -EINVAL; + break; + } + + ret = rdtgroup_move_task(pid, rdtgrp, of); + if (ret) { + rdt_last_cmd_printf("Error while processing task %d\n", pid); + break; + } + } unlock: rdtgroup_kn_unlock(of->kn); -- Gitee From 3c4a62dbe3dbd69cdbc67fef3616130f4f0fb2a2 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Tue, 3 Oct 2023 18:54:22 -0500 Subject: [PATCH 499/953] x86/resctrl: Simplify rftype flag definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 6846dc1a31d1894a7acf52d8442fe73b34091022 upstream. The rftype flags are bitmaps used for adding files under the resctrl filesystem. Some of these bitmap defines have one extra level of indirection which is not necessary. Drop the RF_* defines and simplify the macros. [ bp: Massage commit message. 
] Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-3-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 9 +++------ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 6 +++++- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index ca86a96e80c2..5959026075c9 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -242,12 +242,9 @@ struct rdtgroup { */ #define RFTYPE_INFO BIT(0) #define RFTYPE_BASE BIT(1) -#define RF_CTRLSHIFT 4 -#define RF_MONSHIFT 5 -#define RF_TOPSHIFT 6 -#define RFTYPE_CTRL BIT(RF_CTRLSHIFT) -#define RFTYPE_MON BIT(RF_MONSHIFT) -#define RFTYPE_TOP BIT(RF_TOPSHIFT) +#define RFTYPE_CTRL BIT(4) +#define RFTYPE_MON BIT(5) +#define RFTYPE_TOP BIT(6) #define RFTYPE_RES_CACHE BIT(8) #define RFTYPE_RES_MB BIT(9) #define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 161b63c86328..784d37c90c23 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3262,7 +3262,11 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); + if (rtype == RDTCTRL_GROUP) + files = RFTYPE_BASE | RFTYPE_CTRL; + else + files = RFTYPE_BASE | RFTYPE_MON; + ret = rdtgroup_add_files(kn, files); if (ret) { rdt_last_cmd_puts("kernfs fill error\n"); -- Gitee From a3c33303996e38f9369578fdea09ff9d57868e9d Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:02 -0500 Subject: [PATCH 500/953] x86/resctrl: Rename rftype flags for consistency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit d41592435cde9a658a1bd3b3fdfeb8db7b330d78 upstream. resctrl associates rftype flags with its files so that files can be chosen based on the resource, whether it is info or base, and if it is control or monitor type file. These flags use the RF_ as well as RFTYPE_ prefixes. Change the prefix to RFTYPE_ for all these flags to be consistent. 
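For quick review, the complete rename set in this patch is (no functional change):

    RF_CTRL_INFO -> RFTYPE_CTRL_INFO
    RF_MON_INFO  -> RFTYPE_MON_INFO
    RF_TOP_INFO  -> RFTYPE_TOP_INFO
    RF_CTRL_BASE -> RFTYPE_CTRL_BASE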
Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-4-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 10 +++--- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 44 +++++++++++++------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 5959026075c9..2c310fe7f1d6 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -247,10 +247,10 @@ struct rdtgroup { #define RFTYPE_TOP BIT(6) #define RFTYPE_RES_CACHE BIT(8) #define RFTYPE_RES_MB BIT(9) -#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) -#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON) -#define RF_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) -#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) +#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) +#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) +#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) /* List of all resource groups */ extern struct list_head rdt_all_groups; @@ -266,7 +266,7 @@ void __exit rdtgroup_exit(void); * @mode: Access mode * @kf_ops: File operations * @flags: File specific RFTYPE_FLAGS_* flags - * @fflags: File specific RF_* or RFTYPE_* flags + * @fflags: File specific RFTYPE_* flags * @seq_show: Show content of the file * @write: Write to the file */ diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 784d37c90c23..933b1b13eb17 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1718,77 +1718,77 @@ static struct rftype res_common_files[] = { .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_last_cmd_status_show, - .fflags = RF_TOP_INFO, + .fflags = RFTYPE_TOP_INFO, }, { .name = "num_closids", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_num_closids_show, - .fflags = RF_CTRL_INFO, + .fflags = RFTYPE_CTRL_INFO, }, { .name = "mon_features", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_mon_features_show, - .fflags = RF_MON_INFO, + .fflags = RFTYPE_MON_INFO, }, { .name = "num_rmids", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_num_rmids_show, - .fflags = RF_MON_INFO, + .fflags = RFTYPE_MON_INFO, }, { .name = "cbm_mask", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_default_ctrl_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "min_cbm_bits", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_min_cbm_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "shareable_bits", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_shareable_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "bit_usage", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_bit_usage_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "min_bandwidth", .mode = 0444, .kf_ops = 
&rdtgroup_kf_single_ops, .seq_show = rdt_min_bw_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, }, { .name = "bandwidth_gran", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_bw_gran_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, }, { .name = "delay_linear", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_delay_linear_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, }, /* * Platform specific which (if any) capabilities are provided by @@ -1807,7 +1807,7 @@ static struct rftype res_common_files[] = { .kf_ops = &rdtgroup_kf_single_ops, .write = max_threshold_occ_write, .seq_show = max_threshold_occ_show, - .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, }, { .name = "mbm_total_bytes_config", @@ -1854,7 +1854,7 @@ static struct rftype res_common_files[] = { .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_schemata_write, .seq_show = rdtgroup_schemata_show, - .fflags = RF_CTRL_BASE, + .fflags = RFTYPE_CTRL_BASE, }, { .name = "mode", @@ -1862,21 +1862,21 @@ static struct rftype res_common_files[] = { .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_mode_write, .seq_show = rdtgroup_mode_show, - .fflags = RF_CTRL_BASE, + .fflags = RFTYPE_CTRL_BASE, }, { .name = "size", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdtgroup_size_show, - .fflags = RF_CTRL_BASE, + .fflags = RFTYPE_CTRL_BASE, }, { .name = "sparse_masks", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_has_sparse_bitmasks_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, }; @@ -1933,7 +1933,7 @@ void __init thread_throttle_mode_init(void) if (!rft) return; - rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB; + rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; } void __init mbm_config_rftype_init(const char *config) @@ -1942,7 +1942,7 @@ void __init mbm_config_rftype_init(const char *config) rft = rdtgroup_get_rftype_by_name(config); if (rft) - rft->fflags = RF_MON_INFO | RFTYPE_RES_CACHE; + rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; } /** @@ -2077,21 +2077,21 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) if (IS_ERR(kn_info)) return PTR_ERR(kn_info); - ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); + ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); if (ret) goto out_destroy; /* loop over enabled controls, these are all alloc_capable */ list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; - fflags = r->fflags | RF_CTRL_INFO; + fflags = r->fflags | RFTYPE_CTRL_INFO; ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); if (ret) goto out_destroy; } for_each_mon_capable_rdt_resource(r) { - fflags = r->fflags | RF_MON_INFO; + fflags = r->fflags | RFTYPE_MON_INFO; sprintf(name, "%s_MON", r->name); ret = rdtgroup_mkdir_info_resdir(r, name, fflags); if (ret) @@ -3729,7 +3729,7 @@ static int __init rdtgroup_setup_root(void) list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE); + ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RFTYPE_CTRL_BASE); if (ret) { kernfs_destroy_root(rdt_root); goto out; -- Gitee From a6e2d561fd0233b18a98063df5b3e632ba02e93c Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:03 -0500 Subject: [PATCH 501/953] x86/resctrl: Unwind properly from rdt_enable_ctx() MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit df5f3a1dd8a6d3ddb1f07a10817f735194717422 upstream. rdt_enable_ctx() enables the features provided during resctrl mount. Additions to rdt_enable_ctx() are required to also modify error paths of rdt_enable_ctx() callers to ensure correct unwinding if errors are encountered after calling rdt_enable_ctx(). This is error prone. Introduce rdt_disable_ctx() to refactor the error unwinding of rdt_enable_ctx() to simplify future additions. This also simplifies cleanup in rdt_kill_sb(). Suggested-by: Reinette Chatre Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-5-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 53 ++++++++++++++++---------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 933b1b13eb17..6fc9739658ed 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2310,14 +2310,6 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) return 0; } -static void cdp_disable_all(void) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); -} - /* * We don't allow rdtgroup directories to be created anywhere * except the root directory. 
Thus when looking for the rdtgroup @@ -2397,19 +2389,42 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, struct kernfs_node **mon_data_kn); +static void rdt_disable_ctx(void) +{ + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); + set_mba_sc(false); +} + static int rdt_enable_ctx(struct rdt_fs_context *ctx) { int ret = 0; - if (ctx->enable_cdpl2) + if (ctx->enable_cdpl2) { ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); + if (ret) + goto out_done; + } - if (!ret && ctx->enable_cdpl3) + if (ctx->enable_cdpl3) { ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); + if (ret) + goto out_cdpl2; + } - if (!ret && ctx->enable_mba_mbps) + if (ctx->enable_mba_mbps) { ret = set_mba_sc(true); + if (ret) + goto out_cdpl3; + } + + return 0; +out_cdpl3: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); +out_cdpl2: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); +out_done: return ret; } @@ -2517,13 +2532,13 @@ static int rdt_get_tree(struct fs_context *fc) } ret = rdt_enable_ctx(ctx); - if (ret < 0) - goto out_cdp; + if (ret) + goto out; ret = schemata_list_create(); if (ret) { schemata_list_destroy(); - goto out_mba; + goto out_ctx; } closid_init(); @@ -2582,11 +2597,8 @@ static int rdt_get_tree(struct fs_context *fc) kernfs_remove(kn_info); out_schemata_free: schemata_list_destroy(); -out_mba: - if (ctx->enable_mba_mbps) - set_mba_sc(false); -out_cdp: - cdp_disable_all(); +out_ctx: + rdt_disable_ctx(); out: rdt_last_cmd_clear(); mutex_unlock(&rdtgroup_mutex); @@ -2818,12 +2830,11 @@ static void rdt_kill_sb(struct super_block *sb) cpus_read_lock(); mutex_lock(&rdtgroup_mutex); - set_mba_sc(false); + rdt_disable_ctx(); /*Put everything back to default values. */ for_each_alloc_capable_rdt_resource(r) reset_all_ctrls(r); - cdp_disable_all(); rmdir_all_sub(); rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; -- Gitee From 104ac61258b7b78dd5b586e38505558af37328f8 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:04 -0500 Subject: [PATCH 502/953] x86/resctrl: Move default group file creation to mount MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit d27567a0eb54be457b25e240593fdbd1c35c8618 upstream. The default resource group and its files are created during kernel init time. Upcoming changes will make some resctrl files optional based on a mount parameter. If optional files are to be added to the default group based on the mount option, then each new file needs to be created separately and call kernfs_activate() again. Create all files of the default resource group during resctrl mount, destroyed during unmount, to avoid scattering resctrl file addition across two separate code flows. 
Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-6-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 58 +++++++++++++++----------- 1 file changed, 34 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 6fc9739658ed..d04dd495d7cf 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -54,6 +54,9 @@ static struct kernfs_node *kn_mondata; static struct seq_buf last_cmd_status; static char last_cmd_status_buf[512]; +static int rdtgroup_setup_root(struct rdt_fs_context *ctx); +static void rdtgroup_destroy_root(void); + struct dentry *debugfs_resctrl; void rdt_last_cmd_clear(void) @@ -2531,10 +2534,14 @@ static int rdt_get_tree(struct fs_context *fc) goto out; } - ret = rdt_enable_ctx(ctx); + ret = rdtgroup_setup_root(ctx); if (ret) goto out; + ret = rdt_enable_ctx(ctx); + if (ret) + goto out_root; + ret = schemata_list_create(); if (ret) { schemata_list_destroy(); @@ -2543,6 +2550,12 @@ static int rdt_get_tree(struct fs_context *fc) closid_init(); + ret = rdtgroup_add_files(rdtgroup_default.kn, RFTYPE_CTRL_BASE); + if (ret) + goto out_schemata_free; + + kernfs_activate(rdtgroup_default.kn); + ret = rdtgroup_create_info_dir(rdtgroup_default.kn); if (ret < 0) goto out_schemata_free; @@ -2599,6 +2612,8 @@ static int rdt_get_tree(struct fs_context *fc) schemata_list_destroy(); out_ctx: rdt_disable_ctx(); +out_root: + rdtgroup_destroy_root(); out: rdt_last_cmd_clear(); mutex_unlock(&rdtgroup_mutex); @@ -2669,7 +2684,6 @@ static int rdt_init_fs_context(struct fs_context *fc) if (!ctx) return -ENOMEM; - ctx->kfc.root = rdt_root; ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; fc->fs_private = &ctx->kfc; fc->ops = &rdt_fs_context_ops; @@ -2839,6 +2853,7 @@ static void rdt_kill_sb(struct super_block *sb) rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); + rdtgroup_destroy_root(); static_branch_disable_cpuslocked(&rdt_alloc_enable_key); static_branch_disable_cpuslocked(&rdt_mon_enable_key); static_branch_disable_cpuslocked(&rdt_enable_key); @@ -3720,10 +3735,8 @@ static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { .show_options = rdtgroup_show_options, }; -static int __init rdtgroup_setup_root(void) +static int rdtgroup_setup_root(struct rdt_fs_context *ctx) { - int ret; - rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, KERNFS_ROOT_CREATE_DEACTIVATED | KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, @@ -3731,6 +3744,20 @@ static int __init rdtgroup_setup_root(void) if (IS_ERR(rdt_root)) return PTR_ERR(rdt_root); + ctx->kfc.root = rdt_root; + rdtgroup_default.kn = kernfs_root_to_node(rdt_root); + + return 0; +} + +static void rdtgroup_destroy_root(void) +{ + kernfs_destroy_root(rdt_root); + rdtgroup_default.kn = NULL; +} + +static void __init rdtgroup_setup_default(void) +{ mutex_lock(&rdtgroup_mutex); rdtgroup_default.closid = 0; @@ -3740,19 +3767,7 @@ static int __init rdtgroup_setup_root(void) list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RFTYPE_CTRL_BASE); - if (ret) { - kernfs_destroy_root(rdt_root); - goto 
out; - } - - rdtgroup_default.kn = kernfs_root_to_node(rdt_root); - kernfs_activate(rdtgroup_default.kn); - -out: mutex_unlock(&rdtgroup_mutex); - - return ret; } static void domain_destroy_mon_state(struct rdt_domain *d) @@ -3874,13 +3889,11 @@ int __init rdtgroup_init(void) seq_buf_init(&last_cmd_status, last_cmd_status_buf, sizeof(last_cmd_status_buf)); - ret = rdtgroup_setup_root(); - if (ret) - return ret; + rdtgroup_setup_default(); ret = sysfs_create_mount_point(fs_kobj, "resctrl"); if (ret) - goto cleanup_root; + return ret; ret = register_filesystem(&rdt_fs_type); if (ret) @@ -3913,8 +3926,6 @@ int __init rdtgroup_init(void) cleanup_mountpoint: sysfs_remove_mount_point(fs_kobj, "resctrl"); -cleanup_root: - kernfs_destroy_root(rdt_root); return ret; } @@ -3924,5 +3935,4 @@ void __exit rdtgroup_exit(void) debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); sysfs_remove_mount_point(fs_kobj, "resctrl"); - kernfs_destroy_root(rdt_root); } -- Gitee From 1fd428e0432c21aaaaa2a8ae48eb1ddbe89d9fdb Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:05 -0500 Subject: [PATCH 503/953] x86/resctrl: Introduce "-o debug" mount option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit cb07d71f01017b7c2885ed629da9b973cb56b1d2 upstream. Add "-o debug" option to mount resctrl filesystem in debug mode. When in debug mode resctrl displays files that have the new RFTYPE_DEBUG flag to help resctrl debugging. Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-7-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 5 ++++- arch/x86/kernel/cpu/resctrl/internal.h | 2 ++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 178ab1d8f747..68f11611f341 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -35,7 +35,7 @@ about the feature from resctrl's info directory. To use the feature mount the file system:: - # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl + # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps][,debug]] /sys/fs/resctrl mount options are: @@ -46,6 +46,9 @@ mount options are: "mba_MBps": Enable the MBA Software Controller(mba_sc) to specify MBA bandwidth in MBps +"debug": + Make debug files accessible. Available debug files are annotated with + "Available only with debug option". L2 and L3 CDP are controlled separately. 
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 2c310fe7f1d6..77cfbbe251d5 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -58,6 +58,7 @@ struct rdt_fs_context { bool enable_cdpl2; bool enable_cdpl3; bool enable_mba_mbps; + bool enable_debug; }; static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) @@ -247,6 +248,7 @@ struct rdtgroup { #define RFTYPE_TOP BIT(6) #define RFTYPE_RES_CACHE BIT(8) #define RFTYPE_RES_MB BIT(9) +#define RFTYPE_DEBUG BIT(10) #define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) #define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) #define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index d04dd495d7cf..c419603dba16 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -59,6 +59,8 @@ static void rdtgroup_destroy_root(void); struct dentry *debugfs_resctrl; +static bool resctrl_debug; + void rdt_last_cmd_clear(void) { lockdep_assert_held(&rdtgroup_mutex); @@ -1894,6 +1896,9 @@ static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) lockdep_assert_held(&rdtgroup_mutex); + if (resctrl_debug) + fflags |= RFTYPE_DEBUG; + for (rft = rfts; rft < rfts + len; rft++) { if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { ret = rdtgroup_add_file(kn, rft); @@ -2397,6 +2402,8 @@ static void rdt_disable_ctx(void) resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); set_mba_sc(false); + + resctrl_debug = false; } static int rdt_enable_ctx(struct rdt_fs_context *ctx) @@ -2421,6 +2428,9 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx) goto out_cdpl3; } + if (ctx->enable_debug) + resctrl_debug = true; + return 0; out_cdpl3: @@ -2625,6 +2635,7 @@ enum rdt_param { Opt_cdp, Opt_cdpl2, Opt_mba_mbps, + Opt_debug, nr__rdt_params }; @@ -2632,6 +2643,7 @@ static const struct fs_parameter_spec rdt_fs_parameters[] = { fsparam_flag("cdp", Opt_cdp), fsparam_flag("cdpl2", Opt_cdpl2), fsparam_flag("mba_MBps", Opt_mba_mbps), + fsparam_flag("debug", Opt_debug), {} }; @@ -2657,6 +2669,9 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) return -EINVAL; ctx->enable_mba_mbps = true; return 0; + case Opt_debug: + ctx->enable_debug = true; + return 0; } return -EINVAL; @@ -3725,6 +3740,9 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) seq_puts(seq, ",mba_MBps"); + if (resctrl_debug) + seq_puts(seq, ",debug"); + return 0; } -- Gitee From fd61a75a14ca394e28ec4a65a8bf92acdefd4fcc Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:06 -0500 Subject: [PATCH 504/953] x86/resctrl: Display CLOSID for resource group MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit ca8dad225e237493f19b1c5d4a8531f13a9b078f upstream. In x86, hardware uses CLOSID to identify a control group. When a user creates a control group this information is not visible to the user. It can help resctrl debugging. Add CLOSID(ctrl_hw_id) to the control groups display in the resctrl interface. Users can see this detail when resctrl is mounted with the "-o debug" option. Other architectures do not use "CLOSID". Use the names ctrl_hw_id to refer to "CLOSID" in an effort to keep the naming generic. 
For example: $cat /sys/fs/resctrl/ctrl_grp1/ctrl_hw_id 1 Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-8-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 4 ++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 68f11611f341..7412252f95a7 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -359,6 +359,10 @@ When control is enabled all CTRL_MON groups will also contain: file. On successful pseudo-locked region creation the mode will automatically change to "pseudo-locked". +"ctrl_hw_id": + Available only with debug option. The identifier used by hardware + for the control group. On x86 this is the CLOSID. + When monitoring is enabled all MON groups will also contain: "mon_data": diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index c419603dba16..069a9c395f6c 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -779,6 +779,22 @@ static int rdtgroup_tasks_show(struct kernfs_open_file *of, return ret; } +static int rdtgroup_closid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->closid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + #ifdef CONFIG_PROC_CPU_RESCTRL /* @@ -1883,6 +1899,13 @@ static struct rftype res_common_files[] = { .seq_show = rdt_has_sparse_bitmasks_show, .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, + { + .name = "ctrl_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_closid_show, + .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, + }, }; -- Gitee From 58c77fa7318046bc2636888238a2668edcad94ca Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:07 -0500 Subject: [PATCH 505/953] x86/resctrl: Add support for the files of MON groups only MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 918f211b5e4e709e91acf856967a850569c96b71 upstream. Files unique to monitoring groups have the RFTYPE_MON flag. When a new monitoring group is created the resctrl files with flags RFTYPE_BASE (files common to all resource groups) and RFTYPE_MON (files unique to monitoring groups) are created to support interacting with the new monitoring group. A resource group can support both monitoring and control, also termed a CTRL_MON resource group. CTRL_MON groups should get both monitoring and control resctrl files but that is not the case. Only the RFTYPE_BASE and RFTYPE_CTRL files are created for CTRL_MON groups. Ensure that files with the RFTYPE_MON flag are created for CTRL_MON groups. 
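After the fix, both creation paths compute the file flags the same way, ORing in RFTYPE_MON whenever the system is monitor-capable (sketch of mkdir_rdt_prepare(), mirroring the hunk below):

    if (rtype == RDTCTRL_GROUP) {
        files = RFTYPE_BASE | RFTYPE_CTRL;
        if (rdt_mon_capable)
            files |= RFTYPE_MON;  /* CTRL_MON also gets monitor files */
    } else {
        files = RFTYPE_BASE | RFTYPE_MON;
    }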
Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Ilpo Järvinen Reviewed-by: Reinette Chatre Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-9-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 069a9c395f6c..910db69f9fa5 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2553,6 +2553,7 @@ static void schemata_list_destroy(void) static int rdt_get_tree(struct fs_context *fc) { struct rdt_fs_context *ctx = rdt_fc2context(fc); + unsigned long flags = RFTYPE_CTRL_BASE; struct rdt_domain *dom; struct rdt_resource *r; int ret; @@ -2583,7 +2584,10 @@ static int rdt_get_tree(struct fs_context *fc) closid_init(); - ret = rdtgroup_add_files(rdtgroup_default.kn, RFTYPE_CTRL_BASE); + if (rdt_mon_capable) + flags |= RFTYPE_MON; + + ret = rdtgroup_add_files(rdtgroup_default.kn, flags); if (ret) goto out_schemata_free; @@ -3273,8 +3277,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, enum rdt_group_type rtype, struct rdtgroup **r) { struct rdtgroup *prdtgrp, *rdtgrp; + unsigned long files = 0; struct kernfs_node *kn; - uint files = 0; int ret; prdtgrp = rdtgroup_kn_lock_live(parent_kn); @@ -3326,10 +3330,13 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - if (rtype == RDTCTRL_GROUP) + if (rtype == RDTCTRL_GROUP) { files = RFTYPE_BASE | RFTYPE_CTRL; - else + if (rdt_mon_capable) + files |= RFTYPE_MON; + } else { files = RFTYPE_BASE | RFTYPE_MON; + } ret = rdtgroup_add_files(kn, files); if (ret) { -- Gitee From cd655e3372be508dea2bf69c9036c4c564dc5caf Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:08 -0500 Subject: [PATCH 506/953] x86/resctrl: Display RMID of resource group MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 4cee14bcb14881aae81d60f106a335c68553ac1f upstream. In x86, hardware uses RMID to identify a monitoring group. When a user creates a monitor group these details are not visible. These details can help resctrl debugging. Add RMID(mon_hw_id) to the monitor groups display in the resctrl interface. Users can see these details when resctrl is mounted with "-o debug" option. Add RFTYPE_MON_BASE that complements existing RFTYPE_CTRL_BASE and represents files belonging to monitoring groups. Other architectures do not use "RMID". Use the name mon_hw_id to refer to "RMID" in an effort to keep the naming generic. 
For example: $cat /sys/fs/resctrl/mon_groups/mon_grp1/mon_hw_id 3 Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-10-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 4 ++++ arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/rdtgroup.c | 23 +++++++++++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 7412252f95a7..a6279df64a9d 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -376,6 +376,10 @@ When monitoring is enabled all MON groups will also contain: the sum for all tasks in the CTRL_MON group and all tasks in MON groups. Please see example section for more details on usage. +"mon_hw_id": + Available only with debug option. The identifier used by hardware + for the monitor group. On x86 this is the RMID. + Resource allocation rules ------------------------- diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 77cfbbe251d5..52e7e7deee10 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -253,6 +253,7 @@ struct rdtgroup { #define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) #define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) #define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) /* List of all resource groups */ extern struct list_head rdt_all_groups; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 910db69f9fa5..2b69e560b05f 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -795,6 +795,22 @@ static int rdtgroup_closid_show(struct kernfs_open_file *of, return ret; } +static int rdtgroup_rmid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->mon.rmid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + #ifdef CONFIG_PROC_CPU_RESCTRL /* @@ -1869,6 +1885,13 @@ static struct rftype res_common_files[] = { .seq_show = rdtgroup_tasks_show, .fflags = RFTYPE_BASE, }, + { + .name = "mon_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_rmid_show, + .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, + }, { .name = "schemata", .mode = 0644, -- Gitee From 271fb4942b338656f95eb0263079d184d4764859 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 1 Nov 2023 14:26:15 -0700 Subject: [PATCH 507/953] x86/resctrl: Fix unused variable warning in cache_alloc_hsw_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 1b908debf53ff3cf0e43e0fa51e7319a23518e6c upstream. 
In a "W=1" build gcc throws a warning: arch/x86/kernel/cpu/resctrl/core.c: In function ‘cache_alloc_hsw_probe’: arch/x86/kernel/cpu/resctrl/core.c:139:16: warning: variable ‘h’ set but not used Switch from wrmsr_safe() to wrmsrl_safe(), and from rdmsr() to rdmsrl(), using a single u64 argument for the MSR value instead of the pair of u32 for the high and low halves. Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Babu Moger Acked-by: Reinette Chatre Link: https://lore.kernel.org/r/ZULCd/TGJL9Dmncf@agluck-desk3 Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index a8a6c6dd689c..851b13a43dcd 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -136,15 +136,15 @@ static inline void cache_alloc_hsw_probe(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3]; struct rdt_resource *r = &hw_res->r_resctrl; - u32 l, h, max_cbm = BIT_MASK(20) - 1; + u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0; - if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0)) + if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm)) return; - rdmsr(MSR_IA32_L3_CBM_BASE, l, h); + rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0); /* If all the bits were set in MSR, return success */ - if (l != max_cbm) + if (l3_cbm_0 != max_cbm) return; hw_res->num_closid = 4; -- Gitee From 26cbdc5db3464858d15214cd774ea544d7d5777b Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Wed, 24 Jan 2024 11:52:56 -0600 Subject: [PATCH 508/953] x86/resctrl: Remove redundant variable in mbm_config_write_domain() ANBZ: #8626 commit fc747eebef734563cf68a512f57937c8f231834a upstream. The kernel test robot reported the following warning after commit 54e35eb8611c ("x86/resctrl: Read supported bandwidth sources from CPUID"), even though the issue is already present in the original commit 92bd5a139033 ("x86/resctrl: Add interface to write mbm_total_bytes_config") which added this function. The reported warning is: $ make C=1 CHECK=scripts/coccicheck arch/x86/kernel/cpu/resctrl/rdtgroup.o ... arch/x86/kernel/cpu/resctrl/rdtgroup.c:1621:5-8: Unneeded variable: "ret". Return "0" on line 1655 Remove the local variable 'ret'. [ bp: Massage commit message, make mbm_config_write_domain() void.
] Fixes: 92bd5a139033 ("x86/resctrl: Add interface to write mbm_total_bytes_config") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202401241810.jbd8Ipa1-lkp@intel.com/ Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Acked-by: Reinette Chatre Link: https://lore.kernel.org/r/202401241810.jbd8Ipa1-lkp@intel.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2b69e560b05f..aa24343f1d23 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1614,11 +1614,10 @@ static void mon_event_config_write(void *info) wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); } -static int mbm_config_write_domain(struct rdt_resource *r, - struct rdt_domain *d, u32 evtid, u32 val) +static void mbm_config_write_domain(struct rdt_resource *r, + struct rdt_domain *d, u32 evtid, u32 val) { struct mon_config_info mon_info = {0}; - int ret = 0; /* * Read the current config value first. If both are the same then @@ -1627,7 +1626,7 @@ static int mbm_config_write_domain(struct rdt_resource *r, mon_info.evtid = evtid; mondata_config_read(d, &mon_info); if (mon_info.mon_config == val) - goto out; + return; mon_info.mon_config = val; @@ -1650,9 +1649,6 @@ static int mbm_config_write_domain(struct rdt_resource *r, * mbm_local and mbm_total counts for all the RMIDs. */ resctrl_arch_reset_rmid_all(r, d); - -out: - return ret; } static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) @@ -1661,7 +1657,6 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) char *dom_str = NULL, *id_str; unsigned long dom_id, val; struct rdt_domain *d; - int ret = 0; next: if (!tok || tok[0] == '\0') @@ -1690,9 +1685,7 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) list_for_each_entry(d, &r->domains, list) { if (d->id == dom_id) { - ret = mbm_config_write_domain(r, d, evtid, val); - if (ret) - return -EINVAL; + mbm_config_write_domain(r, d, evtid, val); goto next; } } -- Gitee From cfd3290a0514f0b008b542cdc98e17ade49700e1 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:15 +0000 Subject: [PATCH 509/953] tick/nohz: Move tick_nohz_full_mask declaration outside the #ifdef ANBZ: #8626 commit 31a5c0b7c674977889ce721d69101bc35f25e041 upstream. tick_nohz_full_mask lists the CPUs that are nohz_full. This is only needed when CONFIG_NO_HZ_FULL is defined. tick_nohz_full_cpu() allows a specific CPU to be tested against the mask, and evaluates to false when CONFIG_NO_HZ_FULL is not defined. The resctrl code needs to pick a CPU to run some work on, a new helper prefers housekeeping CPUs by examining the tick_nohz_full_mask. Hiding the declaration behind #ifdef CONFIG_NO_HZ_FULL forces all the users to be behind an #ifdef too. Move the tick_nohz_full_mask declaration, this lets callers drop the #ifdef, and guard access to tick_nohz_full_mask with IS_ENABLED() or something like tick_nohz_full_cpu(). The definition does not need to be moved as any callers should be removed at compile time unless CONFIG_NO_HZ_FULL is defined. 
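As an illustration of the pattern this enables, a caller that prefers housekeeping CPUs can now be written without any #ifdef. The sketch below is illustrative only, not code added by this patch; pick_housekeeping_cpu() is a made-up name:

  static unsigned int pick_housekeeping_cpu(const struct cpumask *mask)
  {
          unsigned int cpu;

          /*
           * tick_nohz_full_cpu() evaluates to false at compile time when
           * CONFIG_NO_HZ_FULL is not set, so the tick_nohz_full_mask
           * access behind it is discarded and no #ifdef is needed here.
           */
          for_each_cpu(cpu, mask) {
                  if (!tick_nohz_full_cpu(cpu))
                          return cpu;
          }

          /* Every CPU in @mask is nohz_full; fall back to any of them. */
          return cpumask_any(mask);
  }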
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Thomas Gleixner Acked-by: Reinette Chatre # for resctrl dependency Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-2-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- include/linux/tick.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/tick.h b/include/linux/tick.h index 9459fef5b857..65af90ca409a 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -174,9 +174,16 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ +/* + * Mask of CPUs that are nohz_full. + * + * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu() + * check. + */ +extern cpumask_var_t tick_nohz_full_mask; + #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; -extern cpumask_var_t tick_nohz_full_mask; static inline bool tick_nohz_full_enabled(void) { -- Gitee From 9f2e7cec0f82d5f18d48e033fc83764d5f39a7ed Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:16 +0000 Subject: [PATCH 510/953] x86/resctrl: Free rmid_ptrs from resctrl_exit() ANBZ: #8626 commit 3f7b07380d58cfbb6a2d3aa672dcc76c0f4b0745 upstream. rmid_ptrs[] is allocated from dom_data_init() but never free()d. While the exit text ends up in the linker script's DISCARD section, the direction of travel is for resctrl to be/have loadable modules. Add resctrl_put_mon_l3_config() to cleanup any memory allocated by rdt_get_mon_l3_config(). There is no reason to backport this to a stable kernel. 
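The lifetime pairing this creates is sketched below (the function names are from this patch; the arrows only summarise the call chains in the diff that follows):

  rdt_get_mon_l3_config() -> dom_data_init() -> kcalloc(rmid_ptrs)
  rdt_put_mon_l3_config() -> dom_data_exit() -> kfree(rmid_ptrs)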
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Babu Moger Reviewed-by: Reinette Chatre Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-3-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 6 ++++++ arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 15 +++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 851b13a43dcd..e6e6a34168a4 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -994,8 +994,14 @@ late_initcall(resctrl_late_init); static void __exit resctrl_exit(void) { + struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + cpuhp_remove_state(rdt_online); + rdtgroup_exit(); + + if (r->mon_capable) + rdt_put_mon_l3_config(); } __exitcall(resctrl_exit); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 52e7e7deee10..61c763604fc9 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -544,6 +544,7 @@ void closid_free(int closid); int alloc_rmid(void); void free_rmid(u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); +void __exit rdt_put_mon_l3_config(void); bool __init rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 3a6c069614eb..3a73db0579d8 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -719,6 +719,16 @@ static int dom_data_init(struct rdt_resource *r) return 0; } +static void __exit dom_data_exit(void) +{ + mutex_lock(&rdtgroup_mutex); + + kfree(rmid_ptrs); + rmid_ptrs = NULL; + + mutex_unlock(&rdtgroup_mutex); +} + static struct mon_evt llc_occupancy_event = { .name = "llc_occupancy", .evtid = QOS_L3_OCCUP_EVENT_ID, @@ -814,6 +824,11 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } +void __exit rdt_put_mon_l3_config(void) +{ + dom_data_exit(); +} + void __init intel_rdt_mbm_apply_quirk(void) { int cf_index; -- Gitee From 8858fb068e8d9ec65a787149886bf54194885c76 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:17 +0000 Subject: [PATCH 511/953] x86/resctrl: Create helper for RMID allocation and mondata dir creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit b1de313979af99dc0f999656fc99bbcb52559a38 upstream. When monitoring is supported, each monitor and control group is allocated an RMID. For control groups, rdtgroup_mkdir_ctrl_mon() later goes on to allocate the CLOSID. MPAM's equivalent of RMID is not an independent number, so it can't be allocated until the CLOSID is known. An RMID allocation for one CLOSID may fail, whereas another may succeed, depending on how many monitor groups a control group has. The RMID allocation therefore needs to happen after the CLOSID has been allocated. Move the RMID allocation and mondata dir creation to a helper.
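To see why one allocation can fail where another succeeds, consider a hypothetical MPAM-like system (the numbers below are illustrative, not from this patch):

  /*
   * Hypothetical system with 1 bit of PMG space, i.e. two monitor ids
   * per CLOSID:
   *
   *   CLOSID 3: both monitor ids in use -> allocating an RMID for a new
   *             monitor group under CLOSID 3 fails
   *   CLOSID 5: both monitor ids free   -> the same request succeeds
   *
   * The outcome depends on the CLOSID, so the CLOSID has to be chosen
   * before the RMID.
   */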
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Ilpo Järvinen Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Tested-by: Shaopeng Tan Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-4-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 42 +++++++++++++++++--------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index aa24343f1d23..4ea5a871be49 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3288,6 +3288,30 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) return ret; } +static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) +{ + int ret; + + if (!rdt_mon_capable) + return 0; + + ret = alloc_rmid(); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + + ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + free_rmid(rdtgrp->mon.rmid); + return ret; + } + + return 0; +} + static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, const char *name, umode_t mode, enum rdt_group_type rtype, struct rdtgroup **r) @@ -3360,20 +3384,10 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - if (rdt_mon_capable) { - ret = alloc_rmid(); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - goto out_destroy; - } - rdtgrp->mon.rmid = ret; + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) + goto out_destroy; - ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - goto out_idfree; - } - } kernfs_activate(kn); /* @@ -3381,8 +3395,6 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, */ return 0; -out_idfree: - free_rmid(rdtgrp->mon.rmid); out_destroy: kernfs_put(rdtgrp->kn); kernfs_remove(rdtgrp->kn); -- Gitee From 61fddfec39470c4f5ba4a824e60ca9534a68c3ab Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:18 +0000 Subject: [PATCH 512/953] x86/resctrl: Move RMID allocation out of mkdir_rdt_prepare() ANBZ: #8626 commit 311639e9512bb3af2abae32be9322b8a9b30eaa1 upstream. RMIDs are allocated for each monitor or control group directory, because each of these needs its own RMID. For control groups, rdtgroup_mkdir_ctrl_mon() later goes on to allocate the CLOSID. MPAM's equivalent of RMID is not an independent number, so can't be allocated until the CLOSID is known. An RMID allocation for one CLOSID may fail, whereas another may succeed depending on how many monitor groups a control group has. The RMID allocation needs to move to be after the CLOSID has been allocated. Move the RMID allocation out of mkdir_rdt_prepare() to occur in its caller, after the mkdir_rdt_prepare() call. This allows the RMID allocator to know the CLOSID. 
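The resulting order in rdtgroup_mkdir_ctrl_mon() is sketched below (error handling trimmed; the complete flow is in the hunks that follow):

  closid = closid_alloc();
  if (closid < 0)
          goto out_common_fail;
  rdtgrp->closid = closid;

  /* The RMID allocator can now see the group's CLOSID. */
  ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
  if (ret)
          goto out_closid_free;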
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-5-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 35 +++++++++++++++++++------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 4ea5a871be49..f455a10b74ab 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3312,6 +3312,12 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) return 0; } +static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) +{ + if (rdt_mon_capable) + free_rmid(rgrp->mon.rmid); +} + static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, const char *name, umode_t mode, enum rdt_group_type rtype, struct rdtgroup **r) @@ -3384,12 +3390,6 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); - if (ret) - goto out_destroy; - - kernfs_activate(kn); - /* * The caller unlocks the parent_kn upon success. */ @@ -3408,7 +3408,6 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) { kernfs_remove(rgrp->kn); - free_rmid(rgrp->mon.rmid); rdtgroup_remove(rgrp); } @@ -3430,12 +3429,21 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, prgrp = rdtgrp->mon.parent; rdtgrp->closid = prgrp->closid; + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) { + mkdir_rdt_prepare_clean(rdtgrp); + goto out_unlock; + } + + kernfs_activate(rdtgrp->kn); + /* * Add the rdtgrp to the list of rdtgrps the parent * ctrl_mon group has to track. */ list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); +out_unlock: rdtgroup_kn_unlock(parent_kn); return ret; } @@ -3466,9 +3474,16 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, ret = 0; rdtgrp->closid = closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) + goto out_closid_free; + + kernfs_activate(rdtgrp->kn); + ret = rdtgroup_init_alloc(rdtgrp); if (ret < 0) - goto out_id_free; + goto out_rmid_free; list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); @@ -3488,7 +3503,9 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, out_del_list: list_del(&rdtgrp->rdtgroup_list); -out_id_free: +out_rmid_free: + mkdir_rdt_prepare_rmid_free(rdtgrp); +out_closid_free: closid_free(closid); out_common_fail: mkdir_rdt_prepare_clean(rdtgrp); -- Gitee From a0f508a09508c807fb096ce6bbdee14a189be36d Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:19 +0000 Subject: [PATCH 513/953] x86/resctrl: Track the closid with the rmid ANBZ: #8626 commit 40fc735b78f0c81cea7d1c511cfd83892cb4d679 upstream. x86's RMID are independent of the CLOSID. An RMID can be allocated, used and freed without considering the CLOSID. MPAM's equivalent feature is PMG, which is not an independent number, it extends the CLOSID/PARTID space. For MPAM, only PMG-bits worth of 'RMID' can be allocated for a single CLOSID. i.e. if there is 1 bit of PMG space, then each CLOSID can have two monitor groups. 
To allow resctrl to disambiguate RMID values for different CLOSID, everything in resctrl that keeps an RMID value needs to know the CLOSID too. This will always be ignored on x86. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Xin Hao Reviewed-by: Reinette Chatre Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-6-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 7 +++ arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 73 +++++++++++++++-------- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 4 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 ++-- include/linux/resctrl.h | 16 ++++- 6 files changed, 77 insertions(+), 37 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 255a78d9d906..cc6e1bce7b1a 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -7,6 +7,13 @@ #include #include +/* + * This value can never be a valid CLOSID, and is used when mapping a + * (closid, rmid) pair to an index and back. On x86 only the RMID is + * needed. The index is a software defined value. + */ +#define X86_RESCTRL_EMPTY_CLOSID ((u32)~0) + /** * struct resctrl_pqr_state - State cache for the PQR MSR * @cur_rmid: The cached Resource Monitoring ID diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 61c763604fc9..ae0e3338abc4 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -542,7 +542,7 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); int alloc_rmid(void); -void free_rmid(u32 rmid); +void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); void __exit rdt_put_mon_l3_config(void); bool __init rdt_cpu_has(int flag); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 3a73db0579d8..3dad4134d2c9 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -24,7 +24,20 @@ #include "internal.h" +/** + * struct rmid_entry - dirty tracking for all RMID. + * @closid: The CLOSID for this entry. + * @rmid: The RMID for this entry. + * @busy: The number of domains with cached data using this RMID. + * @list: Member of the rmid_free_lru list when busy == 0. + * + * Depending on the architecture the correct monitor is accessed using + * both @closid and @rmid, or @rmid only. + * + * Take the rdtgroup_mutex when accessing. 
+ */ struct rmid_entry { + u32 closid; u32 rmid; int busy; struct list_head list; @@ -136,7 +149,7 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -static inline struct rmid_entry *__rmid_entry(u32 rmid) +static inline struct rmid_entry *__rmid_entry(u32 closid, u32 rmid) { struct rmid_entry *entry; @@ -190,7 +203,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, } void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid) + u32 unused, u32 rmid, + enum resctrl_event_id eventid) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct arch_mbm_state *am; @@ -230,7 +244,8 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) } int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val) + u32 unused, u32 rmid, enum resctrl_event_id eventid, + u64 *val) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); @@ -285,9 +300,9 @@ void __check_limbo(struct rdt_domain *d, bool force_free) if (nrmid >= r->num_rmid) break; - entry = __rmid_entry(nrmid); + entry = __rmid_entry(X86_RESCTRL_EMPTY_CLOSID, nrmid);// temporary - if (resctrl_arch_rmid_read(r, d, entry->rmid, + if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val)) { rmid_dirty = true; } else { @@ -342,7 +357,8 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) cpu = get_cpu(); list_for_each_entry(d, &r->domains, list) { if (cpumask_test_cpu(cpu, &d->cpu_mask)) { - err = resctrl_arch_rmid_read(r, d, entry->rmid, + err = resctrl_arch_rmid_read(r, d, entry->closid, + entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val); if (err || val <= resctrl_rmid_realloc_threshold) @@ -366,7 +382,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) list_add_tail(&entry->list, &rmid_free_lru); } -void free_rmid(u32 rmid) +void free_rmid(u32 closid, u32 rmid) { struct rmid_entry *entry; @@ -375,7 +391,7 @@ void free_rmid(u32 rmid) lockdep_assert_held(&rdtgroup_mutex); - entry = __rmid_entry(rmid); + entry = __rmid_entry(closid, rmid); if (is_llc_occupancy_enabled()) add_rmid_to_limbo(entry); @@ -383,8 +399,8 @@ void free_rmid(u32 rmid) list_add_tail(&entry->list, &rmid_free_lru); } -static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid, - enum resctrl_event_id evtid) +static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid) { switch (evtid) { case QOS_L3_MBM_TOTAL_EVENT_ID: @@ -396,20 +412,21 @@ static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid, } } -static int __mon_event_count(u32 rmid, struct rmid_read *rr) +static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) { struct mbm_state *m; u64 tval = 0; if (rr->first) { - resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid); - m = get_mbm_state(rr->d, rmid, rr->evtid); + resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); + m = get_mbm_state(rr->d, closid, rmid, rr->evtid); if (m) memset(m, 0, sizeof(struct mbm_state)); return 0; } - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval); + rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, + &tval); if (rr->err) return rr->err; @@ -421,6 +438,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) /* * mbm_bw_count() - Update bw count from values previously read 
by * __mon_event_count(). + * @closid: The closid used to identify the cached mbm_state. * @rmid: The rmid used to identify the cached mbm_state. * @rr: The struct rmid_read populated by __mon_event_count(). * @@ -429,7 +447,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) * __mon_event_count() is compared with the chunks value from the previous * invocation. This must be called once per second to maintain values in MBps. */ -static void mbm_bw_count(u32 rmid, struct rmid_read *rr) +static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) { struct mbm_state *m = &rr->d->mbm_local[rmid]; u64 cur_bw, bytes, cur_bytes; @@ -456,7 +474,7 @@ void mon_event_count(void *info) rdtgrp = rr->rgrp; - ret = __mon_event_count(rdtgrp->mon.rmid, rr); + ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); /* * For Ctrl groups read data from child monitor groups and @@ -467,7 +485,8 @@ void mon_event_count(void *info) if (rdtgrp->type == RDTCTRL_GROUP) { list_for_each_entry(entry, head, mon.crdtgrp_list) { - if (__mon_event_count(entry->mon.rmid, rr) == 0) + if (__mon_event_count(entry->closid, entry->mon.rmid, + rr) == 0) ret = 0; } } @@ -578,7 +597,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); } -static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) +static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid) { struct rmid_read rr; @@ -593,12 +613,12 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) if (is_mbm_total_enabled()) { rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; rr.val = 0; - __mon_event_count(rmid, &rr); + __mon_event_count(closid, rmid, &rr); } if (is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; rr.val = 0; - __mon_event_count(rmid, &rr); + __mon_event_count(closid, rmid, &rr); /* * Call the MBA software controller only for the @@ -606,7 +626,7 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) * the software controller explicitly. */ if (is_mba_sc(NULL)) - mbm_bw_count(rmid, &rr); + mbm_bw_count(closid, rmid, &rr); } } @@ -663,11 +683,11 @@ void mbm_handle_overflow(struct work_struct *work) d = container_of(work, struct rdt_domain, mbm_over.work); list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - mbm_update(r, d, prgrp->mon.rmid); + mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); head = &prgrp->mon.crdtgrp_list; list_for_each_entry(crgrp, head, mon.crdtgrp_list) - mbm_update(r, d, crgrp->mon.rmid); + mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); if (is_mba_sc(NULL)) update_mba_bw(prgrp, d); @@ -710,10 +730,11 @@ static int dom_data_init(struct rdt_resource *r) } /* - * RMID 0 is special and is always allocated. It's used for all - * tasks that are not monitored. + * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and + * are always allocated. These are used for the rdtgroup_default + * control group, which will be setup later in rdtgroup_init(). 
*/ - entry = __rmid_entry(0); + entry = __rmid_entry(RESCTRL_RESERVED_CLOSID, RESCTRL_RESERVED_RMID); list_del(&entry->list); return 0; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 8f559eeae08e..65bee6f11015 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -752,7 +752,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * anymore when this group would be used for pseudo-locking. This * is safe to call on platforms not capable of monitoring. */ - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); ret = 0; goto out; @@ -787,7 +787,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) ret = rdtgroup_locksetup_user_restore(rdtgrp); if (ret) { - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); return ret; } diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f455a10b74ab..ad7da7254f4d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2837,7 +2837,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) head = &rdtgrp->mon.crdtgrp_list; list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { - free_rmid(sentry->mon.rmid); + free_rmid(sentry->closid, sentry->mon.rmid); list_del(&sentry->mon.crdtgrp_list); if (atomic_read(&sentry->waitcount) != 0) @@ -2877,7 +2877,7 @@ static void rmdir_all_sub(void) cpumask_or(&rdtgroup_default.cpu_mask, &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); kernfs_remove(rdtgrp->kn); list_del(&rdtgrp->rdtgroup_list); @@ -3305,7 +3305,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); if (ret) { rdt_last_cmd_puts("kernfs subdir error\n"); - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); return ret; } @@ -3315,7 +3315,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) { if (rdt_mon_capable) - free_rmid(rgrp->mon.rmid); + free_rmid(rgrp->closid, rgrp->mon.rmid); } static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, @@ -3574,7 +3574,7 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) update_closid_rmid(tmpmask, NULL); rdtgrp->flags = RDT_DELETED; - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); /* * Remove the rdtgrp from the parent ctrl_mon group's list @@ -3620,8 +3620,8 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); update_closid_rmid(tmpmask, NULL); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); closid_free(rdtgrp->closid); - free_rmid(rdtgrp->mon.rmid); rdtgroup_ctrl_remove(rdtgrp); diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 66942d7fba7f..bd4ec22b5a96 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -6,6 +6,10 @@ #include #include +/* CLOSID, RMID value used by the default control group */ +#define RESCTRL_RESERVED_CLOSID 0 +#define RESCTRL_RESERVED_RMID 0 + #ifdef CONFIG_PROC_CPU_RESCTRL int proc_resctrl_show(struct seq_file *m, @@ -225,6 +229,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * for this resource and domain. * @r: resource that the counter should be read from. 
* @d: domain that the counter should be read from. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid + * only. * @rmid: rmid of the counter to read. * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. @@ -235,20 +242,25 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * 0 on success, or -EIO, -EINVAL etc on error. */ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val); + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val); + /** * resctrl_arch_reset_rmid() - Reset any private state associated with rmid * and eventid. * @r: The domain's resource. * @d: The rmid's domain. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid only. * @rmid: The rmid whose counter values should be reset. * @eventid: The eventid whose counter values should be reset. * * This can be called from any CPU. */ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid); + u32 closid, u32 rmid, + enum resctrl_event_id eventid); /** * resctrl_arch_reset_rmid_all() - Reset all private state associated with -- Gitee From b5728bef4aec551c67e6bf6fcc2d5208b5bcc938 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:20 +0000 Subject: [PATCH 514/953] x86/resctrl: Access per-rmid structures by index ANBZ: #8626 commit 6791e0ea30711b937d5cb6e2b17f59a2a2af5386 upstream. x86 systems identify traffic using the CLOSID and RMID. The CLOSID is used to lookup the control policy, the RMID is used for monitoring. For x86 these are independent numbers. Arm's MPAM has equivalent features PARTID and PMG, where the PARTID is used to lookup the control policy. The PMG in contrast is a small number of bits that are used to subdivide PARTID when monitoring. The cache-occupancy monitors require the PARTID to be specified when monitoring. This means MPAM's PMG field is not unique. There are multiple PMG-0, one per allocated CLOSID/PARTID. If PMG is treated as equivalent to RMID, it cannot be allocated as an independent number. Bitmaps like rmid_busy_llc need to be sized by the number of unique entries for this resource. Treat the combined CLOSID and RMID as an index, and provide architecture helpers to pack and unpack an index. This makes the MPAM values unique. The domain's rmid_busy_llc and rmid_ptrs[] are then sized by index, as are domain mbm_local[] and mbm_total[]. x86 can ignore the CLOSID field when packing and unpacking an index, and report as many indexes as RMID. 
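For contrast with the x86 identity helpers in the diff below, here is a sketch of how an architecture where monitors are scoped by the CLOSID might pack the index. This is illustrative only, not the arm64 implementation; NUM_PMG_BITS is a made-up name for the width of the PMG field:

  #define NUM_PMG_BITS 1

  static inline u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid)
  {
          return (closid << NUM_PMG_BITS) | rmid;
  }

  static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
  {
          *closid = idx >> NUM_PMG_BITS;
          *rmid = idx & ((1U << NUM_PMG_BITS) - 1);
  }

With one PMG bit this yields two unique indexes per CLOSID, and structures such as rmid_busy_llc are sized by the number of indexes rather than by the number of RMID.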
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Babu Moger Reviewed-by: Reinette Chatre Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-7-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 17 +++++ arch/x86/kernel/cpu/resctrl/core.c | 13 ++-- arch/x86/kernel/cpu/resctrl/internal.h | 4 +- arch/x86/kernel/cpu/resctrl/monitor.c | 98 +++++++++++++++++--------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 13 ++-- 5 files changed, 100 insertions(+), 45 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index cc6e1bce7b1a..db4c84dde2d5 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -101,6 +101,23 @@ static inline void resctrl_sched_in(struct task_struct *tsk) __resctrl_sched_in(tsk); } +static inline u32 resctrl_arch_system_num_rmid_idx(void) +{ + /* RMID are independent numbers for x86. num_rmid_idx == num_rmid */ + return boot_cpu_data.x86_cache_max_rmid + 1; +} + +static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + *rmid = idx; + *closid = X86_RESCTRL_EMPTY_CLOSID; +} + +static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid) +{ + return rmid; +} + void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index e6e6a34168a4..97438d844606 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -587,7 +587,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) mbm_setup_overflow_handler(d, 0); } if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(r, d)) { + has_busy_rmid(d)) { cancel_delayed_work(&d->cqm_limbo); cqm_setup_limbo_handler(d, 0); } @@ -598,11 +598,12 @@ static void clear_closid_rmid(int cpu) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - state->default_closid = 0; - state->default_rmid = 0; - state->cur_closid = 0; - state->cur_rmid = 0; - wrmsr(MSR_IA32_PQR_ASSOC, 0, 0); + state->default_closid = RESCTRL_RESERVED_CLOSID; + state->default_rmid = RESCTRL_RESERVED_RMID; + state->cur_closid = RESCTRL_RESERVED_CLOSID; + state->cur_rmid = RESCTRL_RESERVED_RMID; + wrmsr(MSR_IA32_PQR_ASSOC, RESCTRL_RESERVED_RMID, + RESCTRL_RESERVED_CLOSID); } static int resctrl_online_cpu(unsigned int cpu) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index ae0e3338abc4..cbba782acd0c 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -8,6 +8,8 @@ #include #include +#include + #define L3_QOS_CDP_ENABLE 0x01ULL #define L2_QOS_CDP_ENABLE 0x01ULL @@ -558,7 +560,7 @@ void __init intel_rdt_mbm_apply_quirk(void); bool is_mba_sc(struct rdt_resource *r); void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); void cqm_handle_limbo(struct work_struct *work); -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); +bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); void __init thread_throttle_mode_init(void); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c 
b/arch/x86/kernel/cpu/resctrl/monitor.c index 3dad4134d2c9..bc5ceef143ab 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -149,12 +149,29 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -static inline struct rmid_entry *__rmid_entry(u32 closid, u32 rmid) +/* + * x86 and arm64 differ in their handling of monitoring. + * x86's RMID are independent numbers, there is only one source of traffic + * with an RMID value of '1'. + * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of + * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID + * value is no longer unique. + * To account for this, resctrl uses an index. On x86 this is just the RMID, + * on arm64 it encodes the CLOSID and RMID. This gives a unique number. + * + * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code + * must accept an attempt to read every index. + */ +static inline struct rmid_entry *__rmid_entry(u32 idx) { struct rmid_entry *entry; + u32 closid, rmid; + + entry = &rmid_ptrs[idx]; + resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); - entry = &rmid_ptrs[rmid]; - WARN_ON(entry->rmid != rmid); + WARN_ON_ONCE(entry->closid != closid); + WARN_ON_ONCE(entry->rmid != rmid); return entry; } @@ -284,8 +301,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, void __check_limbo(struct rdt_domain *d, bool force_free) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry; - u32 crmid = 1, nrmid; + u32 idx, cur_idx = 1; bool rmid_dirty; u64 val = 0; @@ -296,12 +314,11 @@ void __check_limbo(struct rdt_domain *d, bool force_free) * RMID and move it to the free list when the counter reaches 0. */ for (;;) { - nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid); - if (nrmid >= r->num_rmid) + idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); + if (idx >= idx_limit) break; - entry = __rmid_entry(X86_RESCTRL_EMPTY_CLOSID, nrmid);// temporary - + entry = __rmid_entry(idx); if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val)) { rmid_dirty = true; @@ -310,19 +327,21 @@ void __check_limbo(struct rdt_domain *d, bool force_free) } if (force_free || !rmid_dirty) { - clear_bit(entry->rmid, d->rmid_busy_llc); + clear_bit(idx, d->rmid_busy_llc); if (!--entry->busy) { rmid_limbo_count--; list_add_tail(&entry->list, &rmid_free_lru); } } - crmid = nrmid + 1; + cur_idx = idx + 1; } } -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d) +bool has_busy_rmid(struct rdt_domain *d) { - return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid; + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + + return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; } /* @@ -352,6 +371,9 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) struct rdt_domain *d; int cpu, err; u64 val = 0; + u32 idx; + + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; cpu = get_cpu(); @@ -369,9 +391,9 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) * For the first limbo RMID in the domain, * setup up the limbo worker. 
*/ - if (!has_busy_rmid(r, d)) + if (!has_busy_rmid(d)) cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL); - set_bit(entry->rmid, d->rmid_busy_llc); + set_bit(idx, d->rmid_busy_llc); entry->busy++; } put_cpu(); @@ -384,14 +406,21 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) void free_rmid(u32 closid, u32 rmid) { + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); struct rmid_entry *entry; - if (!rmid) - return; - lockdep_assert_held(&rdtgroup_mutex); - entry = __rmid_entry(closid, rmid); + /* + * Do not allow the default rmid to be free'd. Comparing by index + * allows architectures that ignore the closid parameter to avoid an + * unnecessary check. + */ + if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID)) + return; + + entry = __rmid_entry(idx); if (is_llc_occupancy_enabled()) add_rmid_to_limbo(entry); @@ -402,11 +431,13 @@ void free_rmid(u32 closid, u32 rmid) static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id evtid) { + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + switch (evtid) { case QOS_L3_MBM_TOTAL_EVENT_ID: - return &d->mbm_total[rmid]; + return &d->mbm_total[idx]; case QOS_L3_MBM_LOCAL_EVENT_ID: - return &d->mbm_local[rmid]; + return &d->mbm_local[idx]; default: return NULL; } @@ -449,7 +480,8 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) */ static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) { - struct mbm_state *m = &rr->d->mbm_local[rmid]; + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct mbm_state *m = &rr->d->mbm_local[idx]; u64 cur_bw, bytes, cur_bytes; cur_bytes = rr->val; @@ -538,9 +570,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) struct mbm_state *pmbm_data, *cmbm_data; struct rdt_resource *r_mba; struct rdt_domain *dom_mba; + u32 cur_bw, user_bw, idx; struct list_head *head; struct rdtgroup *entry; - u32 cur_bw, user_bw; if (!is_mbm_local_enabled()) return; @@ -549,7 +581,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) closid = rgrp->closid; rmid = rgrp->mon.rmid; - pmbm_data = &dom_mbm->mbm_local[rmid]; + idx = resctrl_arch_rmid_idx_encode(closid, rmid); + pmbm_data = &dom_mbm->mbm_local[idx]; dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba); if (!dom_mba) { @@ -638,17 +671,15 @@ void cqm_handle_limbo(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); int cpu = smp_processor_id(); - struct rdt_resource *r; struct rdt_domain *d; mutex_lock(&rdtgroup_mutex); - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; d = container_of(work, struct rdt_domain, cqm_limbo.work); __check_limbo(d, false); - if (has_busy_rmid(r, d)) + if (has_busy_rmid(d)) schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); mutex_unlock(&rdtgroup_mutex); @@ -713,19 +744,20 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) static int dom_data_init(struct rdt_resource *r) { + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry = NULL; - int i, nr_rmids; + u32 idx; + int i; - nr_rmids = r->num_rmid; - rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL); + rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); if (!rmid_ptrs) return -ENOMEM; - for (i = 0; i < nr_rmids; i++) { + for (i = 0; i < idx_limit; i++) { entry = &rmid_ptrs[i]; INIT_LIST_HEAD(&entry->list); - entry->rmid = i; + resctrl_arch_rmid_idx_decode(i, 
&entry->closid, &entry->rmid); list_add_tail(&entry->list, &rmid_free_lru); } @@ -734,7 +766,9 @@ static int dom_data_init(struct rdt_resource *r) * are always allocated. These are used for the rdtgroup_default * control group, which will be setup later in rdtgroup_init(). */ - entry = __rmid_entry(RESCTRL_RESERVED_CLOSID, RESCTRL_RESERVED_RMID); + idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID); + entry = __rmid_entry(idx); list_del(&entry->list); return 0; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index ad7da7254f4d..a7dbc0e7e559 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3853,8 +3853,8 @@ static void __init rdtgroup_setup_default(void) { mutex_lock(&rdtgroup_mutex); - rdtgroup_default.closid = 0; - rdtgroup_default.mon.rmid = 0; + rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; + rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; rdtgroup_default.type = RDTCTRL_GROUP; INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); @@ -3889,7 +3889,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) if (is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); - if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { + if (is_llc_occupancy_enabled() && has_busy_rmid(d)) { /* * When a package is going down, forcefully * decrement rmid->ebusy. There is no way to know @@ -3907,16 +3907,17 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) { + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); size_t tsize; if (is_llc_occupancy_enabled()) { - d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL); + d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); if (!d->rmid_busy_llc) return -ENOMEM; } if (is_mbm_total_enabled()) { tsize = sizeof(*d->mbm_total); - d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL); + d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_total) { bitmap_free(d->rmid_busy_llc); return -ENOMEM; @@ -3924,7 +3925,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) } if (is_mbm_local_enabled()) { tsize = sizeof(*d->mbm_local); - d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL); + d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_local) { bitmap_free(d->rmid_busy_llc); kfree(d->mbm_total); -- Gitee From 0f7ea72b4b05fdfe4fdf7982fc3b6be5facd2e85 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:21 +0000 Subject: [PATCH 515/953] x86/resctrl: Allow RMID allocation to be scoped by CLOSID ANBZ: #8626 commit c4c0376eefe185b790d89ca8016b7f837ebf25da upstream. MPAMs RMID values are not unique unless the CLOSID is considered as well. alloc_rmid() expects the RMID to be an independent number. Pass the CLOSID in to alloc_rmid(). Use this to compare indexes when allocating. If the CLOSID is not relevant to the index, this ends up comparing the free RMID with itself, and the first free entry will be used. With MPAM the CLOSID is included in the index, so this becomes a walk of the free RMID entries, until one that matches the supplied CLOSID is found. 
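A short walk through the new free-list search, with hypothetical numbers:

  /*
   * 1 bit of PMG space, free list = { (closid 1, rmid 0), (closid 1, rmid 1) }:
   *
   *   alloc_rmid(1): encode(1, 0) == encode(1, 0) -> first entry is used
   *   alloc_rmid(2): encode(1, 0) != encode(2, 0),
   *                  encode(1, 1) != encode(2, 1) -> -ENOSPC until an RMID
   *                  belonging to CLOSID 2 is freed
   *
   * On x86 the encode helper ignores the CLOSID, so the comparison always
   * succeeds on the first free entry.
   */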
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-8-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 43 ++++++++++++++++++----- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 2 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 +- 4 files changed, 37 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index cbba782acd0c..872ba1a34103 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -543,7 +543,7 @@ void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); -int alloc_rmid(void); +int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); void __exit rdt_put_mon_l3_config(void); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index bc5ceef143ab..c49f2e89ef29 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -344,24 +344,49 @@ bool has_busy_rmid(struct rdt_domain *d) return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; } +static struct rmid_entry *resctrl_find_free_rmid(u32 closid) +{ + struct rmid_entry *itr; + u32 itr_idx, cmp_idx; + + if (list_empty(&rmid_free_lru)) + return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); + + list_for_each_entry(itr, &rmid_free_lru, list) { + /* + * Get the index of this free RMID, and the index it would need + * to be if it were used with this CLOSID. + * If the CLOSID is irrelevant on this architecture, the two + * index values are always the same on every entry and thus the + * very first entry will be returned. + */ + itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); + cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); + + if (itr_idx == cmp_idx) + return itr; + } + + return ERR_PTR(-ENOSPC); +} + /* - * As of now the RMIDs allocation is global. - * However we keep track of which packages the RMIDs - * are used to optimize the limbo list management. + * For MPAM the RMID value is not unique, and has to be considered with + * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which + * allows all domains to be managed by a single free list. + * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler. */ -int alloc_rmid(void) +int alloc_rmid(u32 closid) { struct rmid_entry *entry; lockdep_assert_held(&rdtgroup_mutex); - if (list_empty(&rmid_free_lru)) - return rmid_limbo_count ? 
-EBUSY : -ENOSPC; + entry = resctrl_find_free_rmid(closid); + if (IS_ERR(entry)) + return PTR_ERR(entry); - entry = list_first_entry(&rmid_free_lru, - struct rmid_entry, list); list_del(&entry->list); - return entry->rmid; } diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 65bee6f11015..d8f44113ed1f 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -777,7 +777,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) int ret; if (rdt_mon_capable) { - ret = alloc_rmid(); + ret = alloc_rmid(rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); return ret; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index a7dbc0e7e559..dcffd1c4a476 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3295,7 +3295,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) if (!rdt_mon_capable) return 0; - ret = alloc_rmid(); + ret = alloc_rmid(rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); return ret; -- Gitee From 1b501dfb7df9837d85ddc46fa1db11d724193c7d Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:22 +0000 Subject: [PATCH 516/953] x86/resctrl: Track the number of dirty RMID a CLOSID has ANBZ: #8626 commit b30a55df60c35df09b9ef08dfb0a0cbb543abe81 upstream. MPAM's PMG bits extend its PARTID space, meaning the same PMG value can be used for different control groups. This means once a CLOSID is allocated, all its monitoring ids may still be dirty, and held in limbo. Keep track of the number of RMID held in limbo each CLOSID has. This will allow a future helper to find the 'cleanest' CLOSID when allocating. The array is only needed when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined. This will never be the case on x86. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-9-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/monitor.c | 75 +++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index c49f2e89ef29..13b0c8d14f3d 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -50,6 +50,13 @@ struct rmid_entry { */ static LIST_HEAD(rmid_free_lru); +/* + * @closid_num_dirty_rmid The number of dirty RMID each CLOSID has. + * Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined. + * Indexed by CLOSID. Protected by rdtgroup_mutex. + */ +static u32 *closid_num_dirty_rmid; + /* * @rmid_limbo_count - count of currently unused but (potentially) * dirty RMIDs. 
@@ -292,6 +299,17 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return 0; } +static void limbo_release_entry(struct rmid_entry *entry) +{ + lockdep_assert_held(&rdtgroup_mutex); + + rmid_limbo_count--; + list_add_tail(&entry->list, &rmid_free_lru); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]--; +} + /* * Check the RMIDs that are marked as busy for this domain. If the * reported LLC occupancy is below the threshold clear the busy bit and @@ -328,10 +346,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free) if (force_free || !rmid_dirty) { clear_bit(idx, d->rmid_busy_llc); - if (!--entry->busy) { - rmid_limbo_count--; - list_add_tail(&entry->list, &rmid_free_lru); - } + if (!--entry->busy) + limbo_release_entry(entry); } cur_idx = idx + 1; } @@ -398,6 +414,8 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) u64 val = 0; u32 idx; + lockdep_assert_held(&rdtgroup_mutex); + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; @@ -423,10 +441,13 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) } put_cpu(); - if (entry->busy) + if (entry->busy) { rmid_limbo_count++; - else + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]++; + } else { list_add_tail(&entry->list, &rmid_free_lru); + } } void free_rmid(u32 closid, u32 rmid) @@ -770,13 +791,39 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) static int dom_data_init(struct rdt_resource *r) { u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + u32 num_closid = resctrl_arch_get_num_closid(r); struct rmid_entry *entry = NULL; + int err = 0, i; u32 idx; - int i; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + u32 *tmp; + + /* + * If the architecture hasn't provided a sanitised value here, + * this may result in larger arrays than necessary. Resctrl will + * use a smaller system wide value based on the resources in + * use. + */ + tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto out_unlock; + } + + closid_num_dirty_rmid = tmp; + } rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); - if (!rmid_ptrs) - return -ENOMEM; + if (!rmid_ptrs) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + err = -ENOMEM; + goto out_unlock; + } for (i = 0; i < idx_limit; i++) { entry = &rmid_ptrs[i]; @@ -796,13 +843,21 @@ static int dom_data_init(struct rdt_resource *r) entry = __rmid_entry(idx); list_del(&entry->list); - return 0; +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; } static void __exit dom_data_exit(void) { mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + kfree(rmid_ptrs); rmid_ptrs = NULL; -- Gitee From d88edecf225dd34ec66d5f93d7807fec3d7b7bac Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:23 +0000 Subject: [PATCH 517/953] x86/resctrl: Use __set_bit()/__clear_bit() instead of open coding ANBZ: #8626 commit 5d920b6881f2249be3a028ce0a7f31c5cc61b1ee upstream. The resctrl CLOSID allocator uses a single 32bit word to track which CLOSID are free. The setting and clearing of bits is open coded. Convert the existing open coded bit manipulations of closid_free_map to use __set_bit() and friends. 
These don't need to be atomic as this list is protected by the mutex. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-10-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index dcffd1c4a476..bc6e0f83c847 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -111,7 +111,7 @@ void rdt_staged_configs_clear(void) * - Our choices on how to configure each resource become progressively more * limited as the number of resources grows. */ -static int closid_free_map; +static unsigned long closid_free_map; static int closid_free_map_len; int closids_supported(void) @@ -130,8 +130,8 @@ static void closid_init(void) closid_free_map = BIT_MASK(rdt_min_closid) - 1; - /* CLOSID 0 is always reserved for the default group */ - closid_free_map &= ~1; + /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ + __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map); closid_free_map_len = rdt_min_closid; } @@ -139,17 +139,21 @@ static int closid_alloc(void) { u32 closid = ffs(closid_free_map); + lockdep_assert_held(&rdtgroup_mutex); + if (closid == 0) return -ENOSPC; closid--; - closid_free_map &= ~(1 << closid); + __clear_bit(closid, &closid_free_map); return closid; } void closid_free(int closid) { - closid_free_map |= 1 << closid; + lockdep_assert_held(&rdtgroup_mutex); + + __set_bit(closid, &closid_free_map); } /** @@ -161,7 +165,9 @@ void closid_free(int closid) */ static bool closid_allocated(unsigned int closid) { - return (closid_free_map & (1 << closid)) == 0; + lockdep_assert_held(&rdtgroup_mutex); + + return !test_bit(closid, &closid_free_map); } /** -- Gitee From 29e2864ce6b2a6ec74d5613a9cf5a51dafd693ed Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:24 +0000 Subject: [PATCH 518/953] x86/resctrl: Allocate the cleanest CLOSID by searching closid_num_dirty_rmid ANBZ: #8626 commit 6eac36bb9eb0349c983313c71692c19d50b56878 upstream. MPAM's PMG bits extend its PARTID space, meaning the same PMG value can be used for different control groups. This means once a CLOSID is allocated, all its monitoring ids may still be dirty, and held in limbo. Instead of allocating the first free CLOSID, on architectures where CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is enabled, search closid_num_dirty_rmid[] to find the cleanest CLOSID. The CLOSID found is returned to closid_alloc() for the free list to be updated. 
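Reduced to its essentials, the search added below is a linear scan over
the free CLOSIDs that stops early on a fully clean one (a simplified
sketch of the logic in the following hunk, with locking and the
!CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID early-return elided):

	int cleanest = -1, i;

	for (i = 0; i < closids_supported(); i++) {
		if (closid_allocated(i))
			continue;
		if (cleanest < 0 ||
		    closid_num_dirty_rmid[i] < closid_num_dirty_rmid[cleanest])
			cleanest = i;
		/* nothing beats a CLOSID with no dirty RMID */
		if (closid_num_dirty_rmid[cleanest] == 0)
			break;
	}
	return cleanest < 0 ? -ENOSPC : cleanest;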
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-11-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 2 ++ arch/x86/kernel/cpu/resctrl/monitor.c | 45 ++++++++++++++++++++++++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 19 ++++++++--- 3 files changed, 61 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 872ba1a34103..b7b9d9230bef 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -566,5 +566,7 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r); void __init thread_throttle_mode_init(void); void __init mbm_config_rftype_init(const char *config); void rdt_staged_configs_clear(void); +bool closid_allocated(unsigned int closid); +int resctrl_find_cleanest_closid(void); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 13b0c8d14f3d..101f1b112d17 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -386,6 +386,51 @@ static struct rmid_entry *resctrl_find_free_rmid(u32 closid) return ERR_PTR(-ENOSPC); } +/** + * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated + * RMID are clean, or the CLOSID that has + * the most clean RMID. + * + * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID + * may not be able to allocate clean RMID. To avoid this the allocator will + * choose the CLOSID with the most clean RMID. + * + * When the CLOSID and RMID are independent numbers, the first free CLOSID will + * be returned. + */ +int resctrl_find_cleanest_closid(void) +{ + u32 cleanest_closid = ~0; + int i = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + return -EIO; + + for (i = 0; i < closids_supported(); i++) { + int num_dirty; + + if (closid_allocated(i)) + continue; + + num_dirty = closid_num_dirty_rmid[i]; + if (num_dirty == 0) + return i; + + if (cleanest_closid == ~0) + cleanest_closid = i; + + if (num_dirty < closid_num_dirty_rmid[cleanest_closid]) + cleanest_closid = i; + } + + if (cleanest_closid == ~0) + return -ENOSPC; + + return cleanest_closid; +} + /* * For MPAM the RMID value is not unique, and has to be considered with * the CLOSID. 
The (CLOSID, RMID) pair is allocated on all domains, which diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index bc6e0f83c847..8fc46204a6cc 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -137,13 +137,22 @@ static void closid_init(void) static int closid_alloc(void) { - u32 closid = ffs(closid_free_map); + int cleanest_closid; + u32 closid; lockdep_assert_held(&rdtgroup_mutex); - if (closid == 0) - return -ENOSPC; - closid--; + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + cleanest_closid = resctrl_find_cleanest_closid(); + if (cleanest_closid < 0) + return cleanest_closid; + closid = cleanest_closid; + } else { + closid = ffs(closid_free_map); + if (closid == 0) + return -ENOSPC; + closid--; + } __clear_bit(closid, &closid_free_map); return closid; @@ -163,7 +172,7 @@ void closid_free(int closid) * Return: true if @closid is currently associated with a resource group, * false if @closid is free */ -static bool closid_allocated(unsigned int closid) +bool closid_allocated(unsigned int closid) { lockdep_assert_held(&rdtgroup_mutex); -- Gitee From 44d8bbd6d3a226b70de13ed61629291df5c7cbe0 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:25 +0000 Subject: [PATCH 519/953] x86/resctrl: Move CLOSID/RMID matching and setting to use helpers ANBZ: #8626 commit 6eca639d8340b569ff78ffd753796e83ef7075ae upstream. When switching tasks, the CLOSID and RMID that the new task should use are stored in struct task_struct. For x86 the CLOSID known by resctrl, the value in task_struct, and the value written to the CPU register are all the same thing. MPAM's CPU interface has two different PARTIDs - one for data accesses the other for instruction fetch. Storing resctrl's CLOSID value in struct task_struct implies the arch code knows whether resctrl is using CDP. Move the matching and setting of the struct task_struct properties to use helpers. This allows arm64 to store the hardware format of the register, instead of having to convert it each time. __rdtgroup_move_task()s use of READ_ONCE()/WRITE_ONCE() ensures torn values aren't seen as another CPU may schedule the task being moved while the value is being changed. MPAM has an additional corner-case here as the PMG bits extend the PARTID space. If the scheduler sees a new-CLOSID but old-RMID, the task will dirty an RMID that the limbo code is not watching causing an inaccurate count. x86's RMID are independent values, so the limbo code will still be watching the old-RMID in this circumstance. To avoid this, arm64 needs both the CLOSID/RMID WRITE_ONCE()d together. Both values must be provided together. Because MPAM's RMID values are not unique, the CLOSID must be provided when matching the RMID. 
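Purely as an illustration of why the helper takes both values (the
struct field and masks here are hypothetical, this is not the arm64
implementation), an MPAM port could combine CLOSID and RMID into a
single 64-bit store so the scheduler can never observe a torn pair:

	static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
							u32 closid, u32 rmid)
	{
		u64 regval = FIELD_PREP(MPAM_PARTID_MASK, closid) |
			     FIELD_PREP(MPAM_PMG_MASK, rmid);

		/* one WRITE_ONCE(): CLOSID and RMID update together */
		WRITE_ONCE(tsk->mpam_partid_pmg, regval);	/* hypothetical */
	}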
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-12-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 18 ++++++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 62 ++++++++++++++++---------- 2 files changed, 56 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index db4c84dde2d5..1d274dbabc44 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -95,6 +95,24 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } +static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk, + u32 closid, u32 rmid) +{ + WRITE_ONCE(tsk->closid, closid); + WRITE_ONCE(tsk->rmid, rmid); +} + +static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) +{ + return READ_ONCE(tsk->closid) == closid; +} + +static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored, + u32 rmid) +{ + return READ_ONCE(tsk->rmid) == rmid; +} + static inline void resctrl_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 8fc46204a6cc..e42cbdf8f6a3 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -102,7 +102,7 @@ void rdt_staged_configs_clear(void) * * Using a global CLOSID across all resources has some advantages and * some drawbacks: - * + We can simply set "current->closid" to assign a task to a resource + * + We can simply set current's closid to assign a task to a resource * group. * + Context switch code can avoid extra memory references deciding which * CLOSID to load into the PQR_ASSOC MSR @@ -574,14 +574,26 @@ static void update_task_closid_rmid(struct task_struct *t) _update_task_closid_rmid(t); } +static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) +{ + u32 closid, rmid = rdtgrp->mon.rmid; + + if (rdtgrp->type == RDTCTRL_GROUP) + closid = rdtgrp->closid; + else if (rdtgrp->type == RDTMON_GROUP) + closid = rdtgrp->mon.parent->closid; + else + return false; + + return resctrl_arch_match_closid(tsk, closid) && + resctrl_arch_match_rmid(tsk, closid, rmid); +} + static int __rdtgroup_move_task(struct task_struct *tsk, struct rdtgroup *rdtgrp) { /* If the task is already in rdtgrp, no need to move the task. */ - if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && - tsk->rmid == rdtgrp->mon.rmid) || - (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && - tsk->closid == rdtgrp->mon.parent->closid)) + if (task_in_rdtgroup(tsk, rdtgrp)) return 0; /* @@ -592,19 +604,19 @@ static int __rdtgroup_move_task(struct task_struct *tsk, * For monitor groups, can move the tasks only from * their parent CTRL group. 
*/ - - if (rdtgrp->type == RDTCTRL_GROUP) { - WRITE_ONCE(tsk->closid, rdtgrp->closid); - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else if (rdtgrp->type == RDTMON_GROUP) { - if (rdtgrp->mon.parent->closid == tsk->closid) { - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else { - rdt_last_cmd_puts("Can't move task to different control group\n"); - return -EINVAL; - } + if (rdtgrp->type == RDTMON_GROUP && + !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { + rdt_last_cmd_puts("Can't move task to different control group\n"); + return -EINVAL; } + if (rdtgrp->type == RDTMON_GROUP) + resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, + rdtgrp->mon.rmid); + else + resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, + rdtgrp->mon.rmid); + /* * Ensure the task's closid and rmid are written before determining if * the task is current that will decide if it will be interrupted. @@ -626,14 +638,15 @@ static int __rdtgroup_move_task(struct task_struct *tsk, static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_alloc_capable && - (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); + return (rdt_alloc_capable && (r->type == RDTCTRL_GROUP) && + resctrl_arch_match_closid(t, r->closid)); } static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_mon_capable && - (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); + return (rdt_mon_capable && (r->type == RDTMON_GROUP) && + resctrl_arch_match_rmid(t, r->mon.parent->closid, + r->mon.rmid)); } /** @@ -884,7 +897,7 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, rdtg->mode != RDT_MODE_EXCLUSIVE) continue; - if (rdtg->closid != tsk->closid) + if (!resctrl_arch_match_closid(tsk, rdtg->closid)) continue; seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", @@ -892,7 +905,8 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, seq_puts(s, "mon:"); list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, mon.crdtgrp_list) { - if (tsk->rmid != crg->mon.rmid) + if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, + crg->mon.rmid)) continue; seq_printf(s, "%s", crg->kn->name); break; @@ -2820,8 +2834,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, for_each_process_thread(p, t) { if (!from || is_closid_match(t, from) || is_rmid_match(t, from)) { - WRITE_ONCE(t->closid, to->closid); - WRITE_ONCE(t->rmid, to->mon.rmid); + resctrl_arch_set_closid_rmid(t, to->closid, + to->mon.rmid); /* * Order the closid/rmid stores above before the loads -- Gitee From 5cb37ea7ce91c915877b064f2716e16692398035 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:26 +0000 Subject: [PATCH 520/953] x86/resctrl: Add cpumask_any_housekeeping() for limbo/overflow ANBZ: #8626 commit a4846aaf39455fe69fce3522b385319383666eef upstream. The limbo and overflow code picks a CPU to use from the domain's list of online CPUs. Work is then scheduled on these CPUs to maintain the limbo list and any counters that may overflow. cpumask_any() may pick a CPU that is marked nohz_full, which will either penalise the work that CPU was dedicated to, or delay the processing of limbo list or counters that may overflow. Perhaps indefinitely. Delaying the overflow handling will skew the bandwidth values calculated by mba_sc, which expects to be called once a second. Add cpumask_any_housekeeping() as a replacement for cpumask_any() that prefers housekeeping CPUs. 
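Schematically, the helper's selection logic is (the full version
appears in the hunk below):

	cpu = cpumask_any(mask);
	if (tick_nohz_full_cpu(cpu)) {
		/* try to find a housekeeping CPU in @mask instead */
		hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask);
		if (hk_cpu < nr_cpu_ids)
			cpu = hk_cpu;
	}
	return cpu;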
This helper will still return a nohz_full CPU if that is the only option. The CPU to use is re-evaluated each time the limbo/overflow work runs. This ensures the work will move off a nohz_full CPU once a housekeeping CPU is available. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-13-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 24 ++++++++++++++++++++++++ arch/x86/kernel/cpu/resctrl/monitor.c | 20 +++++++++++++------- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index b7b9d9230bef..81f5de916db8 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -55,6 +56,29 @@ /* Max event bits supported */ #define MAX_EVT_CONFIG_BITS GENMASK(6, 0) +/** + * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that + * aren't marked nohz_full + * @mask: The mask to pick a CPU from. + * + * Returns a CPU in @mask. If there are housekeeping CPUs that don't use + * nohz_full, these are preferred. + */ +static inline unsigned int cpumask_any_housekeeping(const struct cpumask *mask) +{ + unsigned int cpu, hk_cpu; + + cpu = cpumask_any(mask); + if (!tick_nohz_full_cpu(cpu)) + return cpu; + + hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu < nr_cpu_ids) + cpu = hk_cpu; + + return cpu; +} + struct rdt_fs_context { struct kernfs_fs_context kfc; bool enable_cdpl2; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 101f1b112d17..38f85e53ca93 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -761,7 +761,6 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, void cqm_handle_limbo(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); - int cpu = smp_processor_id(); struct rdt_domain *d; mutex_lock(&rdtgroup_mutex); @@ -770,8 +769,11 @@ void cqm_handle_limbo(struct work_struct *work) __check_limbo(d, false); - if (has_busy_rmid(d)) - schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); + if (has_busy_rmid(d)) { + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, + delay); + } mutex_unlock(&rdtgroup_mutex); } @@ -781,7 +783,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; - cpu = cpumask_any(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->cqm_work_cpu = cpu; schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); @@ -791,7 +793,6 @@ void mbm_handle_overflow(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); struct rdtgroup *prgrp, *crgrp; - int cpu = smp_processor_id(); struct list_head *head; struct rdt_resource *r; struct rdt_domain *d; @@ -815,7 +816,12 @@ void mbm_handle_overflow(struct work_struct *work) update_mba_bw(prgrp, d); } - schedule_delayed_work_on(cpu, &d->mbm_over, delay); + /* + * Re-check 
for housekeeping CPUs. This allows the overflow handler to + * move off a nohz_full CPU quickly. + */ + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); out_unlock: mutex_unlock(&rdtgroup_mutex); @@ -828,7 +834,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) if (!static_branch_likely(&rdt_mon_enable_key)) return; - cpu = cpumask_any(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->mbm_work_cpu = cpu; schedule_delayed_work_on(cpu, &dom->mbm_over, delay); } -- Gitee From e6ecfe69e82b9354381d686a0340f3fa28015e9e Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:27 +0000 Subject: [PATCH 521/953] x86/resctrl: Queue mon_event_read() instead of sending an IPI ANBZ: #8626 commit 09909e098113bed99c9f63e1df89073e92c69891 upstream. Intel is blessed with an abundance of monitors, one per RMID, that can be read from any CPU in the domain. MPAMs monitors reside in the MMIO MSC, the number implemented is up to the manufacturer. This means when there are fewer monitors than needed, they need to be allocated and freed. MPAM's CSU monitors are used to back the 'llc_occupancy' monitor file. The CSU counter is allowed to return 'not ready' for a small number of micro-seconds after programming. To allow one CSU hardware monitor to be used for multiple control or monitor groups, the CPU accessing the monitor needs to be able to block when configuring and reading the counter. Worse, the domain may be broken up into slices, and the MMIO accesses for each slice may need performing from different CPUs. These two details mean MPAMs monitor code needs to be able to sleep, and IPI another CPU in the domain to read from a resource that has been sliced. mon_event_read() already invokes mon_event_count() via IPI, which means this isn't possible. On systems using nohz-full, some CPUs need to be interrupted to run kernel work as they otherwise stay in user-space running realtime workloads. Interrupting these CPUs should be avoided, and scheduling work on them may never complete. Change mon_event_read() to pick a housekeeping CPU, (one that is not using nohz_full) and schedule mon_event_count() and wait. If all the CPUs in a domain are using nohz-full, then an IPI is used as the fallback. This function is only used in response to a user-space filesystem request (not the timing sensitive overflow code). This allows MPAM to hide the slice behaviour from resctrl, and to keep the monitor-allocation in monitor.c. When the IPI fallback is used on machines where MPAM needs to make an access on multiple CPUs, the counter read will always fail. 
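In outline, the read path this patch installs queues the work so that
it may sleep, keeping the IPI only as a last resort (a condensed view
of the mon_event_read() hunk below):

	cpu = cpumask_any_housekeeping(&d->cpu_mask);
	if (tick_nohz_full_cpu(cpu))
		/* the whole domain is nohz_full: fall back to an IPI */
		smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
	else
		/* runs from the target CPU's workqueue: may sleep */
		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);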
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-14-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 26 +++++++++++++++++++++-- arch/x86/kernel/cpu/resctrl/monitor.c | 2 +- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index beccb0e87ba7..e933e1cdb1c9 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -19,6 +19,8 @@ #include #include #include +#include + #include "internal.h" /* @@ -522,12 +524,21 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of, return ret; } +static int smp_mon_event_count(void *arg) +{ + mon_event_count(arg); + + return 0; +} + void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, struct rdt_domain *d, struct rdtgroup *rdtgrp, int evtid, int first) { + int cpu; + /* - * setup the parameters to send to the IPI to read the data. + * Setup the parameters to pass to mon_event_count() to read the data. */ rr->rgrp = rdtgrp; rr->evtid = evtid; @@ -536,7 +547,18 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, rr->val = 0; rr->first = first; - smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + cpu = cpumask_any_housekeeping(&d->cpu_mask); + + /* + * cpumask_any_housekeeping() prefers housekeeping CPUs, but + * are all the CPUs nohz_full? If yes, pick a CPU to IPI. + * MPAM's resctrl_arch_rmid_read() is unable to read the + * counters on some platforms if its called in IRQ context. + */ + if (tick_nohz_full_cpu(cpu)) + smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + else + smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); } int rdtgroup_mondata_show(struct seq_file *m, void *arg) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 38f85e53ca93..fd060ef86f38 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -585,7 +585,7 @@ static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) } /* - * This is called via IPI to read the CQM/MBM counters + * This is scheduled by mon_event_read() to read the CQM/MBM counters * on a domain. */ void mon_event_count(void *info) -- Gitee From e7df007955f3bc26ed043776cb350257075bb3fd Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:28 +0000 Subject: [PATCH 522/953] x86/resctrl: Allow resctrl_arch_rmid_read() to sleep ANBZ: #8626 commit 6fde1424f29b151b9dc8c660eecf4d1645facea5 upstream. MPAM's cache occupancy counters can take a little while to settle once the monitor has been configured. The maximum settling time is described to the driver via a firmware table. The value could be large enough that it makes sense to sleep. To avoid exposing this to resctrl, it should be hidden behind MPAM's resctrl_arch_rmid_read(). resctrl_arch_rmid_read() may be called via IPI meaning it is unable to sleep. In this case, it should return an error if it needs to sleep. 
This will only affect MPAM platforms where the cache occupancy counter isn't available immediately, nohz_full is in use, and there are no housekeeping CPUs in the necessary domain. There are three callers of resctrl_arch_rmid_read(): __mon_event_count() and __check_limbo() are both called from a non-migrateable context. mon_event_read() invokes __mon_event_count() using smp_call_on_cpu(), which adds work to the target CPUs workqueue. rdtgroup_mutex() is held, meaning this cannot race with the resctrl cpuhp callback. __check_limbo() is invoked via schedule_delayed_work_on() also adds work to a per-cpu workqueue. The remaining call is add_rmid_to_limbo() which is called in response to a user-space syscall that frees an RMID. This opportunistically reads the LLC occupancy counter on the current domain to see if the RMID is over the dirty threshold. This has to disable preemption to avoid reading the wrong domain's value. Disabling preemption here prevents resctrl_arch_rmid_read() from sleeping. add_rmid_to_limbo() walks each domain, but only reads the counter on one domain. If the system has more than one domain, the RMID will always be added to the limbo list. If the RMIDs usage was not over the threshold, it will be removed from the list when __check_limbo() runs. Make this the default behaviour. Free RMIDs are always added to the limbo list for each domain. The user visible effect of this is that a clean RMID is not available for re-allocation immediately after 'rmdir()' completes. This behaviour was never portable as it never happened on a machine with multiple domains. Removing this path allows resctrl_arch_rmid_read() to sleep if its called with interrupts unmasked. Document this is the expected behaviour, and add a might_sleep() annotation to catch changes that won't work on arm64. 
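A sketch of how an architecture might honour this contract
(illustrative only: csu_counter_ready() and the delay bounds are
hypothetical, not the MPAM driver):

	int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
				   u32 closid, u32 rmid,
				   enum resctrl_event_id eventid, u64 *val)
	{
		resctrl_arch_rmid_read_context_check();

		while (!csu_counter_ready(d, rmid)) {	/* hypothetical */
			if (irqs_disabled())
				return -EIO;	/* called via IPI: cannot wait */
			usleep_range(10, 100);
		}
		/* ... read the settled counter into *val ... */
		return 0;
	}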
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-15-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/monitor.c | 25 +++++-------------------- include/linux/resctrl.h | 23 ++++++++++++++++++++++- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index fd060ef86f38..e8aeff6673ea 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -277,6 +277,8 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u64 msr_val, chunks; int ret; + resctrl_arch_rmid_read_context_check(); + if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) return -EINVAL; @@ -455,8 +457,6 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; struct rdt_domain *d; - int cpu, err; - u64 val = 0; u32 idx; lockdep_assert_held(&rdtgroup_mutex); @@ -464,17 +464,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; - cpu = get_cpu(); list_for_each_entry(d, &r->domains, list) { - if (cpumask_test_cpu(cpu, &d->cpu_mask)) { - err = resctrl_arch_rmid_read(r, d, entry->closid, - entry->rmid, - QOS_L3_OCCUP_EVENT_ID, - &val); - if (err || val <= resctrl_rmid_realloc_threshold) - continue; - } - /* * For the first limbo RMID in the domain, * setup up the limbo worker. @@ -484,15 +474,10 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) set_bit(idx, d->rmid_busy_llc); entry->busy++; } - put_cpu(); - if (entry->busy) { - rmid_limbo_count++; - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) - closid_num_dirty_rmid[entry->closid]++; - } else { - list_add_tail(&entry->list, &rmid_free_lru); - } + rmid_limbo_count++; + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]++; } void free_rmid(u32 closid, u32 rmid) diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index bd4ec22b5a96..8649fc84aac2 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -236,7 +236,12 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. * - * Call from process context on a CPU that belongs to domain @d. + * Some architectures need to sleep when first programming some of the counters. + * (specifically: arm64's MPAM cache occupancy counters can return 'not ready' + * for a short period of time). Call from a non-migrateable process context on + * a CPU that belongs to domain @d. e.g. use smp_call_on_cpu() or + * schedule_work_on(). This function can be called with interrupts masked, + * e.g. using smp_call_function_any(), but may consistently return an error. * * Return: * 0 on success, or -EIO, -EINVAL etc on error. 
@@ -245,6 +250,22 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid, u64 *val); +/** + * resctrl_arch_rmid_read_context_check() - warn about invalid contexts + * + * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when + * resctrl_arch_rmid_read() is called with preemption disabled. + * + * The contract with resctrl_arch_rmid_read() is that if interrupts + * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an + * IPI, (and fail if the call needed to sleep), while most of the time + * the work is scheduled, allowing the call to sleep. + */ +static inline void resctrl_arch_rmid_read_context_check(void) +{ + if (!irqs_disabled()) + might_sleep(); +} /** * resctrl_arch_reset_rmid() - Reset any private state associated with rmid -- Gitee From b4bde791265ee2c89d3d99beedbb03c52eb8db84 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:29 +0000 Subject: [PATCH 523/953] x86/resctrl: Allow arch to allocate memory needed in resctrl_arch_rmid_read() ANBZ: #8626 commit e557999f80a5ee4ec812f594ab42bb76c3ec4eb2 upstream. Depending on the number of monitors available, Arm's MPAM may need to allocate a monitor prior to reading the counter value. Allocating a contended resource may involve sleeping. __check_limbo() and mon_event_count() each make multiple calls to resctrl_arch_rmid_read(), to avoid extra work on contended systems, the allocation should be valid for multiple invocations of resctrl_arch_rmid_read(). The memory or hardware allocated is not specific to a domain. Add arch hooks for this allocation, which need calling before resctrl_arch_rmid_read(). The allocated monitor is passed to resctrl_arch_rmid_read(), then freed again afterwards. The helper can be called on any CPU, and can sleep. 
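The calling convention this establishes, shown schematically (error
handling trimmed):

	void *ctx = resctrl_arch_mon_ctx_alloc(r, evtid);	/* may sleep */

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = resctrl_arch_rmid_read(r, d, closid, rmid, evtid, &val, ctx);
	resctrl_arch_mon_ctx_free(r, evtid, ctx);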
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-16-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 11 +++++++ arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 7 +++++ arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 35 +++++++++++++++++++++-- include/linux/resctrl.h | 5 +++- 5 files changed, 55 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 1d274dbabc44..29c4cc343787 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -136,6 +136,17 @@ static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid) return rmid; } +/* x86 can always read an rmid, nothing needs allocating */ +struct rdt_resource; +static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + might_sleep(); + return NULL; +}; + +static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *ctx) { }; + void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index e933e1cdb1c9..52fa0e14cb86 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -546,6 +546,11 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, rr->d = d; rr->val = 0; rr->first = first; + rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid); + if (IS_ERR(rr->arch_mon_ctx)) { + rr->err = -EINVAL; + return; + } cpu = cpumask_any_housekeeping(&d->cpu_mask); @@ -559,6 +564,8 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); else smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); + + resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); } int rdtgroup_mondata_show(struct seq_file *m, void *arg) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 81f5de916db8..e089d1a1a055 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -137,6 +137,7 @@ struct rmid_read { bool first; int err; u64 val; + void *arch_mon_ctx; }; extern bool rdt_alloc_capable; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index e8aeff6673ea..9b503e6ac490 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -269,7 +269,7 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 unused, u32 rmid, enum resctrl_event_id eventid, - u64 *val) + u64 *val, void *ignored) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); @@ -324,9 +324,17 @@ void __check_limbo(struct rdt_domain *d, bool force_free) u32 idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry; u32 idx, cur_idx = 1; + void *arch_mon_ctx; bool rmid_dirty; u64 val = 0; + arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); + if (IS_ERR(arch_mon_ctx)) { 
+ pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(arch_mon_ctx)); + return; + } + /* * Skip RMID 0 and start from RMID 1 and check all the RMIDs that * are marked as busy for occupancy < threshold. If the occupancy @@ -340,7 +348,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free) entry = __rmid_entry(idx); if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, &val)) { + QOS_L3_OCCUP_EVENT_ID, &val, + arch_mon_ctx)) { rmid_dirty = true; } else { rmid_dirty = (val >= resctrl_rmid_realloc_threshold); @@ -353,6 +362,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free) } cur_idx = idx + 1; } + + resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); } bool has_busy_rmid(struct rdt_domain *d) @@ -533,7 +544,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) } rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, - &tval); + &tval, rr->arch_mon_ctx); if (rr->err) return rr->err; @@ -722,11 +733,27 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, if (is_mbm_total_enabled()) { rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + __mon_event_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); } if (is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + __mon_event_count(closid, rmid, &rr); /* @@ -736,6 +763,8 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, */ if (is_mba_sc(NULL)) mbm_bw_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); } } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8649fc84aac2..bf460c912bf5 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -235,6 +235,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * @rmid: rmid of the counter to read. * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. + * @arch_mon_ctx: An architecture specific value from + * resctrl_arch_mon_ctx_alloc(), for MPAM this identifies + * the hardware monitor allocated for this read request. * * Some architectures need to sleep when first programming some of the counters. * (specifically: arm64's MPAM cache occupancy counters can return 'not ready' @@ -248,7 +251,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); */ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid, - u64 *val); + u64 *val, void *arch_mon_ctx); /** * resctrl_arch_rmid_read_context_check() - warn about invalid contexts -- Gitee From 74fb0907f568e5a7c9e2baf323d6ce5a4be33580 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:30 +0000 Subject: [PATCH 524/953] x86/resctrl: Make resctrl_mounted checks explicit ANBZ: #8626 commit 13e5769debf09588543db83836c524148873929f upstream. The rdt_enable_key is switched when resctrl is mounted, and used to prevent a second mount of the filesystem. 
It also enables the architecture's context switch code. This requires another architecture to have the same set of static keys, as resctrl depends on them too. The existing users of these static keys are implicitly also checking if the filesystem is mounted. Make the resctrl_mounted checks explicit: resctrl can keep track of whether it has been mounted once. This doesn't need to be combined with whether the arch code is context switching the CLOSID. rdt_mon_enable_key is never used just to test that resctrl is mounted, but does also have this implication. Add a resctrl_mounted to all uses of rdt_mon_enable_key. This will allow the static key changing to be moved behind resctrl_arch_ calls. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-17-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 12 ++++++++++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 23 +++++++++++++++++------ 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index e089d1a1a055..9bfda6963794 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -144,6 +144,7 @@ extern bool rdt_alloc_capable; extern bool rdt_mon_capable; extern unsigned int rdt_mon_features; extern struct list_head resctrl_schema_all; +extern bool resctrl_mounted; enum rdt_group_type { RDTCTRL_GROUP = 0, diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 9b503e6ac490..d5d8a58d96f2 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -813,7 +813,11 @@ void mbm_handle_overflow(struct work_struct *work) mutex_lock(&rdtgroup_mutex); - if (!static_branch_likely(&rdt_mon_enable_key)) + /* + * If the filesystem has been unmounted this work no longer needs to + * run. + */ + if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) goto out_unlock; r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -846,7 +850,11 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; - if (!static_branch_likely(&rdt_mon_enable_key)) + /* + * When a domain comes online there is no guarantee the filesystem is + * mounted. If not, there is no need to catch counter overflow. + */ + if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) return; cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->mbm_work_cpu = cpu; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e42cbdf8f6a3..857fbbc3c839 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -42,6 +42,9 @@ LIST_HEAD(rdt_all_groups); /* list of entries for the schemata file */ LIST_HEAD(resctrl_schema_all); +/* The filesystem can only be mounted once. 
*/ +bool resctrl_mounted; + /* Kernel fs node for "info" directory under root */ static struct kernfs_node *kn_info; @@ -881,7 +884,7 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, mutex_lock(&rdtgroup_mutex); /* Return empty if resctrl has not been mounted. */ - if (!static_branch_unlikely(&rdt_enable_key)) { + if (!resctrl_mounted) { seq_puts(s, "res:\nmon:\n"); goto unlock; } @@ -2608,7 +2611,7 @@ static int rdt_get_tree(struct fs_context *fc) /* * resctrl file system can only be mounted once. */ - if (static_branch_unlikely(&rdt_enable_key)) { + if (resctrl_mounted) { ret = -EBUSY; goto out; } @@ -2669,8 +2672,10 @@ static int rdt_get_tree(struct fs_context *fc) if (rdt_mon_capable) static_branch_enable_cpuslocked(&rdt_mon_enable_key); - if (rdt_alloc_capable || rdt_mon_capable) + if (rdt_alloc_capable || rdt_mon_capable) { static_branch_enable_cpuslocked(&rdt_enable_key); + resctrl_mounted = true; + } if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -2944,6 +2949,7 @@ static void rdt_kill_sb(struct super_block *sb) static_branch_disable_cpuslocked(&rdt_alloc_enable_key); static_branch_disable_cpuslocked(&rdt_mon_enable_key); static_branch_disable_cpuslocked(&rdt_enable_key); + resctrl_mounted = false; kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); cpus_read_unlock(); @@ -3913,7 +3919,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) * If resctrl is mounted, remove all the * per domain monitor data directories. */ - if (static_branch_unlikely(&rdt_mon_enable_key)) + if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); if (is_mbm_enabled()) @@ -3990,8 +3996,13 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (is_llc_occupancy_enabled()) INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); - /* If resctrl is mounted, add per domain monitor data directories. */ - if (static_branch_unlikely(&rdt_mon_enable_key)) + /* + * If the filesystem is not mounted then only the default resource group + * exists. Creation of its directories is deferred until mount time + * by rdt_get_tree() calling mkdir_mondata_all(). + * If resctrl is mounted, add per domain monitor data directories. + */ + if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) mkdir_mondata_subdir_allrdtgrp(r, d); return 0; -- Gitee From a7028ffa78c0dbf3a1135e33d242cec43eec297f Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:31 +0000 Subject: [PATCH 525/953] x86/resctrl: Move alloc/mon static keys into helpers ANBZ: #8626 commit 5db6a4a75c95f6967d57906ba7b82756d1985d63 upstream. resctrl enables three static keys depending on the features it has enabled. Another architecture's context switch code may look different, any static keys that control it should be buried behind helpers. Move the alloc/mon logic into arch-specific helpers as a preparatory step for making the rdt_enable_key's status something the arch code decides. This means other architectures don't have to mirror the static keys. 
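Illustratively, an architecture whose context-switch code does not hang
off these static keys could satisfy the same interface with empty stubs
(hypothetical, not the eventual MPAM code):

	/* nothing to flip: context switch does not use static keys here */
	static inline void resctrl_arch_enable_alloc(void) { }
	static inline void resctrl_arch_disable_alloc(void) { }
	static inline void resctrl_arch_enable_mon(void) { }
	static inline void resctrl_arch_disable_mon(void) { }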
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-18-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 20 ++++++++++++++++++++ arch/x86/kernel/cpu/resctrl/internal.h | 5 ----- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++---- 3 files changed, 24 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 29c4cc343787..3c9137b6ad4f 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -42,6 +42,26 @@ DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); +static inline void resctrl_arch_enable_alloc(void) +{ + static_branch_enable_cpuslocked(&rdt_alloc_enable_key); +} + +static inline void resctrl_arch_disable_alloc(void) +{ + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); +} + +static inline void resctrl_arch_enable_mon(void) +{ + static_branch_enable_cpuslocked(&rdt_mon_enable_key); +} + +static inline void resctrl_arch_disable_mon(void) +{ + static_branch_disable_cpuslocked(&rdt_mon_enable_key); +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 9bfda6963794..78580855139d 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -94,9 +94,6 @@ static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) return container_of(kfc, struct rdt_fs_context, kfc); } -DECLARE_STATIC_KEY_FALSE(rdt_enable_key); -DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); - /** * struct mon_evt - Entry in the event list of a resource * @evtid: event id @@ -452,8 +449,6 @@ extern struct mutex rdtgroup_mutex; extern struct rdt_hw_resource rdt_resources_all[]; extern struct rdtgroup rdtgroup_default; -DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); - extern struct dentry *debugfs_resctrl; enum resctrl_res_level { diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 857fbbc3c839..231207f09e04 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2668,9 +2668,9 @@ static int rdt_get_tree(struct fs_context *fc) goto out_psl; if (rdt_alloc_capable) - static_branch_enable_cpuslocked(&rdt_alloc_enable_key); + resctrl_arch_enable_alloc(); if (rdt_mon_capable) - static_branch_enable_cpuslocked(&rdt_mon_enable_key); + resctrl_arch_enable_mon(); if (rdt_alloc_capable || rdt_mon_capable) { static_branch_enable_cpuslocked(&rdt_enable_key); @@ -2946,8 +2946,8 @@ static void rdt_kill_sb(struct super_block *sb) rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); rdtgroup_destroy_root(); - static_branch_disable_cpuslocked(&rdt_alloc_enable_key); - static_branch_disable_cpuslocked(&rdt_mon_enable_key); + resctrl_arch_disable_alloc(); + resctrl_arch_disable_mon(); static_branch_disable_cpuslocked(&rdt_enable_key); resctrl_mounted = false; kernfs_kill_sb(sb); -- Gitee From 30c1fd15eae5aa23806e2737db3fc66d2dbf4de7 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 
18:44:32 +0000 Subject: [PATCH 526/953] x86/resctrl: Make rdt_enable_key the arch's decision to switch ANBZ: #8626 commit 0a2f4d9b548c5b1e2e3fcfa966f5d47b1cacff01 upstream. rdt_enable_key is switched when resctrl is mounted. It was also previously used to prevent a second mount of the filesystem. Any other architecture that wants to support resctrl has to provide identical static keys. Now that there are helpers for enabling and disabling the alloc/mon keys, resctrl doesn't need to switch this extra key, it can be done by the arch code. Use the static-key increment and decrement helpers, and change resctrl to ensure the calls are balanced. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-19-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 4 ++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 11 +++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 3c9137b6ad4f..b74aa34dc9e8 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -45,21 +45,25 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); static inline void resctrl_arch_enable_alloc(void) { static_branch_enable_cpuslocked(&rdt_alloc_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); } static inline void resctrl_arch_disable_alloc(void) { static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); } static inline void resctrl_arch_enable_mon(void) { static_branch_enable_cpuslocked(&rdt_mon_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); } static inline void resctrl_arch_disable_mon(void) { static_branch_disable_cpuslocked(&rdt_mon_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); } /* diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 231207f09e04..7e57ac9d81f7 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2672,10 +2672,8 @@ static int rdt_get_tree(struct fs_context *fc) if (rdt_mon_capable) resctrl_arch_enable_mon(); - if (rdt_alloc_capable || rdt_mon_capable) { - static_branch_enable_cpuslocked(&rdt_enable_key); + if (rdt_alloc_capable || rdt_mon_capable) resctrl_mounted = true; - } if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -2946,9 +2944,10 @@ static void rdt_kill_sb(struct super_block *sb) rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); rdtgroup_destroy_root(); - resctrl_arch_disable_alloc(); - resctrl_arch_disable_mon(); - static_branch_disable_cpuslocked(&rdt_enable_key); + if (rdt_alloc_capable) + resctrl_arch_disable_alloc(); + if (rdt_mon_capable) + resctrl_arch_disable_mon(); resctrl_mounted = false; kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); -- Gitee From bbea621940b24b5a06367a691e3d5a4bccd4bb2e Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:33 +0000 Subject: [PATCH 527/953] x86/resctrl: Add helpers for system wide mon/alloc capable ANBZ: #8626 commit 30017b60706c2ba72a0a4da7d5ef8f5fa95a2f01 upstream. 
resctrl reads rdt_alloc_capable or rdt_mon_capable to determine whether any of the resources support the corresponding features. resctrl also uses the static keys that affect the architecture's context-switch code to determine the same thing. This forces another architecture to have the same static keys. As the static key is enabled based on the capable flag, and none of the filesystem uses of these are in the scheduler path, move the capable flags behind helpers, and use these in the filesystem code instead of the static key. After this change, only the architecture code manages and uses the static keys to ensure __resctrl_sched_in() does not need runtime checks. This avoids multiple architectures having to define the same static keys. Cases where the static key implicitly tested if the resctrl filesystem was mounted all have an explicit check now. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-20-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 13 ++++++++ arch/x86/kernel/cpu/resctrl/internal.h | 2 -- arch/x86/kernel/cpu/resctrl/monitor.c | 4 +-- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 6 ++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 38 +++++++++++------------ 5 files changed, 37 insertions(+), 26 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index b74aa34dc9e8..12dbd2588ca7 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -38,10 +38,18 @@ struct resctrl_pqr_state { DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); +extern bool rdt_alloc_capable; +extern bool rdt_mon_capable; + DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); +static inline bool resctrl_arch_alloc_capable(void) +{ + return rdt_alloc_capable; +} + static inline void resctrl_arch_enable_alloc(void) { static_branch_enable_cpuslocked(&rdt_alloc_enable_key); @@ -54,6 +62,11 @@ static inline void resctrl_arch_disable_alloc(void) static_branch_dec_cpuslocked(&rdt_enable_key); } +static inline bool resctrl_arch_mon_capable(void) +{ + return rdt_mon_capable; +} + static inline void resctrl_arch_enable_mon(void) { static_branch_enable_cpuslocked(&rdt_mon_enable_key); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 78580855139d..3ee855c37447 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -137,8 +137,6 @@ struct rmid_read { void *arch_mon_ctx; }; -extern bool rdt_alloc_capable; -extern bool rdt_mon_capable; extern unsigned int rdt_mon_features; extern struct list_head resctrl_schema_all; extern bool resctrl_mounted; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index d5d8a58d96f2..92d7ba674003 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -817,7 +817,7 @@ void mbm_handle_overflow(struct work_struct *work) * If the filesystem has been unmounted this work no longer needs to * run. 
*/ - if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) + if (!resctrl_mounted || !resctrl_arch_mon_capable()) goto out_unlock; r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -854,7 +854,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) * When a domain comes online there is no guarantee the filesystem is * mounted. If not, there is no need to catch counter overflow. */ - if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) + if (!resctrl_mounted || !resctrl_arch_mon_capable()) return; cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->mbm_work_cpu = cpu; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index d8f44113ed1f..8056bed033cc 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -581,7 +581,7 @@ static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) if (ret) goto err_cpus; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); if (ret) goto err_cpus_list; @@ -628,7 +628,7 @@ static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) if (ret) goto err_cpus; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); if (ret) goto err_cpus_list; @@ -776,7 +776,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) { int ret; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = alloc_rmid(rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 7e57ac9d81f7..ed5fc677a99d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -641,13 +641,13 @@ static int __rdtgroup_move_task(struct task_struct *tsk, static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_alloc_capable && (r->type == RDTCTRL_GROUP) && + return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && resctrl_arch_match_closid(t, r->closid)); } static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_mon_capable && (r->type == RDTMON_GROUP) && + return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && resctrl_arch_match_rmid(t, r->mon.parent->closid, r->mon.rmid)); } @@ -2632,7 +2632,7 @@ static int rdt_get_tree(struct fs_context *fc) closid_init(); - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) flags |= RFTYPE_MON; ret = rdtgroup_add_files(rdtgroup_default.kn, flags); @@ -2645,7 +2645,7 @@ static int rdt_get_tree(struct fs_context *fc) if (ret < 0) goto out_schemata_free; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = mongroup_create_dir(rdtgroup_default.kn, &rdtgroup_default, "mon_groups", &kn_mongrp); @@ -2667,12 +2667,12 @@ static int rdt_get_tree(struct fs_context *fc) if (ret < 0) goto out_psl; - if (rdt_alloc_capable) + if (resctrl_arch_alloc_capable()) resctrl_arch_enable_alloc(); - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) resctrl_arch_enable_mon(); - if (rdt_alloc_capable || rdt_mon_capable) + if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) resctrl_mounted = true; if (is_mbm_enabled()) { @@ -2686,10 +2686,10 @@ static int rdt_get_tree(struct fs_context *fc) out_psl: rdt_pseudo_lock_release(); out_mondata: - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) kernfs_remove(kn_mondata); 
out_mongrp: - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) kernfs_remove(kn_mongrp); out_info: kernfs_remove(kn_info); @@ -2944,9 +2944,9 @@ static void rdt_kill_sb(struct super_block *sb) rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); rdtgroup_destroy_root(); - if (rdt_alloc_capable) + if (resctrl_arch_alloc_capable()) resctrl_arch_disable_alloc(); - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) resctrl_arch_disable_mon(); resctrl_mounted = false; kernfs_kill_sb(sb); @@ -3326,7 +3326,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) { int ret; - if (!rdt_mon_capable) + if (!resctrl_arch_mon_capable()) return 0; ret = alloc_rmid(rdtgrp->closid); @@ -3348,7 +3348,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) { - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) free_rmid(rgrp->closid, rgrp->mon.rmid); } @@ -3412,7 +3412,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, if (rtype == RDTCTRL_GROUP) { files = RFTYPE_BASE | RFTYPE_CTRL; - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) files |= RFTYPE_MON; } else { files = RFTYPE_BASE | RFTYPE_MON; @@ -3521,7 +3521,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { /* * Create an empty mon_groups directory to hold the subset * of tasks and cpus to monitor. @@ -3576,14 +3576,14 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, * allocation is supported, add a control and monitoring * subdirectory */ - if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) + if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); /* * If RDT monitoring is supported and the parent directory is a valid * "mon_groups" directory, add a monitoring subdirectory. */ - if (rdt_mon_capable && is_mon_groups(parent_kn, name)) + if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name)) return rdtgroup_mkdir_mon(parent_kn, name, mode); return -EPERM; @@ -3918,7 +3918,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) * If resctrl is mounted, remove all the * per domain monitor data directories. */ - if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) + if (resctrl_mounted && resctrl_arch_mon_capable()) rmdir_mondata_subdir_allrdtgrp(r, d->id); if (is_mbm_enabled()) @@ -4001,7 +4001,7 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) * by rdt_get_tree() calling mkdir_mondata_all(). * If resctrl is mounted, add per domain monitor data directories. */ - if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) + if (resctrl_mounted && resctrl_arch_mon_capable()) mkdir_mondata_subdir_allrdtgrp(r, d); return 0; -- Gitee From 68590af4c5114aad57d12fbd583fafd59a7f080b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:34 +0000 Subject: [PATCH 528/953] x86/resctrl: Add CPU online callback for resctrl work ANBZ: #8626 commit 1b3e50ce7f5001f1e0edaf7d6abea43b264db7ee upstream. The resctrl architecture specific code may need to create a domain when a CPU comes online, it also needs to reset the CPUs PQR_ASSOC register. The resctrl filesystem code needs to update the rdtgroup_default CPU mask when CPUs are brought online. 
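(Condensed from the diff below into an illustrative sketch -- not
additional patch content -- the filesystem-side piece being pulled out
is deliberately tiny:

	void resctrl_online_cpu(unsigned int cpu)
	{
		lockdep_assert_held(&rdtgroup_mutex);

		/* The CPU is set in default rdtgroup after online. */
		cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	}

The architecture side keeps the domain and PQR_ASSOC work and calls this
helper once that is done.)
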
Currently, this is all done in one function, resctrl_online_cpu(). It will need to be split into architecture and filesystem parts before resctrl can be moved to /fs/. Pull the rdtgroup_default update work out as a filesystem specific cpu_online helper. resctrl_online_cpu() is the obvious name for this, which means the version in core.c needs renaming. resctrl_online_cpu() is called by the arch code once it has done the work to add the new CPU to any domains. In future patches, resctrl_online_cpu() will take the rdtgroup_mutex itself. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-21-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 8 ++++---- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++++++ include/linux/resctrl.h | 1 + 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 97438d844606..eb9efa8349ac 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -606,16 +606,16 @@ static void clear_closid_rmid(int cpu) RESCTRL_RESERVED_CLOSID); } -static int resctrl_online_cpu(unsigned int cpu) +static int resctrl_arch_online_cpu(unsigned int cpu) { struct rdt_resource *r; mutex_lock(&rdtgroup_mutex); for_each_capable_rdt_resource(r) domain_add_cpu(cpu, r); - /* The cpu is set in default rdtgroup after online. */ - cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); clear_closid_rmid(cpu); + + resctrl_online_cpu(cpu); mutex_unlock(&rdtgroup_mutex); return 0; @@ -971,7 +971,7 @@ static int __init resctrl_late_init(void) state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", - resctrl_online_cpu, resctrl_offline_cpu); + resctrl_arch_online_cpu, resctrl_offline_cpu); if (state < 0) return state; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index ed5fc677a99d..38d3b19a3aca 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4007,6 +4007,14 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) return 0; } +void resctrl_online_cpu(unsigned int cpu) +{ + lockdep_assert_held(&rdtgroup_mutex); + + /* The CPU is set in default rdtgroup after online. 
*/ + cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); +} + /* * rdtgroup_init - rdtgroup initialization * diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index bf460c912bf5..4c4bad3c34e4 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -223,6 +223,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type); int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); +void resctrl_online_cpu(unsigned int cpu); /** * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid -- Gitee From f1a9bcfa0e038580137ac86f4b20dc100fc393d4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:35 +0000 Subject: [PATCH 529/953] x86/resctrl: Allow overflow/limbo handlers to be scheduled on any-but CPU ANBZ: #8626 commit 978fcca954cb52249babbc14e53de53c88dd6433 upstream. When a CPU is taken offline resctrl may need to move the overflow or limbo handlers to run on a different CPU. Once the offline callbacks have been split, cqm_setup_limbo_handler() will be called while the CPU that is going offline is still present in the CPU mask. Pass the CPU to exclude to cqm_setup_limbo_handler() and mbm_setup_overflow_handler(). These functions can use a variant of cpumask_any_but() when selecting the CPU. -1 is used to indicate no CPUs need excluding. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Babu Moger Reviewed-by: Reinette Chatre Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-22-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 8 +++-- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 33 ++++++++++++++---- arch/x86/kernel/cpu/resctrl/monitor.c | 42 ++++++++++++++++++----- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 6 ++-- include/linux/resctrl.h | 2 ++ 6 files changed, 72 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index eb9efa8349ac..e8d4887463a0 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -584,12 +584,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { cancel_delayed_work(&d->mbm_over); - mbm_setup_overflow_handler(d, 0); + /* + * temporary: exclude_cpu=-1 as this CPU has already + * been removed by cpumask_clear_cpu()d + */ + mbm_setup_overflow_handler(d, 0, RESCTRL_PICK_ANY_CPU); } if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && has_busy_rmid(d)) { cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0); + cqm_setup_limbo_handler(d, 0, RESCTRL_PICK_ANY_CPU); } } } diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 52fa0e14cb86..20b02d6f02c1 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -552,7 +552,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, return; } - cpu = cpumask_any_housekeeping(&d->cpu_mask); + cpu = cpumask_any_housekeeping(&d->cpu_mask, 
RESCTRL_PICK_ANY_CPU); /* * cpumask_any_housekeeping() prefers housekeeping CPUs, but diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 3ee855c37447..c99f26ebe7a6 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -60,19 +60,36 @@ * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that * aren't marked nohz_full * @mask: The mask to pick a CPU from. + * @exclude_cpu:The CPU to avoid picking. * - * Returns a CPU in @mask. If there are housekeeping CPUs that don't use - * nohz_full, these are preferred. + * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping + * CPUs that don't use nohz_full, these are preferred. Pass + * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. + * + * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. */ -static inline unsigned int cpumask_any_housekeeping(const struct cpumask *mask) +static inline unsigned int +cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) { unsigned int cpu, hk_cpu; - cpu = cpumask_any(mask); - if (!tick_nohz_full_cpu(cpu)) + if (exclude_cpu == RESCTRL_PICK_ANY_CPU) + cpu = cpumask_any(mask); + else + cpu = cpumask_any_but(mask, exclude_cpu); + + if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) return cpu; + /* If the CPU picked isn't marked nohz_full nothing more needs doing. */ + if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) + return cpu; + + /* Try to find a CPU that isn't nohz_full to use in preference */ hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu == exclude_cpu) + hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); + if (hk_cpu < nr_cpu_ids) cpu = hk_cpu; @@ -573,11 +590,13 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, struct rdt_domain *d, struct rdtgroup *rdtgrp, int evtid, int first); void mbm_setup_overflow_handler(struct rdt_domain *dom, - unsigned long delay_ms); + unsigned long delay_ms, + int exclude_cpu); void mbm_handle_overflow(struct work_struct *work); void __init intel_rdt_mbm_apply_quirk(void); bool is_mba_sc(struct rdt_resource *r); -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu); void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 92d7ba674003..67edd4c440f0 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -481,7 +481,8 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) * setup up the limbo worker. 
*/ if (!has_busy_rmid(d)) - cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL); + cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL, + RESCTRL_PICK_ANY_CPU); set_bit(idx, d->rmid_busy_llc); entry->busy++; } @@ -784,7 +785,8 @@ void cqm_handle_limbo(struct work_struct *work) __check_limbo(d, false); if (has_busy_rmid(d)) { - d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, delay); } @@ -792,15 +794,25 @@ void cqm_handle_limbo(struct work_struct *work) mutex_unlock(&rdtgroup_mutex); } -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) +/** + * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this + * domain. + * @dom: The domain the limbo handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) { unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; - cpu = cpumask_any_housekeeping(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); dom->cqm_work_cpu = cpu; - schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); } void mbm_handle_overflow(struct work_struct *work) @@ -838,14 +850,24 @@ void mbm_handle_overflow(struct work_struct *work) * Re-check for housekeeping CPUs. This allows the overflow handler to * move off a nohz_full CPU quickly. */ - d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); out_unlock: mutex_unlock(&rdtgroup_mutex); } -void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) +/** + * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this + * domain. + * @dom: The domain the overflow handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. 
+ */ +void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) { unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; @@ -856,9 +878,11 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) */ if (!resctrl_mounted || !resctrl_arch_mon_capable()) return; - cpu = cpumask_any_housekeeping(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); dom->mbm_work_cpu = cpu; - schedule_delayed_work_on(cpu, &dom->mbm_over, delay); + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->mbm_over, delay); } static int dom_data_init(struct rdt_resource *r) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 38d3b19a3aca..f5688c79d94f 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2678,7 +2678,8 @@ static int rdt_get_tree(struct fs_context *fc) if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; list_for_each_entry(dom, &r->domains, list) - mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); + mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); } goto out; @@ -3989,7 +3990,8 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (is_mbm_enabled()) { INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); - mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL); + mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); } if (is_llc_occupancy_enabled()) diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 4c4bad3c34e4..ccbbbe5d18d3 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -10,6 +10,8 @@ #define RESCTRL_RESERVED_CLOSID 0 #define RESCTRL_RESERVED_RMID 0 +#define RESCTRL_PICK_ANY_CPU -1 + #ifdef CONFIG_PROC_CPU_RESCTRL int proc_resctrl_show(struct seq_file *m, -- Gitee From 4e255f01665931c7ddec8009d79b5be504eaa52b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:36 +0000 Subject: [PATCH 530/953] x86/resctrl: Add CPU offline callback for resctrl work ANBZ: #8626 commit 258c91e84fedc789353a35ad91d827a9111d3cbd upstream. The resctrl architecture specific code may need to free a domain when a CPU goes offline, it also needs to reset the CPUs PQR_ASSOC register. Amongst other things, the resctrl filesystem code needs to clear this CPU from the cpu_mask of any control and monitor groups. Currently, this is all done in core.c and called from resctrl_offline_cpu(), making the split between architecture and filesystem code unclear. Move the filesystem work to remove the CPU from the control and monitor groups into a filesystem helper called resctrl_offline_cpu(), and rename the one in core.c resctrl_arch_offline_cpu(). 
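(For reference, the resulting shape of the offline path, condensed from
the diff below into an illustrative sketch rather than a verbatim copy:

	static int resctrl_arch_offline_cpu(unsigned int cpu)
	{
		struct rdt_resource *r;

		mutex_lock(&rdtgroup_mutex);
		resctrl_offline_cpu(cpu);	/* filesystem: clear CPU from ctrl/mon groups */
		for_each_capable_rdt_resource(r)
			domain_remove_cpu(cpu, r);	/* arch: shrink or free domains */
		clear_closid_rmid(cpu);		/* arch: reset PQR_ASSOC */
		mutex_unlock(&rdtgroup_mutex);

		return 0;
	}

Note that resctrl_offline_cpu() runs before domain_remove_cpu(), so the
filesystem code still sees the outgoing CPU in the domain masks at that
point; the next patch relies on this ordering.)
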
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-23-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 25 +++++-------------------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 24 ++++++++++++++++++++++++ include/linux/resctrl.h | 1 + 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index e8d4887463a0..928a0ff84228 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -625,31 +625,15 @@ static int resctrl_arch_online_cpu(unsigned int cpu) return 0; } -static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +static int resctrl_arch_offline_cpu(unsigned int cpu) { - struct rdtgroup *cr; - - list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { - if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) { - break; - } - } -} - -static int resctrl_offline_cpu(unsigned int cpu) -{ - struct rdtgroup *rdtgrp; struct rdt_resource *r; mutex_lock(&rdtgroup_mutex); + resctrl_offline_cpu(cpu); + for_each_capable_rdt_resource(r) domain_remove_cpu(cpu, r); - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { - clear_childcpus(rdtgrp, cpu); - break; - } - } clear_closid_rmid(cpu); mutex_unlock(&rdtgroup_mutex); @@ -975,7 +959,8 @@ static int __init resctrl_late_init(void) state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", - resctrl_arch_online_cpu, resctrl_offline_cpu); + resctrl_arch_online_cpu, + resctrl_arch_offline_cpu); if (state < 0) return state; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f5688c79d94f..5bd3d8fb3f67 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4017,6 +4017,30 @@ void resctrl_online_cpu(unsigned int cpu) cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); } +static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +{ + struct rdtgroup *cr; + + list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { + if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) + break; + } +} + +void resctrl_offline_cpu(unsigned int cpu) +{ + struct rdtgroup *rdtgrp; + + lockdep_assert_held(&rdtgroup_mutex); + + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { + clear_childcpus(rdtgrp, cpu); + break; + } + } +} + /* * rdtgroup_init - rdtgroup initialization * diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index ccbbbe5d18d3..270ff1d5c051 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -226,6 +226,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_online_cpu(unsigned int cpu); +void resctrl_offline_cpu(unsigned int cpu); /** * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid -- Gitee From dee45b08114beab5752268e3b8152e540a62393c Mon Sep 17 
00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:37 +0000 Subject: [PATCH 531/953] x86/resctrl: Move domain helper migration into resctrl_offline_cpu() ANBZ: #8626 commit eeff1d4f118bdf0870227fee5a770f03056e3adc upstream. When a CPU is taken offline the resctrl filesystem code needs to check if it was the CPU nominated to perform the periodic overflow and limbo work. If so, another CPU needs to be chosen to do this work. This is currently done in core.c, mixed in with the code that removes the CPU from the domain's mask, and potentially free()s the domain. Move the migration of the overflow and limbo helpers into the filesystem code, into resctrl_offline_cpu(). As resctrl_offline_cpu() runs before the architecture code has removed the CPU from the domain mask, the callers need to be told which CPU is being removed, to avoid picking it as the new CPU. This uses the exclude_cpu feature previously added. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-24-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 16 ---------------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 928a0ff84228..c97a4b42c1ea 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -580,22 +580,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) return; } - - if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { - if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { - cancel_delayed_work(&d->mbm_over); - /* - * temporary: exclude_cpu=-1 as this CPU has already - * been removed by cpumask_clear_cpu()d - */ - mbm_setup_overflow_handler(d, 0, RESCTRL_PICK_ANY_CPU); - } - if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(d)) { - cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0, RESCTRL_PICK_ANY_CPU); - } - } } static void clear_closid_rmid(int cpu) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 5bd3d8fb3f67..777e9f680332 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4029,7 +4029,9 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) void resctrl_offline_cpu(unsigned int cpu) { + struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; struct rdtgroup *rdtgrp; + struct rdt_domain *d; lockdep_assert_held(&rdtgroup_mutex); @@ -4039,6 +4041,22 @@ void resctrl_offline_cpu(unsigned int cpu) break; } } + + if (!l3->mon_capable) + return; + + d = get_domain_from_cpu(cpu, l3); + if (d) { + if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { + cancel_delayed_work(&d->mbm_over); + mbm_setup_overflow_handler(d, 0, cpu); + } + if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && + has_busy_rmid(d)) { + cancel_delayed_work(&d->cqm_limbo); + cqm_setup_limbo_handler(d, 0, cpu); + } + } } /* -- Gitee From 54fbf3807887a1198264980e102411b9f91b6107 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:38 +0000 Subject: [PATCH 
532/953] x86/resctrl: Separate arch and fs resctrl locks ANBZ: #8626 commit fb700810d30b9eb333a7bf447012e1158e35c62f upstream. resctrl has one mutex that is taken by the architecture-specific code, and the filesystem parts. The two interact via cpuhp, where the architecture code updates the domain list. Filesystem handlers that walk the domains list should not run concurrently with the cpuhp callback modifying the list. Exposing a lock from the filesystem code means the interface is not cleanly defined, and creates the possibility of cross-architecture lock ordering headaches. The interaction only exists so that certain filesystem paths are serialised against CPU hotplug. The CPU hotplug code already has a mechanism to do this using cpus_read_lock(). MPAM's monitors have an overflow interrupt, so it needs to be possible to walk the domains list in irq context. RCU is ideal for this, but some paths need to be able to sleep to allocate memory. Because resctrl_{on,off}line_cpu() take the rdtgroup_mutex as part of a cpuhp callback, cpus_read_lock() must always be taken first. rdtgroup_schemata_write() already does this. Most of the filesystem code's domain list walkers are currently protected by the rdtgroup_mutex taken in rdtgroup_kn_lock_live(). The exceptions are rdt_bit_usage_show() and the mon_config helpers which take the lock directly. Make the domain list protected by RCU. An architecture-specific lock prevents concurrent writers. rdt_bit_usage_show() could walk the domain list using RCU, but to keep all the filesystem operations the same, this is changed to call cpus_read_lock(). The mon_config helpers send multiple IPIs, take the cpus_read_lock() in these cases. The other filesystem list walkers need to be able to sleep. Add cpus_read_lock() to rdtgroup_kn_lock_live() so that the cpuhp callbacks can't be invoked when file system operations are occurring. Add lockdep_assert_cpus_held() in the cases where the rdtgroup_kn_lock_live() call isn't obvious. Resctrl's domain online/offline calls now need to take the rdtgroup_mutex themselves. [ bp: Fold in a build fix: https://lore.kernel.org/r/87zfvwieli.ffs@tglx ] Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-25-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 44 +++++++++++---- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 15 ++++- arch/x86/kernel/cpu/resctrl/monitor.c | 8 +++ arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 3 + arch/x86/kernel/cpu/resctrl/rdtgroup.c | 68 ++++++++++++++++++----- include/linux/resctrl.h | 2 +- 6 files changed, 112 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index c97a4b42c1ea..490a234ec9fd 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "resctrl: " fmt +#include #include #include #include @@ -25,8 +26,15 @@ #include #include "internal.h" -/* Mutex to protect rdtgroup access. */ -DEFINE_MUTEX(rdtgroup_mutex); +/* + * rdt_domain structures are kfree()d when their last CPU goes offline, + * and allocated when the first CPU in a new domain comes online. 
+ * The rdt_resource's domain list is updated when this happens. Readers of + * the domain list must either take cpus_read_lock(), or rely on an RCU + * read-side critical section, to avoid observing concurrent modification. + * All writers take this mutex: + */ +static DEFINE_MUTEX(domain_list_lock); /* * The cached resctrl_pqr_state is strictly per CPU and can never be @@ -354,6 +362,15 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) { struct rdt_domain *d; + /* + * Walking r->domains, ensure it can't race with cpuhp. + * Because this is called via IPI by rdt_ctrl_update(), assertions + * about locks this thread holds will lead to false positives. Check + * someone is holding the CPUs lock. + */ + if (IS_ENABLED(CONFIG_HOTPLUG_CPU) && IS_ENABLED(CONFIG_LOCKDEP)) + WARN_ON_ONCE(!lockdep_is_cpus_held()); + list_for_each_entry(d, &r->domains, list) { /* Find the domain that contains this CPU */ if (cpumask_test_cpu(cpu, &d->cpu_mask)) @@ -510,6 +527,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) struct rdt_domain *d; int err; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, &add_pos); if (IS_ERR(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -543,11 +562,12 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) return; } - list_add_tail(&d->list, add_pos); + list_add_tail_rcu(&d->list, add_pos); err = resctrl_online_domain(r, d); if (err) { - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); domain_free(hw_dom); } } @@ -558,6 +578,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) struct rdt_hw_domain *hw_dom; struct rdt_domain *d; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, NULL); if (IS_ERR_OR_NULL(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -568,7 +590,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cpumask_clear_cpu(cpu, &d->cpu_mask); if (cpumask_empty(&d->cpu_mask)) { resctrl_offline_domain(r, d); - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); /* * rdt_domain "d" is going to be freed below, so clear @@ -598,13 +621,13 @@ static int resctrl_arch_online_cpu(unsigned int cpu) { struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_add_cpu(cpu, r); - clear_closid_rmid(cpu); + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); resctrl_online_cpu(cpu); - mutex_unlock(&rdtgroup_mutex); return 0; } @@ -613,13 +636,14 @@ static int resctrl_arch_offline_cpu(unsigned int cpu) { struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); resctrl_offline_cpu(cpu); + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_remove_cpu(cpu, r); + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); - mutex_unlock(&rdtgroup_mutex); return 0; } diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 20b02d6f02c1..7997b47743a2 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -212,6 +212,9 @@ static int parse_line(char *line, struct resctrl_schema *s, struct rdt_domain *d; unsigned long dom_id; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); @@ -316,6 +319,9 @@ int 
resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) struct rdt_domain *d; u32 idx; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; @@ -381,11 +387,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, return -EINVAL; buf[nbytes - 1] = '\0'; - cpus_read_lock(); rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); return -ENOENT; } rdt_last_cmd_clear(); @@ -447,7 +451,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, out: rdt_staged_configs_clear(); rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); return ret ?: nbytes; } @@ -467,6 +470,9 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo bool sep = false; u32 ctrl_val; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + seq_printf(s, "%*s:", max_name_width, schema->name); list_for_each_entry(dom, &r->domains, list) { if (sep) @@ -537,6 +543,9 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, { int cpu; + /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + /* * Setup the parameters to pass to mon_event_count() to read the data. */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 67edd4c440f0..c34a35ec0f03 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -15,6 +15,7 @@ * Software Developer Manual June 2016, volume 3, section 17.17. */ +#include #include #include #include @@ -472,6 +473,9 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) lockdep_assert_held(&rdtgroup_mutex); + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; @@ -778,6 +782,7 @@ void cqm_handle_limbo(struct work_struct *work) unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); struct rdt_domain *d; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); d = container_of(work, struct rdt_domain, cqm_limbo.work); @@ -792,6 +797,7 @@ void cqm_handle_limbo(struct work_struct *work) } mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); } /** @@ -823,6 +829,7 @@ void mbm_handle_overflow(struct work_struct *work) struct rdt_resource *r; struct rdt_domain *d; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* @@ -856,6 +863,7 @@ void mbm_handle_overflow(struct work_struct *work) out_unlock: mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); } /** diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 8056bed033cc..884b88e25141 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -844,6 +844,9 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) struct rdt_domain *d_i; bool ret = false; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) return true; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 777e9f680332..011e17efb1a6 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -35,6 +35,10 @@ DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); 
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); + +/* Mutex to protect rdtgroup access. */ +DEFINE_MUTEX(rdtgroup_mutex); + static struct kernfs_root *rdt_root; struct rdtgroup rdtgroup_default; LIST_HEAD(rdt_all_groups); @@ -1014,6 +1018,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, bool sep = false; u32 ctrl_val; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); hw_shareable = r->cache.shareable_bits; list_for_each_entry(dom, &r->domains, list) { @@ -1074,6 +1079,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, } seq_putc(seq, '\n'); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return 0; } @@ -1329,6 +1335,9 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) struct rdt_domain *d; u32 ctrl; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) @@ -1593,6 +1602,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid struct rdt_domain *dom; bool sep = false; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); list_for_each_entry(dom, &r->domains, list) { @@ -1609,6 +1619,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid seq_puts(s, "\n"); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return 0; } @@ -1690,6 +1701,9 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) unsigned long dom_id, val; struct rdt_domain *d; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + next: if (!tok || tok[0] == '\0') return 0; @@ -1736,6 +1750,7 @@ static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); rdt_last_cmd_clear(); @@ -1745,6 +1760,7 @@ static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return ret ?: nbytes; } @@ -1760,6 +1776,7 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); rdt_last_cmd_clear(); @@ -1769,6 +1786,7 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return ret ?: nbytes; } @@ -2245,6 +2263,9 @@ static int set_cache_qos_cfg(int level, bool enable) struct rdt_domain *d; int cpu; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (level == RDT_RESOURCE_L3) update = l3_qos_cfg_update; else if (level == RDT_RESOURCE_L2) @@ -2444,6 +2465,7 @@ struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) rdtgroup_kn_get(rdtgrp, kn); + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* Was this group deleted while we waited? 
*/ @@ -2461,6 +2483,8 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn) return; mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + rdtgroup_kn_put(rdtgrp, kn); } @@ -2793,6 +2817,9 @@ static int reset_all_ctrls(struct rdt_resource *r) struct rdt_domain *d; int i; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; @@ -3077,6 +3104,9 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, struct rdt_domain *dom; int ret; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + list_for_each_entry(dom, &r->domains, list) { ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); if (ret) @@ -3907,13 +3937,13 @@ static void domain_destroy_mon_state(struct rdt_domain *d) void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) { - lockdep_assert_held(&rdtgroup_mutex); + mutex_lock(&rdtgroup_mutex); if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) mba_sc_domain_destroy(r, d); if (!r->mon_capable) - return; + goto out_unlock; /* * If resctrl is mounted, remove all the @@ -3938,6 +3968,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) } domain_destroy_mon_state(d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); } static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) @@ -3973,20 +4006,22 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) { - int err; + int err = 0; - lockdep_assert_held(&rdtgroup_mutex); + mutex_lock(&rdtgroup_mutex); - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { /* RDT_RESOURCE_MBA is never mon_capable */ - return mba_sc_domain_allocate(r, d); + err = mba_sc_domain_allocate(r, d); + goto out_unlock; + } if (!r->mon_capable) - return 0; + goto out_unlock; err = domain_setup_mon_state(r, d); if (err) - return err; + goto out_unlock; if (is_mbm_enabled()) { INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); @@ -4006,15 +4041,18 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (resctrl_mounted && resctrl_arch_mon_capable()) mkdir_mondata_subdir_allrdtgrp(r, d); - return 0; +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; } void resctrl_online_cpu(unsigned int cpu) { - lockdep_assert_held(&rdtgroup_mutex); - + mutex_lock(&rdtgroup_mutex); /* The CPU is set in default rdtgroup after online. 
*/ cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&rdtgroup_mutex); } static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) @@ -4033,8 +4071,7 @@ void resctrl_offline_cpu(unsigned int cpu) struct rdtgroup *rdtgrp; struct rdt_domain *d; - lockdep_assert_held(&rdtgroup_mutex); - + mutex_lock(&rdtgroup_mutex); list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { clear_childcpus(rdtgrp, cpu); @@ -4043,7 +4080,7 @@ void resctrl_offline_cpu(unsigned int cpu) } if (!l3->mon_capable) - return; + goto out_unlock; d = get_domain_from_cpu(cpu, l3); if (d) { @@ -4057,6 +4094,9 @@ void resctrl_offline_cpu(unsigned int cpu) cqm_setup_limbo_handler(d, 0, cpu); } } + +out_unlock: + mutex_unlock(&rdtgroup_mutex); } /* diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 270ff1d5c051..a365f67131ec 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -159,7 +159,7 @@ struct resctrl_schema; * @cache_level: Which cache level defines scope of this resource * @cache: Cache allocation related data * @membw: If the component has bandwidth controls, their properties. - * @domains: All domains for this resource + * @domains: RCU list of all domains for this resource * @name: Name to use in "schemata" file. * @data_width: Character width of data when displaying * @default_ctrl: Specifies default cache cbm or memory B/W percent. -- Gitee From 5240dd59d561d595dd1416a397bde7a4948564da Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 21 Feb 2024 12:23:06 +0000 Subject: [PATCH 533/953] x86/resctrl: Remove lockdep annotation that triggers false positive ANBZ: #8626 commit c0d848fcb09d80a5f48b99f85e448185125ef59f upstream. get_domain_from_cpu() walks a list of domains to find the one that contains the specified CPU. This needs to be protected against races with CPU hotplug when the list is modified. It has recently gained a lockdep annotation to check this. The lockdep annotation causes false positives when called via IPI as the lock is held, but by another process. Remove it. [ bp: Refresh it ontop of x86/cache. ] Fixes: fb700810d30b ("x86/resctrl: Separate arch and fs resctrl locks") Reported-by: Tony Luck Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/all/ZdUSwOM9UUNpw84Y@agluck-desk3 Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 490a234ec9fd..8702d34711df 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -362,15 +362,6 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) { struct rdt_domain *d; - /* - * Walking r->domains, ensure it can't race with cpuhp. - * Because this is called via IPI by rdt_ctrl_update(), assertions - * about locks this thread holds will lead to false positives. Check - * someone is holding the CPUs lock. 
- */ - if (IS_ENABLED(CONFIG_HOTPLUG_CPU) && IS_ENABLED(CONFIG_LOCKDEP)) - WARN_ON_ONCE(!lockdep_is_cpus_held()); - list_for_each_entry(d, &r->domains, list) { /* Find the domain that contains this CPU */ if (cpumask_test_cpu(cpu, &d->cpu_mask)) -- Gitee From 8fd2b670ab6238b5c51377230547006b743e4d7c Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 29 Mar 2024 17:19:43 +0800 Subject: [PATCH 534/953] anolis: configs: adjust kconfigs to support some important arch ANBZ: #8598 Adjust kconfigs to support ZHAOXIN, PHYTIUM and KUNPENG. By the way, some important kconfigs, such as CONFIG_UACCE, was modified, too. Here are the kconfig list to be adjusted: x86: CONFIG_UACCE=m CONFIG_SCHED_CLUSTER=y CONFIG_SATA_ZHAOXIN=m CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_I2C_ZHAOXIN=m CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m CONFIG_I2C_ZHAOXIN_SMBUS=m arm64: CONFIG_ARM_SMMU_V3_PMU=m CONFIG_SPI_HISI_KUNPENG=m CONFIG_NTB=m CONFIG_UACCE=m CONFIG_ARCH_PHYTIUM=y CONFIG_ARM_GIC_PHYTIUM_2500=y CONFIG_CRYPTO_DEV_HISI_SEC2=m CONFIG_CRYPTO_DEV_HISI_ZIP=m CONFIG_INFINIBAND_HNS_HIP08=y CONFIG_SPI_HISI_SFC_V3XX=m CONFIG_ARM_SMMU_V3_SVA=y Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2973 --- arch/arm64/configs/anolis-debug_defconfig | 32 ++++++++++++++++------- arch/arm64/configs/anolis_defconfig | 32 ++++++++++++++++------- arch/x86/configs/anolis-debug_defconfig | 19 +++++++++----- arch/x86/configs/anolis_defconfig | 16 +++++++----- 4 files changed, 67 insertions(+), 32 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 43c9990b1068..8b932435df46 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -337,6 +337,7 @@ CONFIG_ARCH_HISI=y # CONFIG_ARCH_NXP is not set # CONFIG_ARCH_MA35 is not set # CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_PHYTIUM=y CONFIG_ARCH_QCOM=y # CONFIG_ARCH_REALTEK is not set # CONFIG_ARCH_RENESAS is not set @@ -2328,7 +2329,7 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # CONFIG_MISC_ALCOR_PCI is not set # CONFIG_MISC_RTSX_PCI is not set # CONFIG_MISC_RTSX_USB is not set -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -3486,8 +3487,8 @@ CONFIG_SPI_DESIGNWARE=m # CONFIG_SPI_DW_DMA is not set # CONFIG_SPI_DW_PCI is not set CONFIG_SPI_DW_MMIO=m -# CONFIG_SPI_HISI_KUNPENG is not set -# CONFIG_SPI_HISI_SFC_V3XX is not set +CONFIG_SPI_HISI_KUNPENG=m +CONFIG_SPI_HISI_SFC_V3XX=m # CONFIG_SPI_GPIO is not set # CONFIG_SPI_FSL_SPI is not set # CONFIG_SPI_MICROCHIP_CORE is not set @@ -5126,7 +5127,7 @@ CONFIG_INFINIBAND_CXGB4=m # CONFIG_INFINIBAND_EFA is not set CONFIG_INFINIBAND_ERDMA=m CONFIG_INFINIBAND_HNS=m -# CONFIG_INFINIBAND_HNS_HIP08 is not set +CONFIG_INFINIBAND_HNS_HIP08=y # CONFIG_INFINIBAND_IRDMA is not set CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m @@ -5357,6 +5358,7 @@ CONFIG_VFIO_PCI_MMAP=y CONFIG_VFIO_PCI_INTX=y CONFIG_VFIO_PCI=m # CONFIG_MLX5_VFIO_PCI is not set +# CONFIG_HISI_ACC_VFIO_PCI is not set # end of VFIO support for PCI devices # @@ -5531,6 +5533,7 @@ CONFIG_IOMMU_DEFAULT_DMA_STRICT=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_OF_IOMMU=y CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y # CONFIG_IOMMUFD is not set CONFIG_ARM_SMMU=y # CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set @@ -5538,7 +5541,7 @@ CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y CONFIG_ARM_SMMU_QCOM=y # CONFIG_ARM_SMMU_QCOM_DEBUG is not set CONFIG_ARM_SMMU_V3=y -# CONFIG_ARM_SMMU_V3_SVA is 
not set +CONFIG_ARM_SMMU_V3_SVA=y # CONFIG_QCOM_IOMMU is not set # CONFIG_VIRTIO_IOMMU is not set @@ -5649,7 +5652,15 @@ CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_USBC_TUSB320 is not set # CONFIG_MEMORY is not set # CONFIG_IIO is not set -# CONFIG_NTB is not set +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set CONFIG_PWM=y CONFIG_PWM_SYSFS=y # CONFIG_PWM_DEBUG is not set @@ -5671,6 +5682,7 @@ CONFIG_ARM_GIC_V2M=y CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y # CONFIG_AL_FIC is not set CONFIG_HISILICON_IRQ_MBIGEN=y # CONFIG_XILINX_INTC is not set @@ -5749,7 +5761,7 @@ CONFIG_ARM_CCN=y CONFIG_ARM_CMN=y CONFIG_ARM_PMU=y CONFIG_ARM_PMU_ACPI=y -CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_SMMU_V3_PMU=m CONFIG_ARM_PMUV3=y CONFIG_ARM_DSU_PMU=y CONFIG_QCOM_L2_PMU=y @@ -5760,11 +5772,11 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set +CONFIG_DWC_PCIE_PMU=m # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set # end of Performance monitor support @@ -6527,9 +6539,9 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set CONFIG_CRYPTO_DEV_HISI_SEC=m -# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_SEC2=m CONFIG_CRYPTO_DEV_HISI_QM=m -# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +CONFIG_CRYPTO_DEV_HISI_ZIP=m CONFIG_CRYPTO_DEV_HISI_HPRE=m CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 0e3103e1b10c..ba7a0a1e15f4 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -335,6 +335,7 @@ CONFIG_ARCH_HISI=y # CONFIG_ARCH_NXP is not set # CONFIG_ARCH_MA35 is not set # CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_PHYTIUM=y CONFIG_ARCH_QCOM=y # CONFIG_ARCH_REALTEK is not set # CONFIG_ARCH_RENESAS is not set @@ -2325,7 +2326,7 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # CONFIG_MISC_ALCOR_PCI is not set # CONFIG_MISC_RTSX_PCI is not set # CONFIG_MISC_RTSX_USB is not set -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -3483,8 +3484,8 @@ CONFIG_SPI_DESIGNWARE=m # CONFIG_SPI_DW_DMA is not set # CONFIG_SPI_DW_PCI is not set CONFIG_SPI_DW_MMIO=m -# CONFIG_SPI_HISI_KUNPENG is not set -# CONFIG_SPI_HISI_SFC_V3XX is not set +CONFIG_SPI_HISI_KUNPENG=m +CONFIG_SPI_HISI_SFC_V3XX=m # CONFIG_SPI_GPIO is not set # CONFIG_SPI_FSL_SPI is not set # CONFIG_SPI_MICROCHIP_CORE is not set @@ -5123,7 +5124,7 @@ CONFIG_INFINIBAND_CXGB4=m # CONFIG_INFINIBAND_EFA is not set CONFIG_INFINIBAND_ERDMA=m CONFIG_INFINIBAND_HNS=m -# CONFIG_INFINIBAND_HNS_HIP08 is not set +CONFIG_INFINIBAND_HNS_HIP08=y # CONFIG_INFINIBAND_IRDMA is not set CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m @@ -5353,6 +5354,7 @@ CONFIG_VFIO_PCI_MMAP=y CONFIG_VFIO_PCI_INTX=y CONFIG_VFIO_PCI=m # CONFIG_MLX5_VFIO_PCI is not set +# CONFIG_HISI_ACC_VFIO_PCI is not set # end of VFIO support for PCI devices # @@ -5527,6 +5529,7 @@ CONFIG_IOMMU_DEFAULT_DMA_STRICT=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set 
CONFIG_OF_IOMMU=y CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y # CONFIG_IOMMUFD is not set CONFIG_ARM_SMMU=y # CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set @@ -5534,7 +5537,7 @@ CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y CONFIG_ARM_SMMU_QCOM=y # CONFIG_ARM_SMMU_QCOM_DEBUG is not set CONFIG_ARM_SMMU_V3=y -# CONFIG_ARM_SMMU_V3_SVA is not set +CONFIG_ARM_SMMU_V3_SVA=y # CONFIG_QCOM_IOMMU is not set # CONFIG_VIRTIO_IOMMU is not set @@ -5645,7 +5648,15 @@ CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_USBC_TUSB320 is not set # CONFIG_MEMORY is not set # CONFIG_IIO is not set -# CONFIG_NTB is not set +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set CONFIG_PWM=y CONFIG_PWM_SYSFS=y # CONFIG_PWM_DEBUG is not set @@ -5667,6 +5678,7 @@ CONFIG_ARM_GIC_V2M=y CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y # CONFIG_AL_FIC is not set CONFIG_HISILICON_IRQ_MBIGEN=y # CONFIG_XILINX_INTC is not set @@ -5745,7 +5757,7 @@ CONFIG_ARM_CCN=y CONFIG_ARM_CMN=y CONFIG_ARM_PMU=y CONFIG_ARM_PMU_ACPI=y -CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_SMMU_V3_PMU=m CONFIG_ARM_PMUV3=y CONFIG_ARM_DSU_PMU=y CONFIG_QCOM_L2_PMU=y @@ -5756,11 +5768,11 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set +CONFIG_DWC_PCIE_PMU=m # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set # end of Performance monitor support @@ -6523,9 +6535,9 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set CONFIG_CRYPTO_DEV_HISI_SEC=m -# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_SEC2=m CONFIG_CRYPTO_DEV_HISI_QM=m -# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +CONFIG_CRYPTO_DEV_HISI_ZIP=m CONFIG_CRYPTO_DEV_HISI_HPRE=m CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 36a089094635..05b04486b420 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -414,7 +414,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -2430,7 +2430,7 @@ CONFIG_VMWARE_VMCI=m # CONFIG_MISC_ALCOR_PCI is not set CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -2597,7 +2597,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set -# CONFIG_SATA_ZHAOXIN is not set +CONFIG_SATA_ZHAOXIN=m # # PATA SFF controllers with BMDMA @@ -3751,7 +3751,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_ZHAOXIN=y +CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_APPLICOM is not set @@ -3839,13 +3839,13 @@ CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m -# CONFIG_I2C_ZHAOXIN is not set 
+CONFIG_I2C_ZHAOXIN=m # # ACPI drivers # CONFIG_I2C_SCMI=m -# CONFIG_I2C_ZHAOXIN_SMBUS is not set +CONFIG_I2C_ZHAOXIN_SMBUS=m # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -6655,6 +6655,7 @@ CONFIG_IDLE_INJECT=y # # Performance monitor support # +# CONFIG_DWC_PCIE_PMU is not set # end of Performance monitor support CONFIG_RAS=y @@ -7387,6 +7388,7 @@ CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m CONFIG_CRYPTO_TWOFISH_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m @@ -7404,16 +7406,19 @@ CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m # end of Accelerated Cryptographic Algorithms for CPU (x86) CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_DEV_PADLOCK=m CONFIG_CRYPTO_DEV_PADLOCK_AES=m CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ZHAOXIN is not set # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set CONFIG_CRYPTO_DEV_CCP=y @@ -7421,6 +7426,7 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7813,6 +7819,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 23258f261dcb..ca8ff01300ac 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -411,7 +411,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -2425,7 +2425,7 @@ CONFIG_VMWARE_VMCI=m # CONFIG_MISC_ALCOR_PCI is not set CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -2592,7 +2592,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set -# CONFIG_SATA_ZHAOXIN is not set +CONFIG_SATA_ZHAOXIN=m # # PATA SFF controllers with BMDMA @@ -3745,7 +3745,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_ZHAOXIN=y +CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_APPLICOM is not set @@ -3833,13 +3833,13 @@ CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m -# CONFIG_I2C_ZHAOXIN is not set +CONFIG_I2C_ZHAOXIN=m # # ACPI drivers # CONFIG_I2C_SCMI=m -# CONFIG_I2C_ZHAOXIN_SMBUS is not set +CONFIG_I2C_ZHAOXIN_SMBUS=m # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -6644,6 +6644,7 @@ CONFIG_IDLE_INJECT=y # # Performance monitor support # +# CONFIG_DWC_PCIE_PMU is not set # end of Performance monitor support 
CONFIG_RAS=y @@ -7399,6 +7400,7 @@ CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m # end of Accelerated Cryptographic Algorithms for CPU (x86) CONFIG_CRYPTO_HW=y @@ -7415,6 +7417,7 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7771,6 +7774,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y -- Gitee From ff2a060f38372dd40b5429357b01f8d5b05adac4 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 14 Mar 2024 20:46:36 +0800 Subject: [PATCH 535/953] anolis: crypto: ccp: concurrent psp access support between user and kernel space ANBZ: #8628 Add a self-defined mutex to support concurrent psp access between kernel space and user space. Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- drivers/crypto/ccp/psp-dev.c | 173 +++++++++++++++++++++++++++++++++++ drivers/crypto/ccp/psp-dev.h | 16 ++++ drivers/crypto/ccp/sev-dev.c | 69 ++++++++++++-- 3 files changed, 248 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 223e198eddec..f26026733356 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -9,6 +9,7 @@ #include #include +#include #include "sp-dev.h" #include "psp-dev.h" @@ -19,6 +20,55 @@ struct psp_device *psp_master; +struct psp_misc_dev *psp_misc; +int is_hygon_psp; + +uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) +{ + return xchg(dst, val); +} + +int psp_mutex_init(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + mutex->locked = 0; + return 0; +} + +int psp_mutex_trylock(struct psp_mutex *mutex) +{ + if (atomic64_exchange(&mutex->locked, 1)) + return 0; + else + return 1; +} + +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) +{ + int ret = 0; + unsigned long je; + + je = jiffies + msecs_to_jiffies(ms); + do { + if (psp_mutex_trylock(mutex)) { + ret = 1; + break; + } + } while (time_before(jiffies, je)); + + return ret; +} + +int psp_mutex_unlock(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + + atomic64_exchange(&mutex->locked, 0); + return 0; +} + static struct psp_device *psp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; @@ -156,9 +206,120 @@ static int psp_init(struct psp_device *psp) return 0; } +static int mmap_psp(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long page; + + page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), + vma->vm_page_prot)) { + printk(KERN_ERR "remap failed..."); + return -1; + } + vm_flags_mod(vma, VM_DONTDUMP | VM_DONTEXPAND, 0); + printk(KERN_INFO "remap_pfn_rang page:[%lu] ok.\n", page); + return 0; +} + +static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return 
-EFAULT; + } + + remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); + if (remaining) + return -EFAULT; + + *ppos += count; + + return count; +} + +static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining, written; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); + written = count - remaining; + if (!written) + return -EFAULT; + + *ppos += written; + + return written; +} + +static const struct file_operations psp_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp, + .read = read_psp, + .write = write_psp, +}; + +static int hygon_psp_additional_setup(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret = 0; + + if (!psp_misc) { + struct miscdevice *misc; + + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); + if (!psp_misc) + return -ENOMEM; + psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); + if (!psp_misc->data_pg_aligned) { + dev_err(dev, "alloc psp data page failed\n"); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return -ENOMEM; + } + SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); + + *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; + misc = &psp_misc->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_config"; + misc->fops = &psp_fops; + + ret = misc_register(misc); + if (ret) + return ret; + kref_init(&psp_misc->refcount); + } else { + kref_get(&psp_misc->refcount); + } + + return ret; +} + +static void hygon_psp_exit(struct kref *ref) +{ + struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); + ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); + free_page((unsigned long)misc_dev->data_pg_aligned); + psp_misc = NULL; +} + int psp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); struct psp_device *psp; int ret; @@ -186,6 +347,15 @@ int psp_dev_init(struct sp_device *sp) iowrite32(0, psp->io_regs + psp->vdata->inten_reg); iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); + if (pdev->vendor == PCI_VENDOR_ID_HYGON) { + is_hygon_psp = 1; + ret = hygon_psp_additional_setup(sp); + if (ret) { + dev_err(dev, "psp: unable to do additional setup\n"); + goto e_err; + } + } + /* Request an irq */ ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); if (ret) { @@ -237,6 +407,9 @@ void psp_dev_destroy(struct sp_device *sp) tee_dev_destroy(psp); + if (is_hygon_psp && psp_misc) + kref_put(&psp_misc->refcount, hygon_psp_exit); + dbc_dev_destroy(psp); platform_access_dev_destroy(psp); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 45b6e17d5770..c3cd027197fa 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -14,6 +14,7 @@ #include #include #include +#include #include "sp-dev.h" @@ -58,6 +59,21 @@ struct psp_device { unsigned int capability; }; +#define PSP_MUTEX_TIMEOUT 10000 +struct psp_mutex { + uint64_t locked; +}; + +struct psp_dev_data { + struct psp_mutex mb_mutex; +}; + +struct psp_misc_dev { + struct kref refcount; + struct psp_dev_data *data_pg_aligned; + struct miscdevice misc; +}; + void psp_set_sev_irq_handler(struct psp_device *psp, 
psp_irq_handler_t handler, void *data); void psp_clear_sev_irq_handler(struct psp_device *psp); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 49e8bfbcedaa..62e7a1e210e8 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -66,6 +66,10 @@ static bool psp_dead; static int psp_timeout; static int csv_comm_mode = CSV_COMM_MAILBOX_ON; +extern int is_hygon_psp; +extern struct psp_misc_dev *psp_misc; +extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +extern int psp_mutex_unlock(struct psp_mutex *mutex); /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator @@ -553,7 +557,13 @@ static int csv_do_ringbuf_cmds(int *psp_ret) struct sev_user_data_status data; int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __csv_ring_buffer_enter_locked(psp_ret); if (rc) @@ -566,7 +576,10 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -575,9 +588,18 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -696,9 +718,18 @@ int sev_platform_init(int *error) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_init_locked(error); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -737,9 +768,18 @@ static int sev_platform_shutdown(int *error) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_shutdown_locked(NULL); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -1415,7 +1455,13 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) return -EINVAL; } - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { switch (input.cmd) { @@ -1475,7 +1521,10 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return ret; 
} -- Gitee From 03aed52b03ea9d5081ebaef07b91a9635b53fe62 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 14 Mar 2024 20:50:25 +0800 Subject: [PATCH 536/953] anolis: crypto: ccp: Add psp mutex enable ioctl support ANBZ: #8628 Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- drivers/crypto/ccp/psp-dev.c | 52 +++++++++++++++++++++++++++++++++++- drivers/crypto/ccp/psp-dev.h | 2 +- drivers/crypto/ccp/sev-dev.c | 28 +++++++++++-------- 3 files changed, 69 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index f26026733356..b1c0c205dac7 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -9,6 +9,7 @@ #include #include +#include #include #include "sp-dev.h" @@ -22,6 +23,14 @@ struct psp_device *psp_master; struct psp_misc_dev *psp_misc; int is_hygon_psp; +#define HYGON_PSP_IOC_TYPE 'H' +enum HYGON_PSP_OPCODE { + HYGON_PSP_MUTEX_ENABLE = 1, + HYGON_PSP_MUTEX_DISABLE, + HYGON_PSP_OPCODE_MAX_NR, +}; +int psp_mutex_enabled; +extern struct mutex sev_cmd_mutex; uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) { @@ -55,7 +64,7 @@ int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) ret = 1; break; } - } while (time_before(jiffies, je)); + } while ((ms == 0) || time_before(jiffies, je)); return ret; } @@ -261,11 +270,51 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } +static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { + printk(KERN_ERR "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; + } + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_PSP_MUTEX_ENABLE: + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + // And get the sev lock to make sure no one is using it now. + mutex_lock(&sev_cmd_mutex); + psp_mutex_enabled = 1; + mutex_unlock(&sev_cmd_mutex); + // Wait 10ms just in case someone is right before getting the psp lock. + mdelay(10); + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + break; + + case HYGON_PSP_MUTEX_DISABLE: + mutex_lock(&sev_cmd_mutex); + // And get the psp lock to make sure no one is using it now. + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + psp_mutex_enabled = 0; + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + // Wait 10ms just in case someone is right before getting the sev lock. 
+ mdelay(10); + mutex_unlock(&sev_cmd_mutex); + break; + + default: + printk(KERN_ERR "%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return 0; +} + static const struct file_operations psp_fops = { .owner = THIS_MODULE, .mmap = mmap_psp, .read = read_psp, .write = write_psp, + .unlocked_ioctl = ioctl_psp, }; static int hygon_psp_additional_setup(struct sp_device *sp) @@ -349,6 +398,7 @@ int psp_dev_init(struct sp_device *sp) if (pdev->vendor == PCI_VENDOR_ID_HYGON) { is_hygon_psp = 1; + psp_mutex_enabled = 0; ret = hygon_psp_additional_setup(sp); if (ret) { dev_err(dev, "psp: unable to do additional setup\n"); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index c3cd027197fa..b0a7bf42e552 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -59,7 +59,7 @@ struct psp_device { unsigned int capability; }; -#define PSP_MUTEX_TIMEOUT 10000 +#define PSP_MUTEX_TIMEOUT 600000 struct psp_mutex { uint64_t locked; }; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 62e7a1e210e8..55b65ca89b9b 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -38,7 +38,7 @@ #define CSV_FW_FILE "hygon/csv.fw" #define SEV_FW_NAME_SIZE 64 -static DEFINE_MUTEX(sev_cmd_mutex); +DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; static int psp_cmd_timeout = 100; @@ -70,6 +70,7 @@ extern int is_hygon_psp; extern struct psp_misc_dev *psp_misc; extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); extern int psp_mutex_unlock(struct psp_mutex *mutex); +extern int psp_mutex_enabled; /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator @@ -556,8 +557,9 @@ static int csv_do_ringbuf_cmds(int *psp_ret) { struct sev_user_data_status data; int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -576,7 +578,7 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -587,8 +589,9 @@ static int csv_do_ringbuf_cmds(int *psp_ret) static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -596,7 +599,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) mutex_lock(&sev_cmd_mutex); } rc = __sev_do_cmd_locked(cmd, data, psp_ret); - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -717,8 +720,9 @@ static int __sev_platform_init_locked(int *error) int sev_platform_init(int *error) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -726,7 +730,7 @@ int sev_platform_init(int *error) mutex_lock(&sev_cmd_mutex); } rc = __sev_platform_init_locked(error); - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) 
psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -767,8 +771,9 @@ static int __sev_platform_shutdown_locked(int *error) static int sev_platform_shutdown(int *error) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -776,7 +781,7 @@ static int sev_platform_shutdown(int *error) mutex_lock(&sev_cmd_mutex); } rc = __sev_platform_shutdown_locked(NULL); - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -1437,6 +1442,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) struct sev_issue_cmd input; int ret = -EFAULT; bool writable = file->f_mode & FMODE_WRITE; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); if (!psp_master || !psp_master->sev_data) return -ENODEV; @@ -1455,7 +1461,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) return -EINVAL; } - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -1521,7 +1527,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); -- Gitee From ce2b4191a2f7c4450b5d674952cb0652ed951af7 Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Thu, 10 Aug 2023 10:53:18 +0800 Subject: [PATCH 537/953] anolis: kvm: Support psp virtualization ANBZ: #8628 Add KVM_HC_PSP_OP option to kvm_emulate_hypercall, Used to receive PSP commands for virtual machines. 
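From the guest side, a0..a3 of the hypercall carry the command id and
three guest physical addresses (command buffer, return buffer, address
mapping table). A minimal guest-side sketch, where the wrapper name
guest_psp_cmd is hypothetical and only KVM_HC_PSP_OP plus the existing
kvm_hypercall4() helper come from the real interface:

    static long guest_psp_cmd(unsigned long cmd, unsigned long data_gpa,
                              unsigned long psp_ret_gpa, unsigned long table_gpa)
    {
        /* a0..a3 map to cmd, data_gpa, psp_ret_gpa, table_gpa */
        return kvm_hypercall4(KVM_HC_PSP_OP, cmd, data_gpa,
                              psp_ret_gpa, table_gpa);
    }
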
Signed-off-by: niuyongwen Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/hygon/psp.c | 83 ++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 5 +- drivers/crypto/ccp/sev-dev.c | 84 +++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 5 ++ include/uapi/linux/kvm_para.h | 1 + 7 files changed, 180 insertions(+), 2 deletions(-) create mode 100644 arch/x86/kvm/hygon/psp.c diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a46465695e0d..36587c6db312 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2146,6 +2146,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa, gpa_t table_gpa); int kvm_add_user_return_msr(u32 msr); int kvm_find_user_return_msr(u32 msr); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 80e3fe184d17..20fa8b2de8a4 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -12,7 +12,7 @@ include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ - mmu/spte.o + mmu/spte.o hygon/psp.o ifdef CONFIG_HYPERV kvm-y += kvm_onhyperv.o diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c new file mode 100644 index 000000000000..40f5ee0e6e42 --- /dev/null +++ b/arch/x86/kvm/hygon/psp.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PSP virtualization + * + * Copyright (c) 2023, HYGON CORPORATION. All rights reserved. 
+ * Author: Ge Yang + * + */ + +#include +#include +#include +#include + +struct psp_cmdresp_head { + uint32_t buf_size; + uint32_t cmdresp_size; + uint32_t cmdresp_code; +} __packed; + +int guest_addr_map_table_op(void *data_hva, gpa_t data_gpa, gpa_t table_gpa, + int op) +{ + return 0; +} + +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa) +{ + void *data; + struct psp_cmdresp_head psp_head; + uint32_t data_size; + int psp_ret = 0; + int ret = 0; + + if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto e_free; + } + + if (guest_addr_map_table_op(data, data_gpa, table_gpa, 0)) { + ret = -EFAULT; + goto e_free; + } + + ret = psp_do_cmd(cmd, data, &psp_ret); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, psp_ret); + ret = -EIO; + goto e_free; + } + + if (guest_addr_map_table_op(data, data_gpa, table_gpa, 1)) { + ret = -EFAULT; + goto e_free; + } + + if (unlikely(kvm_write_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto e_free; + } + + if (unlikely(kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) { + ret = -EFAULT; + goto e_free; + } + + return ret; + +e_free: + kfree(data); + return ret; +} diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7d086e6a8033..665e77008531 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9881,7 +9881,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) } if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && - nr != KVM_HC_VM_ATTESTATION) { + !(nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP)) { ret = -KVM_EPERM; goto out; } @@ -9949,6 +9949,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (kvm_x86_ops.vm_attestation) ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); break; + case KVM_HC_PSP_OP: + ret = kvm_pv_psp_op(vcpu->kvm, a0, a1, a2, a3); + break; default: ret = -KVM_ENOSYS; break; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 55b65ca89b9b..5c15ce849872 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -438,6 +438,68 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } +static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + return ret; +} + static int __csv_ring_buffer_enter_locked(int *error) { struct psp_device *psp = psp_master; @@ -607,6 +669,28 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) return rc; } +int psp_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } + rc = __psp_do_cmd_locked(cmd, data, psp_ret); + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return rc; +} +EXPORT_SYMBOL_GPL(psp_do_cmd); + static int __sev_init_locked(int *error) { struct sev_data_init data; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 55dd35ce920f..c18706b3e47b 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -633,6 +633,8 @@ struct csv_data_ring_buffer { #ifdef CONFIG_CRYPTO_DEV_SP_PSP +int psp_do_cmd(int cmd, void *data, int *psp_ret); + /** * sev_platform_init - perform SEV INIT command * @@ -763,6 +765,9 @@ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +static inline int +psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } + static inline int sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; } diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 67192835455e..86369b7a5733 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -31,6 +31,7 @@ #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 #define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ +#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */ /* * hypercalls use architecture specific -- Gitee From 54a2c3f00d3bde30097d35948148dc94a5e8bf6a Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Thu, 10 Aug 2023 11:05:02 +0800 Subject: [PATCH 538/953] anolis: crypto: ccp: Support sending tkm commands based on ringbuffer ANBZ: #8628 Signed-off-by: niuyongwen Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- drivers/crypto/ccp/psp-ringbuf.c | 23 ++ 
drivers/crypto/ccp/psp-ringbuf.h | 4 + drivers/crypto/ccp/sev-dev.c | 468 ++++++++++++++++++++++++++++++- include/linux/psp-sev.h | 50 ++++ 4 files changed, 543 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c index 3b2f461b672c..9b5f886c0b40 100644 --- a/drivers/crypto/ccp/psp-ringbuf.c +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -22,6 +22,7 @@ static void enqueue_data(struct csv_queue *queue, unsigned int l; void *data; + off &= queue->mask; if (esize != 1) { off *= esize; size *= esize; @@ -117,3 +118,25 @@ unsigned int csv_dequeue_stat(struct csv_queue *queue, queue->head += len; return len; } + +unsigned int csv_dequeue_cmd(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} + +unsigned int csv_cmd_queue_size(struct csv_queue *queue) +{ + unsigned int free_size; + + free_size = queue_avail_size(queue); + return queue->mask - free_size; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h index 50e014deb5ce..336352cc7a66 100644 --- a/drivers/crypto/ccp/psp-ringbuf.h +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -32,4 +32,8 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, unsigned int csv_dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len); +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len); + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); #endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 5c15ce849872..d2faf83183dc 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -69,9 +69,24 @@ static int csv_comm_mode = CSV_COMM_MAILBOX_ON; extern int is_hygon_psp; extern struct psp_misc_dev *psp_misc; extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +extern int psp_mutex_trylock(struct psp_mutex *mutex); extern int psp_mutex_unlock(struct psp_mutex *mutex); extern int psp_mutex_enabled; +/* defination of variabled used by virtual psp */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff +static DEFINE_MUTEX(vpsp_rb_mutex); +struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +static uint8_t vpsp_rb_supported; +static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); + /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. 
Use the page allocator * to allocate the memory, which will return aligned memory for the specified @@ -1669,7 +1684,7 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer return -ENOMEM; csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); if (!stat_val_buffer) { @@ -1678,7 +1693,7 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer } csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); return 0; free_cmdptr: @@ -1795,6 +1810,455 @@ int csv_ring_buffer_queue_free(void) } EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); +static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Populate the command from the virtual machine to the queue to + * support execution in ringbuffer mode + */ +static int vpsp_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t comple_num = 0; + + if (new_head >= orig_head) + comple_num = new_head - orig_head; + else + comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += comple_num; +} + +static int vpsp_ring_buffer_queue_init(void) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + 
ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ + int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg, ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, psp_timeout*10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "sev command in ringbuffer mode timed out, disabling PSP\n"); + psp_dead = true; + return ret; + } + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + /* update head */ + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], + reg & PSP_RBHEAD_QLO_HEAD_MASK); + + if (psp_ret) + *psp_ret = vpsp_get_cmd_status(prio, index); + + return ret; +} + +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct sev_user_data_status data; + int rc; + + rc = 
__vpsp_ring_buffer_enter_locked(psp_ret); + if (rc) + goto end; + + rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); + + /* exit ringbuf mode by send CMD in mailbox mode */ + __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +end: + return rc; +} + +/** + * struct user_data_status - PLATFORM_STATUS command parameters + * + * @major: major API version + * @minor: minor API version + * @state: platform state + * @owner: self-owned or externally owned + * @chip_secure: ES or MP chip + * @fw_enc: is this FW is encrypted + * @fw_sign: is this FW is signed + * @config_es: platform config flags for csv-es + * @build: Firmware Build ID for this API version + * @bl_version_debug: Bootloader VERSION_DEBUG field + * @bl_version_minor: Bootloader VERSION_MINOR field + * @bl_version_major: Bootloader VERSION_MAJOR field + * @guest_count: number of active guests + * @reserved: should set to zero + */ +struct user_data_status { + uint8_t api_major; /* Out */ + uint8_t api_minor; /* Out */ + uint8_t state; /* Out */ + uint8_t owner : 1, /* Out */ + chip_secure : 1, /* Out */ + fw_enc : 1, /* Out */ + fw_sign : 1, /* Out */ + reserved1 : 4; /*reserved*/ + uint32_t config_es : 1, /* Out */ + build : 31; /* Out */ + uint32_t guest_count; /* Out */ +} __packed; + +/* + * Check whether the firmware supports ringbuffer mode and parse + * commands from the virtual machine + */ +static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, + struct vpsp_cmd *vcmd) +{ + int ret, error; + int rb_supported; + int rb_check_old = RB_NOT_CHECK; + struct user_data_status *status = NULL; + + if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, + RB_CHECKING)) { + /* get buildid to check if the firmware supports ringbuffer mode */ + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) { + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + ret = sev_platform_status((struct sev_user_data_status *)status, + &error); + if (ret) { + pr_warn("failed to get status[%#x], use default command mode.\n", error); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + + /* check if the firmware supports the ringbuffer mode */ + if (VPSP_RB_IS_SUPPORTED(status->build)) { + if (vpsp_ring_buffer_queue_init()) { + pr_warn("vpsp_ring_buffer_queue_init fail, use default command mode\n"); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + WRITE_ONCE(vpsp_rb_supported, 1); + } + + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + } + +end: + rb_supported = READ_ONCE(vpsp_rb_supported); + /* parse prio by vcmd */ + if (rb_supported && vcmd->is_high_rb) + *prio = CSV_COMMAND_PRIORITY_HIGH; + else + *prio = CSV_COMMAND_PRIORITY_LOW; + /* clear rb level bit in vcmd */ + vcmd->is_high_rb = 0; + + kfree(status); + return rb_supported; +} + +/* + * Try to obtain the result again by the command index, this + * interface is used in ringbuffer mode + */ +int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, + struct vpsp_ret *psp_ret) +{ + int ret = 0; + struct csv_cmdptr_entry cmd = {0}; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + /* Get the retult directly if the command has been executed */ + if (index >= 0 && vpsp_get_cmd_status(prio, index) != + VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = vpsp_get_cmd_status(prio, index); + psp_ret->status = VPSP_FINISH; + return 0; + } + + if (is_hygon_psp && mutex_enabled) + ret = psp_mutex_trylock(&psp_misc->data_pg_aligned->mb_mutex); + else + ret = 
mutex_trylock(&sev_cmd_mutex); + + if (ret) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue command from queue*/ + vpsp_dequeue_cmd(prio, index, &cmd); + ret = __sev_do_cmd_locked(cmd.cmd_id, data, + (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed\n", __func__); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_get_result); + +/* + * Send the virtual psp command to the PSP device and try to get the + * execution result, the interface and the vpsp_try_get_result + * interface are executed asynchronously. If the execution succeeds, + * the result is returned to the VM. If the execution fails, the + * vpsp_try_get_result interface will be used to obtain the result + * later again + */ +int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + /* ringbuffer mode check and parse command prio*/ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(prio, cmd, data, 0); + if (unlikely(index < 0)) { + /* do mailbox command if queuing failed*/ + ret = psp_do_cmd(cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(prio, index, data, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + goto end; + } + } else { + /* mailbox mode */ + ret = psp_do_cmd(cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); + static void sev_exit(struct kref *ref) { misc_deregister(&misc_dev->misc); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index c18706b3e47b..df88daf170d3 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -631,6 +631,46 @@ struct csv_data_ring_buffer { u16 int_on_empty; /* In */ } __packed; +/** + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to 
distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** + * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @index: used to distinguish the position of command in the ringbuffer + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 pret : 16; + u32 resv : 2; + u32 index : 12; + u32 status : 2; +}; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -763,6 +803,9 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); +int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret); + +int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -801,6 +844,13 @@ static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } +static inline int +vpsp_try_get_result(uint8_t prio, uint32_t index, + void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } + +static inline int +vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 5a4c295e990f9ccd88d206d2a33821dfc87044bf Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 26 Mar 2024 17:07:17 +0800 Subject: [PATCH 539/953] anolis: kvm: Support tkm virtualization ANBZ: #8628 The virtual machine enters the kernel through the vmmcall instruction, and puts the tkm command into the csv_ringbuffer and passes it to the PSP for processing. 
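Since the host may only queue the command in the ring buffer and report
VPSP_RUNNING, the guest has to reissue the hypercall until the status
field of struct vpsp_ret reaches VPSP_FINISH. A rough polling sketch,
assuming the hypothetical guest_psp_cmd wrapper around the vmmcall;
error handling and freeing are elided:

    struct vpsp_ret *pret = kzalloc(sizeof(*pret), GFP_KERNEL);
    long err;

    if (!pret)
        return -ENOMEM;
    /* kzalloc leaves status at 0 == VPSP_INIT; each call moves it
     * to VPSP_RUNNING (queued) or VPSP_FINISH (result ready) */
    do {
        err = guest_psp_cmd(cmd, data_gpa,
                            virt_to_phys(pret), table_gpa);
    } while (!err && pret->status == VPSP_RUNNING);
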
Signed-off-by: niuyongwen Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/kvm/hygon/psp.c | 584 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 556 insertions(+), 28 deletions(-) diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index 40f5ee0e6e42..c8edc99a92df 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -11,6 +11,43 @@ #include #include #include +#include + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "vpsp: " fmt + +/* + * The file mainly implements the base execution + * logic of virtual PSP in kernel mode, which mainly includes: + * (1) Obtain the VM command and preprocess the pointer + * mapping table information in the command buffer + * (2) The command that has been converted will interact + * with the channel of the psp through the driver and + * try to obtain the execution result + * (3) The executed command data is recovered according to + * the multilevel pointer of the mapping table, and then returned to the VM + * + * The primary implementation logic of virtual PSP in kernel mode + * call trace: + * guest command(vmmcall) + * | + * | |-> kvm_pv_psp_cmd_pre_op + * | | + * | | -> guest_addr_map_table_op + * | | + * | | -> guest_multiple_level_gpa_replace + * | + * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver + * | + * | + * |-> kvm_pv_psp_cmd_post_op + * | + * | -> guest_addr_map_table_op + * | + * | -> guest_multiple_level_gpa_restore + */ struct psp_cmdresp_head { uint32_t buf_size; @@ -18,66 +55,557 @@ struct psp_cmdresp_head { uint32_t cmdresp_code; } __packed; -int guest_addr_map_table_op(void *data_hva, gpa_t data_gpa, gpa_t table_gpa, - int op) +/** + * struct map_tbl - multilevel pointer address mapping table + * + * @parent_pa: parent address block's physics address + * @offset: offset in parent address block + * @size: submemory size + * @align: submemory align size, hva need to keep size alignment in kernel + * @hva: submemory copy block in kernel virtual address + */ +struct map_tbl { + uint64_t parent_pa; + uint32_t offset; + uint32_t size; + uint32_t align; + uint64_t hva; +} __packed; + +struct addr_map_tbls { + uint32_t tbl_nums; + struct map_tbl tbl[]; +} __packed; + +/* gpa and hva conversion maintenance table for internal use */ +struct gpa2hva_t { + void *hva; + gpa_t gpa; +}; + +struct gpa2hva_tbls { + uint32_t max_nums; + uint32_t tbl_nums; + struct gpa2hva_t tbl[]; +}; + +/* save command data for restoring later */ +struct vpsp_hbuf_wrapper { + void *data; + uint32_t data_size; + struct addr_map_tbls *map_tbls; + struct gpa2hva_tbls *g2h_tbls; +}; + +/* + * Virtual PSP host memory information maintenance, used in ringbuffer mode + */ +struct vpsp_hbuf_wrapper +g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; + +void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) { - return 0; + int i; + + pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums); + for (i = 0; i < tbls->tbl_nums; i++) { + pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx", + i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset, + tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva); + } + pr_info("\n"); } -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa) +void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) { - void 
*data; - struct psp_cmdresp_head psp_head; - uint32_t data_size; - int psp_ret = 0; - int ret = 0; + int i; - if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, - sizeof(struct psp_cmdresp_head)))) + pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums, + tbls->max_nums); + for (i = 0; i < tbls->tbl_nums; i++) + pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i, + (uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa); + pr_info("\n"); +} + +static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa) +{ + uint32_t fill_idx = tbls->tbl_nums; + + if (fill_idx >= tbls->max_nums) return -EFAULT; - data_size = psp_head.buf_size; - data = kzalloc(data_size, GFP_KERNEL); - if (!data) + tbls->tbl[fill_idx].hva = hva; + tbls->tbl[fill_idx].gpa = gpa; + tbls->tbl_nums = fill_idx + 1; + + return 0; +} + +static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + g2h->tbl[i].hva = NULL; + } +} + +static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].gpa == gpa) + return (void *)g2h->tbl[i].hva; + } + + return NULL; +} + +static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + return g2h->tbl[i].gpa; + } + + return 0; +} + +/* + * The virtual machine multilevel pointer command buffer handles the + * execution entity, synchronizes the data in the original gpa to the + * newly allocated hva(host virtual address) and updates the mapping + * relationship in the parent memory + */ +static int guest_multiple_level_gpa_replace(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + uint32_t sub_block_size; + uint64_t sub_paddr; + void *parent_kva = NULL; + + /* kmalloc memory for child block */ + sub_block_size = max(tbl->size, tbl->align); + tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL); + if (!tbl->hva) return -ENOMEM; - if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + /* get child gpa from parent gpa */ + if (unlikely(kvm_read_guest(kvm, tbl->parent_pa + tbl->offset, + &sub_paddr, sizeof(sub_paddr)))) { + pr_err("[%s]: kvm_read_guest for parent gpa failed\n", + __func__); ret = -EFAULT; goto e_free; } - if (guest_addr_map_table_op(data, data_gpa, table_gpa, 0)) { + /* copy child block data from gpa to hva */ + if (unlikely(kvm_read_guest(kvm, sub_paddr, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_read_guest for sub_data failed\n", + __func__); ret = -EFAULT; goto e_free; } - ret = psp_do_cmd(cmd, data, &psp_ret); - if (ret) { - pr_err("%s: psp do cmd error, %d\n", __func__, psp_ret); - ret = -EIO; + /* get hva from gpa */ + parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_kva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; goto e_free; } - if (guest_addr_map_table_op(data, data_gpa, table_gpa, 1)) { + /* replace pa of hva from gpa */ + *(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva); + + /* fill in gpa and hva to map table for restoring later */ + if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) { + pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n", + __func__); ret = -EFAULT; goto e_free; } - if (unlikely(kvm_write_guest(kvm, data_gpa, data, data_size))) { + return ret; + +e_free: + kfree((const void *)tbl->hva); + return 
ret; +} + +/* The virtual machine multi-level pointer command memory handles the + * execution entity, synchronizes the data in the hva(host virtual + * address) back to the memory corresponding to the gpa, and restores + * the mapping relationship in the original parent memory + */ +static int guest_multiple_level_gpa_restore(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + gpa_t sub_gpa; + void *parent_hva = NULL; + + /* get gpa from hva */ + sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva); + if (unlikely(!sub_gpa)) { + pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n", + __func__); ret = -EFAULT; - goto e_free; + goto end; } - if (unlikely(kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, - sizeof(psp_ret)))) { + /* copy child block data from hva to gpa */ + if (unlikely(kvm_write_guest(kvm, sub_gpa, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", + __func__); ret = -EFAULT; - goto e_free; + goto end; + } + + /* get parent hva from parent gpa */ + parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_hva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* restore gpa from pa of hva in parent block */ + *(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa; + + /* free child block memory */ + clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva); + kfree((const void *)tbl->hva); + tbl->hva = 0; + +end: + return ret; +} + +/* + * The virtual machine multilevel pointer command memory processing + * executes upper-layer abstract interfaces, including replacing and + * restoring two sub-processing functions + */ +static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, + struct addr_map_tbls *map_tbls, int op) +{ + int ret = 0; + int i; + uint64_t *sub_paddr_ptr; + + if (op) { + for (i = map_tbls->tbl_nums - 1; i >= 0; i--) { + /* check if the gpa of root points to itself */ + if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) { + *sub_paddr_ptr = g2h->tbl[0].gpa; + continue; + } + } + + /* restore new pa of kva with the gpa from guest */ + if (unlikely(guest_multiple_level_gpa_restore(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + } else { + for (i = 0; i < map_tbls->tbl_nums; i++) { + /* check if the gpa of root points to itself */ + if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) { + *sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva); + map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva; + continue; + } + } + + /* check if parent_pa is valid */ + if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) { + pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n", + __func__, i, map_tbls->tbl[i].parent_pa); + ret = -EFAULT; + goto end; + } + + /* replace the gpa from guest with the new pa of kva */ + if (unlikely(guest_multiple_level_gpa_replace(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } } +end: return ret; +} 
-e_free: +static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls + *map_tbl, void *data) +{ + int i; + + if (g2h) { + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) { + kfree(g2h->tbl[i].hva); + g2h->tbl[i].hva = NULL; + } + } + kfree(g2h); + } + + kfree(map_tbl); kfree(data); +} + +/* + * Obtain the VM command and preprocess the pointer mapping table + * information in the command buffer, the processed data will be + * used to interact with the psp device + */ +static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, + gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + void *data = NULL; + struct psp_cmdresp_head psp_head; + uint32_t data_size; + struct addr_map_tbls map_head, *map_tbls = NULL; + uint32_t map_tbl_size; + struct gpa2hva_tbls *g2h = NULL; + uint32_t g2h_tbl_size; + + if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; + } + + if (table_gpa) { + /* parse address map table from guest */ + if (unlikely(kvm_read_guest(kvm, table_gpa, &map_head, + sizeof(struct addr_map_tbls)))) { + pr_err("[%s]: kvm_read_guest for map_head failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums + * sizeof(struct map_tbl); + map_tbls = kzalloc(map_tbl_size, GFP_KERNEL); + if (!map_tbls) { + ret = -ENOMEM; + goto end; + } + + if (unlikely(kvm_read_guest(kvm, table_gpa, map_tbls, + map_tbl_size))) { + pr_err("[%s]: kvm_read_guest for map_tbls failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* init for gpa2hva table*/ + g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums + + 1) * sizeof(struct gpa2hva_t); + g2h = kzalloc(g2h_tbl_size, GFP_KERNEL); + if (!g2h) { + ret = -ENOMEM; + goto end; + } + g2h->max_nums = map_head.tbl_nums + 1; + + /* fill the root parent address */ + if (gpa2hva_tbl_fill(g2h, data, data_gpa)) { + pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + if (guest_addr_map_table_op(kvm, g2h, map_tbls, 0)) { + pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + + hbuf->data = data; + hbuf->data_size = data_size; + hbuf->map_tbls = map_tbls; + hbuf->g2h_tbls = g2h; + +end: + if (ret && data) + kfree(data); + return ret; +} + +/* + * The executed command data is recovered according to the multilevel + * pointer of the mapping table when the command has finished + * interacting with the psp device + */ +static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + + if (hbuf->map_tbls) { + if (guest_addr_map_table_op(kvm, hbuf->g2h_tbls, + hbuf->map_tbls, 1)) { + pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + + /* restore cmdresp's buffer from context */ + if (unlikely(kvm_write_guest(kvm, data_gpa, hbuf->data, + hbuf->data_size))) { + pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", + __func__); + ret = -EFAULT; + goto end; + } + +end: + /* release memory and clear hbuf */ + kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data); + memset(hbuf, 0, sizeof(*hbuf)); + + return ret; 
+}
+
+/*
+ * The primary implementation interface of virtual PSP in kernel mode
+ */
+int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa,
+		gpa_t table_gpa)
+{
+	int ret = 0;
+	struct vpsp_ret psp_ret = {0};
+	struct vpsp_hbuf_wrapper hbuf = {0};
+	struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd;
+	uint8_t prio = CSV_COMMAND_PRIORITY_LOW;
+	uint32_t index = 0;
+
+	if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret,
+					sizeof(psp_ret))))
+		return -EFAULT;
+
+	switch (psp_ret.status) {
+	case VPSP_INIT:
+		/* multilevel pointer replace*/
+		ret = kvm_pv_psp_cmd_pre_op(kvm, data_gpa, table_gpa, &hbuf);
+		if (unlikely(ret)) {
+			psp_ret.status = VPSP_FINISH;
+			pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n",
+				__func__);
+			ret = -EFAULT;
+			goto end;
+		}
+
+		/* try to send command to the device for execution*/
+		ret = vpsp_try_do_cmd(cmd, (void *)hbuf.data,
+				(struct vpsp_ret *)&psp_ret);
+		if (unlikely(ret)) {
+			pr_err("[%s]: vpsp_do_cmd failed\n", __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+
+		ret = -EFAULT;
+		if (psp_ret.status == VPSP_RUNNING) {
+			/* backup host memory message for restoring later*/
+			prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
+				CSV_COMMAND_PRIORITY_LOW;
+			g_hbuf_wrap[prio][psp_ret.index] = hbuf;
+			ret = 0;
+		} else if (psp_ret.status == VPSP_FINISH) {
+			/* restore multilevel pointer data */
+			ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, &hbuf);
+			if (unlikely(ret)) {
+				pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
+					__func__);
+				ret = -EFAULT;
+				goto end;
+			}
+		}
+		break;
+
+	case VPSP_RUNNING:
+		prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
+			CSV_COMMAND_PRIORITY_LOW;
+		index = psp_ret.index;
+		/* try to get the execution result from ringbuffer*/
+		ret = vpsp_try_get_result(prio, index, g_hbuf_wrap[prio][index].data,
+				(struct vpsp_ret *)&psp_ret);
+		if (unlikely(ret)) {
+			pr_err("[%s]: vpsp_try_get_result failed\n", __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+
+		ret = -EFAULT;
+		if (psp_ret.status == VPSP_RUNNING) {
+			ret = 0;
+		} else if (psp_ret.status == VPSP_FINISH) {
+			/* restore multilevel pointer data */
+			ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa,
+					&g_hbuf_wrap[prio][index]);
+			if (unlikely(ret)) {
+				pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
+					__func__);
+				ret = -EFAULT;
+				goto end;
+			}
+		}
+		break;
+
+	default:
+		pr_err("[%s]: invalid command status\n", __func__);
+		ret = -EFAULT;
+		break;
+	}
+end:
+	/* return psp_ret to guest */
+	kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret));
+
+	return ret;
+}
--
Gitee

From a449780a278e68a5982481e7a6c98cb10d0d3762 Mon Sep 17 00:00:00 2001
From: xiongmengbiao
Date: Tue, 26 Dec 2023 16:59:41 +0800
Subject: [PATCH 540/953] anolis: crypto: ccp: Support tkm key isolation

ANBZ: #8628

Save the qemu process PID-to-vid mapping and, when processing TKM
commands, obtain the corresponding vid based on kvm->userspace_pid.
After obtaining the vid, append it to the high 8 bits of the physical
address and send it to the PSP.
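For readers tracing the address encoding, here is a minimal user-space
sketch of the vid packing this patch introduces. The PSP_VID_* macros
mirror the include/linux/psp-sev.h hunk later in this patch; the sample
address and vid values are made up for illustration.

/* Hedged demo of the vid-in-high-bits encoding added by this patch. */
#include <stdint.h>
#include <stdio.h>

#define PSP_VID_MASK	0xff
#define PSP_VID_SHIFT	56
#define PUT_PSP_VID(hpa, vid) ((uint64_t)(hpa) | ((uint64_t)(PSP_VID_MASK & (vid)) << PSP_VID_SHIFT))
#define GET_PSP_VID(hpa)      ((uint16_t)((uint64_t)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK)
#define CLEAR_PSP_VID(hpa)    ((uint64_t)(hpa) & ~((uint64_t)PSP_VID_MASK << PSP_VID_SHIFT))

int main(void)
{
	uint64_t hpa = 0x12345678000ULL;	/* example command-buffer address */
	uint64_t tagged = PUT_PSP_VID(hpa, 3);	/* vid 3 lands in bits 63:56 */

	/* prints: tagged=0x300012345678000 vid=3 hpa=0x12345678000 */
	printf("tagged=%#llx vid=%u hpa=%#llx\n",
	       (unsigned long long)tagged, GET_PSP_VID(tagged),
	       (unsigned long long)CLEAR_PSP_VID(tagged));
	return 0;
}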
Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/kvm/hygon/psp.c | 25 +++++- drivers/crypto/ccp/psp-dev.c | 161 ++++++++++++++++++++++++++++++++++- drivers/crypto/ccp/sev-dev.c | 114 ++++++++++++++++++++++--- include/linux/psp-sev.h | 15 +++- 4 files changed, 298 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index c8edc99a92df..fd5b4839176b 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -49,6 +49,9 @@ * | -> guest_multiple_level_gpa_restore */ +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f + struct psp_cmdresp_head { uint32_t buf_size; uint32_t cmdresp_size; @@ -513,6 +516,13 @@ static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, return ret; } +static int cmd_type_is_tkm(int cmd) +{ + if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + /* * The primary implementation interface of virtual PSP in kernel mode */ @@ -525,6 +535,17 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; uint32_t index = 0; + uint32_t vid = 0; + + // only tkm cmd need vid + if (cmd_type_is_tkm(vcmd->cmd_id)) { + // if vm without set vid, then tkm command is not allowed + ret = vpsp_get_vid(&vid, kvm->userspace_pid); + if (ret) { + pr_err("[%s]: not allowed tkm command without vid\n", __func__); + return -EFAULT; + } + } if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) @@ -543,7 +564,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, } /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(cmd, (void *)hbuf.data, + ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_do_cmd failed\n", __func__); @@ -575,7 +596,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, CSV_COMMAND_PRIORITY_LOW; index = psp_ret.index; /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(prio, index, g_hbuf_wrap[prio][index].data, + ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index b1c0c205dac7..994721c7848e 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include #include "sp-dev.h" #include "psp-dev.h" @@ -27,8 +30,23 @@ int is_hygon_psp; enum HYGON_PSP_OPCODE { HYGON_PSP_MUTEX_ENABLE = 1, HYGON_PSP_MUTEX_DISABLE, + HYGON_VPSP_CTRL_OPT, HYGON_PSP_OPCODE_MAX_NR, }; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + union { + unsigned int vid; + unsigned char reserved[128]; + } data; +}; + int psp_mutex_enabled; extern struct mutex sev_cmd_mutex; @@ -270,9 +288,141 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. 
+ */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +struct vpsp_vid_entry { + uint32_t vid; + pid_t pid; +}; +static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_vid_entry entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When the virtual machine executes the 'tkm' command, + * it needs to retrieve the corresponding 'vid' + * by performing a binary search using 'kvm->userspace_pid'. + */ +int vpsp_get_vid(uint32_t *vid, pid_t pid) +{ + struct vpsp_vid_entry new_entry = {.pid = pid}; + struct vpsp_vid_entry *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, + sizeof(struct vpsp_vid_entry), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + if (vid) { + *vid = existing_entry->vid; + pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); + } + return 0; +} +EXPORT_SYMBOL_GPL(vpsp_get_vid); + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * The insertion is done in ascending order of 'pid'. + */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_vid(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_rwlock); + memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); + sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_rwlock); + return 0; +} + +/** + * Upon the virtual machine is shut down, + * the 'vpsp_del_vid' function is employed to remove + * the 'vid' associated with the current 'pid'. 
+ */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_vid_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); + memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], + sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_rwlock); + return ret; +} + +static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char op = ctrl->op; + + switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) { unsigned int opcode = 0; + struct vpsp_dev_ctrl vpsp_ctrl_op; + int ret = -EFAULT; if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { printk(KERN_ERR "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); @@ -289,6 +439,7 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) // Wait 10ms just in case someone is right before getting the psp lock. mdelay(10); psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + ret = 0; break; case HYGON_PSP_MUTEX_DISABLE: @@ -300,13 +451,21 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) // Wait 10ms just in case someone is right before getting the sev lock. mdelay(10); mutex_unlock(&sev_cmd_mutex); + ret = 0; + break; + + case HYGON_VPSP_CTRL_OPT: + if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); break; default: printk(KERN_ERR "%s: invalid ioctl number: %d\n", __func__, opcode); return -EINVAL; } - return 0; + return ret; } static const struct file_operations psp_fops = { diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index d2faf83183dc..304e9b6f8a92 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -684,6 +684,73 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) return rc; } +static int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + phys_addr_t phys_addr; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + if (data && WARN_ON_ONCE(!virt_addr_valid(data))) + return -EINVAL; + + /* Get the physical address of the command buffer */ + phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ? 
upper_32_bits(phys_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + return ret; +} + int psp_do_cmd(int cmd, void *data, int *psp_ret) { int rc; @@ -1859,12 +1926,12 @@ static int vpsp_dequeue_cmd(int prio, int index, * Populate the command from the virtual machine to the queue to * support execution in ringbuffer mode */ -static int vpsp_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) { struct csv_cmdptr_entry cmdptr = { }; int index = -1; - cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); cmdptr.cmd_id = cmd; cmdptr.cmd_flags = flags; @@ -2132,7 +2199,7 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode */ -int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret) { int ret = 0; @@ -2157,7 +2224,7 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, if (vpsp_queue_cmd_size(prio) == 1) { /* dequeue command from queue*/ vpsp_dequeue_cmd(prio, index, &cmd); - ret = __sev_do_cmd_locked(cmd.cmd_id, data, + ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, (int *)psp_ret); psp_ret->status = VPSP_FINISH; if (unlikely(ret)) { @@ -2175,7 +2242,8 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, index); psp_ret->status = VPSP_FINISH; if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed\n", __func__); + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); goto end; } } @@ -2194,6 +2262,30 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, } EXPORT_SYMBOL_GPL(vpsp_try_get_result); +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(&sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); + + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + /* * Send the virtual psp command to the PSP device and try to get 
the * execution result, the interface and the vpsp_try_get_result @@ -2202,7 +2294,7 @@ EXPORT_SYMBOL_GPL(vpsp_try_get_result); * vpsp_try_get_result interface will be used to obtain the result * later again */ -int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { int ret = 0; int rb_supported; @@ -2214,10 +2306,10 @@ int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) (struct vpsp_cmd *)&cmd); if (rb_supported) { /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(prio, cmd, data, 0); + index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); if (unlikely(index < 0)) { /* do mailbox command if queuing failed*/ - ret = psp_do_cmd(cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; @@ -2233,14 +2325,14 @@ int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) } /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(prio, index, data, psp_ret); + ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); if (unlikely(ret)) { - pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); goto end; } } else { /* mailbox mode */ - ret = psp_do_cmd(cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index df88daf170d3..75f54d8e49ae 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -671,6 +671,12 @@ struct vpsp_ret { u32 status : 2; }; +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -803,9 +809,12 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); -int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret); +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, + void *data, struct vpsp_ret *psp_ret); + +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); -int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret); +int vpsp_get_vid(uint32_t *vid, pid_t pid); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -849,7 +858,7 @@ vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } static inline int -vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ -- Gitee From a945d6daed1df45d1ca84da072f193e78489cf00 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Sun, 18 Feb 2024 22:56:37 +0800 Subject: [PATCH 541/953] anolis: crypto: ccp: Allow VM without a configured vid to use TKM ANBZ: #8628 When no vid is assigned to the qemu virtual machine, the virtual machine can use the default vid0 when executing the tkm command, but this will share the key space with the host. 
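As a hedged illustration of the /dev/hygon_psp_config control path
described next, a user-space sketch follows. The struct layout and
opcode values mirror the psp-dev.c hunks below, but the ioctl request
encoding (the value of HYGON_PSP_IOC_TYPE and the _IOWR wiring) is an
assumption, not taken from this series.

/* Hedged sketch: forbid the shared default vid 0 for unconfigured VMs. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct vpsp_dev_ctrl {
	unsigned char op;
	union {
		unsigned int vid;
		unsigned int def_vid_perm;
		unsigned char reserved[128];
	} data;
};

#define HYGON_PSP_IOC_TYPE	'H'	/* assumed value */
#define HYGON_VPSP_CTRL_OPT	_IOWR(HYGON_PSP_IOC_TYPE, 3, struct vpsp_dev_ctrl) /* assumed encoding */
#define VPSP_OP_SET_DEFAULT_VID_PERMISSION	2	/* third value of VPSP_DEV_CTRL_OPCODE */

int main(void)
{
	struct vpsp_dev_ctrl ctrl = { .op = VPSP_OP_SET_DEFAULT_VID_PERMISSION };
	int fd = open("/dev/hygon_psp_config", O_RDWR);

	if (fd < 0)
		return 1;
	ctrl.data.def_vid_perm = 0;	/* 0: VMs without a vid may not use vid 0 */
	if (ioctl(fd, HYGON_VPSP_CTRL_OPT, &ctrl))
		perror("ioctl");
	close(fd);
	return 0;
}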
You can use ioctl to operate /dev/hygon_psp_config to control the behavior of the default vid. Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/kvm/hygon/psp.c | 4 ++-- drivers/crypto/ccp/psp-dev.c | 33 +++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 4 ++++ 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index fd5b4839176b..9181ec2406ec 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -539,9 +539,9 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, // only tkm cmd need vid if (cmd_type_is_tkm(vcmd->cmd_id)) { - // if vm without set vid, then tkm command is not allowed + // check the permission to use the default vid when no vid is set ret = vpsp_get_vid(&vid, kvm->userspace_pid); - if (ret) { + if (ret && !vpsp_get_default_vid_permission()) { pr_err("[%s]: not allowed tkm command without vid\n", __func__); return -EFAULT; } diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 994721c7848e..c110ae79d93f 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -37,12 +37,16 @@ enum HYGON_PSP_OPCODE { enum VPSP_DEV_CTRL_OPCODE { VPSP_OP_VID_ADD, VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, }; struct vpsp_dev_ctrl { unsigned char op; union { unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; unsigned char reserved[128]; } data; }; @@ -316,6 +320,23 @@ static void swap_vid_entries(void *a, void *b, int size) memcpy(b, &entry, size); } +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. 
+ */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} +EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); + /** * When the virtual machine executes the 'tkm' command, * it needs to retrieve the corresponding 'vid' @@ -333,6 +354,7 @@ int vpsp_get_vid(uint32_t *vid, pid_t pid) if (!existing_entry) return -ENOENT; + if (vid) { *vid = existing_entry->vid; pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); @@ -411,6 +433,14 @@ static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) ret = vpsp_del_vid(); break; + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + default: ret = -EINVAL; break; @@ -459,6 +489,9 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) sizeof(struct vpsp_dev_ctrl))) return -EFAULT; ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); + if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; break; default: diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 75f54d8e49ae..1536d0057738 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -815,6 +815,8 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); int vpsp_get_vid(uint32_t *vid, pid_t pid); + +int vpsp_get_default_vid_permission(void); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -860,6 +862,8 @@ vpsp_try_get_result(uint8_t prio, uint32_t index, static inline int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +static inline int +vpsp_get_default_vid_permission(void) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 214524d930f689a05c87b9affdb7680e35878256 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Mon, 18 Sep 2023 17:26:45 +0800 Subject: [PATCH 542/953] anolis: drm/hygon: Add support to passthrough Hygon DCU to virtual machine ANBZ: #8574 PCI RESET will cause failure to passthrough Hygon DCU to the guest. Fix this issue by add hydcu-fixup-header driver to disable PCI RESET. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2915 --- Documentation/gpu/hydcu-fixup-header.rst | 13 +++ drivers/gpu/drm/Kconfig | 6 ++ drivers/gpu/drm/Makefile | 1 + .../gpu/drm/hygon/hydcu-fixup-header/Makefile | 3 + .../hydcu_pci_fixup_header.c | 93 +++++++++++++++++++ 5 files changed, 116 insertions(+) create mode 100644 Documentation/gpu/hydcu-fixup-header.rst create mode 100644 drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile create mode 100644 drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c diff --git a/Documentation/gpu/hydcu-fixup-header.rst b/Documentation/gpu/hydcu-fixup-header.rst new file mode 100644 index 000000000000..5dca3ff3a137 --- /dev/null +++ b/Documentation/gpu/hydcu-fixup-header.rst @@ -0,0 +1,13 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +========================= + drm/hygon/hydcu-fixup-header hydcu-fixup-header driver +========================= + +The drm/hygon/hydcu-fixup-header driver supports all HYGON DCUs. 
+ +General description +====================== + +The drm/hygon/hydcu-fixup-header driver adds flags NO_BUS_RESET to hydcu +device to disable vfio pci reset, as dcu is not support now. diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index ec4abf9ff47b..353ffa210f0e 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -436,3 +436,9 @@ config DRM_LIB_RANDOM config DRM_PRIVACY_SCREEN bool default n + +config HYDCU_FIXUP_HEADER + bool "Enable fixup header support for HYDCU" + help + Choose this option if you want to use pci passthrough with HYDCU + HYDCU cannot support pci reset, so enable this module to disable pci reset diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 215e78e79125..12ad840d9e3a 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -198,3 +198,4 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile new file mode 100644 index 000000000000..2dc816df4239 --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hydcu_pci_fixup_header.o \ No newline at end of file diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c new file mode 100644 index 000000000000..962f0d74f703 --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON DCU fixup driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#define PCI_VENDOR_ID_HYGON 0x1d94 + +#define DEVICE_Z100SM 0x51b7 +#define DEVICE_C878182 0x52b7 +#define DEVICE_C878186 0x53b7 +#define DEVICE_Z100 0x54b7 +#define DEVICE_Z100L 0x55b7 +#define DEVICE_C878181 0x56b7 +#define DEVICE_C878185 0x57b7 +#define DEVICE_C878188 0x58b7 +#define DEVICE_C878174 0x59b7 +#define DEVICE_KONGMING 0x61b7 +#define DEVICE_KONGMING_E 0x6210 + +#define DRIVER_VERSION "0.2" +#define DRIVER_AUTHOR "huangjun " +#define DRIVER_DESC "fix dcu header" + +static int hydcu_pci_fixup_header_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + dev_info(&pdev->dev, "add flags NO_BUS_RESET\n"); + pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + pdev->pm_cap = 0; + dev_info(&pdev->dev, "will abort probe\n"); + + return -EINVAL; +} + +static void hydcu_pci_fixup_header_remove(struct pci_dev *pdev) +{ +} + +static const struct pci_device_id hydcu_pci_fixup_header_ids[] = { + { PCI_VDEVICE(HYGON, DEVICE_Z100SM), }, + { PCI_VDEVICE(HYGON, DEVICE_C878182), }, + { PCI_VDEVICE(HYGON, DEVICE_C878186), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100L), }, + { PCI_VDEVICE(HYGON, DEVICE_C878181), }, + { PCI_VDEVICE(HYGON, DEVICE_C878185), }, + { PCI_VDEVICE(HYGON, DEVICE_C878188), }, + { PCI_VDEVICE(HYGON, DEVICE_C878174), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING_E), }, + {}, +}; + +static struct pci_driver hydcu_pci_fixup_header_driver = { + .name = "hydcu-fixup-header", + .id_table = hydcu_pci_fixup_header_ids, + .probe = hydcu_pci_fixup_header_probe, + .remove = hydcu_pci_fixup_header_remove, +}; + +static int __init hydcu_pci_fixup_header_init(void) +{ + /* Register and scan for devices */ + return pci_register_driver(&hydcu_pci_fixup_header_driver); +} + +static void __exit hydcu_pci_fixup_header_cleanup(void) +{ + pci_unregister_driver(&hydcu_pci_fixup_header_driver); +} + +module_init(hydcu_pci_fixup_header_init); +module_exit(hydcu_pci_fixup_header_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); -- Gitee From f7dd1a7e1d59aa81ef43a5d661bd6e949e4fc948 Mon Sep 17 00:00:00 2001 From: yangge Date: Fri, 22 Mar 2024 08:32:28 -0400 Subject: [PATCH 543/953] anolis: mm/page_alloc: don't use PCP list for THP-sized allocations when using PF_MEMALLOC_PIN ANBZ: #8680 In the past, movable allocations could be disallowed from CMA through PF_MEMALLOC_PIN. However, since 5d0a661d808f ("mm/page_alloc: use only one PCP list for THP-sized allocations"), THP-sized pages of different types are put into one PCP list. When allocate a THP with PF_MEMALLOC_PIN, it would accidentally get a CMA page from PCP list, which will cause the program to not run correctly. So, PCP list can't be used for THP-sized allocations when using PF_MEMALLOC_PIN. 
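For context on why the rmqueue() fix below gates on ALLOC_CMA:
PF_MEMALLOC_PIN is honored before the allocator runs by stripping
__GFP_MOVABLE, which in turn keeps gfp_to_alloc_flags_cma() from
setting ALLOC_CMA for pinned allocations. A sketch of the relevant
helper, paraphrased from memory from include/linux/sched/mm.h and not
part of this patch:

/* Paraphrased from include/linux/sched/mm.h (sketch, not this patch):
 * tasks under PF_MEMALLOC_PIN lose __GFP_MOVABLE, so they never get
 * ALLOC_CMA and the rmqueue() check below keeps them off the shared
 * THP PCP list.
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}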
Fixes: 5d0a661d808f ("mm/page_alloc: use only one PCP list for THP-sized allocations")
Signed-off-by: yangge
Signed-off-by: hanliyang
Reviewed-by: Baolin Wang
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2983
---
 mm/page_alloc.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b7e3a5c37928..4ff8f6b9dffe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2788,10 +2788,20 @@ struct page *rmqueue(struct zone *preferred_zone,
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
 	if (likely(pcp_allowed_order(order))) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
+		    order != pageblock_order) {
+			page = rmqueue_pcplist(preferred_zone, zone, order,
+					       migratetype, alloc_flags);
+			if (likely(page))
+				goto out;
+		}
+#else
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				       migratetype, alloc_flags);
 		if (likely(page))
 			goto out;
+#endif
 	}
 
 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
--
Gitee

From 126b06fa6e4d3f8ebf9f2e324d50bf2cf0210749 Mon Sep 17 00:00:00 2001
From: yangge
Date: Thu, 21 Mar 2024 08:46:33 -0400
Subject: [PATCH 544/953] anolis: mm/gup: don't check if a page is in lru
 before draining it

ANBZ: #8680

Before migrating a page, we need to drain it out of the cpu's pagevecs
if it is there; otherwise the migration fails because of an incorrect
page reference. The return value of folio_test_lru() does not tell
whether the page is in the cpu's pagevecs, so the folio_test_lru()
check needs to be removed to keep the migration logic correct.

Signed-off-by: yangge
Signed-off-by: hanliyang
Reviewed-by: Baolin Wang
Reviewed-by: Xu Yu
Link: https://gitee.com/anolis/cloud-kernel/pulls/2983
---
 mm/gup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/gup.c b/mm/gup.c
index 2f8a2d89fde1..29a720dfc163 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2054,7 +2054,7 @@ static unsigned long collect_longterm_unpinnable_pages(
 			continue;
 		}
 
-		if (!folio_test_lru(folio) && drain_allow) {
+		if (drain_allow) {
 			lru_add_drain_all();
 			drain_allow = false;
 		}
--
Gitee

From c915cd2724570e2f47e01c77fba79befa267a18b Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Mon, 13 Nov 2023 01:54:26 +0800
Subject: [PATCH 545/953] anolis: KVM: SVM: Unmap ghcb pages if they're still
 mapped when destroy a CSV2 guest

ANBZ: #8675

The ghcb pages might be mapped while KVM handles VMGEXIT events, and
they are normally unmapped when preparing to switch back to guest mode.
If we kill the userspace VMM (e.g. qemu) of a CSV2 guest, it's possible
that mapped ghcb pages are never unmapped, which causes a memory leak.
We exposed a serious leak by frequently creating and killing multiple
qemu processes for CSV2 guests.

In order to solve this issue, unmap ghcb pages if they're still mapped
when destroying a CSV2 guest.
Fixes: ce7ea0cfdc2e ("KVM: SVM: Move GHCB unmapping to fix RCU warning")
Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT")
Signed-off-by: hanliyang
Reviewed-by: Xingrui Yi
Reviewed-by: Shirong Hao
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2975
---
 arch/x86/kvm/svm/sev.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 457e0bf79b13..cfbca578aec5 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2650,6 +2650,9 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 
 	__free_page(virt_to_page(svm->sev_es.vmsa));
 
+	if (svm->sev_es.ghcb)
+		kvm_vcpu_unmap(vcpu, &svm->sev_es.ghcb_map, false);
+
 	if (svm->sev_es.ghcb_sa_free)
 		kvfree(svm->sev_es.ghcb_sa);
 
--
Gitee

From 489c54a4eb9c2c32a93c7f2c7f2fa60bac5dfc68 Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Wed, 3 Apr 2024 11:11:35 +0800
Subject: [PATCH 546/953] anolis: sched: fix compile error when
 CONFIG_SCHED_CORE is disabled

ANBZ: #8698

When CONFIG_SCHED_CORE is disabled, acpu_enabled and
sysctl_sched_cfs_bw_burst_onset_percent cannot be found because they
are defined under CONFIG_SCHED_CORE. This patch fixes the problem by
moving the definitions out of the CONFIG_SCHED_CORE block.

Fixes: aa1ad269896c("anolis: sched: introduce ACPU accounting")
Fixes: 55a1c37487b1("anolis: sched/fair: Make CFS bandwidth controller burstable")
Signed-off-by: Cruz Zhao
Reviewed-by: Tianchen Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2999
---
 kernel/sched/core.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b84f75281e62..d59ef701ccfd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -151,15 +151,23 @@ const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
 
 __read_mostly int scheduler_running;
 
-#ifdef CONFIG_SCHED_CORE
-
-DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
-
 #ifdef CONFIG_SCHED_ACPU
 DEFINE_STATIC_KEY_FALSE(acpu_enabled);
 unsigned int sysctl_sched_acpu_enabled;
 #endif
 
+#ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth,
+ * 0 by default.
+ */
+unsigned int sysctl_sched_cfs_bw_burst_onset_percent;
+#endif
+
+#ifdef CONFIG_SCHED_CORE
+
+DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
+
 /* kernel prio, less is more */
 static inline int __task_prio(const struct task_struct *p)
 {
@@ -175,14 +183,6 @@ static inline int __task_prio(const struct task_struct *p)
 		return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
 }
 
-#ifdef CONFIG_CFS_BANDWIDTH
-/*
- * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth,
- * 0 by default.
- */
-unsigned int sysctl_sched_cfs_bw_burst_onset_percent;
-#endif
-
 /*
  * l(a,b)
  * le(a,b) := !l(b,a)
--
Gitee

From 06b735f3b0637815deb5ff8f7beccbf51acb932e Mon Sep 17 00:00:00 2001
From: duanqiangwen
Date: Wed, 6 Dec 2023 17:50:44 +0800
Subject: [PATCH 547/953] net: wangxun: fix changing mac failed when running

ANBZ: #8484

commit 87e839c82cc36346a2cd183ca941316902110716 upstream.

In some bonding modes, services need to change the MAC address while
the netif is running. Add the IFF_LIVE_ADDR_CHANGE priv_flag to Wangxun
netdevs to support this.
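The flag matters because the generic MAC-change path rejects a live
change without it. Roughly the core check, paraphrased from memory from
net/ethernet/eth.c and not part of this patch:

/* Paraphrased from net/ethernet/eth.c (sketch, not this patch): without
 * IFF_LIVE_ADDR_CHANGE, a MAC change on a running interface is rejected
 * with -EBUSY, which is what broke bonding on these NICs.
 */
int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	return 0;
}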
Signed-off-by: duanqiangwen Link: https://lore.kernel.org/r/20231206095044.17844-1-duanqiangwen@net-swift.com Signed-off-by: Jakub Kicinski Reviewed-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 1 + drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a4d63d2f3c5b..2085b9c38a15 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -580,6 +580,7 @@ static int ngbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index d60c26ba0ba4..2482b661bc99 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -637,6 +637,7 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - -- Gitee From dd499292e657f333798be6d3a6bff3a66c57178b Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 11 Oct 2023 17:19:04 +0800 Subject: [PATCH 548/953] net: libwx: support hardware statistics ANBZ: #8484 commit 46b92e10d631b6a2c06d151929e87f1d39d72b8a upstream Implement update and clear Rx/Tx statistics. Signed-off-by: Jiawen Wu Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231011091906.70486-2-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 169 ++++++++++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 8 + drivers/net/ethernet/wangxun/libwx/wx_hw.c | 99 ++++++++++ drivers/net/ethernet/wangxun/libwx/wx_hw.h | 2 + drivers/net/ethernet/wangxun/libwx/wx_lib.c | 20 ++- drivers/net/ethernet/wangxun/libwx/wx_type.h | 81 +++++++++ 6 files changed, 377 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 93cb6f2294e7..ddc5f6d20b9c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -3,9 +3,171 @@ #include #include +#include #include "wx_type.h" #include "wx_ethtool.h" +#include "wx_hw.h" + +struct wx_stats { + char stat_string[ETH_GSTRING_LEN]; + size_t sizeof_stat; + off_t stat_offset; +}; + +#define WX_STAT(str, m) { \ + .stat_string = str, \ + .sizeof_stat = sizeof(((struct wx *)0)->m), \ + .stat_offset = offsetof(struct wx, m) } + +static const struct wx_stats wx_gstrings_stats[] = { + WX_STAT("rx_dma_pkts", stats.gprc), + WX_STAT("tx_dma_pkts", stats.gptc), + WX_STAT("rx_dma_bytes", stats.gorc), + WX_STAT("tx_dma_bytes", stats.gotc), + WX_STAT("rx_total_pkts", stats.tpr), + WX_STAT("tx_total_pkts", stats.tpt), + WX_STAT("rx_long_length_count", stats.roc), + WX_STAT("rx_short_length_count", stats.ruc), + WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + WX_STAT("os2bmc_tx_by_host", stats.o2bspc), + WX_STAT("os2bmc_rx_by_host", stats.b2ogprc), + WX_STAT("rx_no_dma_resources", 
stats.rdmdrop), + WX_STAT("tx_busy", tx_busy), + WX_STAT("non_eop_descs", non_eop_descs), + WX_STAT("tx_restart_queue", restart_queue), + WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), + WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +}; + +/* drivers allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#define WX_NUM_RX_QUEUES netdev->num_tx_queues +#define WX_NUM_TX_QUEUES netdev->num_tx_queues + +#define WX_QUEUE_STATS_LEN ( \ + (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ + (sizeof(struct wx_queue_stats) / sizeof(u64))) +#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) + +int wx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return WX_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(wx_get_sset_count); + +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string); + for (i = 0; i < netdev->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + } + for (i = 0; i < WX_NUM_RX_QUEUES; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + } + break; + } +} +EXPORT_SYMBOL(wx_get_strings); + +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_ring *ring; + unsigned int start; + int i, j; + char *p; + + wx_update_stats(wx); + + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) { + p = (char *)wx + wx_gstrings_stats[i].stat_offset; + data[i] = (wx_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = wx->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < WX_NUM_RX_QUEUES; j++) { + ring = wx->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } +} +EXPORT_SYMBOL(wx_get_ethtool_stats); + +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + mac_stats->MulticastFramesXmittedOK = hwstats->mptc; + mac_stats->BroadcastFramesXmittedOK = hwstats->bptc; + mac_stats->MulticastFramesReceivedOK = hwstats->mprc; + mac_stats->BroadcastFramesReceivedOK = hwstats->bprc; +} +EXPORT_SYMBOL(wx_get_mac_stats); + +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; + stats->rx_pause_frames = hwstats->lxonoffrxc; +} +EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -14,5 +176,12 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); + if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { + info->n_stats = WX_STATS_LEN - + (WX_NUM_TX_QUEUES - wx->num_tx_queues) * + (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; + } else { + info->n_stats = WX_STATS_LEN; + } } EXPORT_SYMBOL(wx_get_drvinfo); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index e85538c69454..16d1a09369a6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -4,5 +4,13 @@ #ifndef _WX_ETHTOOL_H_ #define _WX_ETHTOOL_H_ +int wx_get_sset_count(struct net_device *netdev, int sset); +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data); +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats); +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 52130df26aee..ec92e0023ff5 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1911,6 +1911,105 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) } EXPORT_SYMBOL(wx_vlan_rx_kill_vid); +/** + * wx_update_stats - Update the board statistics counters. 
+ * @wx: board private structure + **/ +void wx_update_stats(struct wx *wx) +{ + struct wx_hw_stats *hwstats = &wx->stats; + + u64 non_eop_descs = 0, alloc_rx_buff_failed = 0; + u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0; + u64 restart_queue = 0, tx_busy = 0; + u32 i; + + /* gather some stats to the wx struct that are per queue */ + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *rx_ring = wx->rx_ring[i]; + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + } + wx->non_eop_descs = non_eop_descs; + wx->alloc_rx_buff_failed = alloc_rx_buff_failed; + wx->hw_csum_rx_error = hw_csum_rx_error; + wx->hw_csum_rx_good = hw_csum_rx_good; + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *tx_ring = wx->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + } + wx->restart_queue = restart_queue; + wx->tx_busy = tx_busy; + + hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT); + hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT); + hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB); + hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB); + hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC); + hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC); + hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC); + hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT); + hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT); + hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT); + hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); + hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + + for (i = 0; i < wx->mac.max_rx_queues; i++) + hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); +} +EXPORT_SYMBOL(wx_update_stats); + +/** + * wx_clear_hw_cntrs - Generic clear hardware counters + * @wx: board private structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
+ **/ +void wx_clear_hw_cntrs(struct wx *wx) +{ + u16 i = 0; + + for (i = 0; i < wx->mac.max_rx_queues; i++) + wr32(wx, WX_PX_MPRC(i), 0); + + rd32(wx, WX_RDM_PKT_CNT); + rd32(wx, WX_TDM_PKT_CNT); + rd64(wx, WX_RDM_BYTE_CNT_LSB); + rd32(wx, WX_TDM_BYTE_CNT_LSB); + rd32(wx, WX_RDM_DRP_PKT); + rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + rd32(wx, WX_RDB_LXONTXC); + rd32(wx, WX_RDB_LXOFFTXC); + rd32(wx, WX_MAC_LXONOFFRXC); +} +EXPORT_SYMBOL(wx_clear_hw_cntrs); + /** * wx_start_hw - Prepare hardware for Tx/Rx * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 0b3447bc6f2f..d3a0c65ef3ef 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -34,5 +34,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +void wx_update_stats(struct wx *wx); +void wx_clear_hw_cntrs(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index e078f4071dc2..347d3cec02a3 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -421,6 +421,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, return false; rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; return true; } @@ -654,6 +655,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; break; } @@ -809,9 +811,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - netif_running(tx_ring->netdev)) + netif_running(tx_ring->netdev)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } } return !!budget; @@ -888,6 +892,7 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) /* A reprieve! 
- use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; return 0; } @@ -1465,8 +1470,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - if (wx_maybe_stop_tx(tx_ring, count + 3)) + if (wx_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; @@ -2595,8 +2602,11 @@ void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; int i; + wx_update_stats(wx); + rcu_read_lock(); for (i = 0; i < wx->num_rx_queues; i++) { struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); @@ -2632,6 +2642,12 @@ void wx_get_stats64(struct net_device *netdev, } rcu_read_unlock(); + + hwstats = &wx->stats; + stats->rx_errors = hwstats->crcerrs + hwstats->rlec; + stats->multicast = hwstats->qmprc; + stats->rx_length_errors = hwstats->rlec; + stats->rx_crc_errors = hwstats->crcerrs; } EXPORT_SYMBOL(wx_get_stats64); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index c555af9ed51b..5386a418d3bc 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -59,6 +59,25 @@ #define WX_TS_ALARM_ST_DALARM BIT(1) #define WX_TS_ALARM_ST_ALARM BIT(0) +/* statistic */ +#define WX_TX_FRAME_CNT_GOOD_BAD_L 0x1181C +#define WX_TX_BC_FRAMES_GOOD_L 0x11824 +#define WX_TX_MC_FRAMES_GOOD_L 0x1182C +#define WX_RX_FRAME_CNT_GOOD_BAD_L 0x11900 +#define WX_RX_BC_FRAMES_GOOD_L 0x11918 +#define WX_RX_MC_FRAMES_GOOD_L 0x11920 +#define WX_RX_CRC_ERROR_FRAMES_L 0x11928 +#define WX_RX_LEN_ERROR_FRAMES_L 0x11978 +#define WX_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define WX_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define WX_MAC_LXONOFFRXC 0x11E0C + +/*********************** Receive DMA registers **************************/ +#define WX_RDM_DRP_PKT 0x12500 +#define WX_RDM_PKT_CNT 0x12504 +#define WX_RDM_BYTE_CNT_LSB 0x12508 +#define WX_RDM_BMC2OS_CNT 0x12510 + /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 @@ -94,6 +113,9 @@ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) #define WX_TDM_RP_IDX 0x1820C +#define WX_TDM_PKT_CNT 0x18308 +#define WX_TDM_BYTE_CNT_LSB 0x1830C +#define WX_TDM_OS2BMC_CNT 0x18314 #define WX_TDM_RP_RATE 0x18404 /***************************** RDB registers *********************************/ @@ -106,6 +128,8 @@ /* statistic */ #define WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +#define WX_RDB_LXOFFTXC 0x19218 +#define WX_RDB_LXONTXC 0x1921C /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -218,6 +242,8 @@ #define WX_MNG_MBOX_CTL 0x1E044 #define WX_MNG_MBOX_CTL_SWRDY BIT(0) #define WX_MNG_MBOX_CTL_FWRDY BIT(2) +#define WX_MNG_BMC2OS_CNT 0x1E090 +#define WX_MNG_OS2BMC_CNT 0x1E094 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -300,6 +326,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) #define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +#define WX_PX_MPRC(_i) 
(0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) @@ -766,9 +793,16 @@ struct wx_queue_stats { u64 bytes; }; +struct wx_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; +}; + struct wx_rx_queue_stats { + u64 non_eop_descs; u64 csum_good_cnt; u64 csum_err; + u64 alloc_rx_buff_failed; }; /* iterator for handling rings in ring container */ @@ -812,6 +846,7 @@ struct wx_ring { struct wx_queue_stats stats; struct u64_stats_sync syncp; union { + struct wx_tx_queue_stats tx_stats; struct wx_rx_queue_stats rx_stats; }; } ____cacheline_internodealigned_in_smp; @@ -843,6 +878,33 @@ enum wx_isb_idx { WX_ISB_MAX }; +/* Statistics counters collected by the MAC */ +struct wx_hw_stats { + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 tpr; + u64 tpt; + u64 bprc; + u64 bptc; + u64 mprc; + u64 mptc; + u64 roc; + u64 ruc; + u64 lxonoffrxc; + u64 lxontxc; + u64 lxofftxc; + u64 o2bgptc; + u64 b2ospc; + u64 o2bspc; + u64 b2ogprc; + u64 rdmdrop; + u64 crcerrs; + u64 rlec; + u64 qmprc; +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -918,6 +980,14 @@ struct wx { u32 wol; u16 bd_number; + + struct wx_hw_stats stats; + u64 tx_busy; + u64 non_eop_descs; + u64 restart_queue; + u64 hw_csum_rx_good; + u64 hw_csum_rx_error; + u64 alloc_rx_buff_failed; }; #define WX_INTR_ALL (~0ULL) @@ -951,6 +1021,17 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) wr32(wx, reg, val); } +static inline u64 +rd64(struct wx *wx, u32 reg) +{ + u64 lsb, msb; + + lsb = rd32(wx, reg); + msb = rd32(wx, reg + 4); + + return (lsb | msb << 32); +} + /* On some domestic CPU platforms, sometimes IO is not synchronized with * flushing memory, here use readl() to flush PCI read and write. */ -- Gitee From 30546402b404a84c8db5784c000afe79d7a3fcd5 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Tue, 12 Sep 2023 11:14:24 +0800 Subject: [PATCH 549/953] net: wangxun: move MDIO bus implementation to the library ANBZ: #8484 commit f557524029458ab7dd1c6077f35fa23fbb744356 upstream. Move similar code of accessing MDIO bus from txgbe/ngbe to libwx. 
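After the move, each driver only has to wire the shared accessors into
its mii_bus. A minimal sketch of such a registration, assuming the
standard mdiobus API; the helper signatures match the wx_hw.h
prototypes below, but the devm_* registration flow shown here is
illustrative, not this patch's code.

/* Hedged sketch of hooking the shared libwx MDIO accessors into a bus;
 * error handling trimmed, includes (<linux/phy.h>) assumed.
 */
static int wx_mdio_register_sketch(struct wx *wx)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&wx->pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "wx_mdio_bus";
	bus->priv = wx;				/* the helpers expect bus->priv == wx */
	bus->read = wx_phy_read_reg_mdi_c22;	/* clause 22 accessors */
	bus->write = wx_phy_write_reg_mdi_c22;
	bus->read_c45 = wx_phy_read_reg_mdi_c45;	/* clause 45 accessors */
	bus->write_c45 = wx_phy_write_reg_mdi_c45;
	snprintf(bus->id, MII_BUS_ID_SIZE, "wx-%x", pci_dev_id(wx->pdev));

	return devm_mdiobus_register(&wx->pdev->dev, bus);
}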
Signed-off-by: Jiawen Wu Reviewed-by: Simon Horman Signed-off-by: Paolo Abeni Link: https://lore.kernel.org/r/20230912031424.721386-1-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 92 ++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_hw.h | 7 ++ drivers/net/ethernet/wangxun/libwx/wx_type.h | 1 + drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 119 +----------------- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 3 - .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 56 +-------- 6 files changed, 106 insertions(+), 172 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index ec92e0023ff5..533e912af089 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -12,6 +12,98 @@ #include "wx_lib.h" #include "wx_hw.h" +static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22); + +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22); + +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45); + +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45); + static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; diff --git 
a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index d3a0c65ef3ef..12c20a7c364d 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,6 +4,13 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ +#include + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum); +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value); +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum); +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value); void wx_intr_enable(struct wx *wx, u64 qmask); void wx_irq_disable(struct wx *wx); int wx_check_flash_load(struct wx *wx, u32 check_bit); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 5386a418d3bc..83f9bb7b3c22 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -277,6 +277,7 @@ enum WX_MSCA_CMD_value { #define WX_MSCC_SADDR BIT(18) #define WX_MSCC_BUSY BIT(22) #define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) +#define WX_MDIO_CLAUSE_SELECT 0x11220 #define WX_MMC_CONTROL 0x11800 #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index 591f5b7b6da6..6302ecca71bb 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re return 0; } -static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c22 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c22 command did not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - 
WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) { struct wx *wx = bus->priv; @@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) if (wx->mac_type == em_mac_type_mdi) phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); else - phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum); return phy_data; } @@ -162,7 +51,7 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, if (wx->mac_type == em_mac_type_mdi) ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); else - ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); return ret; } @@ -262,8 +151,8 @@ int ngbe_mdio_init(struct wx *wx) mii_bus->priv = wx; if (wx->mac_type == em_mac_type_rgmii) { - mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; - mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + mii_bus->read_c45 = wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = wx_phy_write_reg_mdi_c45; } snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 72c8cd2d5575..ff754d69bdf6 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -59,9 +59,6 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E -/* Media-dependent registers. 
*/ -#define NGBE_MDIO_CLAUSE_SELECT 0x11220 - /* GPIO Registers */ #define NGBE_GPIO_DR 0x14800 #define NGBE_GPIO_DDR 0x14804 diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index e457ac9ae6d8..b95187a0847f 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -647,58 +647,6 @@ static int txgbe_sfp_register(struct txgbe *txgbe) return 0; } -static int txgbe_phy_read(struct mii_bus *bus, int phy_addr, - int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int txgbe_phy_write(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int txgbe_ext_phy_init(struct txgbe *txgbe) { struct phy_device *phydev; @@ -715,8 +663,8 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) return -ENOMEM; mii_bus->name = "txgbe_mii_bus"; - mii_bus->read_c45 = &txgbe_phy_read; - mii_bus->write_c45 = &txgbe_phy_write; + mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45; mii_bus->parent = &pdev->dev; mii_bus->phy_mask = GENMASK(31, 1); mii_bus->priv = wx; -- Gitee From 922922ce379aef773fd51661b4b947b57e4ef1f6 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 11 Oct 2023 17:19:05 +0800 Subject: [PATCH 550/953] net: txgbe: add ethtool stats support ANBZ: #8484 commit 9224ade6539096585d35378fe2817b10b2bd7dc5 upstream. Support to show ethtool statistics. 
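[ Editor's note: for readers unfamiliar with the ethtool contract these
  hooks satisfy: .get_sset_count reports how many statistics exist,
  .get_strings fills in their names, and .get_ethtool_stats fills
  matching u64 values in the same order, from counters kept current by
  wx_update_stats(). A stripped-down sketch of that contract follows;
  the stat table and handler names are hypothetical, and the real
  implementation is libwx's wx_ethtool.c. ]

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Hypothetical three-entry stat table; the names must line up with the
 * values written by the stats handler below.
 */
static const char my_stat_names[][ETH_GSTRING_LEN] = {
	"tx_busy", "non_eop_descs", "restart_queue",
};

static int my_get_sset_count(struct net_device *dev, int sset)
{
	return sset == ETH_SS_STATS ? ARRAY_SIZE(my_stat_names) : -EOPNOTSUPP;
}

static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		memcpy(data, my_stat_names, sizeof(my_stat_names));
}

static void my_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *stats, u64 *data)
{
	struct wx *wx = netdev_priv(dev);

	/* Order must match my_stat_names exactly. */
	data[0] = wx->tx_busy;
	data[1] = wx->non_eop_descs;
	data[2] = wx->restart_queue;
}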
Signed-off-by: Jiawen Wu Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231011091906.70486-3-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 5 +++++ drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 2 ++ drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 2 ++ 3 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 859da112586a..3f336a088e43 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -39,6 +39,11 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = txgbe_set_link_ksettings, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 372745250270..474d55524e82 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -306,6 +306,8 @@ int txgbe_reset_hw(struct wx *wx) txgbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 2482b661bc99..15fe3725670c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -286,6 +286,8 @@ static void txgbe_disable_device(struct wx *wx) /* Disable the Tx DMA engine */ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + + wx_update_stats(wx); } static void txgbe_down(struct wx *wx) -- Gitee From a1f64212e82ffca3794ade968df427d782da77c3 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 11 Oct 2023 17:19:06 +0800 Subject: [PATCH 551/953] net: ngbe: add ethtool stats support ANBZ: #8484 commit 0a2714d5e2d3bf57f42d2ee58a04416a42f84f89 upstream. Support to show ethtool statistics. 
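[ Editor's note: ngbe reuses the same shared hooks, so the per-driver
  work is only the wiring below. For .get_pause_stats specifically, the
  core contract is small; a sketch of its shape follows. The handler
  name and the exact counter mapping are assumptions for illustration;
  the real version lives in libwx's wx_ethtool.c, fed from the
  wx_hw_stats mirror (lxontxc/lxofftxc/lxonoffrxc) that
  wx_update_stats() maintains. ]

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical pause-stats handler: report link-level XON/XOFF frame
 * counts accumulated from the MAC statistics registers.
 */
static void my_get_pause_stats(struct net_device *dev,
			       struct ethtool_pause_stats *stats)
{
	struct wx *wx = netdev_priv(dev);

	stats->tx_pause_frames = wx->stats.lxontxc + wx->stats.lxofftxc;
	stats->rx_pause_frames = wx->stats.lxonoffrxc;
}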
Signed-off-by: Jiawen Wu Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231011091906.70486-4-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 5 +++++ drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c | 2 ++ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 2 ++ 3 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index ec0e869e9aac..afbdf6919071 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -49,6 +49,11 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_wol = ngbe_get_wol, .set_wol = ngbe_set_wol, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 6562a2de9527..6459bc1d7c22 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -85,6 +85,8 @@ int ngbe_reset_hw(struct wx *wx) } ngbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 2085b9c38a15..c2a2b6ef42dc 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -330,6 +330,8 @@ static void ngbe_disable_device(struct wx *wx) wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); } + + wx_update_stats(wx); } static void ngbe_down(struct wx *wx) -- Gitee From babc37adda551ebd15327b37efe292be2b51e21d Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Tue, 17 Oct 2023 18:06:35 +0800 Subject: [PATCH 552/953] net: wangxun: remove redundant kernel log ANBZ: #8484 commit 48e44287c6537e736baa2e1d7be520d6ec91840a upstream. Since PBA info can be read from lspci, delete txgbe_read_pba_string() and the prints. In addition, delete the redundant MAC address printing. Signed-off-by: Jiawen Wu Reviewed-by: Simon Horman Reviewed-by: Andrew Lunn Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231017100635.154967-1-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 5 - drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 108 ------------------ drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h | 1 - .../net/ethernet/wangxun/txgbe/txgbe_main.c | 8 -- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 6 - 5 files changed, 128 deletions(-) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index c2a2b6ef42dc..a5c623fd023e 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -678,11 +678,6 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, wx); - netif_info(wx, probe, netdev, - "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - wx->mac_type == em_mac_type_mdi ? 
"Internal" : "External"); - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_register: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 474d55524e82..d6b2b3c781b6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -70,114 +70,6 @@ static void txgbe_init_thermal_sensor_thresh(struct wx *wx) wr32(wx, WX_TS_DALARM_THRE, 614); } -/** - * txgbe_read_pba_string - Reads part number string from EEPROM - * @wx: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. - **/ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) -{ - u16 pba_ptr, offset, length, data; - int ret_val; - - if (!pba_num) { - wx_err(wx, "PBA string buffer was null\n"); - return -EINVAL; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, - &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, - &pba_ptr); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - /* if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wx, "NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return 0; - } - - ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - wx_err(wx, "NVM PBA number section invalid length\n"); - return -EINVAL; - } - - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return 0; -} - /** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index abc729eb187a..1f3ecf60e3c4 100644 --- 
a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -6,7 +6,6 @@ int txgbe_disable_sec_tx_path(struct wx *wx); void txgbe_enable_sec_tx_path(struct wx *wx); -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); int txgbe_reset_hw(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 15fe3725670c..a78da2309db5 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -538,7 +538,6 @@ static int txgbe_probe(struct pci_dev *pdev, u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; u16 build = 0, major = 0, patch = 0; - u8 part_str[TXGBE_PBANUM_LENGTH]; u32 etrack_id = 0; err = pci_enable_device_mem(pdev); @@ -737,13 +736,6 @@ static int txgbe_probe(struct pci_dev *pdev, else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); - /* First try to read PBA as a string */ - err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_remove_phy: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 51199c355f95..3ba9ce43f394 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -88,9 +88,6 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 -/* Part Number String Length */ -#define TXGBE_PBANUM_LENGTH 32 - /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -98,9 +95,6 @@ #define TXGBE_EEPROM_VERSION_L 0x1D #define TXGBE_EEPROM_VERSION_H 0x1E #define TXGBE_ISCSI_BOOT_CONFIG 0x07 -#define TXGBE_PBANUM0_PTR 0x05 -#define TXGBE_PBANUM1_PTR 0x06 -#define TXGBE_PBANUM_PTR_GUARD 0xFAFA #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 -- Gitee From 1fcd680b82ea6fa50e7561d947ea4ce7c6a72aec Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:47 +0800 Subject: [PATCH 553/953] net: libwx: add phylink to libwx ANBZ: #8484 commit e8e138cf7383cf820419fcbec63992e75a01467b upstream. For the following implementation, add struct phylink and phylink_config to wx structure. Add the helper function for converting phylink to wx, implement ethtool ksetting and nway reset in libwx. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20240103020854.1656604-2-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 26 +++++++++++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 5 ++++ drivers/net/ethernet/wangxun/libwx/wx_type.h | 8 ++++++ 3 files changed, 39 insertions(+) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index ddc5f6d20b9c..12feb8a5ee75 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -185,3 +185,29 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) } } EXPORT_SYMBOL(wx_get_drvinfo); + +int wx_nway_reset(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_nway_reset(wx->phylink); +} +EXPORT_SYMBOL(wx_nway_reset); + +int wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_get(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_get_link_ksettings); + +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_set(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_set_link_ksettings); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 16d1a09369a6..f15cc445ae0f 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -13,4 +13,9 @@ void wx_get_mac_stats(struct net_device *netdev, void wx_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); +int wx_nway_reset(struct net_device *netdev); +int wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 83f9bb7b3c22..5b064c434053 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #define WX_NCSI_SUP 0x8000 @@ -939,6 +940,8 @@ struct wx { int speed; int duplex; struct phy_device *phydev; + struct phylink *phylink; + struct phylink_config phylink_config; bool wol_hw_supported; bool ncsi_enabled; @@ -1044,4 +1047,9 @@ rd64(struct wx *wx, u32 reg) #define wx_dbg(wx, fmt, arg...) \ dev_dbg(&(wx)->pdev->dev, fmt, ##arg) +static inline struct wx *phylink_to_wx(struct phylink_config *config) +{ + return container_of(config, struct wx, phylink_config); +} + #endif /* _WX_TYPE_H_ */ -- Gitee From 6190900b0e150af4946156619be55d9100916351 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:48 +0800 Subject: [PATCH 554/953] net: txgbe: use phylink bits added in libwx ANBZ: #8484 commit 4491c602fe5f3a248cc8a2ed4180aacdc2162365 upstream. Convert txgbe to use phylink and phylink_config added in libwx. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20231222101639.1499997-3-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 29 ++----------- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 8 +--- .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 43 +++++++++---------- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 8 ---- 4 files changed, 26 insertions(+), 62 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 3f336a088e43..60f351a3b89d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -10,35 +10,12 @@ #include "txgbe_type.h" #include "txgbe_ethtool.h" -static int txgbe_nway_reset(struct net_device *netdev) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); - - return phylink_ethtool_nway_reset(txgbe->phylink); -} - -static int txgbe_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); - - return phylink_ethtool_ksettings_get(txgbe->phylink, cmd); -} - -static int txgbe_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); - - return phylink_ethtool_ksettings_set(txgbe->phylink, cmd); -} - static const struct ethtool_ops txgbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, - .nway_reset = txgbe_nway_reset, + .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, - .get_link_ksettings = txgbe_get_link_ksettings, - .set_link_ksettings = txgbe_set_link_ksettings, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, .get_sset_count = wx_get_sset_count, .get_strings = wx_get_strings, .get_ethtool_stats = wx_get_ethtool_stats, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index a78da2309db5..1007ae2541ce 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx) static void txgbe_up_complete(struct wx *wx) { struct net_device *netdev = wx->netdev; - struct txgbe *txgbe; wx_control_hw(wx, true); wx_configure_vectors(wx); @@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); wx_napi_enable_all(wx); - txgbe = netdev_to_txgbe(netdev); - phylink_start(txgbe->phylink); + phylink_start(wx->phylink); /* clear any pending interrupts, may auto mask */ rd32(wx, WX_PX_IC(0)); @@ -292,11 +290,9 @@ static void txgbe_disable_device(struct wx *wx) static void txgbe_down(struct wx *wx) { - struct txgbe *txgbe = netdev_to_txgbe(wx->netdev); - txgbe_disable_device(wx); txgbe_reset(wx); - phylink_stop(txgbe->phylink); + phylink_stop(wx->phylink); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index b95187a0847f..ef74b2667d5d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -159,7 +159,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe) static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, phy_interface_t interface) { - struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); + 
struct txgbe *txgbe = wx->priv; if (interface == PHY_INTERFACE_MODE_10GBASER) return &txgbe->xpcs->pcs; @@ -175,7 +176,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, static void txgbe_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); } @@ -186,7 +187,7 @@ static void txgbe_mac_link_up(struct phylink_config *config, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); u32 txcfg, wdg; txcfg = rd32(wx, WX_MAC_TX_CFG); @@ -217,7 +218,7 @@ static void txgbe_mac_link_up(struct phylink_config *config, static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); @@ -228,7 +229,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); txgbe_enable_sec_tx_path(wx); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); @@ -253,10 +254,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) phy_interface_t phy_mode; struct phylink *phylink; - config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; - + config = &wx->phylink_config; config->dev = &wx->netdev->dev; config->type = PHYLINK_NETDEV; config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | @@ -287,7 +285,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) } } - txgbe->phylink = phylink; + wx->phylink = phylink; return 0; } @@ -483,7 +481,7 @@ static void txgbe_irq_handler(struct irq_desc *desc) TXGBE_PX_MISC_ETH_AN)) { u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); - phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); + phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); } /* unmask interrupt */ @@ -701,6 +699,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) int txgbe_init_phy(struct txgbe *txgbe) { + struct wx *wx = txgbe->wx; int ret; if (txgbe->wx->media_type == sp_media_copper) @@ -708,43 +707,43 @@ int txgbe_init_phy(struct txgbe *txgbe) ret = txgbe_swnodes_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register software nodes\n"); + wx_err(wx, "failed to register software nodes\n"); return ret; } ret = txgbe_mdio_pcs_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret); + wx_err(wx, "failed to init mdio pcs: %d\n", ret); goto err_unregister_swnode; } ret = txgbe_phylink_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init phylink\n"); + wx_err(wx, "failed to init phylink\n"); goto err_destroy_xpcs; } ret = txgbe_gpio_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init gpio\n"); + wx_err(wx, "failed to init gpio\n"); goto err_destroy_phylink; } ret = txgbe_clock_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register clock: %d\n", ret); + wx_err(wx, "failed to register clock: %d\n", ret); goto err_destroy_phylink; } ret = txgbe_i2c_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to 
init i2c interface: %d\n", ret); + wx_err(wx, "failed to init i2c interface: %d\n", ret); goto err_unregister_clk; } ret = txgbe_sfp_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register sfp\n"); + wx_err(wx, "failed to register sfp\n"); goto err_unregister_i2c; } @@ -756,7 +755,7 @@ int txgbe_init_phy(struct txgbe *txgbe) clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); err_destroy_phylink: - phylink_destroy(txgbe->phylink); + phylink_destroy(wx->phylink); err_destroy_xpcs: xpcs_destroy(txgbe->xpcs); err_unregister_swnode: @@ -768,8 +767,8 @@ int txgbe_init_phy(struct txgbe *txgbe) void txgbe_remove_phy(struct txgbe *txgbe) { if (txgbe->wx->media_type == sp_media_copper) { - phylink_disconnect_phy(txgbe->phylink); - phylink_destroy(txgbe->phylink); + phylink_disconnect_phy(txgbe->wx->phylink); + phylink_destroy(txgbe->wx->phylink); return; } @@ -777,7 +776,7 @@ void txgbe_remove_phy(struct txgbe *txgbe) platform_device_unregister(txgbe->i2c_dev); clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); - phylink_destroy(txgbe->phylink); + phylink_destroy(txgbe->wx->phylink); xpcs_destroy(txgbe->xpcs); software_node_unregister_node_group(txgbe->nodes.group); } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 3ba9ce43f394..5494ea88df0a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -129,13 +129,6 @@ extern char txgbe_driver_name[]; -static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - - return wx->priv; -} - #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ .name = _NAME, \ @@ -175,7 +168,6 @@ struct txgbe { struct wx *wx; struct txgbe_nodes nodes; struct dw_xpcs *xpcs; - struct phylink *phylink; struct platform_device *sfp_dev; struct platform_device *i2c_dev; struct clk_lookup *clock; -- Gitee From eab79e157cd957622d2e7a14eb210b58798b8272 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:49 +0800 Subject: [PATCH 555/953] net: ngbe: convert phylib to phylink ANBZ: #8484 commit bc2426d74aa35cd8ec9c97a253ef57c2c5cd730c upstream. Implement phylink in ngbe driver, to handle phy uniformly for Wangxun ethernet devices. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20240103020854.1656604-4-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 6 +- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 12 +- drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 114 +++++++++--------- drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h | 1 - 4 files changed, 70 insertions(+), 63 deletions(-) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index afbdf6919071..0f87898a55b2 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -44,9 +44,9 @@ static int ngbe_set_wol(struct net_device *netdev, static const struct ethtool_ops ngbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, - .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, + .nway_reset = wx_nway_reset, .get_wol = ngbe_get_wol, .set_wol = ngbe_set_wol, .get_sset_count = wx_get_sset_count, diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a5c623fd023e..db5cae8384e5 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -336,7 +336,7 @@ static void ngbe_disable_device(struct wx *wx) static void ngbe_down(struct wx *wx) { - phy_stop(wx->phydev); + phylink_stop(wx->phylink); ngbe_disable_device(wx); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); @@ -359,7 +359,7 @@ static void ngbe_up(struct wx *wx) if (wx->gpio_ctrl) ngbe_sfp_modules_txrx_powerctl(wx, true); - phy_start(wx->phydev); + phylink_start(wx->phylink); } /** @@ -388,7 +388,7 @@ static int ngbe_open(struct net_device *netdev) if (err) goto err_free_resources; - err = ngbe_phy_connect(wx); + err = phylink_connect_phy(wx->phylink, wx->phydev); if (err) goto err_free_irq; @@ -404,7 +404,7 @@ static int ngbe_open(struct net_device *netdev) return 0; err_dis_phy: - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); err_free_irq: wx_free_irq(wx); err_free_resources: @@ -430,7 +430,7 @@ static int ngbe_close(struct net_device *netdev) ngbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); wx_control_hw(wx, false); return 0; @@ -681,6 +681,7 @@ static int ngbe_probe(struct pci_dev *pdev, return 0; err_register: + phylink_destroy(wx->phylink); wx_control_hw(wx, false); err_clear_interrupt_scheme: wx_clear_interrupt_scheme(wx); @@ -710,6 +711,7 @@ static void ngbe_remove(struct pci_dev *pdev) netdev = wx->netdev; unregister_netdev(netdev); + phylink_destroy(wx->phylink); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index 6302ecca71bb..cc75856f231a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -56,22 +56,26 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, return ret; } -static void ngbe_handle_link_change(struct net_device *dev) +static void ngbe_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { 
- struct wx *wx = netdev_priv(dev); - struct phy_device *phydev; - u32 lan_speed, reg; +} + +static void ngbe_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ +} - phydev = wx->phydev; - if (!(wx->link != phydev->link || - wx->speed != phydev->speed || - wx->duplex != phydev->duplex)) - return; +static void ngbe_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = phylink_to_wx(config); + u32 lan_speed, reg; - wx->link = phydev->link; - wx->speed = phydev->speed; - wx->duplex = phydev->duplex; - switch (phydev->speed) { + switch (speed) { case SPEED_10: lan_speed = 0; break; @@ -83,54 +87,51 @@ static void ngbe_handle_link_change(struct net_device *dev) lan_speed = 2; break; } + wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); - if (phydev->link) { - reg = rd32(wx, WX_MAC_TX_CFG); - reg &= ~WX_MAC_TX_CFG_SPEED_MASK; - reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; - wr32(wx, WX_MAC_TX_CFG, reg); - /* Re configure MAC RX */ - reg = rd32(wx, WX_MAC_RX_CFG); - wr32(wx, WX_MAC_RX_CFG, reg); - wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - reg = rd32(wx, WX_MAC_WDG_TIMEOUT); - wr32(wx, WX_MAC_WDG_TIMEOUT, reg); - } - phy_print_status(phydev); + reg = rd32(wx, WX_MAC_TX_CFG); + reg &= ~WX_MAC_TX_CFG_SPEED_MASK; + reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; + wr32(wx, WX_MAC_TX_CFG, reg); + + /* Re configure MAC Rx */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); } -int ngbe_phy_connect(struct wx *wx) +static const struct phylink_mac_ops ngbe_mac_ops = { + .mac_config = ngbe_mac_config, + .mac_link_down = ngbe_mac_link_down, + .mac_link_up = ngbe_mac_link_up, +}; + +static int ngbe_phylink_init(struct wx *wx) { - int ret; + struct phylink_config *config; + phy_interface_t phy_mode; + struct phylink *phylink; - ret = phy_connect_direct(wx->netdev, - wx->phydev, - ngbe_handle_link_change, - PHY_INTERFACE_MODE_RGMII_ID); - if (ret) { - wx_err(wx, "PHY connect failed.\n"); - return ret; - } + config = &wx->phylink_config; + config->dev = &wx->netdev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + config->mac_managed_pm = true; - return 0; -} + phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces); -static void ngbe_phy_fixup(struct wx *wx) -{ - struct phy_device *phydev = wx->phydev; - struct ethtool_eee eee; - - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - phydev->mac_managed_pm = true; - if (wx->mac_type != em_mac_type_mdi) - return; - /* disable EEE, internal phy does not support eee */ - memset(&eee, 0, sizeof(eee)); - phy_ethtool_set_eee(phydev, &eee); + phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + wx->phylink = phylink; + + return 0; } int ngbe_mdio_init(struct wx *wx) @@ -165,11 +166,16 @@ int ngbe_mdio_init(struct wx *wx) return -ENODEV; phy_attached_info(wx->phydev); - ngbe_phy_fixup(wx); wx->link = 0; wx->speed = 0; wx->duplex = 0; + ret = ngbe_phylink_init(wx); + if (ret) { + 
wx_err(wx, "failed to init phylink: %d\n", ret); + return ret; + } + return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h index 0a6400dd89c4..f610b771888a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h @@ -7,6 +7,5 @@ #ifndef _NGBE_MDIO_H_ #define _NGBE_MDIO_H_ -int ngbe_phy_connect(struct wx *wx); int ngbe_mdio_init(struct wx *wx); #endif /* _NGBE_MDIO_H_ */ -- Gitee From afcdd61b8e21723810242005f62a4c66ad589b2c Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:50 +0800 Subject: [PATCH 556/953] net: wangxun: add flow control support ANBZ: #8484 commit 2fe2ca09da953bac778eab5dfb309b4e7d274b1a upstream. Add support to set pause params with ethtool -A and get pause params with ethtool -a, for ethernet driver txgbe and ngbe. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. Miller Link: https://lore.kernel.org/all/20240103020854.1656604-5-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 18 ++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 4 + drivers/net/ethernet/wangxun/libwx/wx_hw.c | 172 ++++++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_hw.h | 1 + drivers/net/ethernet/wangxun/libwx/wx_type.h | 48 +++++ .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 2 + drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 2 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 2 + .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 2 + 9 files changed, 251 insertions(+) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 12feb8a5ee75..e4d2bbf7dad6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -211,3 +211,21 @@ int wx_set_link_ksettings(struct net_device *netdev, return phylink_ethtool_ksettings_set(wx->phylink, cmd); } EXPORT_SYMBOL(wx_set_link_ksettings); + +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + phylink_ethtool_get_pauseparam(wx->phylink, pause); +} +EXPORT_SYMBOL(wx_get_pauseparam); + +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_set_pauseparam(wx->phylink, pause); +} +EXPORT_SYMBOL(wx_set_pauseparam); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index f15cc445ae0f..7d3d85f212eb 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -18,4 +18,8 @@ int wx_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd); int wx_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd); +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 533e912af089..d11f7d8db194 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1158,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx) wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); } 
+#define WX_ETH_FRAMING 20 + +/** + * wx_hpbthresh - calculate high water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_hpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING; + tc = link; + + /* Calculate delay value for device */ + dv_id = WX_DV(link, tc); + + /* Delay value is calculated in bit times convert to KB */ + kb = WX_BT2KB(dv_id); + rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide required headroom. In this case throw an error + * to user and a do the best we can. + */ + if (marker < 0) { + dev_warn(&wx->pdev->dev, + "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n"); + marker = tc + 1; + } + + return marker; +} + +/** + * wx_lpbthresh - calculate low water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_lpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + u32 dv_id; + int tc; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = WX_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + return WX_BT2KB(dv_id); +} + +/** + * wx_pbthresh_setup - calculate and setup high low water marks + * + * @wx: board private structure to calculate for + **/ +static void wx_pbthresh_setup(struct wx *wx) +{ + wx->fc.high_water = wx_hpbthresh(wx); + wx->fc.low_water = wx_lpbthresh(wx); + + /* Low water marks must not be larger than high water marks */ + if (wx->fc.low_water > wx->fc.high_water) + wx->fc.low_water = 0; +} + static void wx_configure_port(struct wx *wx) { u32 value, i; @@ -1584,6 +1659,7 @@ static void wx_configure_isb(struct wx *wx) void wx_configure(struct wx *wx) { wx_set_rxpba(wx); + wx_pbthresh_setup(wx); wx_configure_port(wx); wx_set_rx_mode(wx->netdev); @@ -2003,6 +2079,102 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) } EXPORT_SYMBOL(wx_vlan_rx_kill_vid); +static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl |= WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl &= ~WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause) +{ + u16 pause_time = WX_DEFAULT_FCPAUSE; + u32 mflcn_reg, fccfg_reg, reg; + u32 fcrtl, fcrth; + int i; + + /* Low water mark of zero causes XOFF floods */ + if (tx_pause && wx->fc.high_water) { + if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) { + wx_err(wx, "Invalid water mark configuration\n"); + return -EINVAL; + } + } + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE; + + fccfg_reg = rd32(wx, WX_RDB_RFCC); + fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X; + + if (rx_pause) + mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE; + if (tx_pause) + fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X; + + /* Set 802.3x based flow 
control settings. */ + wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(wx, WX_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if (tx_pause && wx->fc.high_water) { + fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE; + wr32(wx, WX_RDB_RFCL, fcrtl); + fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE; + } else { + wr32(wx, WX_RDB_RFCL, 0); + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576; + } + + wr32(wx, WX_RDB_RFCH, fcrth); + + /* Configure pause time */ + reg = pause_time * 0x00010001; + wr32(wx, WX_RDB_RFCV, reg); + + /* Configure flow control refresh threshold value */ + wr32(wx, WX_RDB_RFCRT, pause_time / 2); + + /* We should set the drop enable bit if: + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (wx->num_rx_queues > 1 && !tx_pause) { + for (i = 0; i < wx->num_rx_queues; i++) + wx_enable_rx_drop(wx, wx->rx_ring[i]); + } else { + for (i = 0; i < wx->num_rx_queues; i++) + wx_disable_rx_drop(wx, wx->rx_ring[i]); + } + + return 0; +} +EXPORT_SYMBOL(wx_fc_enable); + /** * wx_update_stats - Update the board statistics counters. * @wx: board private structure diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 12c20a7c364d..9e219fa717a2 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -41,6 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause); void wx_update_stats(struct wx *wx); void wx_clear_hw_cntrs(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 5b064c434053..561f752defec 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -131,6 +131,15 @@ #define WX_RDB_PFCMACDAH 0x19214 #define WX_RDB_LXOFFTXC 0x19218 #define WX_RDB_LXONTXC 0x1921C +/* Flow Control Registers */ +#define WX_RDB_RFCV 0x19200 +#define WX_RDB_RFCL 0x19220 +#define WX_RDB_RFCL_XONE BIT(31) +#define WX_RDB_RFCH 0x19260 +#define WX_RDB_RFCH_XOFFE BIT(31) +#define WX_RDB_RFCRT 0x192A0 +#define WX_RDB_RFCC 0x192A4 +#define WX_RDB_RFCC_RFCE_802_3X BIT(3) /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -331,6 +340,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) +#define WX_PX_RR_CFG_DROP_EN BIT(30) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) #define WX_PX_RR_CFG_RR_THER_SHIFT 16 #define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) @@ -368,6 +378,38 @@ enum WX_MSCA_CMD_value { #define WX_MAC_STATE_MODIFIED 0x2 #define WX_MAC_STATE_IN_USE 0x4 +/* BitTimes (BT) conversion */ +#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) +#define WX_B2BT(BT) ((BT) * 8) + +/* Calculate Delay to respond to PFC */ +#define WX_PFC_D 672 +/* Calculate Cable Delay */ +#define 
WX_CABLE_DC 5556 /* Delay Copper */ +/* Calculate Delay incurred from higher layer */ +#define WX_HD 6144 + +/* Calculate Interface Delay */ +#define WX_PHY_D 12800 +#define WX_MAC_D 4096 +#define WX_XAUI_D (2 * 1024) +#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D) +/* Calculate PCI Bus delay for low thresholds */ +#define WX_PCI_DELAY 10000 + +/* Calculate delay value in bit times */ +#define WX_DV(_max_frame_link, _max_frame_tc) \ + ((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \ + (2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \ + 2 * WX_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define WX_LOW_DV(_max_frame_tc) \ + (2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1)) + +/* flow control */ +#define WX_DEFAULT_FCPAUSE 0xFFFF + #define WX_MAX_RXD 8192 #define WX_MAX_TXD 8192 @@ -880,6 +922,11 @@ enum wx_isb_idx { WX_ISB_MAX }; +struct wx_fc_info { + u32 high_water; /* Flow Ctrl High-water */ + u32 low_water; /* Flow Ctrl Low-water */ +}; + /* Statistics counters collected by the MAC */ struct wx_hw_stats { u64 gprc; @@ -920,6 +967,7 @@ struct wx { enum sp_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; + struct wx_fc_info fc; struct wx_mac_addr *mac_table; u16 device_id; u16 vendor_id; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 0f87898a55b2..9a89f9576180 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -54,6 +54,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .get_ethtool_stats = wx_get_ethtool_stats, .get_eth_mac_stats = wx_get_mac_stats, .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index cc75856f231a..ec54b18c5fe7 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -75,6 +75,8 @@ static void ngbe_mac_link_up(struct phylink_config *config, struct wx *wx = phylink_to_wx(config); u32 lan_speed, reg; + wx_fc_enable(wx, tx_pause, rx_pause); + switch (speed) { case SPEED_10: lan_speed = 0; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 60f351a3b89d..cdaa19528248 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -21,6 +21,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_ethtool_stats = wx_get_ethtool_stats, .get_eth_mac_stats = wx_get_mac_stats, .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index ef74b2667d5d..328d9a9eb3db 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -190,6 +190,8 @@ static void txgbe_mac_link_up(struct phylink_config *config, struct wx *wx = phylink_to_wx(config); u32 txcfg, wdg; + wx_fc_enable(wx, tx_pause, rx_pause); + txcfg = rd32(wx, WX_MAC_TX_CFG); txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK; -- Gitee From 8f558752bd10b2b5abc9a8846505fb00221878a6 Mon Sep 17 00:00:00 2001 From: Jiawen 
Wu Date: Wed, 3 Jan 2024 10:08:51 +0800 Subject: [PATCH 557/953] net: wangxun: add ethtool_ops for ring parameters ANBZ: #8484 commit 883b5984a5d2900468af5ab979cae90547a78da4 upstream. Support to query RX/TX depth with ethtool -g, and change RX/TX depth with ethtool -G. Signed-off-by: Jiawen Wu Signed-off-by: David S. Miller Link: https://lore.kernel.org/all/20231212080438.1361308-6-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 18 +++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 4 ++ drivers/net/ethernet/wangxun/libwx/wx_lib.c | 66 +++++++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_lib.h | 2 + drivers/net/ethernet/wangxun/libwx/wx_type.h | 6 ++ .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 53 +++++++++++++++ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 4 +- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 3 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 50 ++++++++++++++ .../net/ethernet/wangxun/txgbe/txgbe_main.c | 8 ++- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 3 + 11 files changed, 214 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index e4d2bbf7dad6..77da6111fbce 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -229,3 +229,21 @@ int wx_set_pauseparam(struct net_device *netdev, return phylink_ethtool_set_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_set_pauseparam); + +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ring->rx_max_pending = WX_MAX_RXD; + ring->tx_max_pending = WX_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = wx->rx_ring_count; + ring->tx_pending = wx->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} +EXPORT_SYMBOL(wx_get_ringparam); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 7d3d85f212eb..7651ec4b7dd9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -22,4 +22,8 @@ void wx_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause); int wx_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause); +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 347d3cec02a3..b0b1ac545d5d 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2671,4 +2671,70 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) } EXPORT_SYMBOL(wx_set_features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring) +{ + int i, err = 0; + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ + if (new_tx_count != wx->tx_ring_count) { + for (i = 0; i < wx->num_tx_queues; i++) { + memcpy(&temp_ring[i], wx->tx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_tx_count; + err = wx_setup_tx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new tx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_tx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_tx_queues; i++) { + wx_free_tx_resources(wx->tx_ring[i]); + + memcpy(wx->tx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != wx->rx_ring_count) { + for (i = 0; i < wx->num_rx_queues; i++) { + memcpy(&temp_ring[i], wx->rx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_rx_count; + err = wx_setup_rx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new rx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_rx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_rx_queues; i++) { + wx_free_rx_resources(wx->rx_ring[i]); + memcpy(wx->rx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->rx_ring_count = new_rx_count; + } +} +EXPORT_SYMBOL(wx_set_ring); + MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index df1f4a5951f0..af1381c13d9e 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -29,5 +29,7 @@ int wx_setup_resources(struct wx *wx); void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); int wx_set_features(struct net_device *netdev, netdev_features_t features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring); #endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 561f752defec..24588bc1eb57 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -412,6 +412,12 @@ enum WX_MSCA_CMD_value { #define WX_MAX_RXD 8192 #define WX_MAX_TXD 8192 +#define WX_MIN_RXD 128 +#define WX_MIN_TXD 128 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ #define VMDQ_P(p) p diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 9a89f9576180..52d4167dcabe 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -7,7 +7,10 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" #include "ngbe_ethtool.h" +#include "ngbe_type.h" static void ngbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -41,6 +44,54 @@ static int ngbe_set_wol(struct net_device *netdev, return 0; } +static int ngbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; + + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = 
ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; + + if (!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + ngbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + wx_configure(wx); + ngbe_up(wx); + + return 0; +} + static const struct ethtool_ops ngbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, @@ -56,6 +107,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .get_pause_stats = wx_get_pause_stats, .get_pauseparam = wx_get_pauseparam, .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = ngbe_set_ringparam, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index db5cae8384e5..96d80c595cb8 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -334,7 +334,7 @@ static void ngbe_disable_device(struct wx *wx) wx_update_stats(wx); } -static void ngbe_down(struct wx *wx) +void ngbe_down(struct wx *wx) { phylink_stop(wx->phylink); ngbe_disable_device(wx); @@ -342,7 +342,7 @@ static void ngbe_down(struct wx *wx) wx_clean_all_rx_rings(wx); } -static void ngbe_up(struct wx *wx) +void ngbe_up(struct wx *wx) { wx_configure_vectors(wx); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index ff754d69bdf6..0a98080a197a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -130,4 +130,7 @@ extern char ngbe_driver_name[]; +void ngbe_down(struct wx *wx); +void ngbe_up(struct wx *wx); + #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index cdaa19528248..bd817248a831 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -7,9 +7,57 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" #include "txgbe_type.h" #include "txgbe_ethtool.h" +static int txgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; + + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; + + if 
(!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + txgbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + txgbe_up(wx); + + return 0; +} + static const struct ethtool_ops txgbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .nway_reset = wx_nway_reset, @@ -23,6 +71,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_pause_stats = wx_get_pause_stats, .get_pauseparam = wx_get_pauseparam, .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = txgbe_set_ringparam, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 1007ae2541ce..bcc47bc6264a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -288,7 +288,7 @@ static void txgbe_disable_device(struct wx *wx) wx_update_stats(wx); } -static void txgbe_down(struct wx *wx) +void txgbe_down(struct wx *wx) { txgbe_disable_device(wx); txgbe_reset(wx); @@ -298,6 +298,12 @@ static void txgbe_down(struct wx *wx) wx_clean_all_rx_rings(wx); } +void txgbe_up(struct wx *wx) +{ + wx_configure(wx); + txgbe_up_complete(wx); +} + /** * txgbe_init_type_code - Initialize the shared code * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 5494ea88df0a..801fd0aed1ff 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -129,6 +129,9 @@ extern char txgbe_driver_name[]; +void txgbe_down(struct wx *wx); +void txgbe_up(struct wx *wx); + #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ .name = _NAME, \ -- Gitee From 447c4d6795106821575bac5d23004723291ec0c0 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:52 +0800 Subject: [PATCH 558/953] net: wangxun: add coalesce options support ANBZ: #8484 commit 4ac2d9dff4b01fb210f951dcb67badcc2a1aa427 upstream. Support to show RX/TX coalesce with ethtool -c and set RX/TX coalesce with ethtool -C. Signed-off-by: Jiawen Wu Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20240103020854.1656604-7-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 101 ++++++++++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 8 ++ drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2 +- drivers/net/ethernet/wangxun/libwx/wx_lib.h | 1 + drivers/net/ethernet/wangxun/libwx/wx_type.h | 1 + .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 4 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 4 + 7 files changed, 120 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 77da6111fbce..ccc3f1697a76 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -8,6 +8,7 @@ #include "wx_type.h" #include "wx_ethtool.h" #include "wx_hw.h" +#include "wx_lib.h" struct wx_stats { char stat_string[ETH_GSTRING_LEN]; @@ -247,3 +248,103 @@ void wx_get_ringparam(struct net_device *netdev, ring->rx_jumbo_pending = 0; } EXPORT_SYMBOL(wx_get_ringparam); + +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = wx->tx_work_limit; + /* only valid if in constant ITR mode */ + if (wx->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = wx->rx_itr_setting; + else + ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (wx->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = wx->tx_itr_setting; + else + ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2; + + return 0; +} +EXPORT_SYMBOL(wx_get_coalesce); + +int wx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u16 tx_itr_param, rx_itr_param; + struct wx_q_vector *q_vector; + u16 max_eitr; + int i; + + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EOPNOTSUPP; + } + + if (ec->tx_max_coalesced_frames_irq) + wx->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if (wx->mac.type == wx_mac_sp) + max_eitr = WX_SP_MAX_EITR; + else + max_eitr = WX_EM_MAX_EITR; + + if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) || + (ec->tx_coalesce_usecs > (max_eitr >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + wx->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + wx->rx_itr_setting = ec->rx_coalesce_usecs; + + if (wx->rx_itr_setting == 1) + rx_itr_param = WX_20K_ITR; + else + rx_itr_param = wx->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + wx->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + wx->tx_itr_setting = ec->tx_coalesce_usecs; + + if (wx->tx_itr_setting == 1) { + if (wx->mac.type == wx_mac_sp) + tx_itr_param = WX_12K_ITR; + else + tx_itr_param = WX_20K_ITR; + } else { + tx_itr_param = wx->tx_itr_setting; + } + + /* mixed Rx/Tx */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + wx->tx_itr_setting = wx->rx_itr_setting; + + for (i = 0; i < wx->num_q_vectors; i++) { + q_vector = wx->q_vector[i]; + if 
(q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + wx_write_eitr(q_vector); + } + + return 0; +} +EXPORT_SYMBOL(wx_set_coalesce); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 7651ec4b7dd9..3cd0495a6fbb 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -26,4 +26,12 @@ void wx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack); +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int wx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index b0b1ac545d5d..b738884c901a 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2082,7 +2082,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here. */ -static void wx_write_eitr(struct wx_q_vector *q_vector) +void wx_write_eitr(struct wx_q_vector *q_vector) { struct wx *wx = q_vector->wx; int v_idx = q_vector->v_idx; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index af1381c13d9e..ec909e876720 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx); int wx_setup_isb_resources(struct wx *wx); void wx_free_isb_resources(struct wx *wx); u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); +void wx_write_eitr(struct wx_q_vector *q_vector); void wx_configure_vectors(struct wx *wx); void wx_clean_all_rx_rings(struct wx *wx); void wx_clean_all_tx_rings(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 24588bc1eb57..17cdffe388d0 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -315,6 +315,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ #define WX_7K_ITR 595 #define WX_12K_ITR 336 +#define WX_20K_ITR 200 #define WX_SP_MAX_EITR 0x00000FF8U #define WX_EM_MAX_EITR 0x00007FFCU diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 52d4167dcabe..81cb1c23fa84 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -93,6 +93,8 @@ static int ngbe_set_ringparam(struct net_device *netdev, } static const struct ethtool_ops ngbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = wx_get_link_ksettings, @@ -109,6 +111,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .set_pauseparam = wx_set_pauseparam, .get_ringparam = wx_get_ringparam, .set_ringparam = ngbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + 
.set_coalesce = wx_set_coalesce, }; void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index bd817248a831..9a6856cca411 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -59,6 +59,8 @@ static int txgbe_set_ringparam(struct net_device *netdev, } static const struct ethtool_ops txgbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, @@ -73,6 +75,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_pauseparam = wx_set_pauseparam, .get_ringparam = wx_get_ringparam, .set_ringparam = txgbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + .set_coalesce = wx_set_coalesce, }; void txgbe_set_ethtool_ops(struct net_device *netdev)
-- Gitee From 13302d58dc04213612d8a239033c4d1b8977c294 Mon Sep 17 00:00:00 2001
From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:54 +0800
Subject: [PATCH 559/953] net: wangxun: add ethtool_ops for msglevel
ANBZ: #8484
commit b746dc6bdde5a9a03309f208733a08665d4a0cb4 upstream.
Add support to get and set msglevel for the txgbe and ngbe drivers.
Signed-off-by: Jiawen Wu Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Link: https://lore.kernel.org/all/20240103020854.1656604-9-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984
--- drivers/net/ethernet/wangxun/libwx/wx_ethtool.c | 16 ++++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_ethtool.h | 2 ++ drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 2 ++ .../net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 2 ++ 4 files changed, 22 insertions(+)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index ccc3f1697a76..f3c7e19dff5c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -348,3 +348,19 @@ int wx_set_coalesce(struct net_device *netdev, return 0; } EXPORT_SYMBOL(wx_set_coalesce); + +u32 wx_get_msglevel(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return wx->msg_enable; +} +EXPORT_SYMBOL(wx_get_msglevel); + +void wx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct wx *wx = netdev_priv(netdev); + + wx->msg_enable = data; +} +EXPORT_SYMBOL(wx_set_msglevel);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 3cd0495a6fbb..d79157532d3d 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -34,4 +34,6 @@ int wx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack); +u32 wx_get_msglevel(struct net_device *netdev); +void wx_set_msglevel(struct net_device *netdev, u32 data); #endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 81cb1c23fa84..5800bd8c8696 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -113,6 +113,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .set_ringparam = ngbe_set_ringparam, .get_coalesce = wx_get_coalesce, .set_coalesce = wx_set_coalesce, + .get_msglevel =
wx_get_msglevel, + .set_msglevel = wx_set_msglevel, }; void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 9a6856cca411..fa83cac320d3 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -77,6 +77,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_ringparam = txgbe_set_ringparam, .get_coalesce = wx_get_coalesce, .set_coalesce = wx_set_coalesce, + .get_msglevel = wx_get_msglevel, + .set_msglevel = wx_set_msglevel, }; void txgbe_set_ethtool_ops(struct net_device *netdev)
-- Gitee From 17ee2e2efd6141f4d0a57ce00a1b007107383f86 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann Date: Thu, 11 Jan 2024 17:27:53 +0100
Subject: [PATCH 560/953] wangxun: select CONFIG_PHYLINK where needed
ANBZ: #8484
commit b3739fb3a9e6633b233d829ee799323d75162775 upstream.
The ngbe driver needs phylink: arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/libwx/wx_ethtool.o: in function `wx_nway_reset': wx_ethtool.c:(.text+0x458): undefined reference to `phylink_ethtool_nway_reset' arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/ngbe/ngbe_main.o: in function `ngbe_remove': ngbe_main.c:(.text+0x7c): undefined reference to `phylink_destroy' arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/ngbe/ngbe_main.o: in function `ngbe_open': ngbe_main.c:(.text+0xf90): undefined reference to `phylink_connect_phy' arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.o: in function `ngbe_mdio_init': ngbe_mdio.c:(.text+0x314): undefined reference to `phylink_create'
Add the missing Kconfig description for this.
Fixes: bc2426d74aa3 ("net: ngbe: convert phylib to phylink")
Signed-off-by: Arnd Bergmann Reviewed-by: Andrew Lunn Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20240111162828.68564-1-arnd@kernel.org Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984
--- drivers/net/ethernet/wangxun/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 23cd610bd376..85cdbdd44fec 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -26,7 +26,7 @@ config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI select LIBWX - select PHYLIB + select PHYLINK help This driver supports Wangxun(R) GbE PCI Express family of adapters.
-- Gitee From 3371a221c17d357941e1a811666e5e187f8bc00d Mon Sep 17 00:00:00 2001
From: Jakub Kicinski Date: Thu, 11 Jan 2024 11:33:11 -0800
Subject: [PATCH 561/953] net: fill in MODULE_DESCRIPTION()s for wx_lib
ANBZ: #8484
commit 907ee6681788556b9ade3ad0a1f6f4aea192399c upstream.
W=1 builds now warn if module is built without a MODULE_DESCRIPTION(). Add a description to Wangxun's common code lib.
Signed-off-by: Jakub Kicinski Signed-off-by: David S.
Miller Link: https://lore.kernel.org/all/20240111193311.4152859-1-kuba@kernel.org Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984
--- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index b738884c901a..0481f646a303 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2737,4 +2737,5 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count, } EXPORT_SYMBOL(wx_set_ring); +MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers."); MODULE_LICENSE("GPL");
-- Gitee From 247b6cf8b8138b345822c0585d04e8a62ce97211 Mon Sep 17 00:00:00 2001
From: Duanqiang Wen Date: Wed, 27 Mar 2024 17:57:48 +0800
Subject: [PATCH 562/953] anolis: net: txgbe: fix i2c dev name cannot match clkdev
ANBZ: #8484
The txgbe clkdev shortened the clk_name, so the i2c_dev info_name also needs to be shortened. Otherwise, the i2c_dev cannot initialize its clock.
Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984
--- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 328d9a9eb3db..129cd5042180 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -20,6 +20,8 @@ #include "txgbe_phy.h" #include "txgbe_hw.h" +#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw" + static int txgbe_swnodes_register(struct txgbe *txgbe) { struct txgbe_nodes *nodes = &txgbe->nodes; @@ -551,8 +553,8 @@ static int txgbe_clock_register(struct txgbe *txgbe) char clk_name[32]; struct clk *clk; - snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d", - pci_dev_id(pdev)); + snprintf(clk_name, sizeof(clk_name), "%s.%d", + TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev)); clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000); if (IS_ERR(clk)) @@ -614,7 +616,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe) info.parent = &pdev->dev; info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]); - info.name = "i2c_designware"; + info.name = TXGBE_I2C_CLK_DEV_NAME; info.id = pci_dev_id(pdev); info.res = &DEFINE_RES_IRQ(pdev->irq);
-- Gitee From fd8c1ee81c4348e23fa30c95425be1433ed88df8 Mon Sep 17 00:00:00 2001
From: James Morse Date: Fri, 8 Mar 2024 15:42:09 +0000
Subject: [PATCH 563/953] x86/resctrl: Fix allocation of cleanest CLOSID on platforms with no monitors
ANBZ: #8626
commit 21f4744007624235765b7b27798bf4de86da99d8 morse-linux.
commit 6eac36bb9eb0 ("x86/resctrl: Allocate the cleanest CLOSID by searching closid_num_dirty_rmid") added a Kconfig option that causes resctrl to search for the CLOSID with the fewest dirty cache lines when creating a new control group. This depends on the values read from the llc_occupancy counters. This support missed that some platforms may not have these counters. This causes a NULL pointer dereference when creating a new control group as the array was not allocated by dom_data_init(). As this feature isn't necessary on platforms that don't have cache occupancy monitors, add this to the check that occurs when a new control group is allocated. The existing code is not selected by any upstream platform, so it makes no sense to backport this patch to stable.
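To make the guarded policy concrete: the search walks the free CLOSIDs and keeps the one whose RMIDs cover the fewest dirty cache lines. A minimal user-space sketch of that selection, with closid_num_dirty_rmid[] and closid_free_map standing in for the kernel's bookkeeping (illustrative only; without llc_occupancy counters these counts are never populated, hence the new check):

#include <limits.h>

#define NUM_CLOSID 16

static unsigned int closid_num_dirty_rmid[NUM_CLOSID]; /* illustrative */
static unsigned long closid_free_map; /* bit n set => CLOSID n is free */

static int find_cleanest_closid(void)
{
	unsigned int least_dirty = UINT_MAX;
	int i, cleanest = -1;

	for (i = 0; i < NUM_CLOSID; i++) {
		if (!(closid_free_map & (1UL << i)))
			continue; /* CLOSID is in use */
		if (closid_num_dirty_rmid[i] < least_dirty) {
			least_dirty = closid_num_dirty_rmid[i];
			cleanest = i;
		}
	}
	return cleanest; /* -1 when no CLOSID is free */
}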
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974
--- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 011e17efb1a6..1767c1affa60 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -149,7 +149,8 @@ static int closid_alloc(void) lockdep_assert_held(&rdtgroup_mutex); - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && + is_llc_occupancy_enabled()) { cleanest_closid = resctrl_find_cleanest_closid(); if (cleanest_closid < 0) return cleanest_closid;
-- Gitee From 4747a72e2ceb454d2a27f39aafd7b6b49f758020 Mon Sep 17 00:00:00 2001
From: James Morse Date: Tue, 19 Mar 2019 10:45:43 +0000
Subject: [PATCH 564/953] x86/resctrl: Add a helper to avoid reaching into the arch code resource list
ANBZ: #8626
commit 5b1da243d13f6237f332163d1bae325c36648a6b morse-linux.
Resctrl occasionally wants to know something about a specific resource; in these cases it reaches into the arch code's rdt_resources_all[] array. Once the filesystem parts of resctrl are moved to /fs/, it will need visibility of the architecture-specific struct rdt_hw_resource definition, and the array of all resources. All architectures would also need an r_resctrl member in this struct. Instead, abstract this via a helper to allow architectures to do different things here. Move the level enum to the resctrl header and add a helper to retrieve the struct rdt_resource by 'rid'. resctrl_arch_get_resource() should not return NULL for any value in the enum; it may instead return a dummy resource that is !alloc_enabled && !mon_enabled.
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974
--- arch/x86/kernel/cpu/resctrl/core.c | 10 +++++++++- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 10 ---------- arch/x86/kernel/cpu/resctrl/monitor.c | 8 ++++---- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 +++++++-------- include/linux/resctrl.h | 17 +++++++++++++++++ 6 files changed, 38 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 8702d34711df..1644f37e23c4 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -122,6 +122,14 @@ struct rdt_hw_resource rdt_resources_all[] = { }, }; +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &rdt_resources_all[l].r_resctrl; +} + /* * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs * as they do not have CPUID enumeration support for Cache allocation. @@ -169,7 +177,7 @@ static inline void cache_alloc_hsw_probe(void) bool is_mba_sc(struct rdt_resource *r) { if (!r) - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc; + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); /* * The software controller support is only applicable to MBA resource.
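The conversion pattern used throughout the rest of this patch is visible in miniature above: filesystem code asks for a resource by level and never dereferences struct rdt_hw_resource. A hedged sketch of a typical fs-side caller, assuming only the new helper and the existing mon_capable field:

static bool l3_mon_capable(void)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);

	/* The helper may hand back a dummy, not-capable resource. */
	return r && r->mon_capable;
}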
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 7997b47743a2..788ac0395645 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -599,7 +599,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) domid = md.u.domid; evtid = md.u.evtid; - r = &rdt_resources_all[resid].r_resctrl; + r = resctrl_arch_get_resource(resid); d = rdt_find_domain(r, domid, NULL); if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index c99f26ebe7a6..65990def6c79 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -466,16 +466,6 @@ extern struct rdt_hw_resource rdt_resources_all[]; extern struct rdtgroup rdtgroup_default; extern struct dentry *debugfs_resctrl; -enum resctrl_res_level { - RDT_RESOURCE_L3, - RDT_RESOURCE_L2, - RDT_RESOURCE_MBA, - RDT_RESOURCE_SMBA, - - /* Must be the last */ - RDT_NUM_RESOURCES, -}; - static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index c34a35ec0f03..06565153ceb2 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -321,7 +321,7 @@ static void limbo_release_entry(struct rmid_entry *entry) */ void __check_limbo(struct rdt_domain *d, bool force_free) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); u32 idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry; u32 idx, cur_idx = 1; @@ -467,7 +467,7 @@ int alloc_rmid(u32 closid) static void add_rmid_to_limbo(struct rmid_entry *entry) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); struct rdt_domain *d; u32 idx; @@ -669,7 +669,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) if (!is_mbm_local_enabled()) return; - r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; + r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); closid = rgrp->closid; rmid = rgrp->mon.rmid; @@ -839,7 +839,7 @@ void mbm_handle_overflow(struct work_struct *work) if (!resctrl_mounted || !resctrl_arch_mon_capable()) goto out_unlock; - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + r = resctrl_arch_get_resource(RDT_RESOURCE_L3); d = container_of(work, struct rdt_domain, mbm_over.work); list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1767c1affa60..45372b6a6215 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2253,7 +2253,7 @@ static void l2_qos_cfg_update(void *arg) static inline bool is_mba_linear(void) { - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear; + return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; } static int set_cache_qos_cfg(int level, bool enable) @@ -2341,7 +2341,7 @@ static void mba_sc_domain_destroy(struct rdt_resource *r, */ static bool supports_mba_mbps(void) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); return (is_mbm_local_enabled() && 
r->alloc_capable && is_mba_linear()); } @@ -2353,7 +2353,7 @@ static bool supports_mba_mbps(void) */ static int set_mba_sc(bool mba_sc) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); u32 num_closid = resctrl_arch_get_num_closid(r); struct rdt_domain *d; int i; @@ -2625,10 +2625,10 @@ static void schemata_list_destroy(void) static int rdt_get_tree(struct fs_context *fc) { + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); struct rdt_fs_context *ctx = rdt_fc2context(fc); unsigned long flags = RFTYPE_CTRL_BASE; struct rdt_domain *dom; - struct rdt_resource *r; int ret; cpus_read_lock(); @@ -2701,8 +2701,7 @@ static int rdt_get_tree(struct fs_context *fc) resctrl_mounted = true; if (is_mbm_enabled()) { - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - list_for_each_entry(dom, &r->domains, list) + list_for_each_entry(dom, &l3->domains, list) mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, RESCTRL_PICK_ANY_CPU); } @@ -3878,7 +3877,7 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) seq_puts(seq, ",cdpl2"); - if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) + if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) seq_puts(seq, ",mba_MBps"); if (resctrl_debug) @@ -4068,7 +4067,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) void resctrl_offline_cpu(unsigned int cpu) { - struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); struct rdtgroup *rdtgrp; struct rdt_domain *d;
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index a365f67131ec..168cc9510069 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -36,6 +36,16 @@ enum resctrl_conf_type { CDP_DATA, }; +enum resctrl_res_level { + RDT_RESOURCE_L3, + RDT_RESOURCE_L2, + RDT_RESOURCE_MBA, + RDT_RESOURCE_SMBA, + + /* Must be the last */ + RDT_NUM_RESOURCES, +}; + #define CDP_NUM_TYPES (CDP_DATA + 1) /* @@ -190,6 +200,13 @@ struct rdt_resource { bool cdp_capable; }; +/* + * Get the resource that exists at this level. If the level is not supported + * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES + * will return NULL. + */ +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l); + /** * struct resctrl_schema - configuration abilities of a resource presented to * user-space
-- Gitee From d25e3fea15b9865e9f1a1ec46a93ac60cff1359a Mon Sep 17 00:00:00 2001
From: James Morse Date: Tue, 19 Mar 2019 16:29:13 +0000
Subject: [PATCH 565/953] x86/resctrl: Move ctrlval string parsing policy away from the arch code
ANBZ: #8626
commit f8546bea270343d2c81e5f3542d7f01dc24125dc morse-linux.
The policy for parsing the configuration values as a string from user-space is specified by a function pointer that the arch code provides. These strings are part of resctrl's ABI, and the functions and their caller both live in the same file. Exporting the parsing functions and allowing the architecture to choose how a schema is parsed allows an architecture to get this wrong. Keep this all in the filesystem parts of resctrl. This should prevent any architecture's string-parsing behaviour from varying without core code changes. Use the fflags to spot caches and bandwidth resources, and use the appropriate helper.
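The strings concerned are schemata lines such as "L3:0=fff;1=fff" or "MB:0=100". A minimal user-space sketch of the per-domain walk that parse_line() performs, using the same strsep() pattern; the kernel version hands each value to parse_cbm() or parse_bw() rather than printing it:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Walk "id=value" pairs in a line like "0=fff;1=ff0". */
static int parse_schemata_line(char *line)
{
	char *dom, *id;

	while ((dom = strsep(&line, ";")) != NULL) {
		id = strsep(&dom, "=");
		if (!id || !dom)
			return -1; /* malformed "id=value" pair */
		printf("domain %ld -> value %s\n", strtol(id, NULL, 10), dom);
	}
	return 0;
}

int main(void)
{
	char buf[] = "0=fff;1=ff0";

	return parse_schemata_line(buf);
}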
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 4 ---- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 28 +++++++++++++++++++---- arch/x86/kernel/cpu/resctrl/internal.h | 10 -------- include/linux/resctrl.h | 7 ------ 4 files changed, 23 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 1644f37e23c4..65e3e6d44118 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -75,7 +75,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L3", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_L3), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -89,7 +88,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L2", .cache_level = 2, .domains = domain_init(RDT_RESOURCE_L2), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -103,7 +101,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "MB", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_MBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, @@ -115,7 +112,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "SMBA", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_SMBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 788ac0395645..72a651671c68 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -23,6 +23,15 @@ #include "internal.h" +struct rdt_parse_data { + struct rdtgroup *rdtgrp; + char *buf; +}; + +typedef int (ctrlval_parser_t)(struct rdt_parse_data *data, + struct resctrl_schema *s, + struct rdt_domain *d); + /* * Check whether MBA bandwidth percentage value is correct. The value is * checked against the minimum and max bandwidth values specified by the @@ -59,8 +68,8 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return true; } -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) +static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) { struct resctrl_staged_config *cfg; u32 closid = data->rdtgrp->closid; @@ -138,8 +147,8 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) * Read one cache bit mask (hex). Check that it is valid for the current * resource type. 
*/ -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) +static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) { struct rdtgroup *rdtgrp = data->rdtgrp; struct resctrl_staged_config *cfg; @@ -195,6 +204,14 @@ int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, return 0; } +static ctrlval_parser_t *get_parser(struct rdt_resource *res) +{ + if (res->fflags & RFTYPE_RES_CACHE) + return &parse_cbm; + else + return &parse_bw; +} + /* * For each domain in this resource we expect to find a series of: * id=mask @@ -204,6 +221,7 @@ int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, static int parse_line(char *line, struct resctrl_schema *s, struct rdtgroup *rdtgrp) { + ctrlval_parser_t *parse_ctrlval = get_parser(s->res); enum resctrl_conf_type t = s->conf_type; struct resctrl_staged_config *cfg; struct rdt_resource *r = s->res; @@ -235,7 +253,7 @@ static int parse_line(char *line, struct resctrl_schema *s, if (d->id == dom_id) { data.buf = dom; data.rdtgrp = rdtgrp; - if (r->parse_ctrlval(&data, s, d)) + if (parse_ctrlval(&data, s, d)) return -EINVAL; if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { cfg = &d->staged_config[t]; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 65990def6c79..9048bd32e86f 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -413,11 +413,6 @@ static inline bool is_mbm_event(int e) e <= QOS_L3_MBM_LOCAL_EVENT_ID); } -struct rdt_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - /** * struct rdt_hw_resource - arch private attributes of a resctrl resource * @r_resctrl: Attributes of the resource used directly by resctrl. @@ -455,11 +450,6 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r return container_of(r, struct rdt_hw_resource, r_resctrl); } -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); - extern struct mutex rdtgroup_mutex; extern struct rdt_hw_resource rdt_resources_all[]; diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 168cc9510069..6e87bc95f5ea 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -157,9 +157,6 @@ struct resctrl_membw { u32 *mb_map; }; -struct rdt_parse_data; -struct resctrl_schema; - /** * struct rdt_resource - attributes of a resctrl resource * @rid: The index of the resource @@ -174,7 +171,6 @@ struct resctrl_schema; * @data_width: Character width of data when displaying * @default_ctrl: Specifies default cache cbm or memory B/W percent. 
* @format_str: Per resource format string to show domain value - * @parse_ctrlval: Per resource function pointer to parse control values * @evt_list: List of monitoring events * @fflags: flags to choose base and info files * @cdp_capable: Is the CDP feature available on this resource @@ -192,9 +188,6 @@ struct rdt_resource { int data_width; u32 default_ctrl; const char *format_str; - int (*parse_ctrlval)(struct rdt_parse_data *data, - struct resctrl_schema *s, - struct rdt_domain *d); struct list_head evt_list; unsigned long fflags; bool cdp_capable; -- Gitee From d25e3fea15b9865e9f1a1ec46a93ac60cff1359a Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 14 Jun 2018 13:52:30 +0100 Subject: [PATCH 566/953] x86/resctrl: Add helper for setting CPU default properties ANBZ: #8626 commit 8bd9128d2be50d27de7a2e1c694f7960862f43c2 morse-linux. rdtgroup_rmdir_ctrl() and rdtgroup_rmdir_mon() set the per-CPU pqr_state for CPUs that were part of the rmdir()'d group. Another architecture might not have a 'pqr_state', its hardware may need the values in a different format. MPAM's equivalent of RMID values are not unique, and always need the CLOSID to be provided too. There is only one caller that modifies a single value, (rdtgroup_rmdir_mon()). MPAM always needs both CLOSID and RMID for the hardware value as these are written to the same system register. As rdtgroup_rmdir_mon() has the CLOSID on hand, only provide a helper to set both values. These values are read by __resctrl_sched_in(), but may be written by a different CPU without any locking, add READ/WRTE_ONCE() to avoid torn values. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 14 +++++++++++--- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++++++++----- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 12dbd2588ca7..f61382258743 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -4,8 +4,9 @@ #ifdef CONFIG_X86_CPU_RESCTRL -#include #include +#include +#include /* * This value can never be a valid CLOSID, and is used when mapping a @@ -96,8 +97,8 @@ static inline void resctrl_arch_disable_mon(void) static inline void __resctrl_sched_in(struct task_struct *tsk) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - u32 closid = state->default_closid; - u32 rmid = state->default_rmid; + u32 closid = READ_ONCE(state->default_closid); + u32 rmid = READ_ONCE(state->default_rmid); u32 tmp; /* @@ -132,6 +133,13 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } +static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, + u32 rmid) +{ + WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid); + WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid); +} + static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid) { diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 45372b6a6215..5d2c1ce5b6b1 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3623,14 +3623,18 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, 
cpumask_var_t tmpmask) { struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + u32 closid, rmid; int cpu; /* Give any tasks back to the parent group */ rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); /* Update per cpu rmid of the moved CPUs first */ + closid = rdtgrp->closid; + rmid = prdtgrp->mon.rmid; for_each_cpu(cpu, &rdtgrp->cpu_mask) - per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + /* * Update the MSR on moved CPUs and CPUs which have moved * task running on them. @@ -3663,6 +3667,7 @@ static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) { + u32 closid, rmid; int cpu; /* Give any tasks back to the default group */ @@ -3673,10 +3678,10 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); /* Update per cpu closid and rmid of the moved CPUs first */ - for_each_cpu(cpu, &rdtgrp->cpu_mask) { - per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; - per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; - } + closid = rdtgroup_default.closid; + rmid = rdtgroup_default.mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); /* * Update the MSR on moved CPUs and CPUs which have moved
-- Gitee From 0816830b80fb8eb4090f10be4e7773e2e00beaae Mon Sep 17 00:00:00 2001
From: James Morse Date: Tue, 19 Mar 2019 16:40:49 +0000
Subject: [PATCH 567/953] x86/resctrl: Remove rdtgroup from update_cpu_closid_rmid()
ANBZ: #8626
commit 5c5385f5f23f19b7a05451798eab277ff77abccc morse-linux.
update_cpu_closid_rmid() takes a struct rdtgroup as an argument, which it uses to update the local CPU's default pqr values. This is a problem once the resctrl parts move out to /fs/, as the arch code cannot poke around inside struct rdtgroup. Rename update_cpu_closid_rmid() as resctrl_arch_sync_cpu_defaults() to be used as the target of an IPI, and pass the effective CLOSID and RMID in a new struct.
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974
--- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 19 +++++++++++++++---- include/linux/resctrl.h | 11 +++++++++++ 2 files changed, 26 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 5d2c1ce5b6b1..18f097fce51e 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -341,13 +341,13 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, * from update_closid_rmid() is protected against __switch_to() because * preemption is disabled. */ -static void update_cpu_closid_rmid(void *info) +void resctrl_arch_sync_cpu_defaults(void *info) { - struct rdtgroup *r = info; + struct resctrl_cpu_sync *r = info; if (r) { this_cpu_write(pqr_state.default_closid, r->closid); - this_cpu_write(pqr_state.default_rmid, r->mon.rmid); + this_cpu_write(pqr_state.default_rmid, r->rmid); } /* @@ -362,11 +362,22 @@ static void update_cpu_closid_rmid(void *info) * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, * * Per task closids/rmids must have been set up before calling this function. + * @r may be NULL.
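+ * A NULL @r leaves this CPU's defaults untouched; the IPI then only
+ * re-loads the current task's CLOSID/RMID.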
*/ static void update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) { - on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1); + struct resctrl_cpu_sync defaults; + struct resctrl_cpu_sync *defaults_p = NULL; + + if (r) { + defaults.closid = r->closid; + defaults.rmid = r->mon.rmid; + defaults_p = &defaults; + } + + on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, + 1); } static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 6e87bc95f5ea..2b79e4159507 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -220,6 +220,17 @@ struct resctrl_schema { u32 num_closid; }; +struct resctrl_cpu_sync { + u32 closid; + u32 rmid; +}; + +/* + * Update and re-load this CPUs defaults. Called via IPI, takes a pointer to + * struct resctrl_cpu_sync, or NULL. + */ +void resctrl_arch_sync_cpu_defaults(void *info); + /* The number of closid supported by this resource regardless of CDP */ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); -- Gitee From f99863d1c15cfea43b340340db2e3663631760db Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 16:45:54 +0000 Subject: [PATCH 568/953] x86/resctrl: Export resctrl fs's init function ANBZ: #8626 commit 208dada2dd57b7d887e96a51a4bf75be6f6d2065 morse-linux. rdtgroup_init() needs exporting so that arch code can call it once it lives in core code. As this is one of the few functions we export, rename it to have the resctrl in the name. The same goes for the exit call. x86's arch code init functions for RDT are renamed to have an arch prefix to make it clear these are part of the architecture code. 
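With the entry points exported, a second architecture can drive them from its own probe path. A hypothetical sketch of such arch glue; the example_* names are invented for illustration and are not part of any real driver:

/* Hypothetical arch-side glue; error unwinding elided. */
static int __init example_arch_resctrl_init(void)
{
	int err;

	err = example_arch_probe_resources(); /* invented helper */
	if (err)
		return err;

	return resctrl_init(); /* the newly exported fs half */
}
late_initcall(example_arch_resctrl_init);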
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 12 ++++++------ arch/x86/kernel/cpu/resctrl/internal.h | 3 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++---- include/linux/resctrl.h | 3 +++ 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 65e3e6d44118..29d19380d725 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -942,7 +942,7 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) } } -static int __init resctrl_late_init(void) +static int __init resctrl_arch_late_init(void) { struct rdt_resource *r; int state, ret; @@ -967,7 +967,7 @@ static int __init resctrl_late_init(void) if (state < 0) return state; - ret = rdtgroup_init(); + ret = resctrl_init(); if (ret) { cpuhp_remove_state(state); return ret; @@ -983,18 +983,18 @@ static int __init resctrl_late_init(void) return 0; } -late_initcall(resctrl_late_init); +late_initcall(resctrl_arch_late_init); -static void __exit resctrl_exit(void) +static void __exit resctrl_arch_exit(void) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; cpuhp_remove_state(rdt_online); - rdtgroup_exit(); + resctrl_exit(); if (r->mon_capable) rdt_put_mon_l3_config(); } -__exitcall(resctrl_exit); +__exitcall(resctrl_arch_exit); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 9048bd32e86f..7c073298aabf 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -300,9 +300,6 @@ extern struct list_head rdt_all_groups; extern int max_name_width, max_data_width; -int __init rdtgroup_init(void); -void __exit rdtgroup_exit(void); - /** * struct rftype - describe each file in the resctrl file system * @name: File name diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 18f097fce51e..1a49c9918f8d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4116,14 +4116,14 @@ void resctrl_offline_cpu(unsigned int cpu) } /* - * rdtgroup_init - rdtgroup initialization + * resctrl_init - resctrl filesystem initialization * * Setup resctrl file system including set up root, create mount point, - * register rdtgroup filesystem, and initialize files under root directory. + * register resctrl filesystem, and initialize files under root directory. 
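+ * Called from the architecture's late init (resctrl_arch_late_init() on
+ * x86) once the hardware has been probed.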
* * Return: 0 on success or -errno */ -int __init rdtgroup_init(void) +int __init resctrl_init(void) { int ret = 0; @@ -4171,7 +4171,7 @@ int __init rdtgroup_init(void) return ret; } -void __exit rdtgroup_exit(void) +void __exit resctrl_exit(void) { debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type);
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 2b79e4159507..f6a4b75f8122 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -325,4 +325,7 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d); extern unsigned int resctrl_rmid_realloc_threshold; extern unsigned int resctrl_rmid_realloc_limit; +int __init resctrl_init(void); +void __exit resctrl_exit(void); + #endif /* _RESCTRL_H */
-- Gitee From d78af7b9436c6cc588f65e4c9f9778e301f69c1b Mon Sep 17 00:00:00 2001
From: James Morse Date: Tue, 19 Mar 2019 17:09:15 +0000
Subject: [PATCH 569/953] x86/resctrl: Wrap resctrl_arch_find_domain() around rdt_find_domain()
ANBZ: #8626
commit 221cd020d3775fbd226d77e5fdb09247ba93127e morse-linux.
rdt_find_domain() finds a domain given a resource and a cache-id. It's not quite right for the resctrl arch API as it also returns the position to insert a new domain, which is needed when bringing a domain online in the arch code. Wrap rdt_find_domain() in another function, resctrl_arch_find_domain(), so we avoid the unnecessary argument outside the arch code.
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974
--- arch/x86/kernel/cpu/resctrl/core.c | 9 +++++++-- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 2 -- include/linux/resctrl.h | 2 ++ 4 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 29d19380d725..a31bbad61b2e 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -405,8 +405,8 @@ void rdt_ctrl_update(void *arg) * caller, return the first domain whose id is bigger than the input id. * The domain list is sorted by id in ascending order.
*/ -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos) +static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, + struct list_head **pos) { struct rdt_domain *d; struct list_head *l; @@ -430,6 +430,11 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, return NULL; } +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + return rdt_find_domain(r, id, NULL); +} + static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 72a651671c68..3603ade95f1d 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -618,7 +618,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) evtid = md.u.evtid; r = resctrl_arch_get_resource(resid); - d = rdt_find_domain(r, domid, NULL); + d = resctrl_arch_find_domain(r, domid); if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; goto out;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 7c073298aabf..32ade929ea1b 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -533,8 +533,6 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn); int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, umode_t mask); -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos); ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off); int rdtgroup_schemata_show(struct kernfs_open_file *of,
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index f6a4b75f8122..c5fcbb524136 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -233,6 +233,8 @@ void resctrl_arch_sync_cpu_defaults(void *info); /* The number of closid supported by this resource regardless of CDP */ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); /*
-- Gitee From 1dfb99b5643211ba825a2d4519522936a19ff846 Mon Sep 17 00:00:00 2001
From: James Morse Date: Wed, 20 Mar 2019 13:24:07 +0000
Subject: [PATCH 570/953] x86/resctrl: Move resctrl types to a separate header
ANBZ: #8626
commit 80afaf082df1ccf983bbebba6b5037d20dd8a832 morse-linux.
To avoid sticky problems in the mpam glue code, move the resctrl enums into a separate header. This lets the arch code declare prototypes that use these enums without creating a loop via asm<->linux resctrl.h. The same logic applies to the monitor-configuration defines, so move these too. The maintainers entry for these headers was missed when resctrl.h was created. Add a wildcard entry to match both resctrl.h and resctrl_types.h.
Signed-off-by: James Morse --- internal.h lacks a copyright notice so there is nothing to preserve when creating a new file...
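The loop being avoided is the asm header needing the enums while linux/resctrl.h needs the asm header. With the types split out, an arch header only pulls in the new types header. An illustrative shape, not a real file:

/* some arch's asm/resctrl.h: illustrative only */
#include <linux/resctrl_types.h>	/* enums only, no loop back here */

bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);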
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- MAINTAINERS | 1 + arch/x86/kernel/cpu/resctrl/internal.h | 24 --------- include/linux/resctrl.h | 35 +------------ include/linux/resctrl_types.h | 68 ++++++++++++++++++++++++++ 4 files changed, 70 insertions(+), 58 deletions(-) create mode 100644 include/linux/resctrl_types.h diff --git a/MAINTAINERS b/MAINTAINERS index f3f654630e1d..e9339a99862e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18075,6 +18075,7 @@ S: Supported F: Documentation/arch/x86/resctrl* F: arch/x86/include/asm/resctrl.h F: arch/x86/kernel/cpu/resctrl/ +F: include/linux/resctrl*.h F: tools/testing/selftests/resctrl/ READ-COPY UPDATE (RCU) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 32ade929ea1b..031948322eab 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -32,30 +32,6 @@ */ #define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE) -/* Reads to Local DRAM Memory */ -#define READS_TO_LOCAL_MEM BIT(0) - -/* Reads to Remote DRAM Memory */ -#define READS_TO_REMOTE_MEM BIT(1) - -/* Non-Temporal Writes to Local Memory */ -#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) - -/* Non-Temporal Writes to Remote Memory */ -#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) - -/* Reads to Local Memory the system identifies as "Slow Memory" */ -#define READS_TO_LOCAL_S_MEM BIT(4) - -/* Reads to Remote Memory the system identifies as "Slow Memory" */ -#define READS_TO_REMOTE_S_MEM BIT(5) - -/* Dirty Victims to All Types of Memory */ -#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) - -/* Max event bits supported */ -#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) - /** * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that * aren't marked nohz_full diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index c5fcbb524136..b0ee7256e095 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -5,6 +5,7 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/pid.h> +#include <linux/resctrl_types.h> /* CLOSID, RMID value used by the default control group */ #define RESCTRL_RESERVED_CLOSID 0 @@ -24,40 +25,6 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX -/** - * enum resctrl_conf_type - The type of configuration. - * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. - * @CDP_CODE: Configuration applies to instruction fetches. - * @CDP_DATA: Configuration applies to reads and writes. - */ -enum resctrl_conf_type { - CDP_NONE, - CDP_CODE, - CDP_DATA, -}; - -enum resctrl_res_level { - RDT_RESOURCE_L3, - RDT_RESOURCE_L2, - RDT_RESOURCE_MBA, - RDT_RESOURCE_SMBA, - - /* Must be the last */ - RDT_NUM_RESOURCES, -}; - -#define CDP_NUM_TYPES (CDP_DATA + 1) - -/* - * Event IDs, the values match those used to program IA32_QM_EVTSEL before - * reading IA32_QM_CTR on RDT systems.
- */ -enum resctrl_event_id { - QOS_L3_OCCUP_EVENT_ID = 0x01, - QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, - QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, -}; - /** * struct resctrl_staged_config - parsed configuration to be applied * @new_ctrl: new ctrl value to be loaded diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h new file mode 100644 index 000000000000..4788bd95dac6 --- /dev/null +++ b/include/linux/resctrl_types.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Arm Ltd. + * Based on arch/x86/kernel/cpu/resctrl/internal.h + */ + +#ifndef __LINUX_RESCTRL_TYPES_H +#define __LINUX_RESCTRL_TYPES_H + +/* Reads to Local DRAM Memory */ +#define READS_TO_LOCAL_MEM BIT(0) + +/* Reads to Remote DRAM Memory */ +#define READS_TO_REMOTE_MEM BIT(1) + +/* Non-Temporal Writes to Local Memory */ +#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) + +/* Non-Temporal Writes to Remote Memory */ +#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) + +/* Reads to Local Memory the system identifies as "Slow Memory" */ +#define READS_TO_LOCAL_S_MEM BIT(4) + +/* Reads to Remote Memory the system identifies as "Slow Memory" */ +#define READS_TO_REMOTE_S_MEM BIT(5) + +/* Dirty Victims to All Types of Memory */ +#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) + +/* Max event bits supported */ +#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) + +/** + * enum resctrl_conf_type - The type of configuration. + * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. + * @CDP_CODE: Configuration applies to instruction fetches. + * @CDP_DATA: Configuration applies to reads and writes. + */ +enum resctrl_conf_type { + CDP_NONE, + CDP_CODE, + CDP_DATA, +}; + +enum resctrl_res_level { + RDT_RESOURCE_L3, + RDT_RESOURCE_L2, + RDT_RESOURCE_MBA, + RDT_RESOURCE_SMBA, + + /* Must be the last */ + RDT_NUM_RESOURCES, +}; + +#define CDP_NUM_TYPES (CDP_DATA + 1) + +/* + * Event IDs, the values match those used to program IA32_QM_EVTSEL before + * reading IA32_QM_CTR on RDT systems. + */ +enum resctrl_event_id { + QOS_L3_OCCUP_EVENT_ID = 0x01, + QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, + QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, +}; + +#endif /* __LINUX_RESCTRL_TYPES_H */ -- Gitee From 470ca8091b5ae39fb6ca55791b2b99d67584a62c Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 17:54:18 +0000 Subject: [PATCH 571/953] x86/resctrl: Add a resctrl helper to reset all the resources ANBZ: #8626 commit 128aadaa200f229945c91d94e40548338bbae773 morse-linux. On umount(), resctrl resets each resource back to its default configuration. It only ever does this for all resources in one go. reset_all_ctrls() is architecture specific as it works with struct rdt_hw_resource. Add an architecture helper to reset all resources. 
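For a sense of the interface this creates, a minimal sketch of how a non-x86 architecture could satisfy the same hook using only the shared types; mpam_reset_resource() is hypothetical:

void resctrl_arch_reset_resources(void)
{
	enum resctrl_res_level i;
	struct rdt_resource *r;

	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
		r = resctrl_arch_get_resource(i);
		if (r->alloc_capable)
			mpam_reset_resource(r);	/* hypothetical: restore this resource's default controls */
	}
}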
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 2 ++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 16 +++++++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index f61382258743..5f6a5375bb4a 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -15,6 +15,8 @@ */ #define X86_RESCTRL_EMPTY_CLOSID ((u32)~0) +void resctrl_arch_reset_resources(void); + /** * struct resctrl_pqr_state - State cache for the PQR MSR * @cur_rmid: The cached Resource Monitoring ID diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1a49c9918f8d..13c24cb18d76 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2859,6 +2859,14 @@ static int reset_all_ctrls(struct rdt_resource *r) return 0; } +void resctrl_arch_reset_resources(void) +{ + struct rdt_resource *r; + + for_each_capable_rdt_resource(r) + reset_all_ctrls(r); +} + /* * Move tasks from one to the other group. If @from is NULL, then all tasks * in the systems are moved unconditionally (used for teardown). @@ -2968,16 +2976,14 @@ static void rmdir_all_sub(void) static void rdt_kill_sb(struct super_block *sb) { - struct rdt_resource *r; - cpus_read_lock(); mutex_lock(&rdtgroup_mutex); rdt_disable_ctx(); - /*Put everything back to default values. */ - for_each_alloc_capable_rdt_resource(r) - reset_all_ctrls(r); + /* Put everything back to default values. */ + resctrl_arch_reset_resources(); + rmdir_all_sub(); rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; -- Gitee From f94f8c07be8eb4afeed0423adfd1ff72c98baf70 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 22 Jan 2024 14:18:51 +0000 Subject: [PATCH 572/953] x86/resctrl: Move monitor init work to a resctrl init call ANBZ: #8626 commit b8401f504352b8e3dde8864ebaecb5b9cbbf12f8 morse-linux. rdt_get_mon_l3_config() is called from the architecture's resctrl_arch_late_init(), and initialises both architecture specific fields, such as hw_res->mon_scale, and resctrl filesystem fields, by calling dom_data_init(). To separate the filesystem and architecture parts of resctrl, this function needs splitting up. Add resctrl_mon_resource_init() to do the filesystem specific work, and call it from resctrl_init(). This runs later, but is still before the filesystem is mounted and the rmid_ptrs[] array can be used.
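Abridged, the resulting split of responsibilities looks like this; a sketch assembled from the hunks below, not a literal quote of either function:

/* arch code, resctrl_arch_late_init() path: probe and scale the hardware */
int __init rdt_get_mon_l3_config(struct rdt_resource *r)
{
	/* ... read CPUID, set hw_res->mon_scale and the realloc threshold ... */
	r->mon_capable = true;
	return 0;
}

/* filesystem code, called from resctrl_init(): allocate fs-owned state */
int resctrl_mon_resource_init(void)
{
	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
	int ret;

	if (!r->mon_capable)
		return 0;

	ret = dom_data_init(r);		/* rmid_ptrs[] and friends */
	if (ret)
		return ret;

	l3_mon_evt_init(r);
	return 0;
}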
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 24 +++++++++++++++++------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 4 ++++ 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 031948322eab..7a0c74779c53 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -540,6 +540,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg); void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, struct rdt_domain *d, struct rdtgroup *rdtgrp, int evtid, int first); +int resctrl_mon_resource_init(void); void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, int exclude_cpu); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 06565153ceb2..929ec1430b45 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1003,12 +1003,28 @@ static void l3_mon_evt_init(struct rdt_resource *r) list_add_tail(&mbm_local_event.list, &r->evt_list); } +int resctrl_mon_resource_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + int ret; + + if (!r->mon_capable) + return 0; + + ret = dom_data_init(r); + if (ret) + return ret; + + l3_mon_evt_init(r); + + return 0; +} + int __init rdt_get_mon_l3_config(struct rdt_resource *r) { unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset; struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); unsigned int threshold; - int ret; resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024; hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale; @@ -1036,10 +1052,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) */ resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold); - ret = dom_data_init(r); - if (ret) - return ret; - if (rdt_cpu_has(X86_FEATURE_BMEC)) { u32 eax, ebx, ecx, edx; @@ -1057,8 +1069,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) } } - l3_mon_evt_init(r); - r->mon_capable = true; return 0; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 13c24cb18d76..7a9696f53f2b 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4138,6 +4138,10 @@ int __init resctrl_init(void) rdtgroup_setup_default(); + ret = resctrl_mon_resource_init(); + if (ret) + return ret; + ret = sysfs_create_mount_point(fs_kobj, "resctrl"); if (ret) return ret; -- Gitee From 81bc21fd2911dbb1a70e4778fecaebda5c95aec5 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 24 Oct 2023 17:36:46 +0100 Subject: [PATCH 573/953] x86/resctrl: Move monitor exit work to a resctrl exit call ANBZ: #8626 commit 7e7bad526c7be49af6d4f4e3382e6cfd212378dd morse-linux. rdt_put_mon_l3_config() is called via the architecture's resctrl_arch_exit() call, and appears to free the rmid_ptrs[] and closid_num_dirty_rmid[] arrays. In reality this code is marked __exit, and is removed by the linker as resctrl can't be built as a module. MPAM can make use of this code from its error interrupt handler, a later patch drops all the __init/__exit annotations.
To separate the filesystem and architecture parts of resctrl, this free()ing work needs to be triggered by the filesystem, as these structures belong to the filesystem code. Rename rdt_put_mon_l3_config() to resctrl_mon_resource_exit() and call it from resctrl_exit(). The kfree() is currently dependent on r->mon_capable. resctrl_mon_resource_init() takes no arguments, so resctrl_mon_resource_exit() shouldn't take any either. Add the check to dom_data_exit(), making it take the resource as an argument. This makes it more symmetrical with dom_data_init(). Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 5 ----- arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 12 ++++++++---- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 ++ 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index a31bbad61b2e..4533e76f43f1 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -992,14 +992,9 @@ late_initcall(resctrl_arch_late_init); static void __exit resctrl_arch_exit(void) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - cpuhp_remove_state(rdt_online); resctrl_exit(); - - if (r->mon_capable) - rdt_put_mon_l3_config(); } __exitcall(resctrl_arch_exit); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 7a0c74779c53..01fcd4ef26ca 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -533,7 +533,7 @@ void closid_free(int closid); int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -void __exit rdt_put_mon_l3_config(void); +void __exit resctrl_mon_resource_exit(void); bool __init rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 929ec1430b45..2a1cbd4de6ee 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -954,10 +954,12 @@ static int dom_data_init(struct rdt_resource *r) return err; } -static void __exit dom_data_exit(void) +static void __exit dom_data_exit(struct rdt_resource *r) { - mutex_lock(&rdtgroup_mutex); + if (!r->mon_capable) + return; + mutex_lock(&rdtgroup_mutex); if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { kfree(closid_num_dirty_rmid); closid_num_dirty_rmid = NULL; @@ -1074,9 +1076,11 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } -void __exit rdt_put_mon_l3_config(void) +void __exit resctrl_mon_resource_exit(void) { - dom_data_exit(); + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + + dom_data_exit(r); } void __init intel_rdt_mbm_apply_quirk(void) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 7a9696f53f2b..6cf4ebe9c058 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4186,4 +4186,6 @@ void __exit resctrl_exit(void) { debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); sysfs_remove_mount_point(fs_kobj, "resctrl"); + +
resctrl_mon_resource_exit(); } -- Gitee From 20b5543508fbe9f5a4d16a887f134f0a5ace468c Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 18:04:51 +0000 Subject: [PATCH 574/953] x86/resctrl: Move max_{name,data}_width into resctrl code ANBZ: #8626 commit 92a05bac93d1cba6950d0a9b9a4315c0b2d7d22f morse-linux. max_name_width and max_data_width are used to pad the strings in the resctrl schemata file. This should be part of the fs code as it influences the user-space interface, but currently max_data_width is generated by the arch init code. max_name_width is already managed by schemata_list_add(). Move the variables and max_data_width's initialisation code to rdtgroup.c. There is no need for an extra rdt_init_padding() helper as the length of the name can be considered when schemata_list_add() creates each schema entry. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 22 ---------------------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 ++++++++++++ 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 4533e76f43f1..730d1b3db1f2 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -44,12 +44,6 @@ static DEFINE_MUTEX(domain_list_lock); */ DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state); -/* - * Used to store the max resource name width and max resource data width - * to display the schemata in a tabular format - */ -int max_name_width, max_data_width; - /* * Global boolean for rdt_alloc which is true if any * resource allocation is enabled. @@ -648,20 +642,6 @@ static int resctrl_arch_offline_cpu(unsigned int cpu) return 0; } -/* - * Choose a width for the resource name and resource data based on the - * resource that has widest name and cbm. - */ -static __init void rdt_init_padding(void) -{ - struct rdt_resource *r; - - for_each_alloc_capable_rdt_resource(r) { - if (r->data_width > max_data_width) - max_data_width = r->data_width; - } -} - enum { RDT_FLAG_CMT, RDT_FLAG_MBM_TOTAL, @@ -963,8 +943,6 @@ static int __init resctrl_arch_late_init(void) if (!get_rdt_resources()) return -ENODEV; - rdt_init_padding(); - state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", resctrl_arch_online_cpu, diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 6cf4ebe9c058..e736e4d20f63 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -58,6 +58,12 @@ static struct kernfs_node *kn_mongrp; /* Kernel fs node for "mon_data" directory under root */ static struct kernfs_node *kn_mondata; +/* + * Used to store the max resource name width and max resource data width + * to display the schemata in a tabular format + */ +int max_name_width, max_data_width; + static struct seq_buf last_cmd_status; static char last_cmd_status_buf[512]; @@ -2595,6 +2601,12 @@ static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type if (cl > max_name_width) max_name_width = cl; + /* + * Choose a width for the resource data based on the resource that has + * widest name and cbm. 
+ */ + max_data_width = max(max_data_width, r->data_width); + INIT_LIST_HEAD(&s->list); list_add(&s->list, &resctrl_schema_all); -- Gitee From 01ade1a2fe128f4720f457e1bfa56af881f3279c Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 18:12:17 +0000 Subject: [PATCH 575/953] x86/resctrl: Stop using the for_each_*_rdt_resource() walkers ANBZ: #8626 commit a5b9d55f0b14d88ad1608c38f2033b057d122723 morse-linux. The for_each_*_rdt_resource() helpers walk the architecture's array of structures, using the resctrl visible part as an iterator. These became over-complex when the structures were split into a filesystem and architecture-specific struct. This approach avoided the need to touch every call site. Once the filesystem parts of resctrl are moved to /fs/, both the architecture's resource array and the definition of those structures are no longer accessible. To support resctrl, each architecture would have to provide equally complex macros. Change the resctrl code that uses these to walk through the resctrl_res_level enum and check the mon/alloc capable flags instead. Instances in core.c, and resctrl_arch_reset_resources() remain part of x86's architecture specific code. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 7 +++++- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 30 +++++++++++++++++---- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 884b88e25141..f2315a50ea4f 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -840,6 +840,7 @@ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) { cpumask_var_t cpu_with_psl; + enum resctrl_res_level i; struct rdt_resource *r; struct rdt_domain *d_i; bool ret = false; @@ -854,7 +855,11 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) * First determine which cpus have pseudo-locked regions * associated with them. */ - for_each_alloc_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + list_for_each_entry(d_i, &r->domains, list) { if (d_i->plr) cpumask_or(cpu_with_psl, cpu_with_psl, diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e736e4d20f63..3f16e7854411 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -98,12 +98,17 @@ void rdt_last_cmd_printf(const char *fmt, ...)
void rdt_staged_configs_clear(void) { + enum resctrl_res_level i; struct rdt_resource *r; struct rdt_domain *dom; lockdep_assert_held(&rdtgroup_mutex); - for_each_alloc_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + list_for_each_entry(dom, &r->domains, list) memset(dom->staged_config, 0, sizeof(dom->staged_config)); } @@ -2181,6 +2186,7 @@ static int rdtgroup_mkdir_info_resdir(void *priv, char *name, static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) { + enum resctrl_res_level i; struct resctrl_schema *s; struct rdt_resource *r; unsigned long fflags; @@ -2205,8 +2211,12 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) goto out_destroy; } - for_each_mon_capable_rdt_resource(r) { - fflags = r->fflags | RFTYPE_MON_INFO; + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + fflags = r->fflags | RFTYPE_MON_INFO; sprintf(name, "%s_MON", r->name); ret = rdtgroup_mkdir_info_resdir(r, name, fflags); if (ret) @@ -2615,10 +2625,15 @@ static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type static int schemata_list_create(void) { + enum resctrl_res_level i; struct rdt_resource *r; int ret = 0; - for_each_alloc_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + if (resctrl_arch_get_cdp_enabled(r->rid)) { ret = schemata_list_add(r, CDP_CODE); if (ret) @@ -3166,6 +3181,7 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, struct kernfs_node **dest_kn) { + enum resctrl_res_level i; struct rdt_resource *r; struct kernfs_node *kn; int ret; @@ -3184,7 +3200,11 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, * Create the subdirectories for each domain. Note that all events * in a domain like L3 are grouped into a resource whose domain is L3 */ - for_each_mon_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); if (ret) goto out_destroy; -- Gitee From 98d38fb8792ea908006431e54a1527ad5f1ad4b6 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 10:26:39 +0000 Subject: [PATCH 576/953] x86/resctrl: Export the is_mbm_*_enabled() helpers to asm/resctrl.h ANBZ: #8626 commit 1b09c3e0ec8bc1a30b01d466b27565e06999414d morse-linux. The architecture specific parts of resctrl have helpers to hide accesses to the rdt_mon_features bitmap. Once the filesystem parts of resctrl are moved, these can no longer live in internal.h. Once these are exposed to the wider kernel, they should have a 'resctrl_arch_' prefix, to fit the rest of the arch<->fs interface. Move and rename the helpers that touch rdt_mon_features directly. is_mbm_event() and is_mbm_enabled() are only called from rdtgroup.c, so can be moved into that file. 
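To see what the rename buys, a sketch of how another architecture might back the same helper with its own state; the flag below is hypothetical, while the x86 versions in this patch test the rdt_mon_features bitmap:

static inline bool resctrl_arch_is_mbm_local_enabled(void)
{
	return mpam_local_mbm_present;	/* hypothetical arch-private flag */
}

Filesystem code only ever sees the resctrl_arch_is_*_enabled() interface; the backing state stays private to each architecture.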
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 17 +++++++++++ arch/x86/kernel/cpu/resctrl/core.c | 4 +-- arch/x86/kernel/cpu/resctrl/internal.h | 27 ----------------- arch/x86/kernel/cpu/resctrl/monitor.c | 18 ++++++------ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 40 +++++++++++++++++--------- 5 files changed, 54 insertions(+), 52 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 5f6a5375bb4a..50407e83d0ca 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * This value can never be a valid CLOSID, and is used when mapping a @@ -43,6 +44,7 @@ DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); extern bool rdt_alloc_capable; extern bool rdt_mon_capable; +extern unsigned int rdt_mon_features; DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); @@ -82,6 +84,21 @@ static inline void resctrl_arch_disable_mon(void) static_branch_dec_cpuslocked(&rdt_enable_key); } +static inline bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_total_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_local_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 730d1b3db1f2..675ebc6b0063 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -481,13 +481,13 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom) { size_t tsize; - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_total); hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_total) return -ENOMEM; } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_local); hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_local) { diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 01fcd4ef26ca..edbccc79246f 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -130,7 +130,6 @@ struct rmid_read { void *arch_mon_ctx; }; -extern unsigned int rdt_mon_features; extern struct list_head resctrl_schema_all; extern bool resctrl_mounted; @@ -360,32 +359,6 @@ struct msr_param { u32 high; }; -static inline bool is_llc_occupancy_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); -} - -static inline bool is_mbm_total_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); -} - -static inline bool is_mbm_local_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); -} - -static inline bool is_mbm_enabled(void) -{ - return (is_mbm_total_enabled() || is_mbm_local_enabled()); -} - -static inline bool is_mbm_event(int e) -{ - return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); -} - 
/** * struct rdt_hw_resource - arch private attributes of a resctrl resource * @r_resctrl: Attributes of the resource used directly by resctrl. diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 2a1cbd4de6ee..c9be2d0819c0 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -251,11 +251,11 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - if (is_mbm_total_enabled()) + if (resctrl_arch_is_mbm_total_enabled()) memset(hw_dom->arch_mbm_total, 0, sizeof(*hw_dom->arch_mbm_total) * r->num_rmid); - if (is_mbm_local_enabled()) + if (resctrl_arch_is_mbm_local_enabled()) memset(hw_dom->arch_mbm_local, 0, sizeof(*hw_dom->arch_mbm_local) * r->num_rmid); } @@ -514,7 +514,7 @@ void free_rmid(u32 closid, u32 rmid) entry = __rmid_entry(idx); - if (is_llc_occupancy_enabled()) + if (resctrl_arch_is_llc_occupancy_enabled()) add_rmid_to_limbo(entry); else list_add_tail(&entry->list, &rmid_free_lru); @@ -666,7 +666,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) struct list_head *head; struct rdtgroup *entry; - if (!is_mbm_local_enabled()) + if (!resctrl_arch_is_mbm_local_enabled()) return; r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); @@ -735,7 +735,7 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, * This is protected from concurrent reads from user * as both the user and we hold the global mutex. */ - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; rr.val = 0; rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); @@ -749,7 +749,7 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; rr.val = 0; rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); @@ -997,11 +997,11 @@ static void l3_mon_evt_init(struct rdt_resource *r) { INIT_LIST_HEAD(&r->evt_list); - if (is_llc_occupancy_enabled()) + if (resctrl_arch_is_llc_occupancy_enabled()) list_add_tail(&llc_occupancy_event.list, &r->evt_list); - if (is_mbm_total_enabled()) + if (resctrl_arch_is_mbm_total_enabled()) list_add_tail(&mbm_total_event.list, &r->evt_list); - if (is_mbm_local_enabled()) + if (resctrl_arch_is_mbm_local_enabled()) list_add_tail(&mbm_local_event.list, &r->evt_list); } diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 3f16e7854411..8285b916289c 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -114,6 +114,18 @@ void rdt_staged_configs_clear(void) } } +static bool resctrl_is_mbm_enabled(void) +{ + return (resctrl_arch_is_mbm_total_enabled() || + resctrl_arch_is_mbm_local_enabled()); +} + +static bool resctrl_is_mbm_event(int e) +{ + return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && + e <= QOS_L3_MBM_LOCAL_EVENT_ID); +} + /* * Trivial allocator for CLOSIDs. Since h/w only supports a small number, * we can keep a bitmap of free CLOSIDs in a single integer. 
@@ -161,7 +173,7 @@ static int closid_alloc(void) lockdep_assert_held(&rdtgroup_mutex); if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && - is_llc_occupancy_enabled()) { + resctrl_arch_is_llc_occupancy_enabled()) { cleanest_closid = resctrl_find_cleanest_closid(); if (cleanest_closid < 0) return cleanest_closid; @@ -2370,7 +2382,7 @@ static bool supports_mba_mbps(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - return (is_mbm_local_enabled() && + return (resctrl_arch_is_mbm_local_enabled() && r->alloc_capable && is_mba_linear()); } @@ -2738,7 +2750,7 @@ static int rdt_get_tree(struct fs_context *fc) if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) resctrl_mounted = true; - if (is_mbm_enabled()) { + if (resctrl_is_mbm_enabled()) { list_for_each_entry(dom, &l3->domains, list) mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, RESCTRL_PICK_ANY_CPU); @@ -3107,7 +3119,7 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, if (ret) goto out_destroy; - if (is_mbm_event(mevt->evtid)) + if (resctrl_is_mbm_event(mevt->evtid)) mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); } kernfs_activate(kn); @@ -4006,9 +4018,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) if (resctrl_mounted && resctrl_arch_mon_capable()) rmdir_mondata_subdir_allrdtgrp(r, d->id); - if (is_mbm_enabled()) + if (resctrl_is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); - if (is_llc_occupancy_enabled() && has_busy_rmid(d)) { + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { /* * When a package is going down, forcefully * decrement rmid->ebusy. There is no way to know @@ -4032,12 +4044,12 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) u32 idx_limit = resctrl_arch_system_num_rmid_idx(); size_t tsize; - if (is_llc_occupancy_enabled()) { + if (resctrl_arch_is_llc_occupancy_enabled()) { d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); if (!d->rmid_busy_llc) return -ENOMEM; } - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { tsize = sizeof(*d->mbm_total); d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_total) { @@ -4045,7 +4057,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) return -ENOMEM; } } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { tsize = sizeof(*d->mbm_local); d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_local) { @@ -4077,13 +4089,13 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (err) goto out_unlock; - if (is_mbm_enabled()) { + if (resctrl_is_mbm_enabled()) { INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, RESCTRL_PICK_ANY_CPU); } - if (is_llc_occupancy_enabled()) + if (resctrl_arch_is_llc_occupancy_enabled()) INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); /* @@ -4138,12 +4150,12 @@ void resctrl_offline_cpu(unsigned int cpu) d = get_domain_from_cpu(cpu, l3); if (d) { - if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { + if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { cancel_delayed_work(&d->mbm_over); mbm_setup_overflow_handler(d, 0, cpu); } - if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(d)) { + if (resctrl_arch_is_llc_occupancy_enabled() && + cpu == d->cqm_work_cpu && has_busy_rmid(d)) { cancel_delayed_work(&d->cqm_limbo); cqm_setup_limbo_handler(d, 0, cpu); } -- Gitee From 
51b8ceec42245fd4cb0aa892a3e4e6b2209fab7b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 7 Mar 2023 16:11:32 +0000 Subject: [PATCH 577/953] x86/resctrl: Add resctrl_arch_is_evt_configurable() to abstract BMEC ANBZ: #8626 commit 9adbcdc241c348ff91aad916cdc12aa3dd45dbd7 morse-linux. When BMEC is supported the resctrl event can be configured in a number of ways. This depends on architecture support. rdt_get_mon_l3_config() modifies the struct mon_evt and calls mbm_config_rftype_init() to create the files that allow the configuration. Splitting this into separate architecture and filesystem parts would require the struct mon_evt and mbm_config_rftype_init() to be exposed. Instead, add resctrl_arch_is_evt_configurable(), and use this from resctrl_mon_resource_init() to initialise struct mon_evt and call mbm_config_rftype_init(). resctrl_arch_is_evt_configurable() calls rdt_cpu_has() so it doesn't obviously benefit from being inlined. Putting it in core.c will allow rdt_cpu_has() to eventually become static. resctrl_arch_is_evt_configurable() uses rdt_cpu_has() from resctrl_mon_resource_init(), which isn't marked __init. In addition, MPAM needs to initialise resctrl late. Drop the __init on the relevant functions. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 19 +++++++++++++++++-- arch/x86/kernel/cpu/resctrl/internal.h | 4 ++-- arch/x86/kernel/cpu/resctrl/monitor.c | 18 +++++++++--------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 +- include/linux/resctrl.h | 2 ++ 5 files changed, 31 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 675ebc6b0063..3226c994afec 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -667,7 +667,7 @@ struct rdt_options { bool force_off, force_on; }; -static struct rdt_options rdt_options[] __initdata = { +static struct rdt_options rdt_options[] __ro_after_init = { RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC), RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL), RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL), @@ -707,7 +707,7 @@ static int __init set_rdt_options(char *str) } __setup("rdt", set_rdt_options); -bool __init rdt_cpu_has(int flag) +bool rdt_cpu_has(int flag) { bool ret = boot_cpu_has(flag); struct rdt_options *o; @@ -727,6 +727,21 @@ bool __init rdt_cpu_has(int flag) return ret; } +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt) +{ + if (!rdt_cpu_has(X86_FEATURE_BMEC)) + return false; + + switch (evt) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL); + case QOS_L3_MBM_LOCAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL); + default: + return false; + } +} + static __init bool get_mem_config(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA]; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index edbccc79246f..46370eafb00f 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -507,7 +507,7 @@ int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); void __exit resctrl_mon_resource_exit(void); -bool __init rdt_cpu_has(int flag); 
+bool rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, @@ -527,7 +527,7 @@ bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); void __init thread_throttle_mode_init(void); -void __init mbm_config_rftype_init(const char *config); +void mbm_config_rftype_init(const char *config); void rdt_staged_configs_clear(void); bool closid_allocated(unsigned int closid); int resctrl_find_cleanest_closid(void); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index c9be2d0819c0..ccb85c61b43b 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1019,6 +1019,15 @@ int resctrl_mon_resource_init(void) l3_mon_evt_init(r); + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) { + mbm_total_event.configurable = true; + mbm_config_rftype_init("mbm_total_bytes_config"); + } + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) { + mbm_local_event.configurable = true; + mbm_config_rftype_init("mbm_local_bytes_config"); + } + return 0; } @@ -1060,15 +1069,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) /* Detect list of bandwidth sources that can be tracked */ cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx); hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; - - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) { - mbm_total_event.configurable = true; - mbm_config_rftype_init("mbm_total_bytes_config"); - } - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) { - mbm_local_event.configurable = true; - mbm_config_rftype_init("mbm_local_bytes_config"); - } } r->mon_capable = true; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 8285b916289c..2d6f4e0d3656 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2068,7 +2068,7 @@ void __init thread_throttle_mode_init(void) rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; } -void __init mbm_config_rftype_init(const char *config) +void mbm_config_rftype_init(const char *config) { struct rftype *rft; diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index b0ee7256e095..bfc63e8219e5 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -204,6 +204,8 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. -- Gitee From 6c15877cc5c10e5248c5f58d12193fe4354ed40e Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 7 Mar 2023 16:47:42 +0000 Subject: [PATCH 578/953] x86/resctrl: Change mon_event_config_{read,write}() to be arch helpers ANBZ: #8626 commit 297671c1f7c0c747a2ca4d007b3284f100dd0890 morse-linux. mon_event_config_{read,write}() are called via IPI and access model specific registers to do their work. To support another architecture, this needs abstracting. Rename mon_event_config_{read,write}() to have a resctrl_arch_ prefix, and move their struct mon_config_info parameter into the shared linux/resctrl.h header. This allows another architecture to supply an implementation of these.
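The filesystem side then drives both helpers through one IPI pattern, roughly as follows; a sketch mirroring mondata_config_read() in the hunks below:

struct resctrl_mon_config_info mon_info = {
	.r	= r,
	.d	= d,
	.evtid	= QOS_L3_MBM_TOTAL_EVENT_ID,
};

/* run the arch helper on some CPU that belongs to the domain */
smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_read,
		      &mon_info, 1);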
As struct mon_config_info is now exposed globally, give it a 'resctrl_' prefix. MPAM systems need access to the domain to do this work, add the resource and domain to struct resctrl_mon_config_info. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 34 +++++++++++++------------- include/linux/resctrl.h | 9 +++++++ 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2d6f4e0d3656..e76018687117 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1580,11 +1580,6 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, return ret; } -struct mon_config_info { - u32 evtid; - u32 mon_config; -}; - #define INVALID_CONFIG_INDEX UINT_MAX /** @@ -1609,9 +1604,9 @@ static inline unsigned int mon_event_config_index_get(u32 evtid) } } -static void mon_event_config_read(void *info) +void resctrl_arch_mon_event_config_read(void *info) { - struct mon_config_info *mon_info = info; + struct resctrl_mon_config_info *mon_info = info; unsigned int index; u64 msrval; @@ -1626,14 +1621,15 @@ static void mon_event_config_read(void *info) mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; } -static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info) +static void mondata_config_read(struct resctrl_mon_config_info *mon_info) { - smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1); + smp_call_function_any(&mon_info->d->cpu_mask, + resctrl_arch_mon_event_config_read, mon_info, 1); } static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) { - struct mon_config_info mon_info = {0}; + struct resctrl_mon_config_info mon_info = {0}; struct rdt_domain *dom; bool sep = false; @@ -1644,9 +1640,11 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid if (sep) seq_puts(s, ";"); - memset(&mon_info, 0, sizeof(struct mon_config_info)); + memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info)); + mon_info.r = r; + mon_info.d = dom; mon_info.evtid = evtid; - mondata_config_read(dom, &mon_info); + mondata_config_read(&mon_info); seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); sep = true; @@ -1679,9 +1677,9 @@ static int mbm_local_bytes_config_show(struct kernfs_open_file *of, return 0; } -static void mon_event_config_write(void *info) +void resctrl_arch_mon_event_config_write(void *info) { - struct mon_config_info *mon_info = info; + struct resctrl_mon_config_info *mon_info = info; unsigned int index; index = mon_event_config_index_get(mon_info->evtid); @@ -1695,14 +1693,16 @@ static void mon_event_config_write(void *info) static void mbm_config_write_domain(struct rdt_resource *r, struct rdt_domain *d, u32 evtid, u32 val) { - struct mon_config_info mon_info = {0}; + struct resctrl_mon_config_info mon_info = {0}; /* * Read the current config value first. If both are the same then * no need to write it again. */ + mon_info.r = r; + mon_info.d = d; mon_info.evtid = evtid; - mondata_config_read(d, &mon_info); + mondata_config_read(&mon_info); if (mon_info.mon_config == val) return; @@ -1714,7 +1714,7 @@ static void mbm_config_write_domain(struct rdt_resource *r, * are scoped at the domain level. 
Writing any of these MSRs * on one CPU is observed by all the CPUs in the domain. */ - smp_call_function_any(&d->cpu_mask, mon_event_config_write, + smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, &mon_info, 1); /* diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index bfc63e8219e5..975b80102fbe 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -192,6 +192,13 @@ struct resctrl_cpu_sync { u32 rmid; }; +struct resctrl_mon_config_info { + struct rdt_resource *r; + struct rdt_domain *d; + u32 evtid; + u32 mon_config; +}; + /* * Update and re-load this CPUs defaults. Called via IPI, takes a pointer to * struct resctrl_cpu_sync, or NULL. @@ -205,6 +212,8 @@ struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); +void resctrl_arch_mon_event_config_write(void *info); +void resctrl_arch_mon_event_config_read(void *info); /* * Update the ctrl_val and apply this config right now. -- Gitee From 10f8d62b4d62cab5d38e3cbaa09d9706192fa9da Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 12 Feb 2024 18:36:57 +0000 Subject: [PATCH 579/953] x86/resctrl: Move mbm_cfg_mask to struct rdt_resource ANBZ: #8626 commit d4c4b30510913f35145f2bf03b3fc888dae9dc2e morse-linux. The mbm_cfg_mask field lists the bits that user-space can set when configuring an event. This value is output via the last_cmd_status file. Once the filesystem parts of resctrl are moved to live in /fs/, the struct rdt_hw_resource is inaccessible to the filesystem code. Because this value is output to user-space, it has to be accessible to the filesystem code. Move it to struct rdt_resource. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/internal.h | 3 --- arch/x86/kernel/cpu/resctrl/monitor.c | 2 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 5 ++--- include/linux/resctrl.h | 3 +++ 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 46370eafb00f..238b81d3f64a 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -371,8 +371,6 @@ struct msr_param { * @msr_update: Function pointer to update QOS MSRs * @mon_scale: cqm counter * mon_scale = occupancy in bytes * @mbm_width: Monitor width, to detect and correct for overflow. - * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth - * Monitoring Event Configuration (BMEC) is supported. 
* @cdp_enabled: CDP state of this resource * * Members of this structure are either private to the architecture @@ -387,7 +385,6 @@ struct rdt_hw_resource { struct rdt_resource *r); unsigned int mon_scale; unsigned int mbm_width; - unsigned int mbm_cfg_mask; bool cdp_enabled; }; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index ccb85c61b43b..287fb0a5f060 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1068,7 +1068,7 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) /* Detect list of bandwidth sources that can be tracked */ cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx); - hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; + r->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; } r->mon_capable = true; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e76018687117..3d3a839eba6b 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1731,7 +1731,6 @@ static void mbm_config_write_domain(struct rdt_resource *r, static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) { - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); char *dom_str = NULL, *id_str; unsigned long dom_id, val; struct rdt_domain *d; @@ -1758,9 +1757,9 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) } /* Value from user cannot be more than the supported set of events */ - if ((val & hw_res->mbm_cfg_mask) != val) { + if ((val & r->mbm_cfg_mask) != val) { rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", - hw_res->mbm_cfg_mask); + r->mbm_cfg_mask); return -EINVAL; } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 975b80102fbe..8a7367d1ce45 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -140,6 +140,8 @@ struct resctrl_membw { * @format_str: Per resource format string to show domain value * @evt_list: List of monitoring events * @fflags: flags to choose base and info files + * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth + * Monitoring Event Configuration (BMEC) is supported. * @cdp_capable: Is the CDP feature available on this resource */ struct rdt_resource { @@ -157,6 +159,7 @@ struct rdt_resource { const char *format_str; struct list_head evt_list; unsigned long fflags; + unsigned int mbm_cfg_mask; bool cdp_capable; }; -- Gitee From 9a7e802a926fae6e236efb1bb11d3fc878895a6d Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 9 Mar 2023 16:47:38 +0000 Subject: [PATCH 580/953] x86/resctrl: Allow resctrl_arch_mon_event_config_write() to return an error ANBZ: #8626 commit e51dc33bababe3f5ebfeb622076ac89b6d89332e morse-linux. resctrl_arch_mon_event_config_write() writes a bitmap of events provided by user-space into the configuration register for the monitors. This assumes that all architectures support all the features each bit corresponds to. MPAM can filter monitors based on read, write, or both, but there are many more options in the existing bitmap. To allow this interface to work for machines with MPAM, allow the architecture helper to return an error if an incompatible bitmap is set. When valid values are provided, there is no change in behaviour. If an invalid value is provided, currently it is silently ignored, but last_cmd_status is updated. After this change, the parser will stop at the first invalid value and return an error to user-space. 
This matches the way changes to the schemata file are made. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 20 ++++++++++++++++---- include/linux/resctrl.h | 1 + 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 3d3a839eba6b..56a0bfdc11f7 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1685,13 +1685,16 @@ void resctrl_arch_mon_event_config_write(void *info) index = mon_event_config_index_get(mon_info->evtid); if (index == INVALID_CONFIG_INDEX) { pr_warn_once("Invalid event id %d\n", mon_info->evtid); + mon_info->err = -EINVAL; return; } wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); + + mon_info->err = 0; } -static void mbm_config_write_domain(struct rdt_resource *r, - struct rdt_domain *d, u32 evtid, u32 val) +static int mbm_config_write_domain(struct rdt_resource *r, + struct rdt_domain *d, u32 evtid, u32 val) { struct resctrl_mon_config_info mon_info = {0}; @@ -1704,7 +1707,7 @@ static void mbm_config_write_domain(struct rdt_resource *r, mon_info.evtid = evtid; mondata_config_read(&mon_info); if (mon_info.mon_config == val) - return; + return 0; mon_info.mon_config = val; @@ -1716,6 +1719,10 @@ static void mbm_config_write_domain(struct rdt_resource *r, */ smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, &mon_info, 1); + if (mon_info.err) { + rdt_last_cmd_puts("Invalid event configuration\n"); + return mon_info.err; + } /* * When an Event Configuration is changed, the bandwidth counters @@ -1727,6 +1734,8 @@ static void mbm_config_write_domain(struct rdt_resource *r, * mbm_local and mbm_total counts for all the RMIDs. */ resctrl_arch_reset_rmid_all(r, d); + + return 0; } static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) @@ -1734,6 +1743,7 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) char *dom_str = NULL, *id_str; unsigned long dom_id, val; struct rdt_domain *d; + int err; /* Walking r->domains, ensure it can't race with cpuhp */ lockdep_assert_cpus_held(); @@ -1765,7 +1775,9 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) list_for_each_entry(d, &r->domains, list) { if (d->id == dom_id) { - mbm_config_write_domain(r, d, evtid, val); + err = mbm_config_write_domain(r, d, evtid, val); + if (err) + return err; goto next; } } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8a7367d1ce45..6705d7960dfd 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -200,6 +200,7 @@ struct resctrl_mon_config_info { struct rdt_domain *d; u32 evtid; u32 mon_config; + int err; }; /* -- Gitee From 6c15877cc5c10e5248c5f58d12193fe4354ed40e Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2024 15:42:33 +0000 Subject: [PATCH 581/953] x86/resctrl: Add resctrl_arch_ prefix to pseudo lock functions ANBZ: #8626 commit 9166173fea78964e579f1d2beafaed1498fba820 morse-linux. resctrl's pseudo lock has some copy-to-cache and measurement functions that are micro-architecture specific. pseudo_lock_fn() is not at all portable. Label these 'resctrl_arch_' so they stay under /arch/x86. 
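After the rename, filesystem code launches the arch-provided routines without knowing anything about their implementation, e.g. (condensed from the pseudo_lock.c hunks below; error handling abridged):

struct task_struct *thread;

thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, rdtgrp,
				cpu_to_node(plr->cpu),
				"pseudo_lock/%u", plr->cpu);
if (IS_ERR(thread))
	return PTR_ERR(thread);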
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 5 ++++ arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 36 ++++++++++++----------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 50407e83d0ca..a88af68f9fe2 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -211,6 +211,11 @@ static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx) { }; +u64 resctrl_arch_get_prefetch_disable_bits(void); +int resctrl_arch_pseudo_lock_fn(void *_rdtgrp); +int resctrl_arch_measure_cycles_lat_fn(void *_plr); +int resctrl_arch_measure_l2_residency(void *_plr); +int resctrl_arch_measure_l3_residency(void *_plr); void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index f2315a50ea4f..856beb6f668b 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -62,7 +62,8 @@ static const struct class pseudo_lock_class = { }; /** - * get_prefetch_disable_bits - prefetch disable bits of supported platforms + * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported + * platforms * @void: It takes no parameters. * * Capture the list of platforms that have been validated to support @@ -76,13 +77,13 @@ static const struct class pseudo_lock_class = { * in the SDM. * * When adding a platform here also add support for its cache events to - * measure_cycles_perf_fn() + * resctrl_arch_measure_l*_residency() * * Return: * If platform is supported, the bits to disable hardware prefetchers, 0 * if platform is not supported. */ -static u64 get_prefetch_disable_bits(void) +u64 resctrl_arch_get_prefetch_disable_bits(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) @@ -410,7 +411,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) } /** - * pseudo_lock_fn - Load kernel memory into cache + * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache * @_rdtgrp: resource group to which pseudo-lock region belongs * * This is the core pseudo-locking flow. @@ -428,7 +429,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) * * Return: 0. Waiter on waitqueue will be woken on completion. */ -static int pseudo_lock_fn(void *_rdtgrp) +int resctrl_arch_pseudo_lock_fn(void *_rdtgrp) { struct rdtgroup *rdtgrp = _rdtgrp; struct pseudo_lock_region *plr = rdtgrp->plr; @@ -714,7 +715,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * Not knowing the bits to disable prefetching implies that this * platform does not support Cache Pseudo-Locking. 
*/ - prefetch_disable_bits = get_prefetch_disable_bits(); + prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits(); if (prefetch_disable_bits == 0) { rdt_last_cmd_puts("Pseudo-locking not supported\n"); return -EINVAL; @@ -879,7 +880,8 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) } /** - * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory + * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read + * pseudo-locked memory * @_plr: pseudo-lock region to measure * * There is no deterministic way to test if a memory region is cached. One * @@ -892,7 +894,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) * * Return: 0. Waiter on waitqueue will be woken on completion. */ -static int measure_cycles_lat_fn(void *_plr) +int resctrl_arch_measure_cycles_lat_fn(void *_plr) { struct pseudo_lock_region *plr = _plr; u32 saved_low, saved_high; @@ -1076,7 +1078,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, return 0; } -static int measure_l2_residency(void *_plr) +int resctrl_arch_measure_l2_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1114,7 +1116,7 @@ static int measure_l2_residency(void *_plr) return 0; } -static int measure_l3_residency(void *_plr) +int resctrl_arch_measure_l3_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1212,18 +1214,18 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) plr->cpu = cpu; if (sel == 1) - thread = kthread_create_on_node(measure_cycles_lat_fn, plr, - cpu_to_node(cpu), + thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, + plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else if (sel == 2) - thread = kthread_create_on_node(measure_l2_residency, plr, - cpu_to_node(cpu), + thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, + plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else if (sel == 3) - thread = kthread_create_on_node(measure_l3_residency, plr, - cpu_to_node(cpu), + thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, + plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else @@ -1322,7 +1324,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) plr->thread_done = 0; - thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp, + thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, rdtgrp, cpu_to_node(plr->cpu), "pseudo_lock/%u", plr->cpu); if (IS_ERR(thread)) { -- Gitee From 3b4ff9a47f09440ad61eae7e886474d3f1207ecc Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2024 15:48:31 +0000 Subject: [PATCH 582/953] x86/resctrl: Allow an architecture to disable pseudo lock ANBZ: #8626 commit 508f630578e16a0318d333863b4d37fd5b65e3c5 morse-linux. Pseudo-lock relies on knowledge of the micro-architecture to disable prefetchers etc. On arm64 these controls are typically secure only, meaning Linux can't access them. Arm's cache-lockdown feature works in a very different way. Resctrl's pseudo-lock isn't going to be used on arm64 platforms. Add a Kconfig symbol that can be selected by the architecture. This enables or disables building of the pseudo_lock.c file, and replaces the functions with stubs. An additional IS_ENABLED() check is needed in rdtgroup_mode_write() so that attempting to enable pseudo-lock reports an "Unknown or unsupported mode" to user-space.
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/Kconfig | 7 ++++ arch/x86/kernel/cpu/resctrl/Makefile | 5 +-- arch/x86/kernel/cpu/resctrl/internal.h | 48 +++++++++++++++++++++----- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 3 +- 4 files changed, 52 insertions(+), 11 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a649d7c04fb8..1973f40663e7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -481,6 +481,7 @@ config X86_CPU_RESCTRL depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) select KERNFS select PROC_CPU_RESCTRL if PROC_FS + select RESCTRL_FS_PSEUDO_LOCK help Enable x86 CPU resource control support. @@ -497,6 +498,12 @@ config X86_CPU_RESCTRL Say N if unsure. +config RESCTRL_FS_PSEUDO_LOCK + bool + help + Software mechanism to pin data in a cache portion using + micro-architecture specific knowledge. + if X86_32 config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 4a06c37b9cf1..0c13b0befd8a 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o -obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o +obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o +obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += pseudo_lock.o CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 238b81d3f64a..d6db15839dc4 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -489,14 +489,6 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, unsigned long cbm); enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); int rdtgroup_tasks_assigned(struct rdtgroup *r); -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); -int rdt_pseudo_lock_init(void); -void rdt_pseudo_lock_release(void); -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); @@ -529,4 +521,44 @@ void rdt_staged_configs_clear(void); bool closid_allocated(unsigned int closid); int resctrl_find_cleanest_closid(void); +#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); +int rdt_pseudo_lock_init(void); +void rdt_pseudo_lock_release(void); +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); +#else +static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) 
+{ + return -EOPNOTSUPP; +} + +static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + return false; +} + +static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + return false; +} + +static inline int rdt_pseudo_lock_init(void) { return 0; } +static inline void rdt_pseudo_lock_release(void) { } +static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { } +#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ + #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 56a0bfdc11f7..9275d6f8a74e 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1452,7 +1452,8 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, goto out; } rdtgrp->mode = RDT_MODE_EXCLUSIVE; - } else if (!strcmp(buf, "pseudo-locksetup")) { + } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && + !strcmp(buf, "pseudo-locksetup")) { ret = rdtgroup_locksetup_enter(rdtgrp); if (ret) goto out; -- Gitee From 9b329c86e4e5b149bc72e3c005545c130e99c518 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 11:35:14 +0000 Subject: [PATCH 583/953] x86/resctrl: Make prefetch_disable_bits belong to the arch code ANBZ: #8626 commit 53e04c1d92d1c2bd575a8989c82c909af4a1056e morse-linux. prefetch_disable_bits is set by rdtgroup_locksetup_enter() from a value provided by the architecture, but is largely read by other architecture helpers. Instead of exporting this value, make resctrl_arch_get_prefetch_disable_bits() set it so that the other arch-code helpers can use the cached value. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 856beb6f668b..5a66e3b2c2ea 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -85,6 +85,8 @@ static const struct class pseudo_lock_class = { */ u64 resctrl_arch_get_prefetch_disable_bits(void) { + prefetch_disable_bits = 0; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) return 0; @@ -100,7 +102,8 @@ u64 resctrl_arch_get_prefetch_disable_bits(void) * 3 DCU IP Prefetcher Disable (R/W) * 63:4 Reserved */ - return 0xF; + prefetch_disable_bits = 0xF; + break; case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_PLUS: /* @@ -111,10 +114,11 @@ u64 resctrl_arch_get_prefetch_disable_bits(void) * 2 DCU Hardware Prefetcher Disable (R/W) * 63:3 Reserved */ - return 0x5; + prefetch_disable_bits = 0x5; + break; } - return 0; + return prefetch_disable_bits; } /** @@ -715,8 +719,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * Not knowing the bits to disable prefetching implies that this * platform does not support Cache Pseudo-Locking.
*/ - prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits(); - if (prefetch_disable_bits == 0) { + if (resctrl_arch_get_prefetch_disable_bits() == 0) { rdt_last_cmd_puts("Pseudo-locking not supported\n"); return -EINVAL; } -- Gitee From 3b2cab085b5fa8e7c2a643f44a2ffc72f00dac40 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 17:17:15 +0000 Subject: [PATCH 584/953] x86/resctrl: Make resctrl_arch_pseudo_lock_fn() take a plr ANBZ: #8626 commit 02cf773e40a6f631bf3f37992d7ffb6f86ca2f4b morse-linux. resctrl_arch_pseudo_lock_fn() has architecture specific behaviour, and takes a struct rdtgroup as an argument. After the filesystem code moves to /fs/, the definition of struct rdtgroup will not be available to the architecture code. The only reason resctrl_arch_pseudo_lock_fn() wants the rdtgroup is for the CLOSID. Embed that in the pseudo_lock_region as a hw_closid, and move the definition of struct pseudo_lock_region to resctrl.h. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 37 --------------------- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 13 ++++---- include/linux/resctrl.h | 39 +++++++++++++++++++++++ 4 files changed, 47 insertions(+), 44 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index a88af68f9fe2..9940398e367e 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -212,7 +212,7 @@ static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx) { }; u64 resctrl_arch_get_prefetch_disable_bits(void); -int resctrl_arch_pseudo_lock_fn(void *_rdtgrp); +int resctrl_arch_pseudo_lock_fn(void *_plr); int resctrl_arch_measure_cycles_lat_fn(void *_plr); int resctrl_arch_measure_l2_residency(void *_plr); int resctrl_arch_measure_l3_residency(void *_plr); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index d6db15839dc4..be4e8f31b127 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -182,43 +182,6 @@ struct mongroup { u32 rmid; }; -/** - * struct pseudo_lock_region - pseudo-lock region information - * @s: Resctrl schema for the resource to which this - * pseudo-locked region belongs - * @d: RDT domain to which this pseudo-locked region - * belongs - * @cbm: bitmask of the pseudo-locked region - * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread - * completion - * @thread_done: variable used by waitqueue to test if pseudo-locking - * thread completed - * @cpu: core associated with the cache on which the setup code - * will be run - * @line_size: size of the cache lines - * @size: size of pseudo-locked region in bytes - * @kmem: the kernel memory associated with pseudo-locked region - * @minor: minor number of character device associated with this - * region - * @debugfs_dir: pointer to this region's directory in the debugfs - * filesystem - * @pm_reqs: Power management QoS requests related to this region - */ -struct pseudo_lock_region { - struct resctrl_schema *s; - struct rdt_domain *d; - u32 cbm; - wait_queue_head_t lock_thread_wq; - int thread_done; - int cpu; - unsigned int line_size; - unsigned int size; - void *kmem; - unsigned int minor; - struct dentry *debugfs_dir; - struct 
list_head pm_reqs; -}; - /** * struct rdtgroup - store rdtgroup's data in resctrl file system. * @kn: kernfs node diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 5a66e3b2c2ea..ba51ab1f70e6 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -416,7 +416,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) /** * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache - * @_rdtgrp: resource group to which pseudo-lock region belongs + * @_plr: the pseudo-lock region descriptor * * This is the core pseudo-locking flow. * @@ -433,10 +433,9 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) * * Return: 0. Waiter on waitqueue will be woken on completion. */ -int resctrl_arch_pseudo_lock_fn(void *_rdtgrp) +int resctrl_arch_pseudo_lock_fn(void *_plr) { - struct rdtgroup *rdtgrp = _rdtgrp; - struct pseudo_lock_region *plr = rdtgrp->plr; + struct pseudo_lock_region *plr = _plr; u32 rmid_p, closid_p; unsigned long i; u64 saved_msr; @@ -496,7 +495,8 @@ int resctrl_arch_pseudo_lock_fn(void *_rdtgrp) * pseudo-locked followed by reading of kernel memory to load it * into the cache. */ - __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid); + __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid); + /* * Cache was flushed earlier. Now access kernel memory to read it * into cache region associated with just activated plr->closid. @@ -1327,7 +1327,8 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) plr->thread_done = 0; - thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, rdtgrp, + plr->closid = rdtgrp->closid; + thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr, cpu_to_node(plr->cpu), "pseudo_lock/%u", plr->cpu); if (IS_ERR(thread)) { diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 6705d7960dfd..3de5bc63ace0 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -25,6 +25,45 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX +/** + * struct pseudo_lock_region - pseudo-lock region information + * @s: Resctrl schema for the resource to which this + * pseudo-locked region belongs + * @closid: The closid that this pseudo-locked region uses + * @d: RDT domain to which this pseudo-locked region + * belongs + * @cbm: bitmask of the pseudo-locked region + * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread + * completion + * @thread_done: variable used by waitqueue to test if pseudo-locking + * thread completed + * @cpu: core associated with the cache on which the setup code + * will be run + * @line_size: size of the cache lines + * @size: size of pseudo-locked region in bytes + * @kmem: the kernel memory associated with pseudo-locked region + * @minor: minor number of character device associated with this + * region + * @debugfs_dir: pointer to this region's directory in the debugfs + * filesystem + * @pm_reqs: Power management QoS requests related to this region + */ +struct pseudo_lock_region { + struct resctrl_schema *s; + u32 closid; + struct rdt_domain *d; + u32 cbm; + wait_queue_head_t lock_thread_wq; + int thread_done; + int cpu; + unsigned int line_size; + unsigned int size; + void *kmem; + unsigned int minor; + struct dentry *debugfs_dir; + struct list_head pm_reqs; +}; + /** * struct resctrl_staged_config - parsed configuration to be applied * @new_ctrl: new ctrl value to be loaded -- Gitee From 
cb31b923fa426a7d58aa68248a3da4c202cd7e4a Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Dec 2021 17:22:12 +0000 Subject: [PATCH 585/953] x86/resctrl: Move thread_throttle_mode_init() to be managed by resctrl ANBZ: #8626 commit 9dcfdbe39fb9ec2d4506330aed65686e9a62dc9d morse-linux. thread_throttle_mode_init() is called from the architecture specific code to make the 'thread_throttle_mode' file visible. The architecture specific code has already set the membw.throttle_mode in the rdt_resource. This doesn't need to be specific to the architecture; the throttle_mode can be used by resctrl to determine if the 'thread_throttle_mode' file should be visible. Call thread_throttle_mode_init() from resctrl_init() and check the membw.throttle_mode on the MBA resource. This avoids publishing an extra function between the architecture and filesystem code. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 1 - arch/x86/kernel/cpu/resctrl/internal.h | 1 - arch/x86/kernel/cpu/resctrl/rdtgroup.c | 9 ++++++++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 3226c994afec..2fbd35766938 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -227,7 +227,6 @@ static bool __get_mem_config_intel(struct rdt_resource *r) r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; else r->membw.throttle_mode = THREAD_THROTTLE_MAX; - thread_throttle_mode_init(); r->alloc_capable = true; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index be4e8f31b127..e849d4407769 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -478,7 +478,6 @@ void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); -void __init thread_throttle_mode_init(void); void mbm_config_rftype_init(const char *config); void rdt_staged_configs_clear(void); bool closid_allocated(unsigned int closid); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 9275d6f8a74e..702a94fad6db 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2069,10 +2069,15 @@ static struct rftype *rdtgroup_get_rftype_by_name(const char *name) return NULL; } -void __init thread_throttle_mode_init(void) +static void __init thread_throttle_mode_init(void) { + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); struct rftype *rft; + if (!r->alloc_capable || + r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) + return; + rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); if (!rft) return; @@ -4194,6 +4199,8 @@ int __init resctrl_init(void) rdtgroup_setup_default(); + thread_throttle_mode_init(); + ret = resctrl_mon_resource_init(); if (ret) return ret; -- Gitee From 93b1b23caef312b84266558046cf600cbecd3479 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 14 Jul 2021 14:15:49 +0100 Subject: [PATCH 586/953] x86/resctrl: Move get_config_index() to a header ANBZ: #8626 commit 24e3994691f6f0c2fcc7d4b167fea30c3d8c4d39 morse-linux.
get_config_index() is used by the architecture specific code to map a CLOSID+type pair to an index in the configuration arrays. MPAM needs to do this too to preserve the ABI to user-space; there is no reason to do it differently. Move the helper to a header file. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 19 +++---------------- include/linux/resctrl.h | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 3603ade95f1d..b4627ae19291 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -277,19 +277,6 @@ static int parse_line(char *line, struct resctrl_schema *s, return -EINVAL; } -static u32 get_config_index(u32 closid, enum resctrl_conf_type type) -{ - switch (type) { - default: - case CDP_NONE: - return closid; - case CDP_CODE: - return closid * 2 + 1; - case CDP_DATA: - return closid * 2; - } -} - static bool apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, cpumask_var_t cpu_mask) @@ -311,7 +298,7 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, t); + u32 idx = resctrl_get_config_index(closid, t); struct msr_param msr_param; if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) @@ -351,7 +338,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) if (!cfg->have_new_ctrl) continue; - idx = get_config_index(closid, t); + idx = resctrl_get_config_index(closid, t); if (!apply_config(hw_dom, cfg, idx, cpu_mask)) continue; @@ -476,7 +463,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, type); + u32 idx = resctrl_get_config_index(closid, type); return hw_dom->ctrl_val[idx]; } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 3de5bc63ace0..73c111963433 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -258,6 +258,21 @@ bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); void resctrl_arch_mon_event_config_write(void *info); void resctrl_arch_mon_event_config_read(void *info); +/* For use by arch code to remap resctrl's smaller CDP CLOSID range */ +static inline u32 resctrl_get_config_index(u32 closid, + enum resctrl_conf_type type) +{ + switch (type) { + default: + case CDP_NONE: + return closid; + case CDP_CODE: + return (closid * 2) + 1; + case CDP_DATA: + return (closid * 2); + } +} + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. -- Gitee From e26e04fb003a0032c9a07905e8ec549f4c7bd200 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 11:42:19 +0000 Subject: [PATCH 587/953] x86/resctrl: Claim get_domain_from_cpu() for resctrl ANBZ: #8626 commit 8f3c04a0853a2baae0369d079012906bc3ff68ff morse-linux. get_domain_from_cpu() is a handy helper that both the arch code and resctrl need to use.
Rename it resctrl_get_domain_from_cpu() so it gets moved out to /fs, and exported back to the arch code. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 15 +-------------- arch/x86/kernel/cpu/resctrl/internal.h | 1 - arch/x86/kernel/cpu/resctrl/monitor.c | 2 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 +- include/linux/resctrl.h | 19 +++++++++++++++++++ 5 files changed, 22 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 2fbd35766938..f0d87751c7cc 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -355,19 +355,6 @@ cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } -struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) -{ - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - /* Find the domain that contains this CPU */ - if (cpumask_test_cpu(cpu, &d->cpu_mask)) - return d; - } - - return NULL; -} - u32 resctrl_arch_get_num_closid(struct rdt_resource *r) { return resctrl_to_arch_res(r)->num_closid; @@ -381,7 +368,7 @@ void rdt_ctrl_update(void *arg) int cpu = smp_processor_id(); struct rdt_domain *d; - d = get_domain_from_cpu(cpu, r); + d = resctrl_get_domain_from_cpu(cpu, r); if (d) { hw_res->msr_update(d, m, r); return; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index e849d4407769..3a3962736061 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -452,7 +452,6 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, unsigned long cbm); enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); int rdtgroup_tasks_assigned(struct rdtgroup *r); -struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); int alloc_rmid(u32 closid); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 287fb0a5f060..8b316d9acc3b 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -676,7 +676,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) idx = resctrl_arch_rmid_idx_encode(closid, rmid); pmbm_data = &dom_mbm->mbm_local[idx]; - dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba); + dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba); if (!dom_mba) { pr_warn_once("Failure to get domain for MBA update\n"); return; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 702a94fad6db..085fb9c2333a 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4165,7 +4165,7 @@ void resctrl_offline_cpu(unsigned int cpu) if (!l3->mon_capable) goto out_unlock; - d = get_domain_from_cpu(cpu, l3); + d = resctrl_get_domain_from_cpu(cpu, l3); if (d) { if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { cancel_delayed_work(&d->mbm_over); diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 73c111963433..84420253dc05 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -2,6 +2,7 @@ #ifndef _RESCTRL_H #define _RESCTRL_H 
+#include <linux/cpu.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/pid.h> @@ -273,6 +274,24 @@ static inline u32 resctrl_get_config_index(u32 closid, } } +/* + * Caller must hold the cpuhp read lock to prevent the struct rdt_domain being + * freed. + */ +static inline struct rdt_domain * +resctrl_get_domain_from_cpu(int cpu, struct rdt_resource *r) +{ + struct rdt_domain *d; + + list_for_each_entry_rcu(d, &r->domains, list) { + /* Find the domain that contains this CPU */ + if (cpumask_test_cpu(cpu, &d->cpu_mask)) + return d; + } + + return NULL; +} + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. -- Gitee From 8d087c06fa5ad382b7af60aee7c495f30bb9019e Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 16:05:56 +0100 Subject: [PATCH 588/953] x86/resctrl: Describe resctrl's bitmap size assumptions ANBZ: #8626 commit 9a9ad2443351feb996d41f24cb382dac399a249d morse-linux. resctrl operates on configuration bitmaps and a bitmap of allocated CLOSID; both are stored in a u32. MPAM supports configuration/portion bitmaps and PARTIDs larger than will fit in a u32. Add some preprocessor values that make it clear why MPAM clamps some of these values. This will make it easier to find code related to these values if this resctrl behaviour ever changes. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- include/linux/resctrl.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 84420253dc05..f463fb949677 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -26,6 +26,17 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX +/* + * Resctrl uses a u32 as a closid bitmap. The maximum closid is 32. + */ +#define RESCTRL_MAX_CLOSID 32 + +/* + * Resctrl uses u32 to hold the user-space config. The maximum bitmap size is + * 32. + */ +#define RESCTRL_MAX_CBM 32 + /** * struct pseudo_lock_region - pseudo-lock region information * @s: Resctrl schema for the resource to which this -- Gitee From d3a471b67c32bd25710de4f03617ad258813151c Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 8 Mar 2024 16:03:04 +0000 Subject: [PATCH 589/953] x86/resctrl: Rename resctrl_sched_in() to begin resctrl_arch_ ANBZ: #8626 commit bebb9ca3523eae808b16ac03718081436baaf99c morse-linux. resctrl_sched_in() loads the architecture specific CPU MSRs with the CLOSID and RMID values. This function was named before resctrl was split into architecture specific code and generic filesystem code. This function is obviously architecture specific, but does not begin with 'resctrl_arch_', making it the odd one out in the functions an architecture needs to support to enable resctrl. Rename it for consistency. This is purely cosmetic.
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 4 ++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 ++++++------ arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 9940398e367e..491342f56811 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -177,7 +177,7 @@ static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored, return READ_ONCE(tsk->rmid) == rmid; } -static inline void resctrl_sched_in(struct task_struct *tsk) +static inline void resctrl_arch_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) __resctrl_sched_in(tsk); @@ -220,7 +220,7 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else -static inline void resctrl_sched_in(struct task_struct *tsk) {} +static inline void resctrl_arch_sched_in(struct task_struct *tsk) {} static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} #endif /* CONFIG_X86_CPU_RESCTRL */ diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 085fb9c2333a..218aebd6387f 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -359,7 +359,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, } /* - * This is safe against resctrl_sched_in() called from __switch_to() + * This is safe against resctrl_arch_sched_in() called from __switch_to() * because __switch_to() is executed with interrupts disabled. A local call * from update_closid_rmid() is protected against __switch_to() because * preemption is disabled. @@ -378,7 +378,7 @@ void resctrl_arch_sync_cpu_defaults(void *info) * executing task might have its own closid selected. Just reuse * the context switch code. */ - resctrl_sched_in(current); + resctrl_arch_sched_in(current); } /* @@ -605,7 +605,7 @@ static void _update_task_closid_rmid(void *task) * Otherwise, the MSR is updated when the task is scheduled in. */ if (task == current) - resctrl_sched_in(task); + resctrl_arch_sched_in(task); } static void update_task_closid_rmid(struct task_struct *t) @@ -663,7 +663,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk, * Ensure the task's closid and rmid are written before determining if * the task is current that will decide if it will be interrupted. * This pairs with the full barrier between the rq->curr update and - * resctrl_sched_in() during context switch. + * resctrl_arch_sched_in() during context switch. */ smp_mb(); @@ -2946,8 +2946,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, /* * Order the closid/rmid stores above before the loads * in task_curr(). This pairs with the full barrier - * between the rq->curr update and resctrl_sched_in() - * during context switch. + * between the rq->curr update and + * resctrl_arch_sched_in() during context switch. */ smp_mb(); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 708c87b88cc1..619560eb9f94 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); /* Load the Intel cache allocation PQR MSR. 
*/ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 33b268747bb7..7bdbe94344a2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -659,7 +659,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } -- Gitee From 575535154d1a29e1c91ce6c1b13987c7910a2b2a Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 16:49:39 +0000 Subject: [PATCH 590/953] x86/resctrl: Drop __init/__exit on assorted symbols ANBZ: #8626 commit 63cf0f208aec2e54acfbae4314be2b4674f931bf morse-linux. Because ARM's MPAM controls are probed using MMIO, resctrl can't be initialised until enough CPUs are online to have determined the system-wide supported num_closid. Arm64 also supports 'late onlined secondaries', where only a subset of CPUs are online during boot. These two combine to mean the MPAM driver may not be able to initialise resctrl until user-space has brought 'enough' CPUs online. To allow MPAM to initialise resctrl after __init text has been freed, remove all the __init markings from resctrl. The existing __exit markings cause these functions to be removed by the linker as it has never been possible to build resctrl as a module. MPAM has an error interrupt which causes the driver to reset and disable itself. Remove the __exit markings to allow the MPAM driver to tear down resctrl when an error occurs. Signed-off-by: James Morse --- If 'late onlined secondaries' is an alien concept, I can add a worked example to the commit message: If a system has two L3 caches, but during boot only CPU-0 is online, then no CPU is able to probe the features of the second L3 cache. It's not until user-space brings other CPUs online that the MPAM driver can finally get a glimpse of all the hardware to determine what properties the system has.
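A sketch of the driver flow this permits; the mpam_* names are invented for illustration, and only resctrl_init() and resctrl_exit() are real symbols here:

	/* With __init dropped, resctrl_init() may run from a driver once
	 * late-onlined CPUs have let it probe the whole system; with
	 * __exit dropped, resctrl_exit() can run from the error path. */
	static int mpam_enable_resctrl(void)		/* hypothetical */
	{
		if (!mpam_all_caches_probed())		/* hypothetical */
			return -EPROBE_DEFER;
		return resctrl_init();
	}

	static void mpam_handle_error_irq(void)		/* hypothetical */
	{
		mpam_reset_and_disable();		/* hypothetical */
		resctrl_exit();
	}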
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 4 ++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++---- include/linux/resctrl.h | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 3a3962736061..56218193a8ba 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -457,7 +457,7 @@ void closid_free(int closid); int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -void __exit resctrl_mon_resource_exit(void); +void resctrl_mon_resource_exit(void); bool rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 8b316d9acc3b..7e6fca138cb7 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -954,7 +954,7 @@ static int dom_data_init(struct rdt_resource *r) return err; } -static void __exit dom_data_exit(struct rdt_resource *r) +static void dom_data_exit(struct rdt_resource *r) { if (!r->mon_capable) return; @@ -1076,7 +1076,7 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } -void __exit resctrl_mon_resource_exit(void) +void resctrl_mon_resource_exit(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 218aebd6387f..1425a33d201d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2069,7 +2069,7 @@ static struct rftype *rdtgroup_get_rftype_by_name(const char *name) return NULL; } -static void __init thread_throttle_mode_init(void) +static void thread_throttle_mode_init(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); struct rftype *rft; @@ -3997,7 +3997,7 @@ static void rdtgroup_destroy_root(void) rdtgroup_default.kn = NULL; } -static void __init rdtgroup_setup_default(void) +static void rdtgroup_setup_default(void) { mutex_lock(&rdtgroup_mutex); @@ -4190,7 +4190,7 @@ void resctrl_offline_cpu(unsigned int cpu) * * Return: 0 on success or -errno */ -int __init resctrl_init(void) +int resctrl_init(void) { int ret = 0; @@ -4244,7 +4244,7 @@ int __init resctrl_init(void) return ret; } -void __exit resctrl_exit(void) +void resctrl_exit(void) { debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index f463fb949677..5da55e58f229 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -393,7 +393,7 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d); extern unsigned int resctrl_rmid_realloc_threshold; extern unsigned int resctrl_rmid_realloc_limit; -int __init resctrl_init(void); -void __exit resctrl_exit(void); +int resctrl_init(void); +void resctrl_exit(void); #endif /* _RESCTRL_H */ -- Gitee From e84cf2dcc1a9725875cf6a22351a9fe436940a1c Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 17:02:33 +0000 Subject: [PATCH 591/953] fs/resctrl: Add boiler plate for external resctrl 
code ANBZ: #8626 commit 54e9a22058364365403ce63bed514925200f9336 morse-linux. Add Makefile and Kconfig for fs/resctrl. Add ARCH_HAS_CPU_RESCTRL for the common parts of the resctrl interface and make X86_CPU_RESCTRL depend on this. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- MAINTAINERS | 1 + arch/Kconfig | 8 ++++++++ arch/x86/Kconfig | 10 +++------- fs/Kconfig | 1 + fs/Makefile | 1 + fs/resctrl/Kconfig | 23 +++++++++++++++++++++++ fs/resctrl/Makefile | 3 +++ fs/resctrl/ctrlmondata.c | 0 fs/resctrl/internal.h | 0 fs/resctrl/monitor.c | 0 fs/resctrl/psuedo_lock.c | 0 fs/resctrl/rdtgroup.c | 0 include/linux/resctrl.h | 4 ++++ 13 files changed, 44 insertions(+), 7 deletions(-) create mode 100644 fs/resctrl/Kconfig create mode 100644 fs/resctrl/Makefile create mode 100644 fs/resctrl/ctrlmondata.c create mode 100644 fs/resctrl/internal.h create mode 100644 fs/resctrl/monitor.c create mode 100644 fs/resctrl/psuedo_lock.c create mode 100644 fs/resctrl/rdtgroup.c diff --git a/MAINTAINERS b/MAINTAINERS index e9339a99862e..357621e9a72b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18075,6 +18075,7 @@ S: Supported F: Documentation/arch/x86/resctrl* F: arch/x86/include/asm/resctrl.h F: arch/x86/kernel/cpu/resctrl/ +F: fs/resctrl/ F: include/linux/resctrl*.h F: tools/testing/selftests/resctrl/ diff --git a/arch/Kconfig b/arch/Kconfig index 20c2c93d2c88..f855a4cd82d4 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1325,6 +1325,14 @@ config STRICT_MODULE_RWX config ARCH_HAS_PHYS_TO_DMA bool +config ARCH_HAS_CPU_RESCTRL + bool + help + The 'resctrl' filesystem allows CPU controls of shared resources + such as caches and memory bandwidth to be configured. An architecture + selects this if it provides the arch-specific hooks for the filesystem + and needs the per-task CLOSID/RMID properties. + config HAVE_ARCH_COMPILER_H bool help diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1973f40663e7..ce947bac65dc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -479,8 +479,10 @@ config GOLDFISH config X86_CPU_RESCTRL bool "x86 CPU resource control support" depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) + depends on MISC_FILESYSTEMS select KERNFS - select PROC_CPU_RESCTRL if PROC_FS + select ARCH_HAS_CPU_RESCTRL + select RESCTRL_FS select RESCTRL_FS_PSEUDO_LOCK help Enable x86 CPU resource control support. @@ -498,12 +500,6 @@ config X86_CPU_RESCTRL Say N if unsure. -config RESCTRL_FS_PSEUDO_LOCK - bool - help - Software mechanism to pin data in a cache portion using - micro-architecture specific knowledge. 
- if X86_32 config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" diff --git a/fs/Kconfig b/fs/Kconfig index aa7e03cc1941..1e3ed753b9fe 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -325,6 +325,7 @@ source "fs/omfs/Kconfig" source "fs/hpfs/Kconfig" source "fs/qnx4/Kconfig" source "fs/qnx6/Kconfig" +source "fs/resctrl/Kconfig" source "fs/romfs/Kconfig" source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index f9541f40be4e..b62375770dee 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -129,3 +129,4 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_VBOXSF_FS) += vboxsf/ obj-$(CONFIG_ZONEFS_FS) += zonefs/ +obj-$(CONFIG_RESCTRL_FS) += resctrl/ diff --git a/fs/resctrl/Kconfig b/fs/resctrl/Kconfig new file mode 100644 index 000000000000..36a1ddbe6c21 --- /dev/null +++ b/fs/resctrl/Kconfig @@ -0,0 +1,23 @@ +config RESCTRL_FS + bool "CPU Resource Control Filesystem (resctrl)" + depends on ARCH_HAS_CPU_RESCTRL + select KERNFS + select PROC_CPU_RESCTRL if PROC_FS + help + Resctrl is a filesystem interface + to control allocation and + monitoring of system resources + used by the CPUs. + +config RESCTRL_FS_PSEUDO_LOCK + bool + help + Software mechanism to pin data in a cache portion using + micro-architecture specific knowledge. + +config RESCTRL_RMID_DEPENDS_ON_CLOSID + bool + help + Enable by the architecture when the RMID values depend on the CLOSID. + This causes the closid allocator to search for CLOSID with clean + RMID. diff --git a/fs/resctrl/Makefile b/fs/resctrl/Makefile new file mode 100644 index 000000000000..10fcfb0fdb10 --- /dev/null +++ b/fs/resctrl/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RESCTRL_FS) += rdtgroup.o ctrlmondata.o monitor.o +obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += psuedo_lock.o diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/psuedo_lock.c b/fs/resctrl/psuedo_lock.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 5da55e58f229..f786ffceeda3 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -8,6 +8,10 @@ #include <linux/list.h> #include <linux/pid.h> +#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL +#include <asm/resctrl.h> +#endif + /* CLOSID, RMID value used by the default control group */ #define RESCTRL_RESERVED_CLOSID 0 #define RESCTRL_RESERVED_RMID 0 -- Gitee From 14b26ccb20b80dec6a20d7226cd36b90c612f386 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 22 Jun 2021 17:34:16 +0100 Subject: [PATCH 592/953] x86/resctrl: Move the filesystem bits to headers visible to fs/resctrl ANBZ: #8626 commit d59bc7452c5197cb300f0dd6f3d5a895039e0e59 morse-linux. Once the filesystem parts of resctrl move to fs/resctrl, it cannot rely on definitions in x86's internal.h. Move definitions in internal.h that need to be shared between the filesystem and architecture code to header files that fs/resctrl can include. Doing this separately means the filesystem code only moves between files of the same name, instead of having these changes mixed in too.
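The resulting layering, summarised from the hunks below (the grouping notes are editorial, not from the patch):

	/*
	 * include/linux/resctrl_types.h: flat constants either side can
	 *     use, e.g. the RFTYPE_ file-type flags and the MBM and CQM
	 *     interval defines.
	 * include/linux/resctrl.h: declarations the filesystem code needs,
	 *     e.g. resctrl_rmid_realloc_threshold and
	 *     resctrl_rmid_realloc_limit.
	 * arch/x86/include/asm/resctrl.h: prototypes for arch hooks such as
	 *     resctrl_arch_get_cdp_enabled() and
	 *     resctrl_arch_set_cdp_enabled().
	 */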
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 3 +++ arch/x86/kernel/cpu/resctrl/core.c | 5 ++++ arch/x86/kernel/cpu/resctrl/internal.h | 36 -------------------------- include/linux/resctrl.h | 3 +++ include/linux/resctrl_types.h | 30 +++++++++++++++++++++ 5 files changed, 41 insertions(+), 36 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 491342f56811..746431c66fc4 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -218,6 +218,9 @@ int resctrl_arch_measure_l2_residency(void *_plr); int resctrl_arch_measure_l3_residency(void *_plr); void resctrl_cpu_detect(struct cpuinfo_x86 *c); +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); + #else static inline void resctrl_arch_sched_in(struct task_struct *tsk) {} diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index f0d87751c7cc..f7c424f3381f 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -306,6 +306,11 @@ static void rdt_get_cdp_l2_config(void) rdt_get_cdp_config(RDT_RESOURCE_L2); } +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) +{ + return rdt_resources_all[l].cdp_enabled; +} + static void mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 56218193a8ba..0f7e3f10941b 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -15,12 +15,6 @@ #define L2_QOS_CDP_ENABLE 0x01ULL -#define CQM_LIMBOCHECK_INTERVAL 1000 - -#define MBM_CNTR_WIDTH_BASE 24 -#define MBM_OVERFLOW_INTERVAL 1000 -#define MAX_MBA_BW 100u -#define MBA_IS_LINEAR 0x4 #define MBM_CNTR_WIDTH_OFFSET_AMD 20 #define RMID_VAL_ERROR BIT_ULL(63) @@ -210,29 +204,6 @@ struct rdtgroup { struct pseudo_lock_region *plr; }; -/* rdtgroup.flags */ -#define RDT_DELETED 1 - -/* rftype.flags */ -#define RFTYPE_FLAGS_CPUS_LIST 1 - -/* - * Define the file type flags for base and info directories. - */ -#define RFTYPE_INFO BIT(0) -#define RFTYPE_BASE BIT(1) -#define RFTYPE_CTRL BIT(4) -#define RFTYPE_MON BIT(5) -#define RFTYPE_TOP BIT(6) -#define RFTYPE_RES_CACHE BIT(8) -#define RFTYPE_RES_MB BIT(9) -#define RFTYPE_DEBUG BIT(10) -#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) -#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) -#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) -#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) -#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) - /* List of all resource groups */ extern struct list_head rdt_all_groups; @@ -370,13 +341,6 @@ static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) return &hw_res->r_resctrl; } -static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) -{ - return rdt_resources_all[l].cdp_enabled; -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); - /* * To return the common struct rdt_resource, which is contained in struct * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource. 
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index f786ffceeda3..00cc0457af50 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -41,6 +41,9 @@ int proc_resctrl_show(struct seq_file *m, */ #define RESCTRL_MAX_CBM 32 +extern unsigned int resctrl_rmid_realloc_limit; +extern unsigned int resctrl_rmid_realloc_threshold; + /** * struct pseudo_lock_region - pseudo-lock region information * @s: Resctrl schema for the resource to which this diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h index 4788bd95dac6..fe0b10b589c0 100644 --- a/include/linux/resctrl_types.h +++ b/include/linux/resctrl_types.h @@ -7,6 +7,36 @@ #ifndef __LINUX_RESCTRL_TYPES_H #define __LINUX_RESCTRL_TYPES_H +#define CQM_LIMBOCHECK_INTERVAL 1000 + +#define MBM_CNTR_WIDTH_BASE 24 +#define MBM_OVERFLOW_INTERVAL 1000 +#define MAX_MBA_BW 100u +#define MBA_IS_LINEAR 0x4 + +/* rdtgroup.flags */ +#define RDT_DELETED 1 + +/* rftype.flags */ +#define RFTYPE_FLAGS_CPUS_LIST 1 + +/* + * Define the file type flags for base and info directories. + */ +#define RFTYPE_INFO BIT(0) +#define RFTYPE_BASE BIT(1) +#define RFTYPE_CTRL BIT(4) +#define RFTYPE_MON BIT(5) +#define RFTYPE_TOP BIT(6) +#define RFTYPE_RES_CACHE BIT(8) +#define RFTYPE_RES_MB BIT(9) +#define RFTYPE_DEBUG BIT(10) +#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) +#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) +#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) +#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) + /* Reads to Local DRAM Memory */ #define READS_TO_LOCAL_MEM BIT(0) -- Gitee From e6b012c399c585d7cd14beb335ed8567d957d036 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 22 Jan 2024 13:54:21 +0000 Subject: [PATCH 593/953] x86/resctrl: Move the resctrl filesystem code to /fs/resctrl ANBZ: #8626 commit f6d5f1a23554faed3bf9edb6103ec7e6d798674c morse-linux. resctrl is Linux's de facto interface for managing cache and bandwidth policies for groups of tasks. To allow other architectures to make use of this pseudo filesystem, move it to live in /fs/resctrl instead of /arch/x86. This move leaves behind the parts of resctrl that form the architecture interface for x86. Signed-off-by: James Morse --- Discussion needed on how/when to merge this, as it would conflict with all outstanding series. It's probably worth deferring to some opportune time, but is included here for illustration.
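The shape of the split, read off the diffstat that follows (annotations are editorial):

	/*
	 * Stays under arch/x86/kernel/cpu/resctrl/: MSR programming, CPUID
	 * probing and the resctrl_arch_ hook implementations, in a much
	 * smaller core.c, ctrlmondata.c, monitor.c, pseudo_lock.c and
	 * rdtgroup.c.
	 *
	 * Moves to fs/resctrl/: schemata parsing, the rdtgroup hierarchy
	 * and the mon/ctrl file plumbing, i.e. the bulk of the roughly
	 * 7000 lines added under fs/.
	 */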
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 15 - arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 505 --- arch/x86/kernel/cpu/resctrl/internal.h | 310 -- arch/x86/kernel/cpu/resctrl/monitor.c | 821 ---- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 1093 ------ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 4288 +-------------------- fs/resctrl/ctrlmondata.c | 527 +++ fs/resctrl/internal.h | 340 ++ fs/resctrl/monitor.c | 843 ++++ fs/resctrl/psuedo_lock.c | 1122 ++++++ fs/resctrl/rdtgroup.c | 4013 +++++++++++++++++++ 11 files changed, 6992 insertions(+), 6885 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index f7c424f3381f..51edb6548977 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -164,21 +164,6 @@ static inline void cache_alloc_hsw_probe(void) rdt_alloc_capable = true; } -bool is_mba_sc(struct rdt_resource *r) -{ - if (!r) - r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - - /* - * The software controller support is only applicable to MBA resource. - * Make sure to check for resource type. - */ - if (r->rid != RDT_RESOURCE_MBA) - return false; - - return r->membw.mba_sc; -} - /* * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values * exposed to user interface and the h/w understandable delay values. diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index b4627ae19291..c5c3eaea27b6 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -23,260 +23,6 @@ #include "internal.h" -struct rdt_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - -typedef int (ctrlval_parser_t)(struct rdt_parse_data *data, - struct resctrl_schema *s, - struct rdt_domain *d); - -/* - * Check whether MBA bandwidth percentage value is correct. The value is - * checked against the minimum and max bandwidth values specified by the - * hardware. The allocated bandwidth percentage is rounded to the next - * control step available on the hardware. - */ -static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) -{ - unsigned long bw; - int ret; - - /* - * Only linear delay values is supported for current Intel SKUs. 
- */ - if (!r->membw.delay_linear && r->membw.arch_needs_linear) { - rdt_last_cmd_puts("No support for non-linear MB domains\n"); - return false; - } - - ret = kstrtoul(buf, 10, &bw); - if (ret) { - rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); - return false; - } - - if ((bw < r->membw.min_bw || bw > r->default_ctrl) && - !is_mba_sc(r)) { - rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, - r->membw.min_bw, r->default_ctrl); - return false; - } - - *data = roundup(bw, (unsigned long)r->membw.bw_gran); - return true; -} - -static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct resctrl_staged_config *cfg; - u32 closid = data->rdtgrp->closid; - struct rdt_resource *r = s->res; - unsigned long bw_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - if (!bw_validate(data->buf, &bw_val, r)) - return -EINVAL; - - if (is_mba_sc(r)) { - d->mbps_val[closid] = bw_val; - return 0; - } - - cfg->new_ctrl = bw_val; - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Check whether a cache bit mask is valid. - * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID: - * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1 - * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1 - * - * Haswell does not support a non-contiguous 1s value and additionally - * requires at least two bits set. - * AMD allows non-contiguous bitmasks. - */ -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) -{ - unsigned long first_bit, zero_bit, val; - unsigned int cbm_len = r->cache.cbm_len; - int ret; - - ret = kstrtoul(buf, 16, &val); - if (ret) { - rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); - return false; - } - - if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { - rdt_last_cmd_puts("Mask out of range\n"); - return false; - } - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Are non-contiguous bitmasks allowed? */ - if (!r->cache.arch_has_sparse_bitmasks && - (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { - rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); - return false; - } - - if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("Need at least %d bits in the mask\n", - r->cache.min_cbm_bits); - return false; - } - - *data = val; - return true; -} - -/* - * Read one cache bit mask (hex). Check that it is valid for the current - * resource type. - */ -static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct rdtgroup *rdtgrp = data->rdtgrp; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 cbm_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - /* - * Cannot set up more than one pseudo-locked region in a cache - * hierarchy. 
- */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - rdtgroup_pseudo_locked_in_hierarchy(d)) { - rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); - return -EINVAL; - } - - if (!cbm_validate(data->buf, &cbm_val, r)) - return -EINVAL; - - if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_SHAREABLE) && - rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { - rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); - return -EINVAL; - } - - /* - * The CBM may not overlap with the CBM of another closid if - * either is exclusive. - */ - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { - rdt_last_cmd_puts("Overlaps with exclusive group\n"); - return -EINVAL; - } - - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { - if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - rdt_last_cmd_puts("Overlaps with other group\n"); - return -EINVAL; - } - } - - cfg->new_ctrl = cbm_val; - cfg->have_new_ctrl = true; - - return 0; -} - -static ctrlval_parser_t *get_parser(struct rdt_resource *res) -{ - if (res->fflags & RFTYPE_RES_CACHE) - return &parse_cbm; - else - return &parse_bw; -} - -/* - * For each domain in this resource we expect to find a series of: - * id=mask - * separated by ";". The "id" is in decimal, and must match one of - * the "id"s for this resource. - */ -static int parse_line(char *line, struct resctrl_schema *s, - struct rdtgroup *rdtgrp) -{ - ctrlval_parser_t *parse_ctrlval = get_parser(s->res); - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - struct rdt_parse_data data; - char *dom = NULL, *id; - struct rdt_domain *d; - unsigned long dom_id; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { - rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); - return -EINVAL; - } - -next: - if (!line || line[0] == '\0') - return 0; - dom = strsep(&line, ";"); - id = strsep(&dom, "="); - if (!dom || kstrtoul(id, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); - return -EINVAL; - } - dom = strim(dom); - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - data.buf = dom; - data.rdtgrp = rdtgrp; - if (parse_ctrlval(&data, s, d)) - return -EINVAL; - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - cfg = &d->staged_config[t]; - /* - * In pseudo-locking setup mode and just - * parsed a valid CBM that should be - * pseudo-locked. Only one locked region per - * resource group and domain so just do - * the required initialization for single - * region and return. 
- */ - rdtgrp->plr->s = s; - rdtgrp->plr->d = d; - rdtgrp->plr->cbm = cfg->new_ctrl; - d->plr = rdtgrp->plr; - return 0; - } - goto next; - } - } - return -EINVAL; -} - static bool apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, cpumask_var_t cpu_mask) @@ -365,100 +111,6 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) return 0; } -static int rdtgroup_parse_resource(char *resname, char *tok, - struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - - list_for_each_entry(s, &resctrl_schema_all, list) { - if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) - return parse_line(tok, s, rdtgrp); - } - rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); - return -EINVAL; -} - -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct resctrl_schema *s; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - char *tok, *resname; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - rdt_last_cmd_clear(); - - /* - * No changes to pseudo-locked region allowed. It has to be removed - * and re-created instead. - */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = -EINVAL; - rdt_last_cmd_puts("Resource group is pseudo-locked\n"); - goto out; - } - - rdt_staged_configs_clear(); - - while ((tok = strsep(&buf, "\n")) != NULL) { - resname = strim(strsep(&tok, ":")); - if (!tok) { - rdt_last_cmd_puts("Missing ':'\n"); - ret = -EINVAL; - goto out; - } - if (tok[0] == '\0') { - rdt_last_cmd_printf("Missing '%s' value\n", resname); - ret = -EINVAL; - goto out; - } - ret = rdtgroup_parse_resource(resname, tok, rdtgrp); - if (ret) - goto out; - } - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - - /* - * Writes to mba_sc resources update the software controller, - * not the control MSR. - */ - if (is_mba_sc(r)) - continue; - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret) - goto out; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * If pseudo-locking fails we keep the resource group in - * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service - * active and updated for just the domain the pseudo-locked - * region was requested for. 
- */ - ret = rdtgroup_pseudo_lock_create(rdtgrp); - } - -out: - rdt_staged_configs_clear(); - rdtgroup_kn_unlock(of->kn); - return ret ?: nbytes; -} - u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { @@ -467,160 +119,3 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, return hw_dom->ctrl_val[idx]; } - -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) -{ - struct rdt_resource *r = schema->res; - struct rdt_domain *dom; - bool sep = false; - u32 ctrl_val; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_puts(s, ";"); - - if (is_mba_sc(r)) - ctrl_val = dom->mbps_val[closid]; - else - ctrl_val = resctrl_arch_get_config(r, dom, closid, - schema->conf_type); - - seq_printf(s, r->format_str, dom->id, max_data_width, - ctrl_val); - sep = true; - } - seq_puts(s, "\n"); -} - -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - struct rdtgroup *rdtgrp; - int ret = 0; - u32 closid; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - list_for_each_entry(schema, &resctrl_schema_all, list) { - seq_printf(s, "%s:uninitialized\n", schema->name); - } - } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%s:%d=%x\n", - rdtgrp->plr->s->res->name, - rdtgrp->plr->d->id, - rdtgrp->plr->cbm); - } - } else { - closid = rdtgrp->closid; - list_for_each_entry(schema, &resctrl_schema_all, list) { - if (closid < schema->num_closid) - show_doms(s, schema, closid); - } - } - } else { - ret = -ENOENT; - } - rdtgroup_kn_unlock(of->kn); - return ret; -} - -static int smp_mon_event_count(void *arg) -{ - mon_event_count(arg); - - return 0; -} - -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first) -{ - int cpu; - - /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - /* - * Setup the parameters to pass to mon_event_count() to read the data. - */ - rr->rgrp = rdtgrp; - rr->evtid = evtid; - rr->r = r; - rr->d = d; - rr->val = 0; - rr->first = first; - rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid); - if (IS_ERR(rr->arch_mon_ctx)) { - rr->err = -EINVAL; - return; - } - - cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU); - - /* - * cpumask_any_housekeeping() prefers housekeeping CPUs, but - * are all the CPUs nohz_full? If yes, pick a CPU to IPI. - * MPAM's resctrl_arch_rmid_read() is unable to read the - * counters on some platforms if its called in IRQ context. 
- */ - if (tick_nohz_full_cpu(cpu)) - smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); - else - smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); - - resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); -} - -int rdtgroup_mondata_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - u32 resid, evtid, domid; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - union mon_data_bits md; - struct rdt_domain *d; - struct rmid_read rr; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto out; - } - - md.priv = of->kn->priv; - resid = md.u.rid; - domid = md.u.domid; - evtid = md.u.evtid; - - r = resctrl_arch_get_resource(resid); - d = resctrl_arch_find_domain(r, domid); - if (IS_ERR_OR_NULL(d)) { - ret = -ENOENT; - goto out; - } - - mon_event_read(&rr, r, d, rdtgrp, evtid, false); - - if (rr.err == -EIO) - seq_puts(m, "Error\n"); - else if (rr.err == -EINVAL) - seq_puts(m, "Unavailable\n"); - else - seq_printf(m, "%llu\n", rr.val); - -out: - rdtgroup_kn_unlock(of->kn); - return ret; -} diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 0f7e3f10941b..bf3538992667 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -26,227 +26,6 @@ */ #define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE) -/** - * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that - * aren't marked nohz_full - * @mask: The mask to pick a CPU from. - * @exclude_cpu:The CPU to avoid picking. - * - * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping - * CPUs that don't use nohz_full, these are preferred. Pass - * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. - * - * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. - */ -static inline unsigned int -cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) -{ - unsigned int cpu, hk_cpu; - - if (exclude_cpu == RESCTRL_PICK_ANY_CPU) - cpu = cpumask_any(mask); - else - cpu = cpumask_any_but(mask, exclude_cpu); - - if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) - return cpu; - - /* If the CPU picked isn't marked nohz_full nothing more needs doing. 
*/ - if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) - return cpu; - - /* Try to find a CPU that isn't nohz_full to use in preference */ - hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); - if (hk_cpu == exclude_cpu) - hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); - - if (hk_cpu < nr_cpu_ids) - cpu = hk_cpu; - - return cpu; -} - -struct rdt_fs_context { - struct kernfs_fs_context kfc; - bool enable_cdpl2; - bool enable_cdpl3; - bool enable_mba_mbps; - bool enable_debug; -}; - -static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) -{ - struct kernfs_fs_context *kfc = fc->fs_private; - - return container_of(kfc, struct rdt_fs_context, kfc); -} - -/** - * struct mon_evt - Entry in the event list of a resource - * @evtid: event id - * @name: name of the event - * @configurable: true if the event is configurable - * @list: entry in &rdt_resource->evt_list - */ -struct mon_evt { - enum resctrl_event_id evtid; - char *name; - bool configurable; - struct list_head list; -}; - -/** - * union mon_data_bits - Monitoring details for each event file - * @priv: Used to store monitoring event data in @u - * as kernfs private data - * @rid: Resource id associated with the event file - * @evtid: Event id associated with the event file - * @domid: The domain to which the event file belongs - * @u: Name of the bit fields struct - */ -union mon_data_bits { - void *priv; - struct { - unsigned int rid : 10; - enum resctrl_event_id evtid : 8; - unsigned int domid : 14; - } u; -}; - -struct rmid_read { - struct rdtgroup *rgrp; - struct rdt_resource *r; - struct rdt_domain *d; - enum resctrl_event_id evtid; - bool first; - int err; - u64 val; - void *arch_mon_ctx; -}; - -extern struct list_head resctrl_schema_all; -extern bool resctrl_mounted; - -enum rdt_group_type { - RDTCTRL_GROUP = 0, - RDTMON_GROUP, - RDT_NUM_GROUP, -}; - -/** - * enum rdtgrp_mode - Mode of a RDT resource group - * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations - * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed - * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking - * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations - * allowed AND the allocations are Cache Pseudo-Locked - * @RDT_NUM_MODES: Total number of modes - * - * The mode of a resource group enables control over the allowed overlap - * between allocations associated with different resource groups (classes - * of service). User is able to modify the mode of a resource group by - * writing to the "mode" resctrl file associated with the resource group. - * - * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by - * writing the appropriate text to the "mode" file. A resource group enters - * "pseudo-locked" mode after the schemata is written while the resource - * group is in "pseudo-locksetup" mode. - */ -enum rdtgrp_mode { - RDT_MODE_SHAREABLE = 0, - RDT_MODE_EXCLUSIVE, - RDT_MODE_PSEUDO_LOCKSETUP, - RDT_MODE_PSEUDO_LOCKED, - - /* Must be last */ - RDT_NUM_MODES, -}; - -/** - * struct mongroup - store mon group's data in resctrl fs. - * @mon_data_kn: kernfs node for the mon_data directory - * @parent: parent rdtgrp - * @crdtgrp_list: child rdtgroup node list - * @rmid: rmid for this rdtgroup - */ -struct mongroup { - struct kernfs_node *mon_data_kn; - struct rdtgroup *parent; - struct list_head crdtgrp_list; - u32 rmid; -}; - -/** - * struct rdtgroup - store rdtgroup's data in resctrl file system. 
- * @kn: kernfs node - * @rdtgroup_list: linked list for all rdtgroups - * @closid: closid for this rdtgroup - * @cpu_mask: CPUs assigned to this rdtgroup - * @flags: status bits - * @waitcount: how many cpus expect to find this - * group when they acquire rdtgroup_mutex - * @type: indicates type of this rdtgroup - either - * monitor only or ctrl_mon group - * @mon: mongroup related data - * @mode: mode of resource group - * @plr: pseudo-locked region - */ -struct rdtgroup { - struct kernfs_node *kn; - struct list_head rdtgroup_list; - u32 closid; - struct cpumask cpu_mask; - int flags; - atomic_t waitcount; - enum rdt_group_type type; - struct mongroup mon; - enum rdtgrp_mode mode; - struct pseudo_lock_region *plr; -}; - -/* List of all resource groups */ -extern struct list_head rdt_all_groups; - -extern int max_name_width, max_data_width; - -/** - * struct rftype - describe each file in the resctrl file system - * @name: File name - * @mode: Access mode - * @kf_ops: File operations - * @flags: File specific RFTYPE_FLAGS_* flags - * @fflags: File specific RFTYPE_* flags - * @seq_show: Show content of the file - * @write: Write to the file - */ -struct rftype { - char *name; - umode_t mode; - const struct kernfs_ops *kf_ops; - unsigned long flags; - unsigned long fflags; - - int (*seq_show)(struct kernfs_open_file *of, - struct seq_file *sf, void *v); - /* - * write() is the generic write callback which maps directly to - * kernfs write operation and overrides all other operations. - * Maximum write size is determined by ->max_write_len. - */ - ssize_t (*write)(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -}; - -/** - * struct mbm_state - status for each MBM counter in each domain - * @prev_bw_bytes: Previous bytes value read for bandwidth calculation - * @prev_bw: The most recent bandwidth in MBps - */ -struct mbm_state { - u64 prev_bw_bytes; - u32 prev_bw; -}; - /** * struct arch_mbm_state - values used to compute resctrl_arch_rmid_read()s * return value. 
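
[ For readers following the move: the union mon_data_bits removed just above
packs three identifiers (resource id, event id and domain id) into a single
pointer-sized value so they can travel through a kernfs node's priv field
without a separate allocation; rdtgroup_mondata_show() unpacks them the same
way. The stand-alone C sketch below is illustrative only and is not part of
this patch; the 10/8/14-bit field widths are taken from the definition above,
and the exact bitfield layout is ABI-dependent. ]

#include <assert.h>
#include <stdio.h>

/* Mirrors the layout of union mon_data_bits: three ids share one
 * pointer-sized word (10 + 8 + 14 = 32 bits used). */
union mon_data_bits_demo {
	void *priv;
	struct {
		unsigned int rid : 10;
		unsigned int evtid : 8;
		unsigned int domid : 14;
	} u;
};

int main(void)
{
	union mon_data_bits_demo md = { .priv = 0 };

	/* Encode: what file-creation time setup would stash in kn->priv. */
	md.u.rid = 3;	/* hypothetical resource id */
	md.u.evtid = 2;	/* hypothetical event id */
	md.u.domid = 7;	/* hypothetical domain id */

	/* Decode: what the show() path does with of->kn->priv. */
	union mon_data_bits_demo rd = { .priv = md.priv };
	assert(rd.u.rid == 3 && rd.u.evtid == 2 && rd.u.domid == 7);
	printf("rid=%u evtid=%u domid=%u\n", rd.u.rid, rd.u.evtid, rd.u.domid);
	return 0;
}
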
@@ -327,11 +106,7 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r return container_of(r, struct rdt_hw_resource, r_resctrl); } -extern struct mutex rdtgroup_mutex; - extern struct rdt_hw_resource rdt_resources_all[]; -extern struct rdtgroup rdtgroup_default; -extern struct dentry *debugfs_resctrl; static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) { @@ -395,95 +170,10 @@ union cpuid_0x10_x_edx { unsigned int full; }; -void rdt_last_cmd_clear(void); -void rdt_last_cmd_puts(const char *s); -__printf(1, 2) -void rdt_last_cmd_printf(const char *fmt, ...); - void rdt_ctrl_update(void *arg); -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); -void rdtgroup_kn_unlock(struct kernfs_node *kn); -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask); -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v); -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive); -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm); -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); -int rdtgroup_tasks_assigned(struct rdtgroup *r); -int closids_supported(void); -void closid_free(int closid); -int alloc_rmid(u32 closid); -void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -void resctrl_mon_resource_exit(void); bool rdt_cpu_has(int flag); -void mon_event_count(void *info); -int rdtgroup_mondata_show(struct seq_file *m, void *arg); -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first); -int resctrl_mon_resource_init(void); -void mbm_setup_overflow_handler(struct rdt_domain *dom, - unsigned long delay_ms, - int exclude_cpu); -void mbm_handle_overflow(struct work_struct *work); void __init intel_rdt_mbm_apply_quirk(void); -bool is_mba_sc(struct rdt_resource *r); -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, - int exclude_cpu); -void cqm_handle_limbo(struct work_struct *work); -bool has_busy_rmid(struct rdt_domain *d); -void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); -void mbm_config_rftype_init(const char *config); -void rdt_staged_configs_clear(void); -bool closid_allocated(unsigned int closid); -int resctrl_find_cleanest_closid(void); - -#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); -int rdt_pseudo_lock_init(void); -void rdt_pseudo_lock_release(void); -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); -#else -static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) -{ - return -EOPNOTSUPP; -} - -static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) -{ - return -EOPNOTSUPP; -} - -static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) -{ - return false; -} - -static inline bool 
rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) -{ - return false; -} - -static inline int rdt_pseudo_lock_init(void) { return 0; } -static inline void rdt_pseudo_lock_release(void) { } -static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) -{ - return -EOPNOTSUPP; -} - -static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { } -#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 7e6fca138cb7..02fb9d87479a 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -25,53 +25,6 @@ #include "internal.h" -/** - * struct rmid_entry - dirty tracking for all RMID. - * @closid: The CLOSID for this entry. - * @rmid: The RMID for this entry. - * @busy: The number of domains with cached data using this RMID. - * @list: Member of the rmid_free_lru list when busy == 0. - * - * Depending on the architecture the correct monitor is accessed using - * both @closid and @rmid, or @rmid only. - * - * Take the rdtgroup_mutex when accessing. - */ -struct rmid_entry { - u32 closid; - u32 rmid; - int busy; - struct list_head list; -}; - -/* - * @rmid_free_lru - A least recently used list of free RMIDs - * These RMIDs are guaranteed to have an occupancy less than the - * threshold occupancy - */ -static LIST_HEAD(rmid_free_lru); - -/* - * @closid_num_dirty_rmid The number of dirty RMID each CLOSID has. - * Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined. - * Indexed by CLOSID. Protected by rdtgroup_mutex. - */ -static u32 *closid_num_dirty_rmid; - -/* - * @rmid_limbo_count - count of currently unused but (potentially) - * dirty RMIDs. - * This counts RMIDs that no one is currently using but that - * may have a occupancy value > resctrl_rmid_realloc_threshold. User can - * change the threshold occupancy value. - */ -static unsigned int rmid_limbo_count; - -/* - * @rmid_entry - The entry in the limbo and free lists. - */ -static struct rmid_entry *rmid_ptrs; - /* * Global boolean for rdt_monitor which is true if any * resource monitoring is enabled. @@ -83,17 +36,6 @@ bool rdt_mon_capable; */ unsigned int rdt_mon_features; -/* - * This is the threshold cache occupancy in bytes at which we will consider an - * RMID available for re-allocation. - */ -unsigned int resctrl_rmid_realloc_threshold; - -/* - * This is the maximum value for the reallocation threshold, in bytes. - */ -unsigned int resctrl_rmid_realloc_limit; - #define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5)) /* @@ -157,33 +99,6 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -/* - * x86 and arm64 differ in their handling of monitoring. - * x86's RMID are independent numbers, there is only one source of traffic - * with an RMID value of '1'. - * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of - * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID - * value is no longer unique. - * To account for this, resctrl uses an index. On x86 this is just the RMID, - * on arm64 it encodes the CLOSID and RMID. This gives a unique number. - * - * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code - * must accept an attempt to read every index. 
- */ -static inline struct rmid_entry *__rmid_entry(u32 idx) -{ - struct rmid_entry *entry; - u32 closid, rmid; - - entry = &rmid_ptrs[idx]; - resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); - - WARN_ON_ONCE(entry->closid != closid); - WARN_ON_ONCE(entry->rmid != rmid); - - return entry; -} - static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val) { u64 msr_val; @@ -302,735 +217,6 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return 0; } -static void limbo_release_entry(struct rmid_entry *entry) -{ - lockdep_assert_held(&rdtgroup_mutex); - - rmid_limbo_count--; - list_add_tail(&entry->list, &rmid_free_lru); - - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) - closid_num_dirty_rmid[entry->closid]--; -} - -/* - * Check the RMIDs that are marked as busy for this domain. If the - * reported LLC occupancy is below the threshold clear the busy bit and - * decrement the count. If the busy count gets to zero on an RMID, we - * free the RMID - */ -void __check_limbo(struct rdt_domain *d, bool force_free) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - struct rmid_entry *entry; - u32 idx, cur_idx = 1; - void *arch_mon_ctx; - bool rmid_dirty; - u64 val = 0; - - arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); - if (IS_ERR(arch_mon_ctx)) { - pr_warn_ratelimited("Failed to allocate monitor context: %ld", - PTR_ERR(arch_mon_ctx)); - return; - } - - /* - * Skip RMID 0 and start from RMID 1 and check all the RMIDs that - * are marked as busy for occupancy < threshold. If the occupancy - * is less than the threshold decrement the busy counter of the - * RMID and move it to the free list when the counter reaches 0. - */ - for (;;) { - idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); - if (idx >= idx_limit) - break; - - entry = __rmid_entry(idx); - if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, &val, - arch_mon_ctx)) { - rmid_dirty = true; - } else { - rmid_dirty = (val >= resctrl_rmid_realloc_threshold); - } - - if (force_free || !rmid_dirty) { - clear_bit(idx, d->rmid_busy_llc); - if (!--entry->busy) - limbo_release_entry(entry); - } - cur_idx = idx + 1; - } - - resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); -} - -bool has_busy_rmid(struct rdt_domain *d) -{ - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - - return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; -} - -static struct rmid_entry *resctrl_find_free_rmid(u32 closid) -{ - struct rmid_entry *itr; - u32 itr_idx, cmp_idx; - - if (list_empty(&rmid_free_lru)) - return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); - - list_for_each_entry(itr, &rmid_free_lru, list) { - /* - * Get the index of this free RMID, and the index it would need - * to be if it were used with this CLOSID. - * If the CLOSID is irrelevant on this architecture, the two - * index values are always the same on every entry and thus the - * very first entry will be returned. - */ - itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); - cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); - - if (itr_idx == cmp_idx) - return itr; - } - - return ERR_PTR(-ENOSPC); -} - -/** - * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated - * RMID are clean, or the CLOSID that has - * the most clean RMID. 
- *
- * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
- * may not be able to allocate clean RMID. To avoid this the allocator will
- * choose the CLOSID with the most clean RMID.
- *
- * When the CLOSID and RMID are independent numbers, the first free CLOSID will
- * be returned.
- */
-int resctrl_find_cleanest_closid(void)
-{
-	u32 cleanest_closid = ~0;
-	int i = 0;
-
-	lockdep_assert_held(&rdtgroup_mutex);
-
-	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
-		return -EIO;
-
-	for (i = 0; i < closids_supported(); i++) {
-		int num_dirty;
-
-		if (closid_allocated(i))
-			continue;
-
-		num_dirty = closid_num_dirty_rmid[i];
-		if (num_dirty == 0)
-			return i;
-
-		if (cleanest_closid == ~0)
-			cleanest_closid = i;
-
-		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
-			cleanest_closid = i;
-	}
-
-	if (cleanest_closid == ~0)
-		return -ENOSPC;
-
-	return cleanest_closid;
-}
-
-/*
- * For MPAM the RMID value is not unique, and has to be considered with
- * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
- * allows all domains to be managed by a single free list.
- * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
- */
-int alloc_rmid(u32 closid)
-{
-	struct rmid_entry *entry;
-
-	lockdep_assert_held(&rdtgroup_mutex);
-
-	entry = resctrl_find_free_rmid(closid);
-	if (IS_ERR(entry))
-		return PTR_ERR(entry);
-
-	list_del(&entry->list);
-	return entry->rmid;
-}
-
-static void add_rmid_to_limbo(struct rmid_entry *entry)
-{
-	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
-	struct rdt_domain *d;
-	u32 idx;
-
-	lockdep_assert_held(&rdtgroup_mutex);
-
-	/* Walking r->domains, ensure it can't race with cpuhp */
-	lockdep_assert_cpus_held();
-
-	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
-
-	entry->busy = 0;
-	list_for_each_entry(d, &r->domains, list) {
-		/*
-		 * For the first limbo RMID in the domain,
-		 * set up the limbo worker.
-		 */
-		if (!has_busy_rmid(d))
-			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
-						RESCTRL_PICK_ANY_CPU);
-		set_bit(idx, d->rmid_busy_llc);
-		entry->busy++;
-	}
-
-	rmid_limbo_count++;
-	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
-		closid_num_dirty_rmid[entry->closid]++;
-}
-
-void free_rmid(u32 closid, u32 rmid)
-{
-	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
-	struct rmid_entry *entry;
-
-	lockdep_assert_held(&rdtgroup_mutex);
-
-	/*
-	 * Do not allow the default rmid to be free'd. Comparing by index
-	 * allows architectures that ignore the closid parameter to avoid an
-	 * unnecessary check.
- */ - if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, - RESCTRL_RESERVED_RMID)) - return; - - entry = __rmid_entry(idx); - - if (resctrl_arch_is_llc_occupancy_enabled()) - add_rmid_to_limbo(entry); - else - list_add_tail(&entry->list, &rmid_free_lru); -} - -static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, - u32 rmid, enum resctrl_event_id evtid) -{ - u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); - - switch (evtid) { - case QOS_L3_MBM_TOTAL_EVENT_ID: - return &d->mbm_total[idx]; - case QOS_L3_MBM_LOCAL_EVENT_ID: - return &d->mbm_local[idx]; - default: - return NULL; - } -} - -static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) -{ - struct mbm_state *m; - u64 tval = 0; - - if (rr->first) { - resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); - m = get_mbm_state(rr->d, closid, rmid, rr->evtid); - if (m) - memset(m, 0, sizeof(struct mbm_state)); - return 0; - } - - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, - &tval, rr->arch_mon_ctx); - if (rr->err) - return rr->err; - - rr->val += tval; - - return 0; -} - -/* - * mbm_bw_count() - Update bw count from values previously read by - * __mon_event_count(). - * @closid: The closid used to identify the cached mbm_state. - * @rmid: The rmid used to identify the cached mbm_state. - * @rr: The struct rmid_read populated by __mon_event_count(). - * - * Supporting function to calculate the memory bandwidth - * and delta bandwidth in MBps. The chunks value previously read by - * __mon_event_count() is compared with the chunks value from the previous - * invocation. This must be called once per second to maintain values in MBps. - */ -static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) -{ - u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); - struct mbm_state *m = &rr->d->mbm_local[idx]; - u64 cur_bw, bytes, cur_bytes; - - cur_bytes = rr->val; - bytes = cur_bytes - m->prev_bw_bytes; - m->prev_bw_bytes = cur_bytes; - - cur_bw = bytes / SZ_1M; - - m->prev_bw = cur_bw; -} - -/* - * This is scheduled by mon_event_read() to read the CQM/MBM counters - * on a domain. - */ -void mon_event_count(void *info) -{ - struct rdtgroup *rdtgrp, *entry; - struct rmid_read *rr = info; - struct list_head *head; - int ret; - - rdtgrp = rr->rgrp; - - ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); - - /* - * For Ctrl groups read data from child monitor groups and - * add them together. Count events which are read successfully. - * Discard the rmid_read's reporting errors. - */ - head = &rdtgrp->mon.crdtgrp_list; - - if (rdtgrp->type == RDTCTRL_GROUP) { - list_for_each_entry(entry, head, mon.crdtgrp_list) { - if (__mon_event_count(entry->closid, entry->mon.rmid, - rr) == 0) - ret = 0; - } - } - - /* - * __mon_event_count() calls for newly created monitor groups may - * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. - * Discard error if any of the monitor event reads succeeded. - */ - if (ret == 0) - rr->err = 0; -} - -/* - * Feedback loop for MBA software controller (mba_sc) - * - * mba_sc is a feedback loop where we periodically read MBM counters and - * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so - * that: - * - * current bandwidth(cur_bw) < user specified bandwidth(user_bw) - * - * This uses the MBM counters to measure the bandwidth and MBA throttle - * MSRs to control the bandwidth for a particular rdtgrp. 
It builds on the - * fact that resctrl rdtgroups have both monitoring and control. - * - * The frequency of the checks is 1s and we just tag along the MBM overflow - * timer. Having 1s interval makes the calculation of bandwidth simpler. - * - * Although MBA's goal is to restrict the bandwidth to a maximum, there may - * be a need to increase the bandwidth to avoid unnecessarily restricting - * the L2 <-> L3 traffic. - * - * Since MBA controls the L2 external bandwidth where as MBM measures the - * L3 external bandwidth the following sequence could lead to such a - * situation. - * - * Consider an rdtgroup which had high L3 <-> memory traffic in initial - * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but - * after some time rdtgroup has mostly L2 <-> L3 traffic. - * - * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its - * throttle MSRs already have low percentage values. To avoid - * unnecessarily restricting such rdtgroups, we also increase the bandwidth. - */ -static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) -{ - u32 closid, rmid, cur_msr_val, new_msr_val; - struct mbm_state *pmbm_data, *cmbm_data; - struct rdt_resource *r_mba; - struct rdt_domain *dom_mba; - u32 cur_bw, user_bw, idx; - struct list_head *head; - struct rdtgroup *entry; - - if (!resctrl_arch_is_mbm_local_enabled()) - return; - - r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - - closid = rgrp->closid; - rmid = rgrp->mon.rmid; - idx = resctrl_arch_rmid_idx_encode(closid, rmid); - pmbm_data = &dom_mbm->mbm_local[idx]; - - dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba); - if (!dom_mba) { - pr_warn_once("Failure to get domain for MBA update\n"); - return; - } - - cur_bw = pmbm_data->prev_bw; - user_bw = dom_mba->mbps_val[closid]; - - /* MBA resource doesn't support CDP */ - cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE); - - /* - * For Ctrl groups read data from child monitor groups. - */ - head = &rgrp->mon.crdtgrp_list; - list_for_each_entry(entry, head, mon.crdtgrp_list) { - cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; - cur_bw += cmbm_data->prev_bw; - } - - /* - * Scale up/down the bandwidth linearly for the ctrl group. The - * bandwidth step is the bandwidth granularity specified by the - * hardware. - * Always increase throttling if current bandwidth is above the - * target set by user. - * But avoid thrashing up and down on every poll by checking - * whether a decrease in throttling is likely to push the group - * back over target. E.g. if currently throttling to 30% of bandwidth - * on a system with 10% granularity steps, check whether moving to - * 40% would go past the limit by multiplying current bandwidth by - * "(30 + 10) / 30". - */ - if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { - new_msr_val = cur_msr_val - r_mba->membw.bw_gran; - } else if (cur_msr_val < MAX_MBA_BW && - (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) { - new_msr_val = cur_msr_val + r_mba->membw.bw_gran; - } else { - return; - } - - resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); -} - -static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, - u32 closid, u32 rmid) -{ - struct rmid_read rr; - - rr.first = false; - rr.r = r; - rr.d = d; - - /* - * This is protected from concurrent reads from user - * as both the user and we hold the global mutex. 
- */ - if (resctrl_arch_is_mbm_total_enabled()) { - rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; - rr.val = 0; - rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); - if (IS_ERR(rr.arch_mon_ctx)) { - pr_warn_ratelimited("Failed to allocate monitor context: %ld", - PTR_ERR(rr.arch_mon_ctx)); - return; - } - - __mon_event_count(closid, rmid, &rr); - - resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); - } - if (resctrl_arch_is_mbm_local_enabled()) { - rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; - rr.val = 0; - rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); - if (IS_ERR(rr.arch_mon_ctx)) { - pr_warn_ratelimited("Failed to allocate monitor context: %ld", - PTR_ERR(rr.arch_mon_ctx)); - return; - } - - __mon_event_count(closid, rmid, &rr); - - /* - * Call the MBA software controller only for the - * control groups and when user has enabled - * the software controller explicitly. - */ - if (is_mba_sc(NULL)) - mbm_bw_count(closid, rmid, &rr); - - resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); - } -} - -/* - * Handler to scan the limbo list and move the RMIDs - * to free list whose occupancy < threshold_occupancy. - */ -void cqm_handle_limbo(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); - struct rdt_domain *d; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - d = container_of(work, struct rdt_domain, cqm_limbo.work); - - __check_limbo(d, false); - - if (has_busy_rmid(d)) { - d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, - RESCTRL_PICK_ANY_CPU); - schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, - delay); - } - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -/** - * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this - * domain. - * @dom: The domain the limbo handler should run for. - * @delay_ms: How far in the future the handler should run. - * @exclude_cpu: Which CPU the handler should not run on, - * RESCTRL_PICK_ANY_CPU to pick any CPU. - */ -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, - int exclude_cpu) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); - dom->cqm_work_cpu = cpu; - - if (cpu < nr_cpu_ids) - schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); -} - -void mbm_handle_overflow(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); - struct rdtgroup *prgrp, *crgrp; - struct list_head *head; - struct rdt_resource *r; - struct rdt_domain *d; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - /* - * If the filesystem has been unmounted this work no longer needs to - * run. - */ - if (!resctrl_mounted || !resctrl_arch_mon_capable()) - goto out_unlock; - - r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - d = container_of(work, struct rdt_domain, mbm_over.work); - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); - - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) - mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); - - if (is_mba_sc(NULL)) - update_mba_bw(prgrp, d); - } - - /* - * Re-check for housekeeping CPUs. This allows the overflow handler to - * move off a nohz_full CPU quickly. 
- */ - d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, - RESCTRL_PICK_ANY_CPU); - schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -/** - * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this - * domain. - * @dom: The domain the overflow handler should run for. - * @delay_ms: How far in the future the handler should run. - * @exclude_cpu: Which CPU the handler should not run on, - * RESCTRL_PICK_ANY_CPU to pick any CPU. - */ -void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, - int exclude_cpu) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - /* - * When a domain comes online there is no guarantee the filesystem is - * mounted. If not, there is no need to catch counter overflow. - */ - if (!resctrl_mounted || !resctrl_arch_mon_capable()) - return; - cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); - dom->mbm_work_cpu = cpu; - - if (cpu < nr_cpu_ids) - schedule_delayed_work_on(cpu, &dom->mbm_over, delay); -} - -static int dom_data_init(struct rdt_resource *r) -{ - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - u32 num_closid = resctrl_arch_get_num_closid(r); - struct rmid_entry *entry = NULL; - int err = 0, i; - u32 idx; - - mutex_lock(&rdtgroup_mutex); - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { - u32 *tmp; - - /* - * If the architecture hasn't provided a sanitised value here, - * this may result in larger arrays than necessary. Resctrl will - * use a smaller system wide value based on the resources in - * use. - */ - tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); - if (!tmp) { - err = -ENOMEM; - goto out_unlock; - } - - closid_num_dirty_rmid = tmp; - } - - rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); - if (!rmid_ptrs) { - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { - kfree(closid_num_dirty_rmid); - closid_num_dirty_rmid = NULL; - } - err = -ENOMEM; - goto out_unlock; - } - - for (i = 0; i < idx_limit; i++) { - entry = &rmid_ptrs[i]; - INIT_LIST_HEAD(&entry->list); - - resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); - list_add_tail(&entry->list, &rmid_free_lru); - } - - /* - * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and - * are always allocated. These are used for the rdtgroup_default - * control group, which will be setup later in rdtgroup_init(). - */ - idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, - RESCTRL_RESERVED_RMID); - entry = __rmid_entry(idx); - list_del(&entry->list); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); - - return err; -} - -static void dom_data_exit(struct rdt_resource *r) -{ - if (!r->mon_capable) - return; - - mutex_lock(&rdtgroup_mutex); - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { - kfree(closid_num_dirty_rmid); - closid_num_dirty_rmid = NULL; - } - - kfree(rmid_ptrs); - rmid_ptrs = NULL; - - mutex_unlock(&rdtgroup_mutex); -} - -static struct mon_evt llc_occupancy_event = { - .name = "llc_occupancy", - .evtid = QOS_L3_OCCUP_EVENT_ID, -}; - -static struct mon_evt mbm_total_event = { - .name = "mbm_total_bytes", - .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, -}; - -static struct mon_evt mbm_local_event = { - .name = "mbm_local_bytes", - .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, -}; - -/* - * Initialize the event list for the resource. 
- * - * Note that MBM events are also part of RDT_RESOURCE_L3 resource - * because as per the SDM the total and local memory bandwidth - * are enumerated as part of L3 monitoring. - */ -static void l3_mon_evt_init(struct rdt_resource *r) -{ - INIT_LIST_HEAD(&r->evt_list); - - if (resctrl_arch_is_llc_occupancy_enabled()) - list_add_tail(&llc_occupancy_event.list, &r->evt_list); - if (resctrl_arch_is_mbm_total_enabled()) - list_add_tail(&mbm_total_event.list, &r->evt_list); - if (resctrl_arch_is_mbm_local_enabled()) - list_add_tail(&mbm_local_event.list, &r->evt_list); -} - -int resctrl_mon_resource_init(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - int ret; - - if (!r->mon_capable) - return 0; - - ret = dom_data_init(r); - if (ret) - return ret; - - l3_mon_evt_init(r); - - if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) { - mbm_total_event.configurable = true; - mbm_config_rftype_init("mbm_total_bytes_config"); - } - if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) { - mbm_local_event.configurable = true; - mbm_config_rftype_init("mbm_local_bytes_config"); - } - - return 0; -} - int __init rdt_get_mon_l3_config(struct rdt_resource *r) { unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset; @@ -1076,13 +262,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } -void resctrl_mon_resource_exit(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - - dom_data_exit(r); -} - void __init intel_rdt_mbm_apply_quirk(void) { int cf_index; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index ba51ab1f70e6..ba1596afee10 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -39,28 +39,6 @@ */ static u64 prefetch_disable_bits; -/* - * Major number assigned to and shared by all devices exposing - * pseudo-locked regions. - */ -static unsigned int pseudo_lock_major; -static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0); - -static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) -{ - const struct rdtgroup *rdtgrp; - - rdtgrp = dev_get_drvdata(dev); - if (mode) - *mode = 0600; - return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name); -} - -static const struct class pseudo_lock_class = { - .name = "pseudo_lock", - .devnode = pseudo_lock_devnode, -}; - /** * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported * platforms @@ -121,299 +99,6 @@ u64 resctrl_arch_get_prefetch_disable_bits(void) return prefetch_disable_bits; } -/** - * pseudo_lock_minor_get - Obtain available minor number - * @minor: Pointer to where new minor number will be stored - * - * A bitmask is used to track available minor numbers. Here the next free - * minor number is marked as unavailable and returned. - * - * Return: 0 on success, <0 on failure. 
- */ -static int pseudo_lock_minor_get(unsigned int *minor) -{ - unsigned long first_bit; - - first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); - - if (first_bit == MINORBITS) - return -ENOSPC; - - __clear_bit(first_bit, &pseudo_lock_minor_avail); - *minor = first_bit; - - return 0; -} - -/** - * pseudo_lock_minor_release - Return minor number to available - * @minor: The minor number made available - */ -static void pseudo_lock_minor_release(unsigned int minor) -{ - __set_bit(minor, &pseudo_lock_minor_avail); -} - -/** - * region_find_by_minor - Locate a pseudo-lock region by inode minor number - * @minor: The minor number of the device representing pseudo-locked region - * - * When the character device is accessed we need to determine which - * pseudo-locked region it belongs to. This is done by matching the minor - * number of the device to the pseudo-locked region it belongs. - * - * Minor numbers are assigned at the time a pseudo-locked region is associated - * with a cache instance. - * - * Return: On success return pointer to resource group owning the pseudo-locked - * region, NULL on failure. - */ -static struct rdtgroup *region_find_by_minor(unsigned int minor) -{ - struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->plr && rdtgrp->plr->minor == minor) { - rdtgrp_match = rdtgrp; - break; - } - } - return rdtgrp_match; -} - -/** - * struct pseudo_lock_pm_req - A power management QoS request list entry - * @list: Entry within the @pm_reqs list for a pseudo-locked region - * @req: PM QoS request - */ -struct pseudo_lock_pm_req { - struct list_head list; - struct dev_pm_qos_request req; -}; - -static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req, *next; - - list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { - dev_pm_qos_remove_request(&pm_req->req); - list_del(&pm_req->list); - kfree(pm_req); - } -} - -/** - * pseudo_lock_cstates_constrain - Restrict cores from entering C6 - * @plr: Pseudo-locked region - * - * To prevent the cache from being affected by power management entering - * C6 has to be avoided. This is accomplished by requesting a latency - * requirement lower than lowest C6 exit latency of all supported - * platforms as found in the cpuidle state tables in the intel_idle driver. - * At this time it is possible to do so with a single latency requirement - * for all supported platforms. - * - * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, - * the ACPI latencies need to be considered while keeping in mind that C2 - * may be set to map to deeper sleep states. In this case the latency - * requirement needs to prevent entering C2 also. 
- * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req; - int cpu; - int ret; - - for_each_cpu(cpu, &plr->d->cpu_mask) { - pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); - if (!pm_req) { - rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); - ret = -ENOMEM; - goto out_err; - } - ret = dev_pm_qos_add_request(get_cpu_device(cpu), - &pm_req->req, - DEV_PM_QOS_RESUME_LATENCY, - 30); - if (ret < 0) { - rdt_last_cmd_printf("Failed to add latency req CPU%d\n", - cpu); - kfree(pm_req); - ret = -1; - goto out_err; - } - list_add(&pm_req->list, &plr->pm_reqs); - } - - return 0; - -out_err: - pseudo_lock_cstates_relax(plr); - return ret; -} - -/** - * pseudo_lock_region_clear - Reset pseudo-lock region data - * @plr: pseudo-lock region - * - * All content of the pseudo-locked region is reset - any memory allocated - * freed. - * - * Return: void - */ -static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) -{ - plr->size = 0; - plr->line_size = 0; - kfree(plr->kmem); - plr->kmem = NULL; - plr->s = NULL; - if (plr->d) - plr->d->plr = NULL; - plr->d = NULL; - plr->cbm = 0; - plr->debugfs_dir = NULL; -} - -/** - * pseudo_lock_region_init - Initialize pseudo-lock region information - * @plr: pseudo-lock region - * - * Called after user provided a schemata to be pseudo-locked. From the - * schemata the &struct pseudo_lock_region is on entry already initialized - * with the resource, domain, and capacity bitmask. Here the information - * required for pseudo-locking is deduced from this data and &struct - * pseudo_lock_region initialized further. This information includes: - * - size in bytes of the region to be pseudo-locked - * - cache line size to know the stride with which data needs to be accessed - * to be pseudo-locked - * - a cpu associated with the cache instance on which the pseudo-locking - * flow can be executed - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_init(struct pseudo_lock_region *plr) -{ - struct cpu_cacheinfo *ci; - int ret; - int i; - - /* Pick the first cpu we find that is associated with the cache. */ - plr->cpu = cpumask_first(&plr->d->cpu_mask); - - if (!cpu_online(plr->cpu)) { - rdt_last_cmd_printf("CPU %u associated with cache not online\n", - plr->cpu); - ret = -ENODEV; - goto out_region; - } - - ci = get_cpu_cacheinfo(plr->cpu); - - plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); - - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == plr->s->res->cache_level) { - plr->line_size = ci->info_list[i].coherency_line_size; - return 0; - } - } - - ret = -1; - rdt_last_cmd_puts("Unable to determine cache line size\n"); -out_region: - pseudo_lock_region_clear(plr); - return ret; -} - -/** - * pseudo_lock_init - Initialize a pseudo-lock region - * @rdtgrp: resource group to which new pseudo-locked region will belong - * - * A pseudo-locked region is associated with a resource group. When this - * association is created the pseudo-locked region is initialized. The - * details of the pseudo-locked region are not known at this time so only - * allocation is done and association established. 
- * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_init(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr; - - plr = kzalloc(sizeof(*plr), GFP_KERNEL); - if (!plr) - return -ENOMEM; - - init_waitqueue_head(&plr->lock_thread_wq); - INIT_LIST_HEAD(&plr->pm_reqs); - rdtgrp->plr = plr; - return 0; -} - -/** - * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked - * @plr: pseudo-lock region - * - * Initialize the details required to set up the pseudo-locked region and - * allocate the contiguous memory that will be pseudo-locked to the cache. - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) -{ - int ret; - - ret = pseudo_lock_region_init(plr); - if (ret < 0) - return ret; - - /* - * We do not yet support contiguous regions larger than - * KMALLOC_MAX_SIZE. - */ - if (plr->size > KMALLOC_MAX_SIZE) { - rdt_last_cmd_puts("Requested region exceeds maximum size\n"); - ret = -E2BIG; - goto out_region; - } - - plr->kmem = kzalloc(plr->size, GFP_KERNEL); - if (!plr->kmem) { - rdt_last_cmd_puts("Unable to allocate memory\n"); - ret = -ENOMEM; - goto out_region; - } - - ret = 0; - goto out; -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * pseudo_lock_free - Free a pseudo-locked region - * @rdtgrp: resource group to which pseudo-locked region belonged - * - * The pseudo-locked region's resources have already been released, or not - * yet created at this point. Now it can be freed and disassociated from the - * resource group. - * - * Return: void - */ -static void pseudo_lock_free(struct rdtgroup *rdtgrp) -{ - pseudo_lock_region_clear(rdtgrp->plr); - kfree(rdtgrp->plr); - rdtgrp->plr = NULL; -} - /** * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache * @_plr: the pseudo-lock region descriptor @@ -543,345 +228,6 @@ int resctrl_arch_pseudo_lock_fn(void *_plr) return 0; } -/** - * rdtgroup_monitor_in_progress - Test if monitoring in progress - * @rdtgrp: resource group being queried - * - * Return: 1 if monitor groups have been created for this resource - * group, 0 otherwise. - */ -static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) -{ - return !list_empty(&rdtgrp->mon.crdtgrp_list); -} - -/** - * rdtgroup_locksetup_user_restrict - Restrict user access to group - * @rdtgrp: resource group needing access restricted - * - * A resource group used for cache pseudo-locking cannot have cpus or tasks - * assigned to it. This is communicated to the user by restricting access - * to all the files that can be used to make such changes. - * - * Permissions restored with rdtgroup_locksetup_user_restore() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restriction of access an attempt will be made to restore permissions but - * the state of the mode of these files will be uncertain when a failure - * occurs. 
- */ -static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); - if (ret) - goto err_cpus; - - if (resctrl_arch_mon_capable()) { - ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); -err_cpus: - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); -err_tasks: - rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); -out: - return ret; -} - -/** - * rdtgroup_locksetup_user_restore - Restore user access to group - * @rdtgrp: resource group needing access restored - * - * Restore all file access previously removed using - * rdtgroup_locksetup_user_restrict() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restoration of access an attempt will be made to restrict permissions - * again but the state of the mode of these files will be uncertain when - * a failure occurs. - */ -static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); - if (ret) - goto err_cpus; - - if (resctrl_arch_mon_capable()) { - ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); -err_cpus: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); -err_tasks: - rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); -out: - return ret; -} - -/** - * rdtgroup_locksetup_enter - Resource group enters locksetup mode - * @rdtgrp: resource group requested to enter locksetup mode - * - * A resource group enters locksetup mode to reflect that it would be used - * to represent a pseudo-locked region and is in the process of being set - * up to do so. A resource group used for a pseudo-locked region would - * lose the closid associated with it so we cannot allow it to have any - * tasks or cpus assigned nor permit tasks or cpus to be assigned in the - * future. Monitoring of a pseudo-locked region is not allowed either. - * - * The above and more restrictions on a pseudo-locked region are checked - * for and enforced before the resource group enters the locksetup mode. - * - * Returns: 0 if the resource group successfully entered locksetup mode, <0 - * on failure. On failure the last_cmd_status buffer is updated with text to - * communicate details of failure to the user. - */ -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) -{ - int ret; - - /* - * The default resource group can neither be removed nor lose the - * default closid associated with it. - */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); - return -EINVAL; - } - - /* - * Cache Pseudo-locking not supported when CDP is enabled. - * - * Some things to consider if you would like to enable this - * support (using L3 CDP as example): - * - When CDP is enabled two separate resources are exposed, - * L3DATA and L3CODE, but they are actually on the same cache. 
- *    The implication for pseudo-locking is that if a
- *    pseudo-locked region is created on a domain of one
- *    resource (eg. L3CODE), then a pseudo-locked region cannot
- *    be created on that same domain of the other resource
- *    (eg. L3DATA). This is because the creation of a
- *    pseudo-locked region involves a call to wbinvd that will
- *    affect all cache allocations on that particular domain.
- *  - Considering the previous, it may be possible to only
- *    expose one of the CDP resources to pseudo-locking and
- *    hide the other. For example, we could consider only
- *    exposing L3DATA and since the L3 cache is unified it is
- *    still possible to place instructions there and execute them.
- *  - If only one region is exposed to pseudo-locking we should
- *    still keep in mind that availability of a portion of cache
- *    for pseudo-locking should take into account both resources.
- *    Similarly, if a pseudo-locked region is created in one
- *    resource, the portion of cache used by it should be made
- *    unavailable to all future allocations from both resources.
- */
-	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
-	    resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
-		rdt_last_cmd_puts("CDP enabled\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Not knowing the bits to disable prefetching implies that this
-	 * platform does not support Cache Pseudo-Locking.
-	 */
-	if (resctrl_arch_get_prefetch_disable_bits() == 0) {
-		rdt_last_cmd_puts("Pseudo-locking not supported\n");
-		return -EINVAL;
-	}
-
-	if (rdtgroup_monitor_in_progress(rdtgrp)) {
-		rdt_last_cmd_puts("Monitoring in progress\n");
-		return -EINVAL;
-	}
-
-	if (rdtgroup_tasks_assigned(rdtgrp)) {
-		rdt_last_cmd_puts("Tasks assigned to resource group\n");
-		return -EINVAL;
-	}
-
-	if (!cpumask_empty(&rdtgrp->cpu_mask)) {
-		rdt_last_cmd_puts("CPUs assigned to resource group\n");
-		return -EINVAL;
-	}
-
-	if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
-		rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
-		return -EIO;
-	}
-
-	ret = pseudo_lock_init(rdtgrp);
-	if (ret) {
-		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
-		goto out_release;
-	}
-
-	/*
-	 * If this system is capable of monitoring, an RMID would have been
-	 * allocated when the control group was created. It is no longer
-	 * needed once this group is used for pseudo-locking. This
-	 * is safe to call on platforms not capable of monitoring.
-	 */
-	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
-
-	ret = 0;
-	goto out;
-
-out_release:
-	rdtgroup_locksetup_user_restore(rdtgrp);
-out:
-	return ret;
-}
-
-/**
- * rdtgroup_locksetup_exit - resource group exits locksetup mode
- * @rdtgrp: resource group
- *
- * When a resource group exits locksetup mode the earlier restrictions are
- * lifted.
- *
- * Return: 0 on success, <0 on failure
- */
-int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
-{
-	int ret;
-
-	if (resctrl_arch_mon_capable()) {
-		ret = alloc_rmid(rdtgrp->closid);
-		if (ret < 0) {
-			rdt_last_cmd_puts("Out of RMIDs\n");
-			return ret;
-		}
-		rdtgrp->mon.rmid = ret;
-	}
-
-	ret = rdtgroup_locksetup_user_restore(rdtgrp);
-	if (ret) {
-		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
-		return ret;
-	}
-
-	pseudo_lock_free(rdtgrp);
-	return 0;
-}
-
-/**
- * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
- * @d: RDT domain
- * @cbm: CBM to test
- *
- * @d represents a cache instance and @cbm a capacity bitmask that is
- * considered for it. Determine if @cbm overlaps with any existing
- * pseudo-locked region on @d.
- * - * @cbm is unsigned long, even if only 32 bits are used, to make the - * bitmap functions work correctly. - * - * Return: true if @cbm overlaps with pseudo-locked region on @d, false - * otherwise. - */ -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) -{ - unsigned int cbm_len; - unsigned long cbm_b; - - if (d->plr) { - cbm_len = d->plr->s->res->cache.cbm_len; - cbm_b = d->plr->cbm; - if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) - return true; - } - return false; -} - -/** - * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy - * @d: RDT domain under test - * - * The setup of a pseudo-locked region affects all cache instances within - * the hierarchy of the region. It is thus essential to know if any - * pseudo-locked regions exist within a cache hierarchy to prevent any - * attempts to create new pseudo-locked regions in the same hierarchy. - * - * Return: true if a pseudo-locked region exists in the hierarchy of @d or - * if it is not possible to test due to memory allocation issue, - * false otherwise. - */ -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) -{ - cpumask_var_t cpu_with_psl; - enum resctrl_res_level i; - struct rdt_resource *r; - struct rdt_domain *d_i; - bool ret = false; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) - return true; - - /* - * First determine which cpus have pseudo-locked regions - * associated with them. - */ - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->alloc_capable) - continue; - - list_for_each_entry(d_i, &r->domains, list) { - if (d_i->plr) - cpumask_or(cpu_with_psl, cpu_with_psl, - &d_i->cpu_mask); - } - } - - /* - * Next test if new pseudo-locked region would intersect with - * existing region. - */ - if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) - ret = true; - - free_cpumask_var(cpu_with_psl); - return ret; -} - /** * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read * pseudo-locked memory @@ -1174,442 +520,3 @@ int resctrl_arch_measure_l3_residency(void *_plr) wake_up_interruptible(&plr->lock_thread_wq); return 0; } - -/** - * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region - * @rdtgrp: Resource group to which the pseudo-locked region belongs. - * @sel: Selector of which measurement to perform on a pseudo-locked region. - * - * The measurement of latency to access a pseudo-locked region should be - * done from a cpu that is associated with that pseudo-locked region. - * Determine which cpu is associated with this region and start a thread on - * that cpu to perform the measurement, wait for that thread to complete. 
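The overlap tests above ultimately reduce to bitmap_intersects() on two capacity bitmasks. A userspace approximation, assuming an unsigned long wide enough to hold cbm_len bits:

#include <stdbool.h>

/* Two CBMs overlap iff they share a bit inside the cbm_len-wide window. */
static bool cbm_overlaps(unsigned long a, unsigned long b, unsigned int cbm_len)
{
	unsigned long mask;

	if (cbm_len >= sizeof(unsigned long) * 8)
		mask = ~0UL;
	else
		mask = (1UL << cbm_len) - 1;

	return (a & b & mask) != 0;
}

For example, cbm_overlaps(0x00f, 0x018, 11) is true because both masks contain bit 3.
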
- * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int cpu; - int ret = -1; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out; - } - - if (!plr->d) { - ret = -ENODEV; - goto out; - } - - plr->thread_done = 0; - cpu = cpumask_first(&plr->d->cpu_mask); - if (!cpu_online(cpu)) { - ret = -ENODEV; - goto out; - } - - plr->cpu = cpu; - - if (sel == 1) - thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, - plr, cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 2) - thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, - plr, cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 3) - thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, - plr, cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else - goto out; - - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - goto out; - } - kthread_bind(thread, cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) - goto out; - - ret = 0; - -out: - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -static ssize_t pseudo_lock_measure_trigger(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct rdtgroup *rdtgrp = file->private_data; - size_t buf_size; - char buf[32]; - int ret; - int sel; - - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - ret = kstrtoint(buf, 10, &sel); - if (ret == 0) { - if (sel != 1 && sel != 2 && sel != 3) - return -EINVAL; - ret = debugfs_file_get(file->f_path.dentry); - if (ret) - return ret; - ret = pseudo_lock_measure_cycles(rdtgrp, sel); - if (ret == 0) - ret = count; - debugfs_file_put(file->f_path.dentry); - } - - return ret; -} - -static const struct file_operations pseudo_measure_fops = { - .write = pseudo_lock_measure_trigger, - .open = simple_open, - .llseek = default_llseek, -}; - -/** - * rdtgroup_pseudo_lock_create - Create a pseudo-locked region - * @rdtgrp: resource group to which pseudo-lock region belongs - * - * Called when a resource group in the pseudo-locksetup mode receives a - * valid schemata that should be pseudo-locked. Since the resource group is - * in pseudo-locksetup mode the &struct pseudo_lock_region has already been - * allocated and initialized with the essential information. If a failure - * occurs the resource group remains in the pseudo-locksetup mode with the - * &struct pseudo_lock_region associated with it, but cleared from all - * information and ready for the user to re-attempt pseudo-locking by - * writing the schemata again. - * - * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 - * on failure. Descriptive error will be written to last_cmd_status buffer. 
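The debugfs trigger above accepts a single selector: 1 for the latency measurement, 2 for L2 residency, 3 for L3 residency. A standalone sketch of the same copy, NUL-terminate, parse and range-check sequence (parse_measure_sel is an illustrative name):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int parse_measure_sel(const char *user_buf, size_t count, int *sel)
{
	char buf[32];
	size_t buf_size = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
	char *end;

	memcpy(buf, user_buf, buf_size);	/* copy_from_user() analogue */
	buf[buf_size] = '\0';

	*sel = (int)strtol(buf, &end, 10);
	if (end == buf || *sel < 1 || *sel > 3)
		return -EINVAL;

	return 0;
}
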
- */ -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int new_minor; - struct device *dev; - int ret; - - ret = pseudo_lock_region_alloc(plr); - if (ret < 0) - return ret; - - ret = pseudo_lock_cstates_constrain(plr); - if (ret < 0) { - ret = -EINVAL; - goto out_region; - } - - plr->thread_done = 0; - - plr->closid = rdtgrp->closid; - thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr, - cpu_to_node(plr->cpu), - "pseudo_lock/%u", plr->cpu); - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - rdt_last_cmd_printf("Locking thread returned error %d\n", ret); - goto out_cstates; - } - - kthread_bind(thread, plr->cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) { - /* - * If the thread does not get on the CPU for whatever - * reason and the process which sets up the region is - * interrupted then this will leave the thread in runnable - * state and once it gets on the CPU it will dereference - * the cleared, but not freed, plr struct resulting in an - * empty pseudo-locking loop. - */ - rdt_last_cmd_puts("Locking thread interrupted\n"); - goto out_cstates; - } - - ret = pseudo_lock_minor_get(&new_minor); - if (ret < 0) { - rdt_last_cmd_puts("Unable to obtain a new minor number\n"); - goto out_cstates; - } - - /* - * Unlock access but do not release the reference. The - * pseudo-locked region will still be here on return. - * - * The mutex has to be released temporarily to avoid a potential - * deadlock with the mm->mmap_lock which is obtained in the - * device_create() and debugfs_create_dir() callpath below as well as - * before the mmap() callback is called. - */ - mutex_unlock(&rdtgroup_mutex); - - if (!IS_ERR_OR_NULL(debugfs_resctrl)) { - plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, - debugfs_resctrl); - if (!IS_ERR_OR_NULL(plr->debugfs_dir)) - debugfs_create_file("pseudo_lock_measure", 0200, - plr->debugfs_dir, rdtgrp, - &pseudo_measure_fops); - } - - dev = device_create(&pseudo_lock_class, NULL, - MKDEV(pseudo_lock_major, new_minor), - rdtgrp, "%s", rdtgrp->kn->name); - - mutex_lock(&rdtgroup_mutex); - - if (IS_ERR(dev)) { - ret = PTR_ERR(dev); - rdt_last_cmd_printf("Failed to create character device: %d\n", - ret); - goto out_debugfs; - } - - /* We released the mutex - check if group was removed while we did so */ - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out_device; - } - - plr->minor = new_minor; - - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; - closid_free(rdtgrp->closid); - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444); - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444); - - ret = 0; - goto out; - -out_device: - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor)); -out_debugfs: - debugfs_remove_recursive(plr->debugfs_dir); - pseudo_lock_minor_release(new_minor); -out_cstates: - pseudo_lock_cstates_relax(plr); -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region - * @rdtgrp: resource group to which the pseudo-locked region belongs - * - * The removal of a pseudo-locked region can be initiated when the resource - * group is removed from user space via a "rmdir" from userspace or the - * unmount of the resctrl filesystem. On removal the resource group does - * not go back to pseudo-locksetup mode before it is removed, instead it is - * removed directly. 
There is thus asymmetry with the creation where the - * &struct pseudo_lock_region is removed here while it was not created in - * rdtgroup_pseudo_lock_create(). - * - * Return: void - */ -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * Default group cannot be a pseudo-locked region so we can - * free closid here. - */ - closid_free(rdtgrp->closid); - goto free; - } - - pseudo_lock_cstates_relax(plr); - debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); - pseudo_lock_minor_release(plr->minor); - -free: - pseudo_lock_free(rdtgrp); -} - -static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - - rdtgrp = region_find_by_minor(iminor(inode)); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - filp->private_data = rdtgrp; - atomic_inc(&rdtgrp->waitcount); - /* Perform a non-seekable open - llseek is not supported */ - filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - - mutex_unlock(&rdtgroup_mutex); - - return 0; -} - -static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - filp->private_data = NULL; - atomic_dec(&rdtgrp->waitcount); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int pseudo_lock_dev_mremap(struct vm_area_struct *area) -{ - /* Not supported */ - return -EINVAL; -} - -static const struct vm_operations_struct pseudo_mmap_ops = { - .mremap = pseudo_lock_dev_mremap, -}; - -static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long vsize = vma->vm_end - vma->vm_start; - unsigned long off = vma->vm_pgoff << PAGE_SHIFT; - struct pseudo_lock_region *plr; - struct rdtgroup *rdtgrp; - unsigned long physical; - unsigned long psize; - - mutex_lock(&rdtgroup_mutex); - - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - plr = rdtgrp->plr; - - if (!plr->d) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - /* - * Task is required to run with affinity to the cpus associated - * with the pseudo-locked region. If this is not the case the task - * may be scheduled elsewhere and invalidate entries in the - * pseudo-locked region. - */ - if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - physical = __pa(plr->kmem) >> PAGE_SHIFT; - psize = plr->size - off; - - if (off > plr->size) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - /* - * Ensure changes are carried directly to the memory being mapped, - * do not allow copy-on-write mapping. 
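The mmap handler above validates the requested window against the pseudo-locked region before remap_pfn_range() is attempted. The same checks in isolation, reordered slightly so the subtraction cannot wrap (check_mmap_bounds is an illustrative name):

#include <errno.h>
#include <stddef.h>

/* @off: file offset, @vsize: requested mapping length,
 * @region_size: total size of the pseudo-locked region. */
static int check_mmap_bounds(size_t region_size, size_t off, size_t vsize)
{
	size_t psize;

	if (off > region_size)
		return -ENOSPC;		/* offset past the end of the region */

	psize = region_size - off;	/* bytes remaining from @off */
	if (vsize > psize)
		return -ENOSPC;		/* mapping longer than the remainder */

	return 0;
}
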
- */ - if (!(vma->vm_flags & VM_SHARED)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - if (vsize > psize) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - memset(plr->kmem + off, 0, vsize); - - if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, - vsize, vma->vm_page_prot)) { - mutex_unlock(&rdtgroup_mutex); - return -EAGAIN; - } - vma->vm_ops = &pseudo_mmap_ops; - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static const struct file_operations pseudo_lock_dev_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .read = NULL, - .write = NULL, - .open = pseudo_lock_dev_open, - .release = pseudo_lock_dev_release, - .mmap = pseudo_lock_dev_mmap, -}; - -int rdt_pseudo_lock_init(void) -{ - int ret; - - ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops); - if (ret < 0) - return ret; - - pseudo_lock_major = ret; - - ret = class_register(&pseudo_lock_class); - if (ret) { - unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - return ret; - } - - return 0; -} - -void rdt_pseudo_lock_release(void) -{ - class_unregister(&pseudo_lock_class); - unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - pseudo_lock_major = 0; -} diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1425a33d201d..fe3952514add 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -12,22 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include - -#include #include #include "internal.h" @@ -36,4219 +22,239 @@ DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); -/* Mutex to protect rdtgroup access. */ -DEFINE_MUTEX(rdtgroup_mutex); - -static struct kernfs_root *rdt_root; -struct rdtgroup rdtgroup_default; -LIST_HEAD(rdt_all_groups); - -/* list of entries for the schemata file */ -LIST_HEAD(resctrl_schema_all); - -/* The filesystem can only be mounted once. */ -bool resctrl_mounted; - -/* Kernel fs node for "info" directory under root */ -static struct kernfs_node *kn_info; - -/* Kernel fs node for "mon_groups" directory under root */ -static struct kernfs_node *kn_mongrp; - -/* Kernel fs node for "mon_data" directory under root */ -static struct kernfs_node *kn_mondata; - /* - * Used to store the max resource name width and max resource data width - * to display the schemata in a tabular format + * This is safe against resctrl_arch_sched_in() called from __switch_to() + * because __switch_to() is executed with interrupts disabled. A local call + * from update_closid_rmid() is protected against __switch_to() because + * preemption is disabled. */ -int max_name_width, max_data_width; - -static struct seq_buf last_cmd_status; -static char last_cmd_status_buf[512]; - -static int rdtgroup_setup_root(struct rdt_fs_context *ctx); -static void rdtgroup_destroy_root(void); - -struct dentry *debugfs_resctrl; - -static bool resctrl_debug; - -void rdt_last_cmd_clear(void) -{ - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_clear(&last_cmd_status); -} - -void rdt_last_cmd_puts(const char *s) -{ - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_puts(&last_cmd_status, s); -} - -void rdt_last_cmd_printf(const char *fmt, ...) 
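rdt_pseudo_lock_init() and rdt_pseudo_lock_release() above follow the usual acquire-in-order, release-in-reverse shape, unwinding the chrdev registration if the class registration fails. A skeleton of that shape with stand-in functions (nothing here is a kernel API):

#include <errno.h>

static int chrdev_major;

static int fake_register_chrdev(void) { return 42; }	/* returns a major */
static int fake_class_register(void) { return 0; }
static void fake_unregister_chrdev(int major) { (void)major; }

static int demo_init(void)
{
	int ret = fake_register_chrdev();

	if (ret < 0)
		return ret;
	chrdev_major = ret;

	ret = fake_class_register();
	if (ret) {
		fake_unregister_chrdev(chrdev_major);	/* unwind step one */
		return ret;
	}

	return 0;
}
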
-{ - va_list ap; - - va_start(ap, fmt); - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_vprintf(&last_cmd_status, fmt, ap); - va_end(ap); -} - -void rdt_staged_configs_clear(void) +void resctrl_arch_sync_cpu_defaults(void *info) { - enum resctrl_res_level i; - struct rdt_resource *r; - struct rdt_domain *dom; - - lockdep_assert_held(&rdtgroup_mutex); - - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->alloc_capable) - continue; + struct resctrl_cpu_sync *r = info; - list_for_each_entry(dom, &r->domains, list) - memset(dom->staged_config, 0, sizeof(dom->staged_config)); + if (r) { + this_cpu_write(pqr_state.default_closid, r->closid); + this_cpu_write(pqr_state.default_rmid, r->rmid); } -} -static bool resctrl_is_mbm_enabled(void) -{ - return (resctrl_arch_is_mbm_total_enabled() || - resctrl_arch_is_mbm_local_enabled()); + /* + * We cannot unconditionally write the MSR because the current + * executing task might have its own closid selected. Just reuse + * the context switch code. + */ + resctrl_arch_sched_in(current); } -static bool resctrl_is_mbm_event(int e) -{ - return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); -} +#define INVALID_CONFIG_INDEX UINT_MAX -/* - * Trivial allocator for CLOSIDs. Since h/w only supports a small number, - * we can keep a bitmap of free CLOSIDs in a single integer. +/** + * mon_event_config_index_get - get the hardware index for the + * configurable event + * @evtid: event id. * - * Using a global CLOSID across all resources has some advantages and - * some drawbacks: - * + We can simply set current's closid to assign a task to a resource - * group. - * + Context switch code can avoid extra memory references deciding which - * CLOSID to load into the PQR_ASSOC MSR - * - We give up some options in configuring resource groups across multi-socket - * systems. - * - Our choices on how to configure each resource become progressively more - * limited as the number of resources grows. 
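The CLOSID allocator described above keeps the free set in a single unsigned long, allocates with find-first-set and frees by setting the bit again. A self-contained approximation (assuming a 64-bit unsigned long; closid 0 stays reserved for the default group, as in closid_init()):

#include <errno.h>

static unsigned long closid_free_map;

static void closid_init_demo(unsigned int num_closid)
{
	closid_free_map = (num_closid >= 64) ? ~0UL : (1UL << num_closid) - 1;
	closid_free_map &= ~1UL;	/* closid 0 reserved */
}

static int closid_alloc_demo(void)
{
	int closid = __builtin_ffsl((long)closid_free_map);

	if (closid == 0)
		return -ENOSPC;		/* map empty: all CLOSIDs in use */

	closid--;			/* ffs result is 1-based */
	closid_free_map &= ~(1UL << closid);
	return closid;
}

static void closid_free_demo(int closid)
{
	closid_free_map |= 1UL << closid;
}
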
+ * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID + * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID + * INVALID_CONFIG_INDEX for invalid evtid */ -static unsigned long closid_free_map; -static int closid_free_map_len; - -int closids_supported(void) -{ - return closid_free_map_len; -} - -static void closid_init(void) +static inline unsigned int mon_event_config_index_get(u32 evtid) { - struct resctrl_schema *s; - u32 rdt_min_closid = 32; - - /* Compute rdt_min_closid across all resources */ - list_for_each_entry(s, &resctrl_schema_all, list) - rdt_min_closid = min(rdt_min_closid, s->num_closid); - - closid_free_map = BIT_MASK(rdt_min_closid) - 1; - - /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ - __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map); - closid_free_map_len = rdt_min_closid; + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return 0; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return 1; + default: + /* Should never reach here */ + return INVALID_CONFIG_INDEX; + } } -static int closid_alloc(void) +void resctrl_arch_mon_event_config_read(void *info) { - int cleanest_closid; - u32 closid; - - lockdep_assert_held(&rdtgroup_mutex); + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; + u64 msrval; - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && - resctrl_arch_is_llc_occupancy_enabled()) { - cleanest_closid = resctrl_find_cleanest_closid(); - if (cleanest_closid < 0) - return cleanest_closid; - closid = cleanest_closid; - } else { - closid = ffs(closid_free_map); - if (closid == 0) - return -ENOSPC; - closid--; + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + return; } - __clear_bit(closid, &closid_free_map); + rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); - return closid; + /* Report only the valid event configuration bits */ + mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; } -void closid_free(int closid) +void resctrl_arch_mon_event_config_write(void *info) { - lockdep_assert_held(&rdtgroup_mutex); - - __set_bit(closid, &closid_free_map); -} + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; -/** - * closid_allocated - test if provided closid is in use - * @closid: closid to be tested - * - * Return: true if @closid is currently associated with a resource group, - * false if @closid is free - */ -bool closid_allocated(unsigned int closid) -{ - lockdep_assert_held(&rdtgroup_mutex); + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + mon_info->err = -EINVAL; + return; + } + wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); - return !test_bit(closid, &closid_free_map); + mon_info->err = 0; } -/** - * rdtgroup_mode_by_closid - Return mode of resource group with closid - * @closid: closid if the resource group - * - * Each resource group is associated with a @closid. Here the mode - * of a resource group can be queried by searching for it using its closid. - * - * Return: mode as &enum rdtgrp_mode of resource group with closid @closid - */ -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) +static void l3_qos_cfg_update(void *arg) { - struct rdtgroup *rdtgrp; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->closid == closid) - return rdtgrp->mode; - } + bool *enable = arg; - return RDT_NUM_MODES; + wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? 
L3_QOS_CDP_ENABLE : 0ULL); } -static const char * const rdt_mode_str[] = { - [RDT_MODE_SHAREABLE] = "shareable", - [RDT_MODE_EXCLUSIVE] = "exclusive", - [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", - [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", -}; - -/** - * rdtgroup_mode_str - Return the string representation of mode - * @mode: the resource group mode as &enum rdtgroup_mode - * - * Return: string representation of valid mode, "unknown" otherwise - */ -static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) +static void l2_qos_cfg_update(void *arg) { - if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) - return "unknown"; + bool *enable = arg; - return rdt_mode_str[mode]; + wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); } -/* set uid and gid of rdtgroup dirs and files to that of the creator */ -static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) +static int set_cache_qos_cfg(int level, bool enable) { - struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, - .ia_uid = current_fsuid(), - .ia_gid = current_fsgid(), }; - - if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && - gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) - return 0; + void (*update)(void *arg); + struct rdt_resource *r_l; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int cpu; - return kernfs_setattr(kn, &iattr); -} + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); -static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) -{ - struct kernfs_node *kn; - int ret; + if (level == RDT_RESOURCE_L3) + update = l3_qos_cfg_update; + else if (level == RDT_RESOURCE_L2) + update = l2_qos_cfg_update; + else + return -EINVAL; - kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - 0, rft->kf_ops, rft, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; + r_l = &rdt_resources_all[level].r_resctrl; + list_for_each_entry(d, &r_l->domains, list) { + if (r_l->cache.arch_has_per_cpu_cfg) + /* Pick all the CPUs in the domain instance */ + for_each_cpu(cpu, &d->cpu_mask) + cpumask_set_cpu(cpu, cpu_mask); + else + /* Pick one CPU from each domain instance to update MSR */ + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); } - return 0; -} + /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, update, &enable, 1); -static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - struct rftype *rft = of->kn->priv; + free_cpumask_var(cpu_mask); - if (rft->seq_show) - return rft->seq_show(of, m, arg); return 0; } -static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, - size_t nbytes, loff_t off) +/* Restore the qos cfg state when a domain comes online */ +void rdt_domain_reconfigure_cdp(struct rdt_resource *r) { - struct rftype *rft = of->kn->priv; - - if (rft->write) - return rft->write(of, buf, nbytes, off); - - return -EINVAL; -} - -static const struct kernfs_ops rdtgroup_kf_single_ops = { - .atomic_write_len = PAGE_SIZE, - .write = rdtgroup_file_write, - .seq_show = rdtgroup_seqfile_show, -}; + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); -static const struct kernfs_ops kf_mondata_ops = { - .atomic_write_len = PAGE_SIZE, - .seq_show = rdtgroup_mondata_show, -}; + if (!r->cdp_capable) + return; -static bool is_cpu_list(struct kernfs_open_file *of) -{ - 
struct rftype *rft = of->kn->priv;
+	if (r->rid == RDT_RESOURCE_L2)
+		l2_qos_cfg_update(&hw_res->cdp_enabled);
 
-	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
+	if (r->rid == RDT_RESOURCE_L3)
+		l3_qos_cfg_update(&hw_res->cdp_enabled);
 }
 
-static int rdtgroup_cpus_show(struct kernfs_open_file *of,
-			      struct seq_file *s, void *v)
+static int cdp_enable(int level)
 {
-	struct rdtgroup *rdtgrp;
-	struct cpumask *mask;
-	int ret = 0;
+	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
+	int ret;
 
-	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+	if (!r_l->alloc_capable)
+		return -EINVAL;
 
-	if (rdtgrp) {
-		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-			if (!rdtgrp->plr->d) {
-				rdt_last_cmd_clear();
-				rdt_last_cmd_puts("Cache domain offline\n");
-				ret = -ENODEV;
-			} else {
-				mask = &rdtgrp->plr->d->cpu_mask;
-				seq_printf(s, is_cpu_list(of) ?
-					   "%*pbl\n" : "%*pb\n",
-					   cpumask_pr_args(mask));
-			}
-		} else {
-			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
-				   cpumask_pr_args(&rdtgrp->cpu_mask));
-		}
-	} else {
-		ret = -ENOENT;
-	}
-	rdtgroup_kn_unlock(of->kn);
+	ret = set_cache_qos_cfg(level, true);
+	if (!ret)
+		rdt_resources_all[level].cdp_enabled = true;
 
 	return ret;
 }
 
-/*
- * This is safe against resctrl_arch_sched_in() called from __switch_to()
- * because __switch_to() is executed with interrupts disabled. A local call
- * from update_closid_rmid() is protected against __switch_to() because
- * preemption is disabled.
- */
-void resctrl_arch_sync_cpu_defaults(void *info)
-{
-	struct resctrl_cpu_sync *r = info;
-
-	if (r) {
-		this_cpu_write(pqr_state.default_closid, r->closid);
-		this_cpu_write(pqr_state.default_rmid, r->rmid);
-	}
-
-	/*
-	 * We cannot unconditionally write the MSR because the current
-	 * executing task might have its own closid selected. Just reuse
-	 * the context switch code.
-	 */
-	resctrl_arch_sched_in(current);
-}
-
-/*
- * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
- *
- * Per task closids/rmids must have been set up before calling this function.
- * @r may be NULL.
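cdp_enable() and cdp_disable() above keep the cached cdp_enabled flag in sync with the hardware by updating it only after the QOS_CFG write succeeds. The shape in isolation (qos_cfg_write is a stand-in for the MSR update, not a kernel function):

#include <stdbool.h>

static bool cdp_enabled;

static int qos_cfg_write(bool enable)
{
	(void)enable;
	return 0;
}

static int cdp_set(bool enable)
{
	int ret = qos_cfg_write(enable);

	if (!ret)
		cdp_enabled = enable;	/* cache state only once hw accepted it */
	return ret;
}
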
- */ -static void -update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) +static void cdp_disable(int level) { - struct resctrl_cpu_sync defaults; - struct resctrl_cpu_sync *defaults_p = NULL; + struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - if (r) { - defaults.closid = r->closid; - defaults.rmid = r->mon.rmid; - defaults_p = &defaults; + if (r_hw->cdp_enabled) { + set_cache_qos_cfg(level, false); + r_hw->cdp_enabled = false; } - - on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, - 1); } -static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask) +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) { - struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; - struct list_head *head; + struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - /* Check whether cpus belong to parent ctrl group */ - cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + if (!hw_res->r_resctrl.cdp_capable) return -EINVAL; - } - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Give any dropped cpus to parent rdtgroup */ - cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); - update_closid_rmid(tmpmask, prgrp); - } - - /* - * If we added cpus, remove them from previous group that owned them - * and update per-cpu rmid - */ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - if (crgrp == rdtgrp) - continue; - cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, - tmpmask); - } - update_closid_rmid(tmpmask, rdtgrp); - } + if (enable) + return cdp_enable(l); - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, newmask); + cdp_disable(l); return 0; } -static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) +static int reset_all_ctrls(struct rdt_resource *r) { - struct rdtgroup *crgrp; - - cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); - /* update the child mon group masks as well*/ - list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) - cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); -} + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); + struct rdt_hw_domain *hw_dom; + struct msr_param msr_param; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int i; -static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask, cpumask_var_t tmpmask1) -{ - struct rdtgroup *r, *crgrp; - struct list_head *head; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Can't drop from default group */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Can't drop CPUs from default group\n"); - return -EINVAL; - } + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - /* Give any dropped cpus to rdtgroup_default */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, tmpmask); - update_closid_rmid(tmpmask, &rdtgroup_default); - } + msr_param.res = r; + msr_param.low = 0; + msr_param.high = hw_res->num_closid; /* - * If we added cpus, remove them 
from previous group and - * the prev group's child groups that owned them - * and update per-cpu closid/rmid. + * Disable resource control for this resource by setting all + * CBMs in all domains to the maximum mask value. Pick one CPU + * from each domain to update the MSRs below. */ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { - if (r == rdtgrp) - continue; - cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); - if (!cpumask_empty(tmpmask1)) - cpumask_rdtgrp_clear(r, tmpmask1); - } - update_closid_rmid(tmpmask, rdtgrp); + list_for_each_entry(d, &r->domains, list) { + hw_dom = resctrl_to_arch_dom(d); + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); + + for (i = 0; i < hw_res->num_closid; i++) + hw_dom->ctrl_val[i] = r->default_ctrl; } - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, newmask); + /* Update CBM on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - /* - * Clear child mon group masks since there is a new parent mask - * now and update the rmid for the cpus the child lost. - */ - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); - update_closid_rmid(tmpmask, rdtgrp); - cpumask_clear(&crgrp->cpu_mask); - } + free_cpumask_var(cpu_mask); return 0; } -static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) +void resctrl_arch_reset_resources(void) { - cpumask_var_t tmpmask, newmask, tmpmask1; - struct rdtgroup *rdtgrp; - int ret; + struct rdt_resource *r; - if (!buf) - return -EINVAL; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - return -ENOMEM; - } - if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - return -ENOMEM; - } - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto unlock; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - if (is_cpu_list(of)) - ret = cpulist_parse(buf, newmask); - else - ret = cpumask_parse(buf, newmask); - - if (ret) { - rdt_last_cmd_puts("Bad CPU list/mask\n"); - goto unlock; - } - - /* check that user didn't specify any offline cpus */ - cpumask_andnot(tmpmask, newmask, cpu_online_mask); - if (!cpumask_empty(tmpmask)) { - ret = -EINVAL; - rdt_last_cmd_puts("Can only assign online CPUs\n"); - goto unlock; - } - - if (rdtgrp->type == RDTCTRL_GROUP) - ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); - else if (rdtgrp->type == RDTMON_GROUP) - ret = cpus_mon_write(rdtgrp, newmask, tmpmask); - else - ret = -EINVAL; - -unlock: - rdtgroup_kn_unlock(of->kn); - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - free_cpumask_var(tmpmask1); - - return ret ?: nbytes; -} - -/** - * rdtgroup_remove - the helper to remove resource group safely - * @rdtgrp: resource group to remove - * - * On resource group creation via a mkdir, an extra kernfs_node reference is - * taken to ensure that the rdtgroup structure remains accessible for the - * rdtgroup_kn_unlock() calls where it is removed. - * - * Drop the extra reference here, then free the rdtgroup structure. 
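The cpumask juggling in cpus_ctrl_write() and cpus_mon_write() boils down to two set differences: CPUs dropped from the group are returned to the parent or default group, and CPUs added are taken away from their previous owners. On plain bitmasks:

/* dropped = CPUs leaving the group, added = CPUs joining it */
static void cpu_mask_delta(unsigned long old_mask, unsigned long new_mask,
			   unsigned long *dropped, unsigned long *added)
{
	*dropped = old_mask & ~new_mask;
	*added = new_mask & ~old_mask;
}

cpus_ctrl_write() then ORs the dropped set into the default group's mask and clears the added set from every other group before copying the new mask into place.
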
- * - * Return: void - */ -static void rdtgroup_remove(struct rdtgroup *rdtgrp) -{ - kernfs_put(rdtgrp->kn); - kfree(rdtgrp); -} - -static void _update_task_closid_rmid(void *task) -{ - /* - * If the task is still current on this CPU, update PQR_ASSOC MSR. - * Otherwise, the MSR is updated when the task is scheduled in. - */ - if (task == current) - resctrl_arch_sched_in(task); -} - -static void update_task_closid_rmid(struct task_struct *t) -{ - if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) - smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); - else - _update_task_closid_rmid(t); -} - -static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) -{ - u32 closid, rmid = rdtgrp->mon.rmid; - - if (rdtgrp->type == RDTCTRL_GROUP) - closid = rdtgrp->closid; - else if (rdtgrp->type == RDTMON_GROUP) - closid = rdtgrp->mon.parent->closid; - else - return false; - - return resctrl_arch_match_closid(tsk, closid) && - resctrl_arch_match_rmid(tsk, closid, rmid); -} - -static int __rdtgroup_move_task(struct task_struct *tsk, - struct rdtgroup *rdtgrp) -{ - /* If the task is already in rdtgrp, no need to move the task. */ - if (task_in_rdtgroup(tsk, rdtgrp)) - return 0; - - /* - * Set the task's closid/rmid before the PQR_ASSOC MSR can be - * updated by them. - * - * For ctrl_mon groups, move both closid and rmid. - * For monitor groups, can move the tasks only from - * their parent CTRL group. - */ - if (rdtgrp->type == RDTMON_GROUP && - !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { - rdt_last_cmd_puts("Can't move task to different control group\n"); - return -EINVAL; - } - - if (rdtgrp->type == RDTMON_GROUP) - resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, - rdtgrp->mon.rmid); - else - resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, - rdtgrp->mon.rmid); - - /* - * Ensure the task's closid and rmid are written before determining if - * the task is current that will decide if it will be interrupted. - * This pairs with the full barrier between the rq->curr update and - * resctrl_arch_sched_in() during context switch. - */ - smp_mb(); - - /* - * By now, the task's closid and rmid are set. If the task is current - * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource - * group go into effect. If the task is not current, the MSR will be - * updated when the task is scheduled in. 
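As __rdtgroup_move_task() above shows, the closid/rmid pair written to a task depends on the group type: a monitor group reuses its parent control group's closid and supplies only the rmid. A small sketch of that selection (ids_for_move and struct task_ids are illustrative names):

#include <stdbool.h>

struct task_ids { unsigned int closid, rmid; };

static struct task_ids ids_for_move(bool mon_group, unsigned int own_closid,
				    unsigned int parent_closid,
				    unsigned int rmid)
{
	struct task_ids ids = {
		.closid = mon_group ? parent_closid : own_closid,
		.rmid = rmid,
	};

	return ids;
}
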
- */ - update_task_closid_rmid(tsk); - - return 0; -} - -static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && - resctrl_arch_match_closid(t, r->closid)); -} - -static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && - resctrl_arch_match_rmid(t, r->mon.parent->closid, - r->mon.rmid)); -} - -/** - * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group - * @r: Resource group - * - * Return: 1 if tasks have been assigned to @r, 0 otherwise - */ -int rdtgroup_tasks_assigned(struct rdtgroup *r) -{ - struct task_struct *p, *t; - int ret = 0; - - lockdep_assert_held(&rdtgroup_mutex); - - rcu_read_lock(); - for_each_process_thread(p, t) { - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - ret = 1; - break; - } - } - rcu_read_unlock(); - - return ret; -} - -static int rdtgroup_task_write_permission(struct task_struct *task, - struct kernfs_open_file *of) -{ - const struct cred *tcred = get_task_cred(task); - const struct cred *cred = current_cred(); - int ret = 0; - - /* - * Even if we're attaching all tasks in the thread group, we only - * need to check permissions on one of them. - */ - if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && - !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) { - rdt_last_cmd_printf("No permission to move task %d\n", task->pid); - ret = -EPERM; - } - - put_cred(tcred); - return ret; -} - -static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, - struct kernfs_open_file *of) -{ - struct task_struct *tsk; - int ret; - - rcu_read_lock(); - if (pid) { - tsk = find_task_by_vpid(pid); - if (!tsk) { - rcu_read_unlock(); - rdt_last_cmd_printf("No task %d\n", pid); - return -ESRCH; - } - } else { - tsk = current; - } - - get_task_struct(tsk); - rcu_read_unlock(); - - ret = rdtgroup_task_write_permission(tsk, of); - if (!ret) - ret = __rdtgroup_move_task(tsk, rdtgrp); - - put_task_struct(tsk); - return ret; -} - -static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - char *pid_str; - int ret = 0; - pid_t pid; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - rdt_last_cmd_clear(); - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - while (buf && buf[0] != '\0' && buf[0] != '\n') { - pid_str = strim(strsep(&buf, ",")); - - if (kstrtoint(pid_str, 0, &pid)) { - rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); - ret = -EINVAL; - break; - } - - if (pid < 0) { - rdt_last_cmd_printf("Invalid pid %d\n", pid); - ret = -EINVAL; - break; - } - - ret = rdtgroup_move_task(pid, rdtgrp, of); - if (ret) { - rdt_last_cmd_printf("Error while processing task %d\n", pid); - break; - } - } - -unlock: - rdtgroup_kn_unlock(of->kn); - - return ret ?: nbytes; -} - -static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) -{ - struct task_struct *p, *t; - pid_t pid; - - rcu_read_lock(); - for_each_process_thread(p, t) { - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - pid = task_pid_vnr(t); - if (pid) - seq_printf(s, "%d\n", pid); - } - } - rcu_read_unlock(); -} - -static int rdtgroup_tasks_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct 
rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - show_rdt_tasks(rdtgrp, s); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -static int rdtgroup_closid_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - seq_printf(s, "%u\n", rdtgrp->closid); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -static int rdtgroup_rmid_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - seq_printf(s, "%u\n", rdtgrp->mon.rmid); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -#ifdef CONFIG_PROC_CPU_RESCTRL - -/* - * A task can only be part of one resctrl control group and of one monitor - * group which is associated to that control group. - * - * 1) res: - * mon: - * - * resctrl is not available. - * - * 2) res:/ - * mon: - * - * Task is part of the root resctrl control group, and it is not associated - * to any monitor group. - * - * 3) res:/ - * mon:mon0 - * - * Task is part of the root resctrl control group and monitor group mon0. - * - * 4) res:group0 - * mon: - * - * Task is part of resctrl control group group0, and it is not associated - * to any monitor group. - * - * 5) res:group0 - * mon:mon1 - * - * Task is part of resctrl control group group0 and monitor group mon1. - */ -int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, - struct pid *pid, struct task_struct *tsk) -{ - struct rdtgroup *rdtg; - int ret = 0; - - mutex_lock(&rdtgroup_mutex); - - /* Return empty if resctrl has not been mounted. */ - if (!resctrl_mounted) { - seq_puts(s, "res:\nmon:\n"); - goto unlock; - } - - list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { - struct rdtgroup *crg; - - /* - * Task information is only relevant for shareable - * and exclusive groups. - */ - if (rdtg->mode != RDT_MODE_SHAREABLE && - rdtg->mode != RDT_MODE_EXCLUSIVE) - continue; - - if (!resctrl_arch_match_closid(tsk, rdtg->closid)) - continue; - - seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", - rdtg->kn->name); - seq_puts(s, "mon:"); - list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, - mon.crdtgrp_list) { - if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, - crg->mon.rmid)) - continue; - seq_printf(s, "%s", crg->kn->name); - break; - } - seq_putc(s, '\n'); - goto unlock; - } - /* - * The above search should succeed. Otherwise return - * with an error. 
- */ - ret = -ENOENT; -unlock: - mutex_unlock(&rdtgroup_mutex); - - return ret; -} -#endif - -static int rdt_last_cmd_status_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - int len; - - mutex_lock(&rdtgroup_mutex); - len = seq_buf_used(&last_cmd_status); - if (len) - seq_printf(seq, "%.*s", len, last_cmd_status_buf); - else - seq_puts(seq, "ok\n"); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int rdt_num_closids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - - seq_printf(seq, "%u\n", s->num_closid); - return 0; -} - -static int rdt_default_ctrl_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->default_ctrl); - return 0; -} - -static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->cache.min_cbm_bits); - return 0; -} - -static int rdt_shareable_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->cache.shareable_bits); - return 0; -} - -/* - * rdt_bit_usage_show - Display current usage of resources - * - * A domain is a shared resource that can now be allocated differently. Here - * we display the current regions of the domain as an annotated bitmask. - * For each domain of this resource its allocation bitmask - * is annotated as below to indicate the current usage of the corresponding bit: - * 0 - currently unused - * X - currently available for sharing and used by software and hardware - * H - currently used by hardware only but available for software use - * S - currently used and shareable by software only - * E - currently used exclusively by one resource group - * P - currently pseudo-locked by one resource group - */ -static int rdt_bit_usage_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - /* - * Use unsigned long even though only 32 bits are used to ensure - * test_bit() is used safely. - */ - unsigned long sw_shareable = 0, hw_shareable = 0; - unsigned long exclusive = 0, pseudo_locked = 0; - struct rdt_resource *r = s->res; - struct rdt_domain *dom; - int i, hwb, swb, excl, psl; - enum rdtgrp_mode mode; - bool sep = false; - u32 ctrl_val; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - hw_shareable = r->cache.shareable_bits; - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_putc(seq, ';'); - sw_shareable = 0; - exclusive = 0; - seq_printf(seq, "%d=", dom->id); - for (i = 0; i < closids_supported(); i++) { - if (!closid_allocated(i)) - continue; - ctrl_val = resctrl_arch_get_config(r, dom, i, - s->conf_type); - mode = rdtgroup_mode_by_closid(i); - switch (mode) { - case RDT_MODE_SHAREABLE: - sw_shareable |= ctrl_val; - break; - case RDT_MODE_EXCLUSIVE: - exclusive |= ctrl_val; - break; - case RDT_MODE_PSEUDO_LOCKSETUP: - /* - * RDT_MODE_PSEUDO_LOCKSETUP is possible - * here but not included since the CBM - * associated with this CLOSID in this mode - * is not initialized and no task or cpu can be - * assigned this CLOSID. 
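rdt_bit_usage_show() above prints one character per CBM bit, most significant bit first. A reduced userspace version covering the X/H/S/E/0 cases (the pseudo-locked 'P' case is omitted for brevity):

#include <stdio.h>

/* X = hw+sw shareable, H = hw only, S = sw only, E = exclusive, 0 = unused */
static void print_bit_usage(unsigned long hwb, unsigned long swb,
			    unsigned long excl, unsigned int cbm_len)
{
	for (int i = cbm_len - 1; i >= 0; i--) {
		int h = !!(hwb & (1UL << i));
		int s = !!(swb & (1UL << i));
		int e = !!(excl & (1UL << i));

		putchar(h && s ? 'X' : h ? 'H' : s ? 'S' : e ? 'E' : '0');
	}
	putchar('\n');
}
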
- */ - break; - case RDT_MODE_PSEUDO_LOCKED: - case RDT_NUM_MODES: - WARN(1, - "invalid mode for closid %d\n", i); - break; - } - } - for (i = r->cache.cbm_len - 1; i >= 0; i--) { - pseudo_locked = dom->plr ? dom->plr->cbm : 0; - hwb = test_bit(i, &hw_shareable); - swb = test_bit(i, &sw_shareable); - excl = test_bit(i, &exclusive); - psl = test_bit(i, &pseudo_locked); - if (hwb && swb) - seq_putc(seq, 'X'); - else if (hwb && !swb) - seq_putc(seq, 'H'); - else if (!hwb && swb) - seq_putc(seq, 'S'); - else if (excl) - seq_putc(seq, 'E'); - else if (psl) - seq_putc(seq, 'P'); - else /* Unused bits remain */ - seq_putc(seq, '0'); - } - sep = true; - } - seq_putc(seq, '\n'); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return 0; -} - -static int rdt_min_bw_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.min_bw); - return 0; -} - -static int rdt_num_rmids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - seq_printf(seq, "%d\n", r->num_rmid); - - return 0; -} - -static int rdt_mon_features_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - struct mon_evt *mevt; - - list_for_each_entry(mevt, &r->evt_list, list) { - seq_printf(seq, "%s\n", mevt->name); - if (mevt->configurable) - seq_printf(seq, "%s_config\n", mevt->name); - } - - return 0; -} - -static int rdt_bw_gran_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.bw_gran); - return 0; -} - -static int rdt_delay_linear_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.delay_linear); - return 0; -} - -static int max_threshold_occ_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); - - return 0; -} - -static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) - seq_puts(seq, "per-thread\n"); - else - seq_puts(seq, "max\n"); - - return 0; -} - -static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - unsigned int bytes; - int ret; - - ret = kstrtouint(buf, 0, &bytes); - if (ret) - return ret; - - if (bytes > resctrl_rmid_realloc_limit) - return -EINVAL; - - resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); - - return nbytes; -} - -/* - * rdtgroup_mode_show - Display mode of this resource group - */ -static int rdtgroup_mode_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); - - rdtgroup_kn_unlock(of->kn); - return 0; -} - -static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) -{ - switch (my_type) { - case CDP_CODE: - return CDP_DATA; - case CDP_DATA: - return CDP_CODE; - default: - case 
CDP_NONE:
-		return CDP_NONE;
-	}
-}
-
-static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
-					struct seq_file *seq, void *v)
-{
-	struct resctrl_schema *s = of->kn->parent->priv;
-	struct rdt_resource *r = s->res;
-
-	seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
-
-	return 0;
-}
-
-/**
- * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
- * @r: Resource to which domain instance @d belongs.
- * @d: The domain instance for which @closid is being tested.
- * @cbm: Capacity bitmask being tested.
- * @closid: Intended closid for @cbm.
- * @type: CDP type of @r.
- * @exclusive: Only check if overlaps with exclusive resource groups
- *
- * Checks if provided @cbm intended to be used for @closid on domain
- * @d overlaps with any other closids or other hardware usage associated
- * with this domain. If @exclusive is true then only overlaps with
- * resource groups in exclusive mode will be considered. If @exclusive
- * is false then overlaps with any resource group or hardware entities
- * will be considered.
- *
- * @cbm is unsigned long, even if only 32 bits are used, to make the
- * bitmap functions work correctly.
- *
- * Return: false if CBM does not overlap, true if it does.
- */
-static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-				    unsigned long cbm, int closid,
-				    enum resctrl_conf_type type, bool exclusive)
-{
-	enum rdtgrp_mode mode;
-	unsigned long ctrl_b;
-	int i;
-
-	/* Check for any overlap with regions used by hardware directly */
-	if (!exclusive) {
-		ctrl_b = r->cache.shareable_bits;
-		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
-			return true;
-	}
-
-	/* Check for overlap with other resource groups */
-	for (i = 0; i < closids_supported(); i++) {
-		ctrl_b = resctrl_arch_get_config(r, d, i, type);
-		mode = rdtgroup_mode_by_closid(i);
-		if (closid_allocated(i) && i != closid &&
-		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
-				if (exclusive) {
-					if (mode == RDT_MODE_EXCLUSIVE)
-						return true;
-					continue;
-				}
-				return true;
-			}
-		}
-	}
-
-	return false;
-}
-
-/**
- * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
- * @s: Schema for the resource to which domain instance @d belongs.
- * @d: The domain instance for which @closid is being tested.
- * @cbm: Capacity bitmask being tested.
- * @closid: Intended closid for @cbm.
- * @exclusive: Only check if overlaps with exclusive resource groups
- *
- * Resources that can be allocated using a CBM can use the CBM to control
- * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
- * for overlap. The overlap test is not limited to the specific resource for
- * which the CBM is intended though - when dealing with CDP resources that
- * share the underlying hardware the overlap check should be performed on
- * the CDP resource sharing the hardware also.
- *
- * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
- * overlap test.
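The loop in __rdtgroup_cbm_overlaps() skips the group's own closid and, when only exclusive conflicts matter, ignores overlaps with non-exclusive groups. The same control flow on plain arrays (all names here are illustrative):

#include <stdbool.h>

enum grp_mode { MODE_SHAREABLE, MODE_EXCLUSIVE };

static bool cbm_overlaps_any(unsigned long cbm, const unsigned long *ctrl,
			     const enum grp_mode *mode, int nr_closids,
			     int self, bool exclusive_only)
{
	for (int i = 0; i < nr_closids; i++) {
		if (i == self || !(cbm & ctrl[i]))
			continue;	/* own closid, or no shared bits */
		if (!exclusive_only || mode[i] == MODE_EXCLUSIVE)
			return true;
	}
	return false;
}
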
- * - * Return: true if CBM overlap detected, false if there is no overlap - */ -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - struct rdt_resource *r = s->res; - - if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type, - exclusive)) - return true; - - if (!resctrl_arch_get_cdp_enabled(r->rid)) - return false; - return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive); -} - -/** - * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive - * @rdtgrp: Resource group identified through its closid. - * - * An exclusive resource group implies that there should be no sharing of - * its allocated resources. At the time this group is considered to be - * exclusive this test can determine if its current schemata supports this - * setting by testing for overlap with all other resource groups. - * - * Return: true if resource group can be exclusive, false if there is overlap - * with allocations of other resource groups and thus this resource group - * cannot be exclusive. - */ -static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) -{ - int closid = rdtgrp->closid; - struct resctrl_schema *s; - struct rdt_resource *r; - bool has_cache = false; - struct rdt_domain *d; - u32 ctrl; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) - continue; - has_cache = true; - list_for_each_entry(d, &r->domains, list) { - ctrl = resctrl_arch_get_config(r, d, closid, - s->conf_type); - if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { - rdt_last_cmd_puts("Schemata overlaps\n"); - return false; - } - } - } - - if (!has_cache) { - rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); - return false; - } - - return true; -} - -/* - * rdtgroup_mode_write - Modify the resource group's mode - */ -static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - enum rdtgrp_mode mode; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - rdt_last_cmd_clear(); - - mode = rdtgrp->mode; - - if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || - (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || - (!strcmp(buf, "pseudo-locksetup") && - mode == RDT_MODE_PSEUDO_LOCKSETUP) || - (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) - goto out; - - if (mode == RDT_MODE_PSEUDO_LOCKED) { - rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); - ret = -EINVAL; - goto out; - } - - if (!strcmp(buf, "shareable")) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = RDT_MODE_SHAREABLE; - } else if (!strcmp(buf, "exclusive")) { - if (!rdtgroup_mode_test_exclusive(rdtgrp)) { - ret = -EINVAL; - goto out; - } - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = RDT_MODE_EXCLUSIVE; - } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && - !strcmp(buf, 
"pseudo-locksetup")) { - ret = rdtgroup_locksetup_enter(rdtgrp); - if (ret) - goto out; - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; - } else { - rdt_last_cmd_puts("Unknown or unsupported mode\n"); - ret = -EINVAL; - } - -out: - rdtgroup_kn_unlock(of->kn); - return ret ?: nbytes; -} - -/** - * rdtgroup_cbm_to_size - Translate CBM to size in bytes - * @r: RDT resource to which @d belongs. - * @d: RDT domain instance. - * @cbm: bitmask for which the size should be computed. - * - * The bitmask provided associated with the RDT domain instance @d will be - * translated into how many bytes it represents. The size in bytes is - * computed by first dividing the total cache size by the CBM length to - * determine how many bytes each bit in the bitmask represents. The result - * is multiplied with the number of bits set in the bitmask. - * - * @cbm is unsigned long, even if only 32 bits are used to make the - * bitmap functions work correctly. - */ -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, - struct rdt_domain *d, unsigned long cbm) -{ - struct cpu_cacheinfo *ci; - unsigned int size = 0; - int num_b, i; - - num_b = bitmap_weight(&cbm, r->cache.cbm_len); - ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == r->cache_level) { - size = ci->info_list[i].size / r->cache.cbm_len * num_b; - break; - } - } - - return size; -} - -/* - * rdtgroup_size_show - Display size in bytes of allocated regions - * - * The "size" file mirrors the layout of the "schemata" file, printing the - * size in bytes of each region instead of the capacity bitmask. - */ -static int rdtgroup_size_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - enum resctrl_conf_type type; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - struct rdt_domain *d; - unsigned int size; - int ret = 0; - u32 closid; - bool sep; - u32 ctrl; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%*s:", max_name_width, - rdtgrp->plr->s->name); - size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, - rdtgrp->plr->d, - rdtgrp->plr->cbm); - seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); - } - goto out; - } - - closid = rdtgrp->closid; - - list_for_each_entry(schema, &resctrl_schema_all, list) { - r = schema->res; - type = schema->conf_type; - sep = false; - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(d, &r->domains, list) { - if (sep) - seq_putc(s, ';'); - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - size = 0; - } else { - if (is_mba_sc(r)) - ctrl = d->mbps_val[closid]; - else - ctrl = resctrl_arch_get_config(r, d, - closid, - type); - if (r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) - size = ctrl; - else - size = rdtgroup_cbm_to_size(r, d, ctrl); - } - seq_printf(s, "%d=%u", d->id, size); - sep = true; - } - seq_putc(s, '\n'); - } - -out: - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -#define INVALID_CONFIG_INDEX UINT_MAX - -/** - * mon_event_config_index_get - get the hardware index for the - * configurable event - * @evtid: event id. 
- *
- * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID
- *         1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID
- *         INVALID_CONFIG_INDEX for invalid evtid
- */
-static inline unsigned int mon_event_config_index_get(u32 evtid)
-{
-	switch (evtid) {
-	case QOS_L3_MBM_TOTAL_EVENT_ID:
-		return 0;
-	case QOS_L3_MBM_LOCAL_EVENT_ID:
-		return 1;
-	default:
-		/* Should never reach here */
-		return INVALID_CONFIG_INDEX;
-	}
-}
-
-void resctrl_arch_mon_event_config_read(void *info)
-{
-	struct resctrl_mon_config_info *mon_info = info;
-	unsigned int index;
-	u64 msrval;
-
-	index = mon_event_config_index_get(mon_info->evtid);
-	if (index == INVALID_CONFIG_INDEX) {
-		pr_warn_once("Invalid event id %d\n", mon_info->evtid);
-		return;
-	}
-	rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval);
-
-	/* Report only the valid event configuration bits */
-	mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
-}
-
-static void mondata_config_read(struct resctrl_mon_config_info *mon_info)
-{
-	smp_call_function_any(&mon_info->d->cpu_mask,
-			      resctrl_arch_mon_event_config_read, mon_info, 1);
-}
-
-static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
-{
-	struct resctrl_mon_config_info mon_info = {0};
-	struct rdt_domain *dom;
-	bool sep = false;
-
-	cpus_read_lock();
-	mutex_lock(&rdtgroup_mutex);
-
-	list_for_each_entry(dom, &r->domains, list) {
-		if (sep)
-			seq_puts(s, ";");
-
-		memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info));
-		mon_info.r = r;
-		mon_info.d = dom;
-		mon_info.evtid = evtid;
-		mondata_config_read(&mon_info);
-
-		seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config);
-		sep = true;
-	}
-	seq_puts(s, "\n");
-
-	mutex_unlock(&rdtgroup_mutex);
-	cpus_read_unlock();
-
-	return 0;
-}
-
-static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
-				       struct seq_file *seq, void *v)
-{
-	struct rdt_resource *r = of->kn->parent->priv;
-
-	mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);
-
-	return 0;
-}
-
-static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
-				       struct seq_file *seq, void *v)
-{
-	struct rdt_resource *r = of->kn->parent->priv;
-
-	mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);
-
-	return 0;
-}
-
-void resctrl_arch_mon_event_config_write(void *info)
-{
-	struct resctrl_mon_config_info *mon_info = info;
-	unsigned int index;
-
-	index = mon_event_config_index_get(mon_info->evtid);
-	if (index == INVALID_CONFIG_INDEX) {
-		pr_warn_once("Invalid event id %d\n", mon_info->evtid);
-		mon_info->err = -EINVAL;
-		return;
-	}
-	wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0);
-
-	mon_info->err = 0;
-}
-
-static int mbm_config_write_domain(struct rdt_resource *r,
-				   struct rdt_domain *d, u32 evtid, u32 val)
-{
-	struct resctrl_mon_config_info mon_info = {0};
-
-	/*
-	 * Read the current config value first. If it is the same as the
-	 * new value then there is no need to write it again.
-	 */
-	mon_info.r = r;
-	mon_info.d = d;
-	mon_info.evtid = evtid;
-	mondata_config_read(&mon_info);
-	if (mon_info.mon_config == val)
-		return 0;
-
-	mon_info.mon_config = val;
-
-	/*
-	 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
-	 * domain. The MSRs, offset from MSR_IA32_EVT_CFG_BASE, are
-	 * scoped at the domain level. Writing any of these MSRs
-	 * on one CPU is observed by all the CPUs in the domain.
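
These read/write hooks surface as the info/L3_MON/mbm_total_bytes_config and mbm_local_bytes_config files. A userspace sketch of reading and then updating the domain-0 configuration; the path assumes a mounted resctrl on a BMEC-capable L3 system, and 0x7f (all documented bandwidth-type bits) is an illustrative value:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path =
			"/sys/fs/resctrl/info/L3_MON/mbm_total_bytes_config";
		char buf[64];
		ssize_t n;
		int fd = open(path, O_RDWR);

		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("current: %s", buf);	/* e.g. "0=0x7f;1=0x7f" */
		}
		/* Same "domain=value" syntax that mon_config_write() parses */
		lseek(fd, 0, SEEK_SET);
		if (write(fd, "0=0x7f\n", 7) < 0)
			perror("write");
		close(fd);
		return 0;
	}
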
- */ - smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, - &mon_info, 1); - if (mon_info.err) { - rdt_last_cmd_puts("Invalid event configuration\n"); - return mon_info.err; - } - - /* - * When an Event Configuration is changed, the bandwidth counters - * for all RMIDs and Events will be cleared by the hardware. The - * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for - * every RMID on the next read to any event for every RMID. - * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62) - * cleared while it is tracked by the hardware. Clear the - * mbm_local and mbm_total counts for all the RMIDs. - */ - resctrl_arch_reset_rmid_all(r, d); - - return 0; -} - -static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) -{ - char *dom_str = NULL, *id_str; - unsigned long dom_id, val; - struct rdt_domain *d; - int err; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - -next: - if (!tok || tok[0] == '\0') - return 0; - - /* Start processing the strings for each domain */ - dom_str = strim(strsep(&tok, ";")); - id_str = strsep(&dom_str, "="); - - if (!id_str || kstrtoul(id_str, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); - return -EINVAL; - } - - if (!dom_str || kstrtoul(dom_str, 16, &val)) { - rdt_last_cmd_puts("Non-numeric event configuration value\n"); - return -EINVAL; - } - - /* Value from user cannot be more than the supported set of events */ - if ((val & r->mbm_cfg_mask) != val) { - rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", - r->mbm_cfg_mask); - return -EINVAL; - } - - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - err = mbm_config_write_domain(r, d, evtid, val); - if (err) - return err; - goto next; - } - } - - return -EINVAL; -} - -static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource *r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - - return ret ?: nbytes; -} - -static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource *r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - - return ret ?: nbytes; -} - -/* rdtgroup information files for one cache resource. 
*/ -static struct rftype res_common_files[] = { - { - .name = "last_cmd_status", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_last_cmd_status_show, - .fflags = RFTYPE_TOP_INFO, - }, - { - .name = "num_closids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_closids_show, - .fflags = RFTYPE_CTRL_INFO, - }, - { - .name = "mon_features", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_mon_features_show, - .fflags = RFTYPE_MON_INFO, - }, - { - .name = "num_rmids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_rmids_show, - .fflags = RFTYPE_MON_INFO, - }, - { - .name = "cbm_mask", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_default_ctrl_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_cbm_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_cbm_bits_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "shareable_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_shareable_bits_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "bit_usage", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bit_usage_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_bandwidth", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_bw_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "bandwidth_gran", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bw_gran_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "delay_linear", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_delay_linear_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, - }, - /* - * Platform specific which (if any) capabilities are provided by - * thread_throttle_mode. Defer "fflags" initialization to platform - * discovery. 
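
Which entries of this table appear in a given directory is decided by the flag-subset test in rdtgroup_add_files() further down: a file is created only when every bit of its rft->fflags is present in the fflags passed by the caller, and files whose fflags are still zero (like thread_throttle_mode before platform discovery) are skipped. A standalone model of that test, with hypothetical flag values standing in for the RFTYPE_* bits:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins for RFTYPE_* bits; values are made up */
	#define RFTYPE_CTRL_INFO	0x1
	#define RFTYPE_RES_CACHE	0x2

	static bool file_visible(unsigned long dir_fflags,
				 unsigned long rft_fflags)
	{
		/* Same subset test as rdtgroup_add_files() */
		return rft_fflags && (dir_fflags & rft_fflags) == rft_fflags;
	}

	int main(void)
	{
		unsigned long cbm_mask = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE;

		/* Shown in a cache resource's info directory... */
		printf("%d\n", file_visible(cbm_mask, cbm_mask));	  /* 1 */
		/* ...but not in an MB info directory lacking RES_CACHE */
		printf("%d\n", file_visible(RFTYPE_CTRL_INFO, cbm_mask)); /* 0 */
		return 0;
	}
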
- */ - { - .name = "thread_throttle_mode", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_thread_throttle_mode_show, - }, - { - .name = "max_threshold_occupancy", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = max_threshold_occ_write, - .seq_show = max_threshold_occ_show, - .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "mbm_total_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_total_bytes_config_show, - .write = mbm_total_bytes_config_write, - }, - { - .name = "mbm_local_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_local_bytes_config_show, - .write = mbm_local_bytes_config_write, - }, - { - .name = "cpus", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "cpus_list", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .flags = RFTYPE_FLAGS_CPUS_LIST, - .fflags = RFTYPE_BASE, - }, - { - .name = "tasks", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_tasks_write, - .seq_show = rdtgroup_tasks_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "mon_hw_id", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_rmid_show, - .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, - }, - { - .name = "schemata", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_schemata_write, - .seq_show = rdtgroup_schemata_show, - .fflags = RFTYPE_CTRL_BASE, - }, - { - .name = "mode", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_mode_write, - .seq_show = rdtgroup_mode_show, - .fflags = RFTYPE_CTRL_BASE, - }, - { - .name = "size", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_size_show, - .fflags = RFTYPE_CTRL_BASE, - }, - { - .name = "sparse_masks", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_has_sparse_bitmasks_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "ctrl_hw_id", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_closid_show, - .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, - }, - -}; - -static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) -{ - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - lockdep_assert_held(&rdtgroup_mutex); - - if (resctrl_debug) - fflags |= RFTYPE_DEBUG; - - for (rft = rfts; rft < rfts + len; rft++) { - if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { - ret = rdtgroup_add_file(kn, rft); - if (ret) - goto error; - } - } - - return 0; -error: - pr_warn("Failed to add %s, err=%d\n", rft->name, ret); - while (--rft >= rfts) { - if ((fflags & rft->fflags) == rft->fflags) - kernfs_remove_by_name(kn, rft->name); - } - return ret; -} - -static struct rftype *rdtgroup_get_rftype_by_name(const char *name) -{ - struct rftype *rfts, *rft; - int len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - return rft; - } - - return NULL; -} - -static void thread_throttle_mode_init(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - struct rftype *rft; - - if (!r->alloc_capable || - r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) - return; - - rft = 
rdtgroup_get_rftype_by_name("thread_throttle_mode"); - if (!rft) - return; - - rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; -} - -void mbm_config_rftype_init(const char *config) -{ - struct rftype *rft; - - rft = rdtgroup_get_rftype_by_name(config); - if (rft) - rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; -} - -/** - * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * - * The permissions of named resctrl file, directory, or link are modified - * to not allow read, write, or execute by any user. - * - * WARNING: This function is intended to communicate to the user that the - * resctrl file has been locked down - that it is not relevant to the - * particular state the system finds itself in. It should not be relied - * on to protect from user access because after the file's permissions - * are restricted the user can still change the permissions using chmod - * from the command line. - * - * Return: 0 on success, <0 on failure. - */ -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn; - int ret = 0; - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - iattr.ia_mode = S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode = S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode = S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -/** - * rdtgroup_kn_mode_restore - Restore user access to named resctrl file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * @mask: Mask of permissions that should be restored - * - * Restore the permissions of the named file. If @name is a directory the - * permissions of its parent will be used. - * - * Return: 0 on success, <0 on failure. 
- */ -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn, *parent; - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - iattr.ia_mode = rft->mode & mask; - } - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - parent = kernfs_get_parent(kn); - if (parent) { - iattr.ia_mode |= parent->mode; - kernfs_put(parent); - } - iattr.ia_mode |= S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode |= S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode |= S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -static int rdtgroup_mkdir_info_resdir(void *priv, char *name, - unsigned long fflags) -{ - struct kernfs_node *kn_subdir; - int ret; - - kn_subdir = kernfs_create_dir(kn_info, name, - kn_info->mode, priv); - if (IS_ERR(kn_subdir)) - return PTR_ERR(kn_subdir); - - ret = rdtgroup_kn_set_ugid(kn_subdir); - if (ret) - return ret; - - ret = rdtgroup_add_files(kn_subdir, fflags); - if (!ret) - kernfs_activate(kn_subdir); - - return ret; -} - -static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) -{ - enum resctrl_res_level i; - struct resctrl_schema *s; - struct rdt_resource *r; - unsigned long fflags; - char name[32]; - int ret; - - /* create the directory */ - kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); - if (IS_ERR(kn_info)) - return PTR_ERR(kn_info); - - ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); - if (ret) - goto out_destroy; - - /* loop over enabled controls, these are all alloc_capable */ - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - fflags = r->fflags | RFTYPE_CTRL_INFO; - ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); - if (ret) - goto out_destroy; - } - - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->mon_capable) - continue; - - fflags = r->fflags | RFTYPE_MON_INFO; - sprintf(name, "%s_MON", r->name); - ret = rdtgroup_mkdir_info_resdir(r, name, fflags); - if (ret) - goto out_destroy; - } - - ret = rdtgroup_kn_set_ugid(kn_info); - if (ret) - goto out_destroy; - - kernfs_activate(kn_info); - - return 0; - -out_destroy: - kernfs_remove(kn_info); - return ret; -} - -static int -mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, - char *name, struct kernfs_node **dest_kn) -{ - struct kernfs_node *kn; - int ret; - - /* create the directory */ - kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - if (dest_kn) - *dest_kn = kn; - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) - goto out_destroy; - - kernfs_activate(kn); - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -static void l3_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); -} - -static void l2_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? 
L2_QOS_CDP_ENABLE : 0ULL); -} - -static inline bool is_mba_linear(void) -{ - return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; -} - -static int set_cache_qos_cfg(int level, bool enable) -{ - void (*update)(void *arg); - struct rdt_resource *r_l; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int cpu; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (level == RDT_RESOURCE_L3) - update = l3_qos_cfg_update; - else if (level == RDT_RESOURCE_L2) - update = l2_qos_cfg_update; - else - return -EINVAL; - - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - - r_l = &rdt_resources_all[level].r_resctrl; - list_for_each_entry(d, &r_l->domains, list) { - if (r_l->cache.arch_has_per_cpu_cfg) - /* Pick all the CPUs in the domain instance */ - for_each_cpu(cpu, &d->cpu_mask) - cpumask_set_cpu(cpu, cpu_mask); - else - /* Pick one CPU from each domain instance to update MSR */ - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - } - - /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, update, &enable, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -/* Restore the qos cfg state when a domain comes online */ -void rdt_domain_reconfigure_cdp(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - - if (!r->cdp_capable) - return; - - if (r->rid == RDT_RESOURCE_L2) - l2_qos_cfg_update(&hw_res->cdp_enabled); - - if (r->rid == RDT_RESOURCE_L3) - l3_qos_cfg_update(&hw_res->cdp_enabled); -} - -static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) -{ - u32 num_closid = resctrl_arch_get_num_closid(r); - int cpu = cpumask_any(&d->cpu_mask); - int i; - - d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), - GFP_KERNEL, cpu_to_node(cpu)); - if (!d->mbps_val) - return -ENOMEM; - - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - - return 0; -} - -static void mba_sc_domain_destroy(struct rdt_resource *r, - struct rdt_domain *d) -{ - kfree(d->mbps_val); - d->mbps_val = NULL; -} - -/* - * MBA software controller is supported only if - * MBM is supported and MBA is in linear scale. - */ -static bool supports_mba_mbps(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - - return (resctrl_arch_is_mbm_local_enabled() && - r->alloc_capable && is_mba_linear()); -} - -/* - * Enable or disable the MBA software controller - * which helps user specify bandwidth in MBps. 
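
With the controller enabled, MB: values in schemata are interpreted as MBps targets rather than percentages. It is enabled at mount time with the mba_MBps option; a sketch, assuming an MBM-capable system with linear-scale MBA (otherwise the mount fails with EINVAL, per supports_mba_mbps()):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Equivalent to: mount -t resctrl -o mba_MBps resctrl /sys/fs/resctrl */
		if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "mba_MBps")) {
			perror("mount");
			return 1;
		}
		return 0;
	}
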
- */ -static int set_mba_sc(bool mba_sc) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - u32 num_closid = resctrl_arch_get_num_closid(r); - struct rdt_domain *d; - int i; - - if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) - return -EINVAL; - - r->membw.mba_sc = mba_sc; - - list_for_each_entry(d, &r->domains, list) { - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - } - - return 0; -} - -static int cdp_enable(int level) -{ - struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; - int ret; - - if (!r_l->alloc_capable) - return -EINVAL; - - ret = set_cache_qos_cfg(level, true); - if (!ret) - rdt_resources_all[level].cdp_enabled = true; - - return ret; -} - -static void cdp_disable(int level) -{ - struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - - if (r_hw->cdp_enabled) { - set_cache_qos_cfg(level, false); - r_hw->cdp_enabled = false; - } -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) -{ - struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - - if (!hw_res->r_resctrl.cdp_capable) - return -EINVAL; - - if (enable) - return cdp_enable(l); - - cdp_disable(l); - - return 0; -} - -/* - * We don't allow rdtgroup directories to be created anywhere - * except the root directory. Thus when looking for the rdtgroup - * structure for a kernfs node we are either looking at a directory, - * in which case the rdtgroup structure is pointed at by the "priv" - * field, otherwise we have a file, and need only look to the parent - * to find the rdtgroup. - */ -static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) -{ - if (kernfs_type(kn) == KERNFS_DIR) { - /* - * All the resource directories use "kn->priv" - * to point to the "struct rdtgroup" for the - * resource. "info" and its subdirectories don't - * have rdtgroup structures, so return NULL here. - */ - if (kn == kn_info || kn->parent == kn_info) - return NULL; - else - return kn->priv; - } else { - return kn->parent->priv; - } -} - -static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - atomic_inc(&rdtgrp->waitcount); - kernfs_break_active_protection(kn); -} - -static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - if (atomic_dec_and_test(&rdtgrp->waitcount) && - (rdtgrp->flags & RDT_DELETED)) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - kernfs_unbreak_active_protection(kn); - rdtgroup_remove(rdtgrp); - } else { - kernfs_unbreak_active_protection(kn); - } -} - -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return NULL; - - rdtgroup_kn_get(rdtgrp, kn); - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - /* Was this group deleted while we waited? 
*/ - if (rdtgrp->flags & RDT_DELETED) - return NULL; - - return rdtgrp; -} - -void rdtgroup_kn_unlock(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return; - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - - rdtgroup_kn_put(rdtgrp, kn); -} - -static int mkdir_mondata_all(struct kernfs_node *parent_kn, - struct rdtgroup *prgrp, - struct kernfs_node **mon_data_kn); - -static void rdt_disable_ctx(void) -{ - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); - set_mba_sc(false); - - resctrl_debug = false; -} - -static int rdt_enable_ctx(struct rdt_fs_context *ctx) -{ - int ret = 0; - - if (ctx->enable_cdpl2) { - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); - if (ret) - goto out_done; - } - - if (ctx->enable_cdpl3) { - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); - if (ret) - goto out_cdpl2; - } - - if (ctx->enable_mba_mbps) { - ret = set_mba_sc(true); - if (ret) - goto out_cdpl3; - } - - if (ctx->enable_debug) - resctrl_debug = true; - - return 0; - -out_cdpl3: - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); -out_cdpl2: - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); -out_done: - return ret; -} - -static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) -{ - struct resctrl_schema *s; - const char *suffix = ""; - int ret, cl; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; - - s->res = r; - s->num_closid = resctrl_arch_get_num_closid(r); - if (resctrl_arch_get_cdp_enabled(r->rid)) - s->num_closid /= 2; - - s->conf_type = type; - switch (type) { - case CDP_CODE: - suffix = "CODE"; - break; - case CDP_DATA: - suffix = "DATA"; - break; - case CDP_NONE: - suffix = ""; - break; - } - - ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); - if (ret >= sizeof(s->name)) { - kfree(s); - return -EINVAL; - } - - cl = strlen(s->name); - - /* - * If CDP is supported by this resource, but not enabled, - * include the suffix. This ensures the tabular format of the - * schemata file does not change between mounts of the filesystem. - */ - if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) - cl += 4; - - if (cl > max_name_width) - max_name_width = cl; - - /* - * Choose a width for the resource data based on the resource that has - * widest name and cbm. 
- */ - max_data_width = max(max_data_width, r->data_width); - - INIT_LIST_HEAD(&s->list); - list_add(&s->list, &resctrl_schema_all); - - return 0; -} - -static int schemata_list_create(void) -{ - enum resctrl_res_level i; - struct rdt_resource *r; - int ret = 0; - - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->alloc_capable) - continue; - - if (resctrl_arch_get_cdp_enabled(r->rid)) { - ret = schemata_list_add(r, CDP_CODE); - if (ret) - break; - - ret = schemata_list_add(r, CDP_DATA); - } else { - ret = schemata_list_add(r, CDP_NONE); - } - - if (ret) - break; - } - - return ret; -} - -static void schemata_list_destroy(void) -{ - struct resctrl_schema *s, *tmp; - - list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { - list_del(&s->list); - kfree(s); - } -} - -static int rdt_get_tree(struct fs_context *fc) -{ - struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); - struct rdt_fs_context *ctx = rdt_fc2context(fc); - unsigned long flags = RFTYPE_CTRL_BASE; - struct rdt_domain *dom; - int ret; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - /* - * resctrl file system can only be mounted once. - */ - if (resctrl_mounted) { - ret = -EBUSY; - goto out; - } - - ret = rdtgroup_setup_root(ctx); - if (ret) - goto out; - - ret = rdt_enable_ctx(ctx); - if (ret) - goto out_root; - - ret = schemata_list_create(); - if (ret) { - schemata_list_destroy(); - goto out_ctx; - } - - closid_init(); - - if (resctrl_arch_mon_capable()) - flags |= RFTYPE_MON; - - ret = rdtgroup_add_files(rdtgroup_default.kn, flags); - if (ret) - goto out_schemata_free; - - kernfs_activate(rdtgroup_default.kn); - - ret = rdtgroup_create_info_dir(rdtgroup_default.kn); - if (ret < 0) - goto out_schemata_free; - - if (resctrl_arch_mon_capable()) { - ret = mongroup_create_dir(rdtgroup_default.kn, - &rdtgroup_default, "mon_groups", - &kn_mongrp); - if (ret < 0) - goto out_info; - - ret = mkdir_mondata_all(rdtgroup_default.kn, - &rdtgroup_default, &kn_mondata); - if (ret < 0) - goto out_mongrp; - rdtgroup_default.mon.mon_data_kn = kn_mondata; - } - - ret = rdt_pseudo_lock_init(); - if (ret) - goto out_mondata; - - ret = kernfs_get_tree(fc); - if (ret < 0) - goto out_psl; - - if (resctrl_arch_alloc_capable()) - resctrl_arch_enable_alloc(); - if (resctrl_arch_mon_capable()) - resctrl_arch_enable_mon(); - - if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) - resctrl_mounted = true; - - if (resctrl_is_mbm_enabled()) { - list_for_each_entry(dom, &l3->domains, list) - mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, - RESCTRL_PICK_ANY_CPU); - } - - goto out; - -out_psl: - rdt_pseudo_lock_release(); -out_mondata: - if (resctrl_arch_mon_capable()) - kernfs_remove(kn_mondata); -out_mongrp: - if (resctrl_arch_mon_capable()) - kernfs_remove(kn_mongrp); -out_info: - kernfs_remove(kn_info); -out_schemata_free: - schemata_list_destroy(); -out_ctx: - rdt_disable_ctx(); -out_root: - rdtgroup_destroy_root(); -out: - rdt_last_cmd_clear(); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -enum rdt_param { - Opt_cdp, - Opt_cdpl2, - Opt_mba_mbps, - Opt_debug, - nr__rdt_params -}; - -static const struct fs_parameter_spec rdt_fs_parameters[] = { - fsparam_flag("cdp", Opt_cdp), - fsparam_flag("cdpl2", Opt_cdpl2), - fsparam_flag("mba_MBps", Opt_mba_mbps), - fsparam_flag("debug", Opt_debug), - {} -}; - -static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - struct 
fs_parse_result result; - int opt; - - opt = fs_parse(fc, rdt_fs_parameters, param, &result); - if (opt < 0) - return opt; - - switch (opt) { - case Opt_cdp: - ctx->enable_cdpl3 = true; - return 0; - case Opt_cdpl2: - ctx->enable_cdpl2 = true; - return 0; - case Opt_mba_mbps: - if (!supports_mba_mbps()) - return -EINVAL; - ctx->enable_mba_mbps = true; - return 0; - case Opt_debug: - ctx->enable_debug = true; - return 0; - } - - return -EINVAL; -} - -static void rdt_fs_context_free(struct fs_context *fc) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - - kernfs_free_fs_context(fc); - kfree(ctx); -} - -static const struct fs_context_operations rdt_fs_context_ops = { - .free = rdt_fs_context_free, - .parse_param = rdt_parse_param, - .get_tree = rdt_get_tree, -}; - -static int rdt_init_fs_context(struct fs_context *fc) -{ - struct rdt_fs_context *ctx; - - ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); - if (!ctx) - return -ENOMEM; - - ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; - fc->fs_private = &ctx->kfc; - fc->ops = &rdt_fs_context_ops; - put_user_ns(fc->user_ns); - fc->user_ns = get_user_ns(&init_user_ns); - fc->global = true; - return 0; -} - -static int reset_all_ctrls(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - struct rdt_hw_domain *hw_dom; - struct msr_param msr_param; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int i; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - - msr_param.res = r; - msr_param.low = 0; - msr_param.high = hw_res->num_closid; - - /* - * Disable resource control for this resource by setting all - * CBMs in all domains to the maximum mask value. Pick one CPU - * from each domain to update the MSRs below. - */ - list_for_each_entry(d, &r->domains, list) { - hw_dom = resctrl_to_arch_dom(d); - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - - for (i = 0; i < hw_res->num_closid; i++) - hw_dom->ctrl_val[i] = r->default_ctrl; - } - - /* Update CBM on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -void resctrl_arch_reset_resources(void) -{ - struct rdt_resource *r; - - for_each_capable_rdt_resource(r) - reset_all_ctrls(r); -} - -/* - * Move tasks from one to the other group. If @from is NULL, then all tasks - * in the systems are moved unconditionally (used for teardown). - * - * If @mask is not NULL the cpus on which moved tasks are running are set - * in that mask so the update smp function call is restricted to affected - * cpus. - */ -static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, - struct cpumask *mask) -{ - struct task_struct *p, *t; - - read_lock(&tasklist_lock); - for_each_process_thread(p, t) { - if (!from || is_closid_match(t, from) || - is_rmid_match(t, from)) { - resctrl_arch_set_closid_rmid(t, to->closid, - to->mon.rmid); - - /* - * Order the closid/rmid stores above before the loads - * in task_curr(). This pairs with the full barrier - * between the rq->curr update and - * resctrl_arch_sched_in() during context switch. - */ - smp_mb(); - - /* - * If the task is on a CPU, set the CPU in the mask. - * The detection is inaccurate as tasks might move or - * schedule before the smp function call takes place. - * In such a case the function call is pointless, but - * there is no other side effect. 
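
rdt_move_group_tasks() is the bulk counterpart of the per-PID moves done through each group's tasks file: either path rewrites the task's closid/rmid and then nudges any CPU the task is running on. A userspace sketch of the single-task case; the group name grp1 and the PID are hypothetical:

	#include <stdio.h>

	int main(void)
	{
		/* Writing a PID reassigns that task's closid/rmid, the same
		 * fields rdt_move_group_tasks() updates in bulk on teardown. */
		FILE *f = fopen("/sys/fs/resctrl/grp1/tasks", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 1234);	/* hypothetical PID */
		fclose(f);
		return 0;
	}
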
- */ - if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) - cpumask_set_cpu(task_cpu(t), mask); - } - } - read_unlock(&tasklist_lock); -} - -static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) -{ - struct rdtgroup *sentry, *stmp; - struct list_head *head; - - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { - free_rmid(sentry->closid, sentry->mon.rmid); - list_del(&sentry->mon.crdtgrp_list); - - if (atomic_read(&sentry->waitcount) != 0) - sentry->flags = RDT_DELETED; - else - rdtgroup_remove(sentry); - } -} - -/* - * Forcibly remove all of subdirectories under root. - */ -static void rmdir_all_sub(void) -{ - struct rdtgroup *rdtgrp, *tmp; - - /* Move all tasks to the default resource group */ - rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); - - list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { - /* Free any child rmids */ - free_all_child_rdtgrp(rdtgrp); - - /* Remove each rdtgroup other than root */ - if (rdtgrp == &rdtgroup_default) - continue; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - - /* - * Give any CPUs back to the default group. We cannot copy - * cpu_online_mask because a CPU might have executed the - * offline callback already, but is still marked online. - */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - - kernfs_remove(rdtgrp->kn); - list_del(&rdtgrp->rdtgroup_list); - - if (atomic_read(&rdtgrp->waitcount) != 0) - rdtgrp->flags = RDT_DELETED; - else - rdtgroup_remove(rdtgrp); - } - /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ - update_closid_rmid(cpu_online_mask, &rdtgroup_default); - - kernfs_remove(kn_info); - kernfs_remove(kn_mongrp); - kernfs_remove(kn_mondata); -} - -static void rdt_kill_sb(struct super_block *sb) -{ - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - rdt_disable_ctx(); - - /* Put everything back to default values. */ - resctrl_arch_reset_resources(); - - rmdir_all_sub(); - rdt_pseudo_lock_release(); - rdtgroup_default.mode = RDT_MODE_SHAREABLE; - schemata_list_destroy(); - rdtgroup_destroy_root(); - if (resctrl_arch_alloc_capable()) - resctrl_arch_disable_alloc(); - if (resctrl_arch_mon_capable()) - resctrl_arch_disable_mon(); - resctrl_mounted = false; - kernfs_kill_sb(sb); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -static struct file_system_type rdt_fs_type = { - .name = "resctrl", - .init_fs_context = rdt_init_fs_context, - .parameters = rdt_fs_parameters, - .kill_sb = rdt_kill_sb, -}; - -static int mon_addfile(struct kernfs_node *parent_kn, const char *name, - void *priv) -{ - struct kernfs_node *kn; - int ret = 0; - - kn = __kernfs_create_file(parent_kn, name, 0444, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, - &kf_mondata_ops, priv, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; - } - - return ret; -} - -/* - * Remove all subdirectories of mon_data of ctrl_mon groups - * and monitor groups with given domain id. 
- */
-static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
-					   unsigned int dom_id)
-{
-	struct rdtgroup *prgrp, *crgrp;
-	char name[32];
-
-	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
-		sprintf(name, "mon_%s_%02d", r->name, dom_id);
-		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
-
-		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
-			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
-	}
-}
-
-static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
-				struct rdt_domain *d,
-				struct rdt_resource *r, struct rdtgroup *prgrp)
-{
-	union mon_data_bits priv;
-	struct kernfs_node *kn;
-	struct mon_evt *mevt;
-	struct rmid_read rr;
-	char name[32];
-	int ret;
-
-	sprintf(name, "mon_%s_%02d", r->name, d->id);
-	/* create the directory */
-	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
-	if (IS_ERR(kn))
-		return PTR_ERR(kn);
-
-	ret = rdtgroup_kn_set_ugid(kn);
-	if (ret)
-		goto out_destroy;
-
-	if (WARN_ON(list_empty(&r->evt_list))) {
-		ret = -EPERM;
-		goto out_destroy;
-	}
-
-	priv.u.rid = r->rid;
-	priv.u.domid = d->id;
-	list_for_each_entry(mevt, &r->evt_list, list) {
-		priv.u.evtid = mevt->evtid;
-		ret = mon_addfile(kn, mevt->name, priv.priv);
-		if (ret)
-			goto out_destroy;
-
-		if (resctrl_is_mbm_event(mevt->evtid))
-			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
-	}
-	kernfs_activate(kn);
-	return 0;
-
-out_destroy:
-	kernfs_remove(kn);
-	return ret;
-}
-
-/*
- * Add all subdirectories of mon_data for "ctrl_mon" groups
- * and "monitor" groups with given domain id.
- */
-static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
-					   struct rdt_domain *d)
-{
-	struct kernfs_node *parent_kn;
-	struct rdtgroup *prgrp, *crgrp;
-	struct list_head *head;
-
-	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
-		parent_kn = prgrp->mon.mon_data_kn;
-		mkdir_mondata_subdir(parent_kn, d, r, prgrp);
-
-		head = &prgrp->mon.crdtgrp_list;
-		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
-			parent_kn = crgrp->mon.mon_data_kn;
-			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
-		}
-	}
-}
-
-static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
-				       struct rdt_resource *r,
-				       struct rdtgroup *prgrp)
-{
-	struct rdt_domain *dom;
-	int ret;
-
-	/* Walking r->domains, ensure it can't race with cpuhp */
-	lockdep_assert_cpus_held();
-
-	list_for_each_entry(dom, &r->domains, list) {
-		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-/*
- * This creates a directory mon_data which contains the monitored data.
- *
- * mon_data has one directory for each domain, named in the format
- * mon_<domain_name>_<domain_id>. For example, a mon_data with an L3
- * domain looks as below:
- * ./mon_data:
- * mon_L3_00
- * mon_L3_01
- * mon_L3_02
- * ...
- *
- * Each domain directory has one file per event:
- * ./mon_L3_00/:
- * llc_occupancy
- *
- */
-static int mkdir_mondata_all(struct kernfs_node *parent_kn,
-			     struct rdtgroup *prgrp,
-			     struct kernfs_node **dest_kn)
-{
-	enum resctrl_res_level i;
-	struct rdt_resource *r;
-	struct kernfs_node *kn;
-	int ret;
-
-	/*
-	 * Create the mon_data directory first.
-	 */
-	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
-	if (ret)
-		return ret;
-
-	if (dest_kn)
-		*dest_kn = kn;
-
-	/*
-	 * Create the subdirectories for each domain.
Note that all events - * in a domain like L3 are grouped into a resource whose domain is L3 - */ - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->mon_capable) - continue; - - ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); - if (ret) - goto out_destroy; - } - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -/** - * cbm_ensure_valid - Enforce validity on provided CBM - * @_val: Candidate CBM - * @r: RDT resource to which the CBM belongs - * - * The provided CBM represents all cache portions available for use. This - * may be represented by a bitmap that does not consist of contiguous ones - * and thus be an invalid CBM. - * Here the provided CBM is forced to be a valid CBM by only considering - * the first set of contiguous bits as valid and clearing all bits. - * The intention here is to provide a valid default CBM with which a new - * resource group is initialized. The user can follow this with a - * modification to the CBM if the default does not satisfy the - * requirements. - */ -static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) -{ - unsigned int cbm_len = r->cache.cbm_len; - unsigned long first_bit, zero_bit; - unsigned long val = _val; - - if (!val) - return 0; - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Clear any remaining bits to ensure contiguous region */ - bitmap_clear(&val, zero_bit, cbm_len - zero_bit); - return (u32)val; -} - -/* - * Initialize cache resources per RDT domain - * - * Set the RDT domain up to start off with all usable allocations. That is, - * all shareable and unused bits. All-zero CBM is invalid. - */ -static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, - u32 closid) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 used_b = 0, unused_b = 0; - unsigned long tmp_cbm; - enum rdtgrp_mode mode; - u32 peer_ctl, ctrl_val; - int i; - - cfg = &d->staged_config[t]; - cfg->have_new_ctrl = false; - cfg->new_ctrl = r->cache.shareable_bits; - used_b = r->cache.shareable_bits; - for (i = 0; i < closids_supported(); i++) { - if (closid_allocated(i) && i != closid) { - mode = rdtgroup_mode_by_closid(i); - if (mode == RDT_MODE_PSEUDO_LOCKSETUP) - /* - * ctrl values for locksetup aren't relevant - * until the schemata is written, and the mode - * becomes RDT_MODE_PSEUDO_LOCKED. - */ - continue; - /* - * If CDP is active include peer domain's - * usage to ensure there is no overlap - * with an exclusive group. - */ - if (resctrl_arch_get_cdp_enabled(r->rid)) - peer_ctl = resctrl_arch_get_config(r, d, i, - peer_type); - else - peer_ctl = 0; - ctrl_val = resctrl_arch_get_config(r, d, i, - s->conf_type); - used_b |= ctrl_val | peer_ctl; - if (mode == RDT_MODE_SHAREABLE) - cfg->new_ctrl |= ctrl_val | peer_ctl; - } - } - if (d->plr && d->plr->cbm > 0) - used_b |= d->plr->cbm; - unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); - unused_b &= BIT_MASK(r->cache.cbm_len) - 1; - cfg->new_ctrl |= unused_b; - /* - * Force the initial CBM to be valid, user can - * modify the CBM based on system availability. - */ - cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); - /* - * Assign the u32 CBM to an unsigned long to ensure that - * bitmap_weight() does not access out-of-bound memory. 
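
cbm_ensure_valid() keeps only the first run of contiguous set bits, so a candidate mask assembled from shareable plus unused bits always collapses to a legal CBM. A standalone model with a worked value (plain C, illustrative cbm_len):

	#include <stdio.h>
	#include <strings.h>

	/* Model of cbm_ensure_valid(): keep the contiguous run of set bits
	 * starting at the lowest set bit, clear everything above the gap. */
	static unsigned int cbm_first_run(unsigned int val, unsigned int cbm_len)
	{
		unsigned int first_bit, zero_bit;

		if (!val)
			return 0;
		first_bit = ffs(val) - 1;		/* lowest set bit */
		zero_bit = first_bit;
		while (zero_bit < cbm_len && (val & (1U << zero_bit)))
			zero_bit++;			/* first clear bit above it */
		return val & ((1U << zero_bit) - (1U << first_bit));
	}

	int main(void)
	{
		/* 0xf0b sets bits {0,1,3,8-11}; only the run {0,1} survives */
		printf("0x%x\n", cbm_first_run(0xf0b, 12));	/* prints 0x3 */
		return 0;
	}
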
- */ - tmp_cbm = cfg->new_ctrl; - if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); - return -ENOSPC; - } - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Initialize cache resources with default values. - * - * A new RDT group is being created on an allocation capable (CAT) - * supporting system. Set this group up to start off with all usable - * allocations. - * - * If there are no more shareable bits available on any domain then - * the entire allocation will fail. - */ -static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) -{ - struct rdt_domain *d; - int ret; - - list_for_each_entry(d, &s->res->domains, list) { - ret = __init_one_rdt_domain(d, s, closid); - if (ret < 0) - return ret; - } - - return 0; -} - -/* Initialize MBA resource with default values. */ -static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) -{ - struct resctrl_staged_config *cfg; - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - if (is_mba_sc(r)) { - d->mbps_val[closid] = MBA_MAX_MBPS; - continue; - } - - cfg = &d->staged_config[CDP_NONE]; - cfg->new_ctrl = r->default_ctrl; - cfg->have_new_ctrl = true; - } -} - -/* Initialize the RDT group's allocations. */ -static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - struct rdt_resource *r; - int ret = 0; - - rdt_staged_configs_clear(); - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) { - rdtgroup_init_mba(r, rdtgrp->closid); - if (is_mba_sc(r)) - continue; - } else { - ret = rdtgroup_init_cat(s, rdtgrp->closid); - if (ret < 0) - goto out; - } - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Failed to initialize allocations\n"); - goto out; - } - - } - - rdtgrp->mode = RDT_MODE_SHAREABLE; - -out: - rdt_staged_configs_clear(); - return ret; -} - -static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) -{ - int ret; - - if (!resctrl_arch_mon_capable()) - return 0; - - ret = alloc_rmid(rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - return ret; - } - rdtgrp->mon.rmid = ret; - - ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - return ret; - } - - return 0; -} - -static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) -{ - if (resctrl_arch_mon_capable()) - free_rmid(rgrp->closid, rgrp->mon.rmid); -} - -static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, - const char *name, umode_t mode, - enum rdt_group_type rtype, struct rdtgroup **r) -{ - struct rdtgroup *prdtgrp, *rdtgrp; - unsigned long files = 0; - struct kernfs_node *kn; - int ret; - - prdtgrp = rdtgroup_kn_lock_live(parent_kn); - if (!prdtgrp) { - ret = -ENODEV; - goto out_unlock; - } - - if (rtype == RDTMON_GROUP && - (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto out_unlock; - } - - /* allocate the rdtgroup. 
*/ - rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); - if (!rdtgrp) { - ret = -ENOSPC; - rdt_last_cmd_puts("Kernel out of memory\n"); - goto out_unlock; - } - *r = rdtgrp; - rdtgrp->mon.parent = prdtgrp; - rdtgrp->type = rtype; - INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); - - /* kernfs creates the directory for rdtgrp */ - kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); - if (IS_ERR(kn)) { - ret = PTR_ERR(kn); - rdt_last_cmd_puts("kernfs create error\n"); - goto out_free_rgrp; - } - rdtgrp->kn = kn; - - /* - * kernfs_remove() will drop the reference count on "kn" which - * will free it. But we still need it to stick around for the - * rdtgroup_kn_unlock(kn) call. Take one extra reference here, - * which will be dropped by kernfs_put() in rdtgroup_remove(). - */ - kernfs_get(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - rdt_last_cmd_puts("kernfs perm error\n"); - goto out_destroy; - } - - if (rtype == RDTCTRL_GROUP) { - files = RFTYPE_BASE | RFTYPE_CTRL; - if (resctrl_arch_mon_capable()) - files |= RFTYPE_MON; - } else { - files = RFTYPE_BASE | RFTYPE_MON; - } - - ret = rdtgroup_add_files(kn, files); - if (ret) { - rdt_last_cmd_puts("kernfs fill error\n"); - goto out_destroy; - } - - /* - * The caller unlocks the parent_kn upon success. - */ - return 0; - -out_destroy: - kernfs_put(rdtgrp->kn); - kernfs_remove(rdtgrp->kn); -out_free_rgrp: - kfree(rdtgrp); -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) -{ - kernfs_remove(rgrp->kn); - rdtgroup_remove(rgrp); -} - -/* - * Create a monitor group under "mon_groups" directory of a control - * and monitor group(ctrl_mon). This is a resource group - * to monitor a subset of tasks and cpus in its parent ctrl_mon group. - */ -static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp, *prgrp; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); - if (ret) - return ret; - - prgrp = rdtgrp->mon.parent; - rdtgrp->closid = prgrp->closid; - - ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); - if (ret) { - mkdir_rdt_prepare_clean(rdtgrp); - goto out_unlock; - } - - kernfs_activate(rdtgrp->kn); - - /* - * Add the rdtgrp to the list of rdtgrps the parent - * ctrl_mon group has to track. - */ - list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); - -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -/* - * These are rdtgroups created under the root directory. Can be used - * to allocate and monitor resources. - */ -static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp; - struct kernfs_node *kn; - u32 closid; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); - if (ret) - return ret; - - kn = rdtgrp->kn; - ret = closid_alloc(); - if (ret < 0) { - rdt_last_cmd_puts("Out of CLOSIDs\n"); - goto out_common_fail; - } - closid = ret; - ret = 0; - - rdtgrp->closid = closid; - - ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); - if (ret) - goto out_closid_free; - - kernfs_activate(rdtgrp->kn); - - ret = rdtgroup_init_alloc(rdtgrp); - if (ret < 0) - goto out_rmid_free; - - list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); - - if (resctrl_arch_mon_capable()) { - /* - * Create an empty mon_groups directory to hold the subset - * of tasks and cpus to monitor. 
- */
-		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
-		if (ret) {
-			rdt_last_cmd_puts("kernfs subdir error\n");
-			goto out_del_list;
-		}
-	}
-
-	goto out_unlock;
-
-out_del_list:
-	list_del(&rdtgrp->rdtgroup_list);
-out_rmid_free:
-	mkdir_rdt_prepare_rmid_free(rdtgrp);
-out_closid_free:
-	closid_free(closid);
-out_common_fail:
-	mkdir_rdt_prepare_clean(rdtgrp);
-out_unlock:
-	rdtgroup_kn_unlock(parent_kn);
-	return ret;
-}
-
-/*
- * We allow creating mon groups only within a directory called "mon_groups"
- * which is present in every ctrl_mon group. Check if this is a valid
- * "mon_groups" directory.
- *
- * 1. The directory should be named "mon_groups".
- * 2. The mon group itself should "not" be named "mon_groups".
- *    This makes sure the "mon_groups" directory always has a ctrl_mon
- *    group as parent.
- */
-static bool is_mon_groups(struct kernfs_node *kn, const char *name)
-{
-	return (!strcmp(kn->name, "mon_groups") &&
-		strcmp(name, "mon_groups"));
-}
-
-static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
-			  umode_t mode)
-{
-	/* Do not accept '\n' to avoid an unparsable situation. */
-	if (strchr(name, '\n'))
-		return -EINVAL;
-
-	/*
-	 * If the parent directory is the root directory and RDT
-	 * allocation is supported, add a control and monitoring
-	 * subdirectory.
-	 */
-	if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
-		return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
-
-	/*
-	 * If RDT monitoring is supported and the parent directory is a valid
-	 * "mon_groups" directory, add a monitoring subdirectory.
-	 */
-	if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
-		return rdtgroup_mkdir_mon(parent_kn, name, mode);
-
-	return -EPERM;
-}
-
-static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
-{
-	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
-	u32 closid, rmid;
-	int cpu;
-
-	/* Give any tasks back to the parent group */
-	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
-
-	/* Update per cpu rmid of the moved CPUs first */
-	closid = rdtgrp->closid;
-	rmid = prdtgrp->mon.rmid;
-	for_each_cpu(cpu, &rdtgrp->cpu_mask)
-		resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
-
-	/*
-	 * Update the MSR on moved CPUs and CPUs which have a moved
-	 * task running on them.
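
The mkdir hook above is what a plain mkdir(2) on a mounted resctrl tree lands in: control groups only at the root, monitor groups only under an existing group's mon_groups directory, EPERM anywhere else. A usage sketch; the group names are hypothetical:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		/* Control group at the root: rdtgroup_mkdir_ctrl_mon() path */
		if (mkdir("/sys/fs/resctrl/grp1", 0755))
			perror("mkdir grp1");
		/* Monitor group: only valid under <group>/mon_groups,
		 * as enforced by is_mon_groups() */
		if (mkdir("/sys/fs/resctrl/grp1/mon_groups/mon1", 0755))
			perror("mkdir mon1");
		return 0;
	}
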
- */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - rdtgrp->flags = RDT_DELETED; - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - - /* - * Remove the rdtgrp from the parent ctrl_mon group's list - */ - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_del(&rdtgrp->mon.crdtgrp_list); - - kernfs_remove(rdtgrp->kn); - - return 0; -} - -static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) -{ - rdtgrp->flags = RDT_DELETED; - list_del(&rdtgrp->rdtgroup_list); - - kernfs_remove(rdtgrp->kn); - return 0; -} - -static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) -{ - u32 closid, rmid; - int cpu; - - /* Give any tasks back to the default group */ - rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); - - /* Give any CPUs back to the default group */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - /* Update per cpu closid and rmid of the moved CPUs first */ - closid = rdtgroup_default.closid; - rmid = rdtgroup_default.mon.rmid; - for_each_cpu(cpu, &rdtgrp->cpu_mask) - resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); - - /* - * Update the MSR on moved CPUs and CPUs which have moved - * task running on them. - */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - closid_free(rdtgrp->closid); - - rdtgroup_ctrl_remove(rdtgrp); - - /* - * Free all the child monitor group rmids. - */ - free_all_child_rdtgrp(rdtgrp); - - return 0; -} - -static int rdtgroup_rmdir(struct kernfs_node *kn) -{ - struct kernfs_node *parent_kn = kn->parent; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret = 0; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - - rdtgrp = rdtgroup_kn_lock_live(kn); - if (!rdtgrp) { - ret = -EPERM; - goto out; - } - - /* - * If the rdtgroup is a ctrl_mon group and parent directory - * is the root directory, remove the ctrl_mon group. - * - * If the rdtgroup is a mon group and parent directory - * is a valid "mon_groups" directory, remove the mon group. - */ - if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && - rdtgrp != &rdtgroup_default) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = rdtgroup_ctrl_remove(rdtgrp); - } else { - ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); - } - } else if (rdtgrp->type == RDTMON_GROUP && - is_mon_groups(parent_kn, kn->name)) { - ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); - } else { - ret = -EPERM; - } - -out: - rdtgroup_kn_unlock(kn); - free_cpumask_var(tmpmask); - return ret; -} - -/** - * mongrp_reparent() - replace parent CTRL_MON group of a MON group - * @rdtgrp: the MON group whose parent should be replaced - * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp - * @cpus: cpumask provided by the caller for use during this call - * - * Replaces the parent CTRL_MON group for a MON group, resulting in all member - * tasks' CLOSID immediately changing to that of the new parent group. - * Monitoring data for the group is unaffected by this operation. - */ -static void mongrp_reparent(struct rdtgroup *rdtgrp, - struct rdtgroup *new_prdtgrp, - cpumask_var_t cpus) -{ - struct rdtgroup *prdtgrp = rdtgrp->mon.parent; - - WARN_ON(rdtgrp->type != RDTMON_GROUP); - WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); - - /* Nothing to do when simply renaming a MON group. 
*/ - if (prdtgrp == new_prdtgrp) - return; - - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_move_tail(&rdtgrp->mon.crdtgrp_list, - &new_prdtgrp->mon.crdtgrp_list); - - rdtgrp->mon.parent = new_prdtgrp; - rdtgrp->closid = new_prdtgrp->closid; - - /* Propagate updated closid to all tasks in this group. */ - rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); - - update_closid_rmid(cpus, NULL); -} - -static int rdtgroup_rename(struct kernfs_node *kn, - struct kernfs_node *new_parent, const char *new_name) -{ - struct rdtgroup *new_prdtgrp; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret; - - rdtgrp = kernfs_to_rdtgroup(kn); - new_prdtgrp = kernfs_to_rdtgroup(new_parent); - if (!rdtgrp || !new_prdtgrp) - return -ENOENT; - - /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ - rdtgroup_kn_get(rdtgrp, kn); - rdtgroup_kn_get(new_prdtgrp, new_parent); - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - /* - * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if - * either kernfs_node is a file. - */ - if (kernfs_type(kn) != KERNFS_DIR || - kernfs_type(new_parent) != KERNFS_DIR) { - rdt_last_cmd_puts("Source and destination must be directories"); - ret = -EPERM; - goto out; - } - - if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { - ret = -ENOENT; - goto out; - } - - if (rdtgrp->type != RDTMON_GROUP || !kn->parent || - !is_mon_groups(kn->parent, kn->name)) { - rdt_last_cmd_puts("Source must be a MON group\n"); - ret = -EPERM; - goto out; - } - - if (!is_mon_groups(new_parent, new_name)) { - rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); - ret = -EPERM; - goto out; - } - - /* - * If the MON group is monitoring CPUs, the CPUs must be assigned to the - * current parent CTRL_MON group and therefore cannot be assigned to - * the new parent, making the move illegal. - */ - if (!cpumask_empty(&rdtgrp->cpu_mask) && - rdtgrp->mon.parent != new_prdtgrp) { - rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); - ret = -EPERM; - goto out; - } - - /* - * Allocate the cpumask for use in mongrp_reparent() to avoid the - * possibility of failing to allocate it after kernfs_rename() has - * succeeded. - */ - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } - - /* - * Perform all input validation and allocations needed to ensure - * mongrp_reparent() will succeed before calling kernfs_rename(), - * otherwise it would be necessary to revert this call if - * mongrp_reparent() failed. 
- */ - ret = kernfs_rename(kn, new_parent, new_name); - if (!ret) - mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); - - free_cpumask_var(tmpmask); - -out: - mutex_unlock(&rdtgroup_mutex); - rdtgroup_kn_put(rdtgrp, kn); - rdtgroup_kn_put(new_prdtgrp, new_parent); - return ret; -} - -static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - seq_puts(seq, ",cdp"); - - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - seq_puts(seq, ",cdpl2"); - - if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) - seq_puts(seq, ",mba_MBps"); - - if (resctrl_debug) - seq_puts(seq, ",debug"); - - return 0; -} - -static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { - .mkdir = rdtgroup_mkdir, - .rmdir = rdtgroup_rmdir, - .rename = rdtgroup_rename, - .show_options = rdtgroup_show_options, -}; - -static int rdtgroup_setup_root(struct rdt_fs_context *ctx) -{ - rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, - KERNFS_ROOT_CREATE_DEACTIVATED | - KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, - &rdtgroup_default); - if (IS_ERR(rdt_root)) - return PTR_ERR(rdt_root); - - ctx->kfc.root = rdt_root; - rdtgroup_default.kn = kernfs_root_to_node(rdt_root); - - return 0; -} - -static void rdtgroup_destroy_root(void) -{ - kernfs_destroy_root(rdt_root); - rdtgroup_default.kn = NULL; -} - -static void rdtgroup_setup_default(void) -{ - mutex_lock(&rdtgroup_mutex); - - rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; - rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; - rdtgroup_default.type = RDTCTRL_GROUP; - INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); - - list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - - mutex_unlock(&rdtgroup_mutex); -} - -static void domain_destroy_mon_state(struct rdt_domain *d) -{ - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - kfree(d->mbm_local); -} - -void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - mutex_lock(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) - mba_sc_domain_destroy(r, d); - - if (!r->mon_capable) - goto out_unlock; - - /* - * If resctrl is mounted, remove all the - * per domain monitor data directories. - */ - if (resctrl_mounted && resctrl_arch_mon_capable()) - rmdir_mondata_subdir_allrdtgrp(r, d->id); - - if (resctrl_is_mbm_enabled()) - cancel_delayed_work(&d->mbm_over); - if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { - /* - * When a package is going down, forcefully - * decrement rmid->ebusy. There is no way to know - * that the L3 was flushed and hence may lead to - * incorrect counts in rare scenarios, but leaving - * the RMID as busy creates RMID leaks if the - * package never comes back. 
- */ - __check_limbo(d, true); - cancel_delayed_work(&d->cqm_limbo); - } - - domain_destroy_mon_state(d); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); -} - -static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) -{ - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - size_t tsize; - - if (resctrl_arch_is_llc_occupancy_enabled()) { - d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); - if (!d->rmid_busy_llc) - return -ENOMEM; - } - if (resctrl_arch_is_mbm_total_enabled()) { - tsize = sizeof(*d->mbm_total); - d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); - if (!d->mbm_total) { - bitmap_free(d->rmid_busy_llc); - return -ENOMEM; - } - } - if (resctrl_arch_is_mbm_local_enabled()) { - tsize = sizeof(*d->mbm_local); - d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); - if (!d->mbm_local) { - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - return -ENOMEM; - } - } - - return 0; -} - -int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - int err = 0; - - mutex_lock(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { - /* RDT_RESOURCE_MBA is never mon_capable */ - err = mba_sc_domain_allocate(r, d); - goto out_unlock; - } - - if (!r->mon_capable) - goto out_unlock; - - err = domain_setup_mon_state(r, d); - if (err) - goto out_unlock; - - if (resctrl_is_mbm_enabled()) { - INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); - mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, - RESCTRL_PICK_ANY_CPU); - } - - if (resctrl_arch_is_llc_occupancy_enabled()) - INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); - - /* - * If the filesystem is not mounted then only the default resource group - * exists. Creation of its directories is deferred until mount time - * by rdt_get_tree() calling mkdir_mondata_all(). - * If resctrl is mounted, add per domain monitor data directories. - */ - if (resctrl_mounted && resctrl_arch_mon_capable()) - mkdir_mondata_subdir_allrdtgrp(r, d); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); - - return err; -} - -void resctrl_online_cpu(unsigned int cpu) -{ - mutex_lock(&rdtgroup_mutex); - /* The CPU is set in default rdtgroup after online. 
*/ - cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); - mutex_unlock(&rdtgroup_mutex); -} - -static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) -{ - struct rdtgroup *cr; - - list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { - if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) - break; - } -} - -void resctrl_offline_cpu(unsigned int cpu) -{ - struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); - struct rdtgroup *rdtgrp; - struct rdt_domain *d; - - mutex_lock(&rdtgroup_mutex); - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { - clear_childcpus(rdtgrp, cpu); - break; - } - } - - if (!l3->mon_capable) - goto out_unlock; - - d = resctrl_get_domain_from_cpu(cpu, l3); - if (d) { - if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { - cancel_delayed_work(&d->mbm_over); - mbm_setup_overflow_handler(d, 0, cpu); - } - if (resctrl_arch_is_llc_occupancy_enabled() && - cpu == d->cqm_work_cpu && has_busy_rmid(d)) { - cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0, cpu); - } - } - -out_unlock: - mutex_unlock(&rdtgroup_mutex); -} - -/* - * resctrl_init - resctrl filesystem initialization - * - * Setup resctrl file system including set up root, create mount point, - * register resctrl filesystem, and initialize files under root directory. - * - * Return: 0 on success or -errno - */ -int resctrl_init(void) -{ - int ret = 0; - - seq_buf_init(&last_cmd_status, last_cmd_status_buf, - sizeof(last_cmd_status_buf)); - - rdtgroup_setup_default(); - - thread_throttle_mode_init(); - - ret = resctrl_mon_resource_init(); - if (ret) - return ret; - - ret = sysfs_create_mount_point(fs_kobj, "resctrl"); - if (ret) - return ret; - - ret = register_filesystem(&rdt_fs_type); - if (ret) - goto cleanup_mountpoint; - - /* - * Adding the resctrl debugfs directory here may not be ideal since - * it would let the resctrl debugfs directory appear on the debugfs - * filesystem before the resctrl filesystem is mounted. - * It may also be ok since that would enable debugging of RDT before - * resctrl is mounted. - * The reason why the debugfs directory is created here and not in - * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and - * during the debugfs directory creation also &sb->s_type->i_mutex_key - * (the lockdep class of inode->i_rwsem). Other filesystem - * interactions (eg. SyS_getdents) have the lock ordering: - * &sb->s_type->i_mutex_key --> &mm->mmap_lock - * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex - * is taken, thus creating dependency: - * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause - * issues considering the other two lock dependencies. - * By creating the debugfs directory here we avoid a dependency - * that may cause deadlock (even though file operations cannot - * occur until the filesystem is mounted, but I do not know how to - * tell lockdep that). 
- */
-	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
-
-	return 0;
-
-cleanup_mountpoint:
-	sysfs_remove_mount_point(fs_kobj, "resctrl");
-
-	return ret;
-}
-
-void resctrl_exit(void)
-{
-	debugfs_remove_recursive(debugfs_resctrl);
-	unregister_filesystem(&rdt_fs_type);
-	sysfs_remove_mount_point(fs_kobj, "resctrl");
-
-	resctrl_mon_resource_exit();
+	for_each_capable_rdt_resource(r)
+		reset_all_ctrls(r);
 }
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
index e69de29bb2d1..a8f2dd66ede3 100644
--- a/fs/resctrl/ctrlmondata.c
+++ b/fs/resctrl/ctrlmondata.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Resource Director Technology (RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ *    Fenghua Yu
+ *    Tony Luck
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct rdt_parse_data {
+	struct rdtgroup		*rdtgrp;
+	char			*buf;
+};
+
+typedef int (ctrlval_parser_t)(struct rdt_parse_data *data,
+			       struct resctrl_schema *s,
+			       struct rdt_domain *d);
+
+/*
+ * Check whether an MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and maximum bandwidth values specified by the
+ * hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+{
+	unsigned long bw;
+	int ret;
+
+	/*
+	 * Only linear delay values are supported for current Intel SKUs.
+	 */
+	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
+		rdt_last_cmd_puts("No support for non-linear MB domains\n");
+		return false;
+	}
+
+	ret = kstrtoul(buf, 10, &bw);
+	if (ret) {
+		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+		return false;
+	}
+
+	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
+	    !is_mba_sc(r)) {
+		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+				    r->membw.min_bw, r->default_ctrl);
+		return false;
+	}
+
+	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
+	return true;
+}
+
+static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
+		    struct rdt_domain *d)
+{
+	struct resctrl_staged_config *cfg;
+	u32 closid = data->rdtgrp->closid;
+	struct rdt_resource *r = s->res;
+	unsigned long bw_val;
+
+	cfg = &d->staged_config[s->conf_type];
+	if (cfg->have_new_ctrl) {
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+		return -EINVAL;
+	}
+
+	if (!bw_validate(data->buf, &bw_val, r))
+		return -EINVAL;
+
+	if (is_mba_sc(r)) {
+		d->mbps_val[closid] = bw_val;
+		return 0;
+	}
+
+	cfg->new_ctrl = bw_val;
+	cfg->have_new_ctrl = true;
+
+	return 0;
+}
+
+/*
+ * Check whether a cache bit mask is valid.
+ * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
+ *   - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
+ *   - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
+ *
+ * Haswell does not support a non-contiguous 1s value and additionally
+ * requires at least two bits set.
+ * AMD allows non-contiguous bitmasks.
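+ *
+ * As an illustrative example (mask values assumed, not taken from this
+ * patch): with cbm_len = 16, a mask of 0x00ff is one contiguous run of
+ * 1s and is always accepted, while 0x0f0f contains two runs and is only
+ * accepted when the architecture reports sparse bitmask support.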
+ */ +static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) +{ + unsigned long first_bit, zero_bit, val; + unsigned int cbm_len = r->cache.cbm_len; + int ret; + + ret = kstrtoul(buf, 16, &val); + if (ret) { + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); + return false; + } + + if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { + rdt_last_cmd_puts("Mask out of range\n"); + return false; + } + + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); + + /* Are non-contiguous bitmasks allowed? */ + if (!r->cache.arch_has_sparse_bitmasks && + (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { + rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); + return false; + } + + if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("Need at least %d bits in the mask\n", + r->cache.min_cbm_bits); + return false; + } + + *data = val; + return true; +} + +/* + * Read one cache bit mask (hex). Check that it is valid for the current + * resource type. + */ +static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) +{ + struct rdtgroup *rdtgrp = data->rdtgrp; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 cbm_val; + + cfg = &d->staged_config[s->conf_type]; + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + /* + * Cannot set up more than one pseudo-locked region in a cache + * hierarchy. + */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + rdtgroup_pseudo_locked_in_hierarchy(d)) { + rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); + return -EINVAL; + } + + if (!cbm_validate(data->buf, &cbm_val, r)) + return -EINVAL; + + if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_SHAREABLE) && + rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { + rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); + return -EINVAL; + } + + /* + * The CBM may not overlap with the CBM of another closid if + * either is exclusive. + */ + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { + rdt_last_cmd_puts("Overlaps with exclusive group\n"); + return -EINVAL; + } + + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { + if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + rdt_last_cmd_puts("Overlaps with other group\n"); + return -EINVAL; + } + } + + cfg->new_ctrl = cbm_val; + cfg->have_new_ctrl = true; + + return 0; +} + +static ctrlval_parser_t *get_parser(struct rdt_resource *res) +{ + if (res->fflags & RFTYPE_RES_CACHE) + return &parse_cbm; + else + return &parse_bw; +} + +/* + * For each domain in this resource we expect to find a series of: + * id=mask + * separated by ";". The "id" is in decimal, and must match one of + * the "id"s for this resource. 
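+ *
+ * For example, a schemata line for an L3 resource with two cache
+ * domains might look like (domain ids and masks are illustrative):
+ *
+ *   L3:0=7ff;1=3ff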
+ */ +static int parse_line(char *line, struct resctrl_schema *s, + struct rdtgroup *rdtgrp) +{ + ctrlval_parser_t *parse_ctrlval = get_parser(s->res); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + struct rdt_parse_data data; + char *dom = NULL, *id; + struct rdt_domain *d; + unsigned long dom_id; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { + rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); + return -EINVAL; + } + +next: + if (!line || line[0] == '\0') + return 0; + dom = strsep(&line, ";"); + id = strsep(&dom, "="); + if (!dom || kstrtoul(id, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); + return -EINVAL; + } + dom = strim(dom); + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + data.buf = dom; + data.rdtgrp = rdtgrp; + if (parse_ctrlval(&data, s, d)) + return -EINVAL; + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + cfg = &d->staged_config[t]; + /* + * In pseudo-locking setup mode and just + * parsed a valid CBM that should be + * pseudo-locked. Only one locked region per + * resource group and domain so just do + * the required initialization for single + * region and return. + */ + rdtgrp->plr->s = s; + rdtgrp->plr->d = d; + rdtgrp->plr->cbm = cfg->new_ctrl; + d->plr = rdtgrp->plr; + return 0; + } + goto next; + } + } + return -EINVAL; +} + +static int rdtgroup_parse_resource(char *resname, char *tok, + struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + + list_for_each_entry(s, &resctrl_schema_all, list) { + if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) + return parse_line(tok, s, rdtgrp); + } + rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); + return -EINVAL; +} + +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct resctrl_schema *s; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + char *tok, *resname; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + /* + * No changes to pseudo-locked region allowed. It has to be removed + * and re-created instead. + */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = -EINVAL; + rdt_last_cmd_puts("Resource group is pseudo-locked\n"); + goto out; + } + + rdt_staged_configs_clear(); + + while ((tok = strsep(&buf, "\n")) != NULL) { + resname = strim(strsep(&tok, ":")); + if (!tok) { + rdt_last_cmd_puts("Missing ':'\n"); + ret = -EINVAL; + goto out; + } + if (tok[0] == '\0') { + rdt_last_cmd_printf("Missing '%s' value\n", resname); + ret = -EINVAL; + goto out; + } + ret = rdtgroup_parse_resource(resname, tok, rdtgrp); + if (ret) + goto out; + } + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + + /* + * Writes to mba_sc resources update the software controller, + * not the control MSR. 
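		 *
		 * For instance, with the mba_MBps mount option a write such
		 * as "MB:0=2048" (value illustrative) was already stored in
		 * d->mbps_val[] by parse_bw(), so the hardware update below
		 * is skipped.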
		 */
+		if (is_mba_sc(r))
+			continue;
+
+		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
+		if (ret)
+			goto out;
+	}
+
+	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+		/*
+		 * If pseudo-locking fails we keep the resource group in
+		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
+		 * active and updated for just the domain the pseudo-locked
+		 * region was requested for.
+		 */
+		ret = rdtgroup_pseudo_lock_create(rdtgrp);
+	}
+
+out:
+	rdt_staged_configs_clear();
+	rdtgroup_kn_unlock(of->kn);
+	return ret ?: nbytes;
+}
+
+static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
+{
+	struct rdt_resource *r = schema->res;
+	struct rdt_domain *dom;
+	bool sep = false;
+	u32 ctrl_val;
+
+	/* Walking r->domains, ensure it can't race with cpuhp */
+	lockdep_assert_cpus_held();
+
+	seq_printf(s, "%*s:", max_name_width, schema->name);
+	list_for_each_entry(dom, &r->domains, list) {
+		if (sep)
+			seq_puts(s, ";");
+
+		if (is_mba_sc(r))
+			ctrl_val = dom->mbps_val[closid];
+		else
+			ctrl_val = resctrl_arch_get_config(r, dom, closid,
+							   schema->conf_type);
+
+		seq_printf(s, r->format_str, dom->id, max_data_width,
+			   ctrl_val);
+		sep = true;
+	}
+	seq_puts(s, "\n");
+}
+
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+			   struct seq_file *s, void *v)
+{
+	struct resctrl_schema *schema;
+	struct rdtgroup *rdtgrp;
+	int ret = 0;
+	u32 closid;
+
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+	if (rdtgrp) {
+		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+			list_for_each_entry(schema, &resctrl_schema_all, list) {
+				seq_printf(s, "%s:uninitialized\n", schema->name);
+			}
+		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				seq_printf(s, "%s:%d=%x\n",
+					   rdtgrp->plr->s->res->name,
+					   rdtgrp->plr->d->id,
+					   rdtgrp->plr->cbm);
+			}
+		} else {
+			closid = rdtgrp->closid;
+			list_for_each_entry(schema, &resctrl_schema_all, list) {
+				if (closid < schema->num_closid)
+					show_doms(s, schema, closid);
+			}
+		}
+	} else {
+		ret = -ENOENT;
+	}
+	rdtgroup_kn_unlock(of->kn);
+	return ret;
+}
+
+static int smp_mon_event_count(void *arg)
+{
+	mon_event_count(arg);
+
+	return 0;
+}
+
+void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
+		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
+		    int evtid, int first)
+{
+	int cpu;
+
+	/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
+	lockdep_assert_cpus_held();
+
+	/*
+	 * Set up the parameters to pass to mon_event_count() to read the data.
+	 */
+	rr->rgrp = rdtgrp;
+	rr->evtid = evtid;
+	rr->r = r;
+	rr->d = d;
+	rr->val = 0;
+	rr->first = first;
+	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
+	if (IS_ERR(rr->arch_mon_ctx)) {
+		rr->err = -EINVAL;
+		return;
+	}
+
+	cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU);
+
+	/*
+	 * cpumask_any_housekeeping() prefers housekeeping CPUs, but if all
+	 * the CPUs in the domain are nohz_full, one of them must still be
+	 * picked to IPI.
+	 * MPAM's resctrl_arch_rmid_read() is unable to read the
+	 * counters on some platforms if it's called in IRQ context.
+ */ + if (tick_nohz_full_cpu(cpu)) + smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + else + smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); + + resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); +} + +int rdtgroup_mondata_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + u32 resid, evtid, domid; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + union mon_data_bits md; + struct rdt_domain *d; + struct rmid_read rr; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto out; + } + + md.priv = of->kn->priv; + resid = md.u.rid; + domid = md.u.domid; + evtid = md.u.evtid; + + r = resctrl_arch_get_resource(resid); + d = resctrl_arch_find_domain(r, domid); + if (IS_ERR_OR_NULL(d)) { + ret = -ENOENT; + goto out; + } + + mon_event_read(&rr, r, d, rdtgrp, evtid, false); + + if (rr.err == -EIO) + seq_puts(m, "Error\n"); + else if (rr.err == -EINVAL) + seq_puts(m, "Unavailable\n"); + else + seq_printf(m, "%llu\n", rr.val); + +out: + rdtgroup_kn_unlock(of->kn); + return ret; +} diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h index e69de29bb2d1..f73267762a87 100644 --- a/fs/resctrl/internal.h +++ b/fs/resctrl/internal.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_RESCTRL_INTERNAL_H +#define _FS_RESCTRL_INTERNAL_H + +#include +#include +#include +#include +#include +#include + +#include + +/** + * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that + * aren't marked nohz_full + * @mask: The mask to pick a CPU from. + * @exclude_cpu:The CPU to avoid picking. + * + * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping + * CPUs that don't use nohz_full, these are preferred. Pass + * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. + * + * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. + */ +static inline unsigned int +cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) +{ + unsigned int cpu, hk_cpu; + + if (exclude_cpu == RESCTRL_PICK_ANY_CPU) + cpu = cpumask_any(mask); + else + cpu = cpumask_any_but(mask, exclude_cpu); + + if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) + return cpu; + + /* If the CPU picked isn't marked nohz_full nothing more needs doing. 
*/ + if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) + return cpu; + + /* Try to find a CPU that isn't nohz_full to use in preference */ + hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu == exclude_cpu) + hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); + + if (hk_cpu < nr_cpu_ids) + cpu = hk_cpu; + + return cpu; +} + +struct rdt_fs_context { + struct kernfs_fs_context kfc; + bool enable_cdpl2; + bool enable_cdpl3; + bool enable_mba_mbps; + bool enable_debug; +}; + +static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) +{ + struct kernfs_fs_context *kfc = fc->fs_private; + + return container_of(kfc, struct rdt_fs_context, kfc); +} + +/** + * struct mon_evt - Entry in the event list of a resource + * @evtid: event id + * @name: name of the event + * @configurable: true if the event is configurable + * @list: entry in &rdt_resource->evt_list + */ +struct mon_evt { + enum resctrl_event_id evtid; + char *name; + bool configurable; + struct list_head list; +}; + +/** + * union mon_data_bits - Monitoring details for each event file + * @priv: Used to store monitoring event data in @u + * as kernfs private data + * @rid: Resource id associated with the event file + * @evtid: Event id associated with the event file + * @domid: The domain to which the event file belongs + * @u: Name of the bit fields struct + */ +union mon_data_bits { + void *priv; + struct { + unsigned int rid : 10; + enum resctrl_event_id evtid : 8; + unsigned int domid : 14; + } u; +}; + +struct rmid_read { + struct rdtgroup *rgrp; + struct rdt_resource *r; + struct rdt_domain *d; + enum resctrl_event_id evtid; + bool first; + int err; + u64 val; + void *arch_mon_ctx; +}; + +extern struct list_head resctrl_schema_all; +extern bool resctrl_mounted; + +enum rdt_group_type { + RDTCTRL_GROUP = 0, + RDTMON_GROUP, + RDT_NUM_GROUP, +}; + +/** + * enum rdtgrp_mode - Mode of a RDT resource group + * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations + * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed + * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking + * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations + * allowed AND the allocations are Cache Pseudo-Locked + * @RDT_NUM_MODES: Total number of modes + * + * The mode of a resource group enables control over the allowed overlap + * between allocations associated with different resource groups (classes + * of service). User is able to modify the mode of a resource group by + * writing to the "mode" resctrl file associated with the resource group. + * + * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by + * writing the appropriate text to the "mode" file. A resource group enters + * "pseudo-locked" mode after the schemata is written while the resource + * group is in "pseudo-locksetup" mode. + */ +enum rdtgrp_mode { + RDT_MODE_SHAREABLE = 0, + RDT_MODE_EXCLUSIVE, + RDT_MODE_PSEUDO_LOCKSETUP, + RDT_MODE_PSEUDO_LOCKED, + + /* Must be last */ + RDT_NUM_MODES, +}; + +/** + * struct mongroup - store mon group's data in resctrl fs. + * @mon_data_kn: kernfs node for the mon_data directory + * @parent: parent rdtgrp + * @crdtgrp_list: child rdtgroup node list + * @rmid: rmid for this rdtgroup + */ +struct mongroup { + struct kernfs_node *mon_data_kn; + struct rdtgroup *parent; + struct list_head crdtgrp_list; + u32 rmid; +}; + +/** + * struct rdtgroup - store rdtgroup's data in resctrl file system. 
+ * @kn: kernfs node + * @rdtgroup_list: linked list for all rdtgroups + * @closid: closid for this rdtgroup + * @cpu_mask: CPUs assigned to this rdtgroup + * @flags: status bits + * @waitcount: how many cpus expect to find this + * group when they acquire rdtgroup_mutex + * @type: indicates type of this rdtgroup - either + * monitor only or ctrl_mon group + * @mon: mongroup related data + * @mode: mode of resource group + * @plr: pseudo-locked region + */ +struct rdtgroup { + struct kernfs_node *kn; + struct list_head rdtgroup_list; + u32 closid; + struct cpumask cpu_mask; + int flags; + atomic_t waitcount; + enum rdt_group_type type; + struct mongroup mon; + enum rdtgrp_mode mode; + struct pseudo_lock_region *plr; +}; + +/* List of all resource groups */ +extern struct list_head rdt_all_groups; + +extern int max_name_width, max_data_width; + +/** + * struct rftype - describe each file in the resctrl file system + * @name: File name + * @mode: Access mode + * @kf_ops: File operations + * @flags: File specific RFTYPE_FLAGS_* flags + * @fflags: File specific RFTYPE_* flags + * @seq_show: Show content of the file + * @write: Write to the file + */ +struct rftype { + char *name; + umode_t mode; + const struct kernfs_ops *kf_ops; + unsigned long flags; + unsigned long fflags; + + int (*seq_show)(struct kernfs_open_file *of, + struct seq_file *sf, void *v); + /* + * write() is the generic write callback which maps directly to + * kernfs write operation and overrides all other operations. + * Maximum write size is determined by ->max_write_len. + */ + ssize_t (*write)(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +}; + +/** + * struct mbm_state - status for each MBM counter in each domain + * @prev_bw_bytes: Previous bytes value read for bandwidth calculation + * @prev_bw: The most recent bandwidth in MBps + */ +struct mbm_state { + u64 prev_bw_bytes; + u32 prev_bw; +}; + +static inline bool is_mba_sc(struct rdt_resource *r) +{ + if (!r) + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + /* + * The software controller support is only applicable to MBA resource. + * Make sure to check for resource type. 
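	 *
	 * Callers that have no resource handy may pass NULL, e.g. the MBM
	 * overflow path calls is_mba_sc(NULL), and the MBA resource is then
	 * looked up above.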
+ */ + if (r->rid != RDT_RESOURCE_MBA) + return false; + + return r->membw.mba_sc; +} + +extern struct mutex rdtgroup_mutex; +extern struct rdtgroup rdtgroup_default; +extern struct dentry *debugfs_resctrl; + +void rdt_last_cmd_clear(void); +void rdt_last_cmd_puts(const char *s); +__printf(1, 2) +void rdt_last_cmd_printf(const char *fmt, ...); + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); +void rdtgroup_kn_unlock(struct kernfs_node *kn); +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask); +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +int rdtgroup_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v); +bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, + unsigned long cbm, int closid, bool exclusive); +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, + unsigned long cbm); +enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); +int rdtgroup_tasks_assigned(struct rdtgroup *r); +int closids_supported(void); +void closid_free(int closid); +int alloc_rmid(u32 closid); +void free_rmid(u32 closid, u32 rmid); +void resctrl_mon_resource_exit(void); +void mon_event_count(void *info); +int rdtgroup_mondata_show(struct seq_file *m, void *arg); +void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, + struct rdt_domain *d, struct rdtgroup *rdtgrp, + int evtid, int first); +int resctrl_mon_resource_init(void); +void mbm_setup_overflow_handler(struct rdt_domain *dom, + unsigned long delay_ms, + int exclude_cpu); +void mbm_handle_overflow(struct work_struct *work); +bool is_mba_sc(struct rdt_resource *r); +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu); +void cqm_handle_limbo(struct work_struct *work); +bool has_busy_rmid(struct rdt_domain *d); +void __check_limbo(struct rdt_domain *d, bool force_free); +void mbm_config_rftype_init(const char *config); +void rdt_staged_configs_clear(void); +bool closid_allocated(unsigned int closid); +int resctrl_find_cleanest_closid(void); + +#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); +int rdt_pseudo_lock_init(void); +void rdt_pseudo_lock_release(void); +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); +#else +static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + return false; +} + +static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + return false; +} + +static inline int rdt_pseudo_lock_init(void) { return 0; } +static inline void rdt_pseudo_lock_release(void) { } +static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { } +#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ + +#endif /* _FS_RESCTRL_INTERNAL_H */ diff --git a/fs/resctrl/monitor.c 
b/fs/resctrl/monitor.c
index e69de29bb2d1..06f660dfd929 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Resource Director Technology (RDT)
+ * - Monitoring code
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author:
+ *    Vikas Shivappa
+ *
+ * This replaces the cqm.c based on perf but we reuse a lot of
+ * code and data structures originally from Peter Zijlstra and Matt Fleming.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * struct rmid_entry - dirty tracking for all RMID.
+ * @closid:	The CLOSID for this entry.
+ * @rmid:	The RMID for this entry.
+ * @busy:	The number of domains with cached data using this RMID.
+ * @list:	Member of the rmid_free_lru list when busy == 0.
+ *
+ * Depending on the architecture the correct monitor is accessed using
+ * both @closid and @rmid, or @rmid only.
+ *
+ * Take the rdtgroup_mutex when accessing.
+ */
+struct rmid_entry {
+	u32			closid;
+	u32			rmid;
+	int			busy;
+	struct list_head	list;
+};
+
+/*
+ * @rmid_free_lru - A least recently used list of free RMIDs
+ *     These RMIDs are guaranteed to have an occupancy less than the
+ *     threshold occupancy
+ */
+static LIST_HEAD(rmid_free_lru);
+
+/*
+ * @closid_num_dirty_rmid    The number of dirty RMID each CLOSID has.
+ *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
+ *     Indexed by CLOSID. Protected by rdtgroup_mutex.
+ */
+static u32 *closid_num_dirty_rmid;
+
+/*
+ * @rmid_limbo_count - count of currently unused but (potentially)
+ *     dirty RMIDs.
+ *     This counts RMIDs that no one is currently using but that
+ *     may have an occupancy value > resctrl_rmid_realloc_threshold. User can
+ *     change the threshold occupancy value.
+ */
+static unsigned int rmid_limbo_count;
+
+/*
+ * @rmid_entry - The entry in the limbo and free lists.
+ */
+static struct rmid_entry	*rmid_ptrs;
+
+/*
+ * This is the threshold cache occupancy in bytes at which we will consider an
+ * RMID available for re-allocation.
+ */
+unsigned int resctrl_rmid_realloc_threshold;
+
+/*
+ * This is the maximum value for the reallocation threshold, in bytes.
+ */
+unsigned int resctrl_rmid_realloc_limit;
+
+/*
+ * x86 and arm64 differ in their handling of monitoring.
+ * x86's RMID are independent numbers, there is only one source of traffic
+ * with an RMID value of '1'.
+ * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
+ * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
+ * value is no longer unique.
+ * To account for this, resctrl uses an index. On x86 this is just the RMID,
+ * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
+ *
+ * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
+ * must accept an attempt to read every index.
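+ *
+ * As a purely illustrative encoding (the real layout is arch defined):
+ * if each CLOSID had 8 PMG values, (closid = 2, rmid = 1) could map to
+ * index 2 * 8 + 1 = 17 on arm64, while on x86 the index is simply the
+ * RMID value 1.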
+ */ +static inline struct rmid_entry *__rmid_entry(u32 idx) +{ + struct rmid_entry *entry; + u32 closid, rmid; + + entry = &rmid_ptrs[idx]; + resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); + + WARN_ON_ONCE(entry->closid != closid); + WARN_ON_ONCE(entry->rmid != rmid); + + return entry; +} + +static void limbo_release_entry(struct rmid_entry *entry) +{ + lockdep_assert_held(&rdtgroup_mutex); + + rmid_limbo_count--; + list_add_tail(&entry->list, &rmid_free_lru); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]--; +} + +/* + * Check the RMIDs that are marked as busy for this domain. If the + * reported LLC occupancy is below the threshold clear the busy bit and + * decrement the count. If the busy count gets to zero on an RMID, we + * free the RMID + */ +void __check_limbo(struct rdt_domain *d, bool force_free) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + struct rmid_entry *entry; + u32 idx, cur_idx = 1; + void *arch_mon_ctx; + bool rmid_dirty; + u64 val = 0; + + arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); + if (IS_ERR(arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(arch_mon_ctx)); + return; + } + + /* + * Skip RMID 0 and start from RMID 1 and check all the RMIDs that + * are marked as busy for occupancy < threshold. If the occupancy + * is less than the threshold decrement the busy counter of the + * RMID and move it to the free list when the counter reaches 0. + */ + for (;;) { + idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); + if (idx >= idx_limit) + break; + + entry = __rmid_entry(idx); + if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, + QOS_L3_OCCUP_EVENT_ID, &val, + arch_mon_ctx)) { + rmid_dirty = true; + } else { + rmid_dirty = (val >= resctrl_rmid_realloc_threshold); + } + + if (force_free || !rmid_dirty) { + clear_bit(idx, d->rmid_busy_llc); + if (!--entry->busy) + limbo_release_entry(entry); + } + cur_idx = idx + 1; + } + + resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); +} + +bool has_busy_rmid(struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + + return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; +} + +static struct rmid_entry *resctrl_find_free_rmid(u32 closid) +{ + struct rmid_entry *itr; + u32 itr_idx, cmp_idx; + + if (list_empty(&rmid_free_lru)) + return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); + + list_for_each_entry(itr, &rmid_free_lru, list) { + /* + * Get the index of this free RMID, and the index it would need + * to be if it were used with this CLOSID. + * If the CLOSID is irrelevant on this architecture, the two + * index values are always the same on every entry and thus the + * very first entry will be returned. + */ + itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); + cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); + + if (itr_idx == cmp_idx) + return itr; + } + + return ERR_PTR(-ENOSPC); +} + +/** + * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated + * RMID are clean, or the CLOSID that has + * the most clean RMID. + * + * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID + * may not be able to allocate clean RMID. To avoid this the allocator will + * choose the CLOSID with the most clean RMID. 
+ *
+ * When the CLOSID and RMID are independent numbers, the first free CLOSID will
+ * be returned.
+ */
+int resctrl_find_cleanest_closid(void)
+{
+	u32 cleanest_closid = ~0;
+	int i = 0;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
+		return -EIO;
+
+	for (i = 0; i < closids_supported(); i++) {
+		int num_dirty;
+
+		if (closid_allocated(i))
+			continue;
+
+		num_dirty = closid_num_dirty_rmid[i];
+		if (num_dirty == 0)
+			return i;
+
+		if (cleanest_closid == ~0)
+			cleanest_closid = i;
+
+		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
+			cleanest_closid = i;
+	}
+
+	if (cleanest_closid == ~0)
+		return -ENOSPC;
+
+	return cleanest_closid;
+}
+
+/*
+ * For MPAM the RMID value is not unique, and has to be considered with
+ * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
+ * allows all domains to be managed by a single free list.
+ * Each domain also has an rmid_busy_llc to reduce the work of the limbo handler.
+ */
+int alloc_rmid(u32 closid)
+{
+	struct rmid_entry *entry;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	entry = resctrl_find_free_rmid(closid);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+
+	list_del(&entry->list);
+	return entry->rmid;
+}
+
+static void add_rmid_to_limbo(struct rmid_entry *entry)
+{
+	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+	struct rdt_domain *d;
+	u32 idx;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	/* Walking r->domains, ensure it can't race with cpuhp */
+	lockdep_assert_cpus_held();
+
+	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
+
+	entry->busy = 0;
+	list_for_each_entry(d, &r->domains, list) {
+		/*
+		 * For the first limbo RMID in the domain,
+		 * set up the limbo worker.
+		 */
+		if (!has_busy_rmid(d))
+			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL,
+						RESCTRL_PICK_ANY_CPU);
+		set_bit(idx, d->rmid_busy_llc);
+		entry->busy++;
+	}
+
+	rmid_limbo_count++;
+	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
+		closid_num_dirty_rmid[entry->closid]++;
+}
+
+void free_rmid(u32 closid, u32 rmid)
+{
+	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
+	struct rmid_entry *entry;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	/*
+	 * Do not allow the default rmid to be freed. Comparing by index
+	 * allows architectures that ignore the closid parameter to avoid an
+	 * unnecessary check.
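+	 *
+	 * On x86, for instance, the index is simply the RMID, so this single
+	 * compare covers the reserved entry without a separate closid check.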
+ */ + if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID)) + return; + + entry = __rmid_entry(idx); + + if (resctrl_arch_is_llc_occupancy_enabled()) + add_rmid_to_limbo(entry); + else + list_add_tail(&entry->list, &rmid_free_lru); +} + +static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return &d->mbm_total[idx]; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return &d->mbm_local[idx]; + default: + return NULL; + } +} + +static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + struct mbm_state *m; + u64 tval = 0; + + if (rr->first) { + resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); + m = get_mbm_state(rr->d, closid, rmid, rr->evtid); + if (m) + memset(m, 0, sizeof(struct mbm_state)); + return 0; + } + + rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, + &tval, rr->arch_mon_ctx); + if (rr->err) + return rr->err; + + rr->val += tval; + + return 0; +} + +/* + * mbm_bw_count() - Update bw count from values previously read by + * __mon_event_count(). + * @closid: The closid used to identify the cached mbm_state. + * @rmid: The rmid used to identify the cached mbm_state. + * @rr: The struct rmid_read populated by __mon_event_count(). + * + * Supporting function to calculate the memory bandwidth + * and delta bandwidth in MBps. The chunks value previously read by + * __mon_event_count() is compared with the chunks value from the previous + * invocation. This must be called once per second to maintain values in MBps. + */ +static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct mbm_state *m = &rr->d->mbm_local[idx]; + u64 cur_bw, bytes, cur_bytes; + + cur_bytes = rr->val; + bytes = cur_bytes - m->prev_bw_bytes; + m->prev_bw_bytes = cur_bytes; + + cur_bw = bytes / SZ_1M; + + m->prev_bw = cur_bw; +} + +/* + * This is scheduled by mon_event_read() to read the CQM/MBM counters + * on a domain. + */ +void mon_event_count(void *info) +{ + struct rdtgroup *rdtgrp, *entry; + struct rmid_read *rr = info; + struct list_head *head; + int ret; + + rdtgrp = rr->rgrp; + + ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); + + /* + * For Ctrl groups read data from child monitor groups and + * add them together. Count events which are read successfully. + * Discard the rmid_read's reporting errors. + */ + head = &rdtgrp->mon.crdtgrp_list; + + if (rdtgrp->type == RDTCTRL_GROUP) { + list_for_each_entry(entry, head, mon.crdtgrp_list) { + if (__mon_event_count(entry->closid, entry->mon.rmid, + rr) == 0) + ret = 0; + } + } + + /* + * __mon_event_count() calls for newly created monitor groups may + * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. + * Discard error if any of the monitor event reads succeeded. + */ + if (ret == 0) + rr->err = 0; +} + +/* + * Feedback loop for MBA software controller (mba_sc) + * + * mba_sc is a feedback loop where we periodically read MBM counters and + * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so + * that: + * + * current bandwidth(cur_bw) < user specified bandwidth(user_bw) + * + * This uses the MBM counters to measure the bandwidth and MBA throttle + * MSRs to control the bandwidth for a particular rdtgrp. 
It builds on the
+ * fact that resctrl rdtgroups have both monitoring and control.
+ *
+ * The frequency of the checks is 1s and we just tag along the MBM overflow
+ * timer. Having 1s interval makes the calculation of bandwidth simpler.
+ *
+ * Although MBA's goal is to restrict the bandwidth to a maximum, there may
+ * be a need to increase the bandwidth to avoid unnecessarily restricting
+ * the L2 <-> L3 traffic.
+ *
+ * Since MBA controls the L2 external bandwidth whereas MBM measures the
+ * L3 external bandwidth, the following sequence could lead to such a
+ * situation.
+ *
+ * Consider an rdtgroup which had high L3 <-> memory traffic in initial
+ * phases -> mba_sc kicks in and reduces bandwidth percentage values -> but
+ * after some time rdtgroup has mostly L2 <-> L3 traffic.
+ *
+ * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
+ * throttle MSRs already have low percentage values. To avoid
+ * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
+ */
+static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
+{
+	u32 closid, rmid, cur_msr_val, new_msr_val;
+	struct mbm_state *pmbm_data, *cmbm_data;
+	struct rdt_resource *r_mba;
+	struct rdt_domain *dom_mba;
+	u32 cur_bw, user_bw, idx;
+	struct list_head *head;
+	struct rdtgroup *entry;
+
+	if (!resctrl_arch_is_mbm_local_enabled())
+		return;
+
+	r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
+
+	closid = rgrp->closid;
+	rmid = rgrp->mon.rmid;
+	idx = resctrl_arch_rmid_idx_encode(closid, rmid);
+	pmbm_data = &dom_mbm->mbm_local[idx];
+
+	dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba);
+	if (!dom_mba) {
+		pr_warn_once("Failure to get domain for MBA update\n");
+		return;
+	}
+
+	cur_bw = pmbm_data->prev_bw;
+	user_bw = dom_mba->mbps_val[closid];
+
+	/* MBA resource doesn't support CDP */
+	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
+
+	/*
+	 * For Ctrl groups read data from child monitor groups.
+	 */
+	head = &rgrp->mon.crdtgrp_list;
+	list_for_each_entry(entry, head, mon.crdtgrp_list) {
+		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
+		cur_bw += cmbm_data->prev_bw;
+	}
+
+	/*
+	 * Scale up/down the bandwidth linearly for the ctrl group. The
+	 * bandwidth step is the bandwidth granularity specified by the
+	 * hardware.
+	 * Always increase throttling if current bandwidth is above the
+	 * target set by user.
+	 * But avoid thrashing up and down on every poll by checking
+	 * whether a decrease in throttling is likely to push the group
+	 * back over target. E.g. if currently throttling to 30% of bandwidth
+	 * on a system with 10% granularity steps, check whether moving to
+	 * 40% would go past the limit by multiplying current bandwidth by
+	 * "(30 + 10) / 30".
+	 */
+	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
+		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
+	} else if (cur_msr_val < MAX_MBA_BW &&
+		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
+		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
+	} else {
+		return;
+	}
+
+	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
+}
+
+static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
+		       u32 closid, u32 rmid)
+{
+	struct rmid_read rr;
+
+	rr.first = false;
+	rr.r = r;
+	rr.d = d;
+
+	/*
+	 * This is protected from concurrent reads from user as both
+	 * the user and the overflow handler hold the global mutex.
+ */ + if (resctrl_arch_is_mbm_total_enabled()) { + rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } + if (resctrl_arch_is_mbm_local_enabled()) { + rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + /* + * Call the MBA software controller only for the + * control groups and when user has enabled + * the software controller explicitly. + */ + if (is_mba_sc(NULL)) + mbm_bw_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } +} + +/* + * Handler to scan the limbo list and move the RMIDs + * to free list whose occupancy < threshold_occupancy. + */ +void cqm_handle_limbo(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + d = container_of(work, struct rdt_domain, cqm_limbo.work); + + __check_limbo(d, false); + + if (has_busy_rmid(d)) { + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, + delay); + } + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this + * domain. + * @dom: The domain the limbo handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->cqm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); +} + +void mbm_handle_overflow(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); + struct rdtgroup *prgrp, *crgrp; + struct list_head *head; + struct rdt_resource *r; + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* + * If the filesystem has been unmounted this work no longer needs to + * run. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + goto out_unlock; + + r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + d = container_of(work, struct rdt_domain, mbm_over.work); + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); + + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) + mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); + + if (is_mba_sc(NULL)) + update_mba_bw(prgrp, d); + } + + /* + * Re-check for housekeeping CPUs. This allows the overflow handler to + * move off a nohz_full CPU quickly. 
+ */ + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this + * domain. + * @dom: The domain the overflow handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + /* + * When a domain comes online there is no guarantee the filesystem is + * mounted. If not, there is no need to catch counter overflow. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + return; + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->mbm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->mbm_over, delay); +} + +static int dom_data_init(struct rdt_resource *r) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rmid_entry *entry = NULL; + int err = 0, i; + u32 idx; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + u32 *tmp; + + /* + * If the architecture hasn't provided a sanitised value here, + * this may result in larger arrays than necessary. Resctrl will + * use a smaller system wide value based on the resources in + * use. + */ + tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto out_unlock; + } + + closid_num_dirty_rmid = tmp; + } + + rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); + if (!rmid_ptrs) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + err = -ENOMEM; + goto out_unlock; + } + + for (i = 0; i < idx_limit; i++) { + entry = &rmid_ptrs[i]; + INIT_LIST_HEAD(&entry->list); + + resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); + list_add_tail(&entry->list, &rmid_free_lru); + } + + /* + * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and + * are always allocated. These are used for the rdtgroup_default + * control group, which will be setup later in rdtgroup_init(). + */ + idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID); + entry = __rmid_entry(idx); + list_del(&entry->list); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +static void dom_data_exit(struct rdt_resource *r) +{ + if (!r->mon_capable) + return; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + + kfree(rmid_ptrs); + rmid_ptrs = NULL; + + mutex_unlock(&rdtgroup_mutex); +} + +static struct mon_evt llc_occupancy_event = { + .name = "llc_occupancy", + .evtid = QOS_L3_OCCUP_EVENT_ID, +}; + +static struct mon_evt mbm_total_event = { + .name = "mbm_total_bytes", + .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, +}; + +static struct mon_evt mbm_local_event = { + .name = "mbm_local_bytes", + .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, +}; + +/* + * Initialize the event list for the resource. 
+ *
+ * Note that MBM events are also part of RDT_RESOURCE_L3 resource
+ * because, as per the SDM, the total and local memory bandwidth
+ * are enumerated as part of L3 monitoring.
+ */
+static void l3_mon_evt_init(struct rdt_resource *r)
+{
+ INIT_LIST_HEAD(&r->evt_list);
+
+ if (resctrl_arch_is_llc_occupancy_enabled())
+ list_add_tail(&llc_occupancy_event.list, &r->evt_list);
+ if (resctrl_arch_is_mbm_total_enabled())
+ list_add_tail(&mbm_total_event.list, &r->evt_list);
+ if (resctrl_arch_is_mbm_local_enabled())
+ list_add_tail(&mbm_local_event.list, &r->evt_list);
+}
+
+int resctrl_mon_resource_init(void)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+ int ret;
+
+ if (!r->mon_capable)
+ return 0;
+
+ ret = dom_data_init(r);
+ if (ret)
+ return ret;
+
+ l3_mon_evt_init(r);
+
+ if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
+ mbm_total_event.configurable = true;
+ mbm_config_rftype_init("mbm_total_bytes_config");
+ }
+ if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
+ mbm_local_event.configurable = true;
+ mbm_config_rftype_init("mbm_local_bytes_config");
+ }
+
+ return 0;
+}
+
+void resctrl_mon_resource_exit(void)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+
+ dom_data_exit(r);
+}
diff --git a/fs/resctrl/pseudo_lock.c b/fs/resctrl/pseudo_lock.c
index e69de29bb2d1..077c2abb6edd 100644
--- a/fs/resctrl/pseudo_lock.c
+++ b/fs/resctrl/pseudo_lock.c
@@ -0,0 +1,1122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resource Director Technology (RDT)
+ *
+ * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Reinette Chatre
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "internal.h"
+
+/*
+ * Major number assigned to and shared by all devices exposing
+ * pseudo-locked regions.
+ */
+static unsigned int pseudo_lock_major;
+static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
+
+static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
+{
+ const struct rdtgroup *rdtgrp;
+
+ rdtgrp = dev_get_drvdata(dev);
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+}
+
+static const struct class pseudo_lock_class = {
+ .name = "pseudo_lock",
+ .devnode = pseudo_lock_devnode,
+};
+
+/**
+ * pseudo_lock_minor_get - Obtain available minor number
+ * @minor: Pointer to where new minor number will be stored
+ *
+ * A bitmask is used to track available minor numbers. Here the next free
+ * minor number is marked as unavailable and returned.
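+ *
+ * A minimal usage sketch (error handling elided), paired with
+ * pseudo_lock_minor_release() below:
+ *
+ *	unsigned int minor;
+ *
+ *	if (!pseudo_lock_minor_get(&minor)) {
+ *		... create device as MKDEV(pseudo_lock_major, minor) ...
+ *		pseudo_lock_minor_release(minor);
+ *	}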
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int pseudo_lock_minor_get(unsigned int *minor)
+{
+ unsigned long first_bit;
+
+ first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
+
+ if (first_bit == MINORBITS)
+ return -ENOSPC;
+
+ __clear_bit(first_bit, &pseudo_lock_minor_avail);
+ *minor = first_bit;
+
+ return 0;
+}
+
+/**
+ * pseudo_lock_minor_release - Return minor number to available
+ * @minor: The minor number made available
+ */
+static void pseudo_lock_minor_release(unsigned int minor)
+{
+ __set_bit(minor, &pseudo_lock_minor_avail);
+}
+
+/**
+ * region_find_by_minor - Locate a pseudo-lock region by inode minor number
+ * @minor: The minor number of the device representing pseudo-locked region
+ *
+ * When the character device is accessed we need to determine which
+ * pseudo-locked region it belongs to. This is done by matching the minor
+ * number of the device to the pseudo-locked region to which it belongs.
+ *
+ * Minor numbers are assigned at the time a pseudo-locked region is associated
+ * with a cache instance.
+ *
+ * Return: On success return pointer to resource group owning the pseudo-locked
+ * region, NULL on failure.
+ */
+static struct rdtgroup *region_find_by_minor(unsigned int minor)
+{
+ struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
+ rdtgrp_match = rdtgrp;
+ break;
+ }
+ }
+ return rdtgrp_match;
+}
+
+/**
+ * struct pseudo_lock_pm_req - A power management QoS request list entry
+ * @list: Entry within the @pm_reqs list for a pseudo-locked region
+ * @req: PM QoS request
+ */
+struct pseudo_lock_pm_req {
+ struct list_head list;
+ struct dev_pm_qos_request req;
+};
+
+static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req, *next;
+
+ list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
+ dev_pm_qos_remove_request(&pm_req->req);
+ list_del(&pm_req->list);
+ kfree(pm_req);
+ }
+}
+
+/**
+ * pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ * @plr: Pseudo-locked region
+ *
+ * To prevent the cache from being affected by power management, entering
+ * C6 has to be avoided. This is accomplished by requesting a latency
+ * requirement lower than the lowest C6 exit latency of all supported
+ * platforms as found in the cpuidle state tables in the intel_idle driver.
+ * At this time it is possible to do so with a single latency requirement
+ * for all supported platforms.
+ *
+ * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
+ * the ACPI latencies need to be considered while keeping in mind that C2
+ * may be set to map to deeper sleep states. In this case the latency
+ * requirement needs to prevent entering C2 also.
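+ *
+ * The constraint is built below from one PM QoS request per CPU in the
+ * domain, roughly:
+ *
+ *	dev_pm_qos_add_request(get_cpu_device(cpu), &pm_req->req,
+ *			       DEV_PM_QOS_RESUME_LATENCY, 30);
+ *
+ * with the 30 usec limit chosen to stay below the C6 exit latency of
+ * the supported platforms.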
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
+{
+ struct pseudo_lock_pm_req *pm_req;
+ int cpu;
+ int ret;
+
+ for_each_cpu(cpu, &plr->d->cpu_mask) {
+ pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
+ if (!pm_req) {
+ rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ ret = dev_pm_qos_add_request(get_cpu_device(cpu),
+ &pm_req->req,
+ DEV_PM_QOS_RESUME_LATENCY,
+ 30);
+ if (ret < 0) {
+ rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
+ cpu);
+ kfree(pm_req);
+ ret = -1;
+ goto out_err;
+ }
+ list_add(&pm_req->list, &plr->pm_reqs);
+ }
+
+ return 0;
+
+out_err:
+ pseudo_lock_cstates_relax(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_region_clear - Reset pseudo-lock region data
+ * @plr: pseudo-lock region
+ *
+ * All content of the pseudo-locked region is reset - any memory allocated
+ * is freed.
+ *
+ * Return: void
+ */
+static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
+{
+ plr->size = 0;
+ plr->line_size = 0;
+ kfree(plr->kmem);
+ plr->kmem = NULL;
+ plr->s = NULL;
+ if (plr->d)
+ plr->d->plr = NULL;
+ plr->d = NULL;
+ plr->cbm = 0;
+ plr->debugfs_dir = NULL;
+}
+
+/**
+ * pseudo_lock_region_init - Initialize pseudo-lock region information
+ * @plr: pseudo-lock region
+ *
+ * Called after the user provided a schemata to be pseudo-locked. On entry
+ * the &struct pseudo_lock_region has already been initialized from the
+ * schemata with the resource, domain, and capacity bitmask. Here the
+ * information required for pseudo-locking is deduced from this data and
+ * the &struct pseudo_lock_region is initialized further. This information
+ * includes:
+ * - size in bytes of the region to be pseudo-locked
+ * - cache line size to know the stride with which data needs to be accessed
+ * to be pseudo-locked
+ * - a cpu associated with the cache instance on which the pseudo-locking
+ * flow can be executed
+ *
+ * Return: 0 on success, <0 on failure. Descriptive error will be written
+ * to last_cmd_status buffer.
+ */
+static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
+{
+ struct cpu_cacheinfo *ci;
+ int ret;
+ int i;
+
+ /* Pick the first cpu we find that is associated with the cache. */
+ plr->cpu = cpumask_first(&plr->d->cpu_mask);
+
+ if (!cpu_online(plr->cpu)) {
+ rdt_last_cmd_printf("CPU %u associated with cache not online\n",
+ plr->cpu);
+ ret = -ENODEV;
+ goto out_region;
+ }
+
+ ci = get_cpu_cacheinfo(plr->cpu);
+
+ plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == plr->s->res->cache_level) {
+ plr->line_size = ci->info_list[i].coherency_line_size;
+ return 0;
+ }
+ }
+
+ ret = -1;
+ rdt_last_cmd_puts("Unable to determine cache line size\n");
+out_region:
+ pseudo_lock_region_clear(plr);
+ return ret;
+}
+
+/**
+ * pseudo_lock_init - Initialize a pseudo-lock region
+ * @rdtgrp: resource group to which new pseudo-locked region will belong
+ *
+ * A pseudo-locked region is associated with a resource group. When this
+ * association is created the pseudo-locked region is initialized. The
+ * details of the pseudo-locked region are not known at this time so only
+ * allocation is done and association established.
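+ *
+ * The region allocated here is freed again via pseudo_lock_free(), either
+ * when the group leaves locksetup mode or when it is removed.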
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_init(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr; + + plr = kzalloc(sizeof(*plr), GFP_KERNEL); + if (!plr) + return -ENOMEM; + + init_waitqueue_head(&plr->lock_thread_wq); + INIT_LIST_HEAD(&plr->pm_reqs); + rdtgrp->plr = plr; + return 0; +} + +/** + * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked + * @plr: pseudo-lock region + * + * Initialize the details required to set up the pseudo-locked region and + * allocate the contiguous memory that will be pseudo-locked to the cache. + * + * Return: 0 on success, <0 on failure. Descriptive error will be written + * to last_cmd_status buffer. + */ +static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) +{ + int ret; + + ret = pseudo_lock_region_init(plr); + if (ret < 0) + return ret; + + /* + * We do not yet support contiguous regions larger than + * KMALLOC_MAX_SIZE. + */ + if (plr->size > KMALLOC_MAX_SIZE) { + rdt_last_cmd_puts("Requested region exceeds maximum size\n"); + ret = -E2BIG; + goto out_region; + } + + plr->kmem = kzalloc(plr->size, GFP_KERNEL); + if (!plr->kmem) { + rdt_last_cmd_puts("Unable to allocate memory\n"); + ret = -ENOMEM; + goto out_region; + } + + ret = 0; + goto out; +out_region: + pseudo_lock_region_clear(plr); +out: + return ret; +} + +/** + * pseudo_lock_free - Free a pseudo-locked region + * @rdtgrp: resource group to which pseudo-locked region belonged + * + * The pseudo-locked region's resources have already been released, or not + * yet created at this point. Now it can be freed and disassociated from the + * resource group. + * + * Return: void + */ +static void pseudo_lock_free(struct rdtgroup *rdtgrp) +{ + pseudo_lock_region_clear(rdtgrp->plr); + kfree(rdtgrp->plr); + rdtgrp->plr = NULL; +} + +/** + * rdtgroup_monitor_in_progress - Test if monitoring in progress + * @rdtgrp: resource group being queried + * + * Return: 1 if monitor groups have been created for this resource + * group, 0 otherwise. + */ +static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) +{ + return !list_empty(&rdtgrp->mon.crdtgrp_list); +} + +/** + * rdtgroup_locksetup_user_restrict - Restrict user access to group + * @rdtgrp: resource group needing access restricted + * + * A resource group used for cache pseudo-locking cannot have cpus or tasks + * assigned to it. This is communicated to the user by restricting access + * to all the files that can be used to make such changes. + * + * Permissions restored with rdtgroup_locksetup_user_restore() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restriction of access an attempt will be made to restore permissions but + * the state of the mode of these files will be uncertain when a failure + * occurs. 
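+ *
+ * The files restricted below are "tasks", "cpus", "cpus_list" and, on
+ * monitor capable systems, "mon_groups"; access is later restored with
+ * rdtgroup_locksetup_user_restore(), e.g.:
+ *
+ *	rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);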
+ */ +static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); +err_cpus: + rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); +err_tasks: + rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); +out: + return ret; +} + +/** + * rdtgroup_locksetup_user_restore - Restore user access to group + * @rdtgrp: resource group needing access restored + * + * Restore all file access previously removed using + * rdtgroup_locksetup_user_restrict() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restoration of access an attempt will be made to restrict permissions + * again but the state of the mode of these files will be uncertain when + * a failure occurs. + */ +static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); +err_cpus: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); +err_tasks: + rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); +out: + return ret; +} + +/** + * rdtgroup_locksetup_enter - Resource group enters locksetup mode + * @rdtgrp: resource group requested to enter locksetup mode + * + * A resource group enters locksetup mode to reflect that it would be used + * to represent a pseudo-locked region and is in the process of being set + * up to do so. A resource group used for a pseudo-locked region would + * lose the closid associated with it so we cannot allow it to have any + * tasks or cpus assigned nor permit tasks or cpus to be assigned in the + * future. Monitoring of a pseudo-locked region is not allowed either. + * + * The above and more restrictions on a pseudo-locked region are checked + * for and enforced before the resource group enters the locksetup mode. + * + * Returns: 0 if the resource group successfully entered locksetup mode, <0 + * on failure. On failure the last_cmd_status buffer is updated with text to + * communicate details of failure to the user. + */ +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + int ret; + + /* + * The default resource group can neither be removed nor lose the + * default closid associated with it. + */ + if (rdtgrp == &rdtgroup_default) { + rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); + return -EINVAL; + } + + /* + * Cache Pseudo-locking not supported when CDP is enabled. + * + * Some things to consider if you would like to enable this + * support (using L3 CDP as example): + * - When CDP is enabled two separate resources are exposed, + * L3DATA and L3CODE, but they are actually on the same cache. 
+ * The implication for pseudo-locking is that if a
+ * pseudo-locked region is created on a domain of one
+ * resource (eg. L3CODE), then a pseudo-locked region cannot
+ * be created on that same domain of the other resource
+ * (eg. L3DATA). This is because the creation of a
+ * pseudo-locked region involves a call to wbinvd that will
+ * affect all cache allocations on the particular domain.
+ * - Considering the previous, it may be possible to only
+ * expose one of the CDP resources to pseudo-locking and
+ * hide the other. For example, we could consider only
+ * exposing L3DATA and since the L3 cache is unified it is
+ * still possible to place instructions there and execute them.
+ * - If only one region is exposed to pseudo-locking we should
+ * still keep in mind that availability of a portion of cache
+ * for pseudo-locking should take into account both resources.
+ * Similarly, if a pseudo-locked region is created in one
+ * resource, the portion of cache used by it should be made
+ * unavailable to all future allocations from both resources.
+ */
+ if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
+ resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
+ rdt_last_cmd_puts("CDP enabled\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Not knowing the bits to disable prefetching implies that this
+ * platform does not support Cache Pseudo-Locking.
+ */
+ if (resctrl_arch_get_prefetch_disable_bits() == 0) {
+ rdt_last_cmd_puts("Pseudo-locking not supported\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_monitor_in_progress(rdtgrp)) {
+ rdt_last_cmd_puts("Monitoring in progress\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_tasks_assigned(rdtgrp)) {
+ rdt_last_cmd_puts("Tasks assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+ rdt_last_cmd_puts("CPUs assigned to resource group\n");
+ return -EINVAL;
+ }
+
+ if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
+ rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
+ return -EIO;
+ }
+
+ ret = pseudo_lock_init(rdtgrp);
+ if (ret) {
+ rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
+ goto out_release;
+ }
+
+ /*
+ * If this system is capable of monitoring, an RMID would have been
+ * allocated when the control group was created. It is no longer
+ * needed once this group is used for pseudo-locking. This
+ * is safe to call on platforms not capable of monitoring.
+ */
+ free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
+
+ ret = 0;
+ goto out;
+
+out_release:
+ rdtgroup_locksetup_user_restore(rdtgrp);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_locksetup_exit - resource group exits locksetup mode
+ * @rdtgrp: resource group
+ *
+ * When a resource group exits locksetup mode the earlier restrictions are
+ * lifted.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
+{
+ int ret;
+
+ if (resctrl_arch_mon_capable()) {
+ ret = alloc_rmid(rdtgrp->closid);
+ if (ret < 0) {
+ rdt_last_cmd_puts("Out of RMIDs\n");
+ return ret;
+ }
+ rdtgrp->mon.rmid = ret;
+ }
+
+ ret = rdtgroup_locksetup_user_restore(rdtgrp);
+ if (ret) {
+ free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
+ return ret;
+ }
+
+ pseudo_lock_free(rdtgrp);
+ return 0;
+}
+
+/**
+ * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
+ * @d: RDT domain
+ * @cbm: CBM to test
+ *
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
+ * pseudo-locked region on @d.
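+ *
+ * For example, with a pseudo-locked region occupying CBM 0x0f0 (bits
+ * 4-7), @cbm == 0x0c0 overlaps while @cbm == 0xf00 does not; the test
+ * below is a plain bitmap_intersects() over the cache's cbm_len bits.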
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
+ * otherwise.
+ */
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
+{
+ unsigned int cbm_len;
+ unsigned long cbm_b;
+
+ if (d->plr) {
+ cbm_len = d->plr->s->res->cache.cbm_len;
+ cbm_b = d->plr->cbm;
+ if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
+ * @d: RDT domain under test
+ *
+ * The setup of a pseudo-locked region affects all cache instances within
+ * the hierarchy of the region. It is thus essential to know if any
+ * pseudo-locked regions exist within a cache hierarchy to prevent any
+ * attempts to create new pseudo-locked regions in the same hierarchy.
+ *
+ * Return: true if a pseudo-locked region exists in the hierarchy of @d or
+ * if it is not possible to test due to a memory allocation issue,
+ * false otherwise.
+ */
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
+{
+ cpumask_var_t cpu_with_psl;
+ enum resctrl_res_level i;
+ struct rdt_resource *r;
+ struct rdt_domain *d_i;
+ bool ret = false;
+
+ /* Walking r->domains, ensure it can't race with cpuhp */
+ lockdep_assert_cpus_held();
+
+ if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
+ return true;
+
+ /*
+ * First determine which cpus have pseudo-locked regions
+ * associated with them.
+ */
+ for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+ r = resctrl_arch_get_resource(i);
+ if (!r->alloc_capable)
+ continue;
+
+ list_for_each_entry(d_i, &r->domains, list) {
+ if (d_i->plr)
+ cpumask_or(cpu_with_psl, cpu_with_psl,
+ &d_i->cpu_mask);
+ }
+ }
+
+ /*
+ * Next test if the new pseudo-locked region would intersect with
+ * an existing region.
+ */
+ if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
+ ret = true;
+
+ free_cpumask_var(cpu_with_psl);
+ return ret;
+}
+
+/**
+ * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
+ * @rdtgrp: Resource group to which the pseudo-locked region belongs.
+ * @sel: Selector of which measurement to perform on a pseudo-locked region.
+ *
+ * The measurement of latency to access a pseudo-locked region should be
+ * done from a cpu that is associated with that pseudo-locked region.
+ * Determine which cpu is associated with this region and start a thread on
+ * that cpu to perform the measurement, and wait for that thread to complete.
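+ *
+ * @sel selects the measurement: 1 runs
+ * resctrl_arch_measure_cycles_lat_fn(), 2 runs
+ * resctrl_arch_measure_l2_residency() and 3 runs
+ * resctrl_arch_measure_l3_residency().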
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + struct task_struct *thread; + unsigned int cpu; + int ret = -1; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + if (rdtgrp->flags & RDT_DELETED) { + ret = -ENODEV; + goto out; + } + + if (!plr->d) { + ret = -ENODEV; + goto out; + } + + plr->thread_done = 0; + cpu = cpumask_first(&plr->d->cpu_mask); + if (!cpu_online(cpu)) { + ret = -ENODEV; + goto out; + } + + plr->cpu = cpu; + + if (sel == 1) + thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 2) + thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 3) + thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else + goto out; + + if (IS_ERR(thread)) { + ret = PTR_ERR(thread); + goto out; + } + kthread_bind(thread, cpu); + wake_up_process(thread); + + ret = wait_event_interruptible(plr->lock_thread_wq, + plr->thread_done == 1); + if (ret < 0) + goto out; + + ret = 0; + +out: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +static ssize_t pseudo_lock_measure_trigger(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rdtgroup *rdtgrp = file->private_data; + size_t buf_size; + char buf[32]; + int ret; + int sel; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + ret = kstrtoint(buf, 10, &sel); + if (ret == 0) { + if (sel != 1 && sel != 2 && sel != 3) + return -EINVAL; + ret = debugfs_file_get(file->f_path.dentry); + if (ret) + return ret; + ret = pseudo_lock_measure_cycles(rdtgrp, sel); + if (ret == 0) + ret = count; + debugfs_file_put(file->f_path.dentry); + } + + return ret; +} + +static const struct file_operations pseudo_measure_fops = { + .write = pseudo_lock_measure_trigger, + .open = simple_open, + .llseek = default_llseek, +}; + +/** + * rdtgroup_pseudo_lock_create - Create a pseudo-locked region + * @rdtgrp: resource group to which pseudo-lock region belongs + * + * Called when a resource group in the pseudo-locksetup mode receives a + * valid schemata that should be pseudo-locked. Since the resource group is + * in pseudo-locksetup mode the &struct pseudo_lock_region has already been + * allocated and initialized with the essential information. If a failure + * occurs the resource group remains in the pseudo-locksetup mode with the + * &struct pseudo_lock_region associated with it, but cleared from all + * information and ready for the user to re-attempt pseudo-locking by + * writing the schemata again. + * + * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 + * on failure. Descriptive error will be written to last_cmd_status buffer. 
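+ *
+ * A sketch of the userspace flow that ends up here (the group name and
+ * schemata are examples only):
+ *
+ *	# echo pseudo-locksetup > /sys/fs/resctrl/p0/mode
+ *	# echo "L3:0=0x3" > /sys/fs/resctrl/p0/schemata
+ *
+ * after which the locked memory is exposed as /dev/pseudo_lock/p0.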
+ */
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct task_struct *thread;
+ unsigned int new_minor;
+ struct device *dev;
+ int ret;
+
+ ret = pseudo_lock_region_alloc(plr);
+ if (ret < 0)
+ return ret;
+
+ ret = pseudo_lock_cstates_constrain(plr);
+ if (ret < 0) {
+ ret = -EINVAL;
+ goto out_region;
+ }
+
+ plr->thread_done = 0;
+
+ plr->closid = rdtgrp->closid;
+ thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr,
+ cpu_to_node(plr->cpu),
+ "pseudo_lock/%u", plr->cpu);
+ if (IS_ERR(thread)) {
+ ret = PTR_ERR(thread);
+ rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
+ goto out_cstates;
+ }
+
+ kthread_bind(thread, plr->cpu);
+ wake_up_process(thread);
+
+ ret = wait_event_interruptible(plr->lock_thread_wq,
+ plr->thread_done == 1);
+ if (ret < 0) {
+ /*
+ * If the thread does not get on the CPU for whatever
+ * reason and the process which sets up the region is
+ * interrupted then this will leave the thread in runnable
+ * state and once it gets on the CPU it will dereference
+ * the cleared, but not freed, plr struct resulting in an
+ * empty pseudo-locking loop.
+ */
+ rdt_last_cmd_puts("Locking thread interrupted\n");
+ goto out_cstates;
+ }
+
+ ret = pseudo_lock_minor_get(&new_minor);
+ if (ret < 0) {
+ rdt_last_cmd_puts("Unable to obtain a new minor number\n");
+ goto out_cstates;
+ }
+
+ /*
+ * Unlock access but do not release the reference. The
+ * pseudo-locked region will still be here on return.
+ *
+ * The mutex has to be released temporarily to avoid a potential
+ * deadlock with the mm->mmap_lock which is obtained in the
+ * device_create() and debugfs_create_dir() callpath below as well as
+ * before the mmap() callback is called.
+ */
+ mutex_unlock(&rdtgroup_mutex);
+
+ if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
+ plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
+ debugfs_resctrl);
+ if (!IS_ERR_OR_NULL(plr->debugfs_dir))
+ debugfs_create_file("pseudo_lock_measure", 0200,
+ plr->debugfs_dir, rdtgrp,
+ &pseudo_measure_fops);
+ }
+
+ dev = device_create(&pseudo_lock_class, NULL,
+ MKDEV(pseudo_lock_major, new_minor),
+ rdtgrp, "%s", rdtgrp->kn->name);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ rdt_last_cmd_printf("Failed to create character device: %d\n",
+ ret);
+ goto out_debugfs;
+ }
+
+ /* We released the mutex - check if group was removed while we did so */
+ if (rdtgrp->flags & RDT_DELETED) {
+ ret = -ENODEV;
+ goto out_device;
+ }
+
+ plr->minor = new_minor;
+
+ rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
+ closid_free(rdtgrp->closid);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
+ rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
+
+ ret = 0;
+ goto out;
+
+out_device:
+ device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
+out_debugfs:
+ debugfs_remove_recursive(plr->debugfs_dir);
+ pseudo_lock_minor_release(new_minor);
+out_cstates:
+ pseudo_lock_cstates_relax(plr);
+out_region:
+ pseudo_lock_region_clear(plr);
+out:
+ return ret;
+}
+
+/**
+ * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
+ * @rdtgrp: resource group to which the pseudo-locked region belongs
+ *
+ * The removal of a pseudo-locked region can be initiated when the resource
+ * group is removed via a "rmdir" from userspace or by the unmount of the
+ * resctrl filesystem. On removal the resource group does not go back to
+ * pseudo-locksetup mode before it is removed; instead it is removed
+ * directly. There is thus an asymmetry with creation: the
+ * &struct pseudo_lock_region is freed here even though it was not
+ * allocated in rdtgroup_pseudo_lock_create() but when the group entered
+ * locksetup mode (see pseudo_lock_init()).
+ *
+ * Return: void
+ */
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
+{
+ struct pseudo_lock_region *plr = rdtgrp->plr;
+
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
+ /*
+ * Default group cannot be a pseudo-locked region so we can
+ * free closid here.
+ */
+ closid_free(rdtgrp->closid);
+ goto free;
+ }
+
+ pseudo_lock_cstates_relax(plr);
+ debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
+ device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
+ pseudo_lock_minor_release(plr->minor);
+
+free:
+ pseudo_lock_free(rdtgrp);
+}
+
+static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = region_find_by_minor(iminor(inode));
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ filp->private_data = rdtgrp;
+ atomic_inc(&rdtgrp->waitcount);
+ /* Perform a non-seekable open - llseek is not supported */
+ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
+{
+ struct rdtgroup *rdtgrp;
+
+ mutex_lock(&rdtgroup_mutex);
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+ filp->private_data = NULL;
+ atomic_dec(&rdtgrp->waitcount);
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
+static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
+{
+ /* Not supported */
+ return -EINVAL;
+}
+
+static const struct vm_operations_struct pseudo_mmap_ops = {
+ .mremap = pseudo_lock_dev_mremap,
+};
+
+static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct pseudo_lock_region *plr;
+ struct rdtgroup *rdtgrp;
+ unsigned long physical;
+ unsigned long psize;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgrp = filp->private_data;
+ WARN_ON(!rdtgrp);
+ if (!rdtgrp) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ plr = rdtgrp->plr;
+
+ if (!plr->d) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
+ /*
+ * Task is required to run with affinity to the cpus associated
+ * with the pseudo-locked region. If this is not the case the task
+ * may be scheduled elsewhere and invalidate entries in the
+ * pseudo-locked region.
+ */
+ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ physical = __pa(plr->kmem) >> PAGE_SHIFT;
+ psize = plr->size - off;
+
+ if (off > plr->size) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ /*
+ * Ensure changes are carried directly to the memory being mapped,
+ * do not allow copy-on-write mapping.
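+ * Userspace must therefore map the device with MAP_SHARED, e.g.:
+ *
+ *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);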
+ */
+ if (!(vma->vm_flags & VM_SHARED)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EINVAL;
+ }
+
+ if (vsize > psize) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENOSPC;
+ }
+
+ memset(plr->kmem + off, 0, vsize);
+
+ if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
+ vsize, vma->vm_page_prot)) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -EAGAIN;
+ }
+ vma->vm_ops = &pseudo_mmap_ops;
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
+static const struct file_operations pseudo_lock_dev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = NULL,
+ .write = NULL,
+ .open = pseudo_lock_dev_open,
+ .release = pseudo_lock_dev_release,
+ .mmap = pseudo_lock_dev_mmap,
+};
+
+int rdt_pseudo_lock_init(void)
+{
+ int ret;
+
+ ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
+ if (ret < 0)
+ return ret;
+
+ pseudo_lock_major = ret;
+
+ ret = class_register(&pseudo_lock_class);
+ if (ret) {
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ return ret;
+ }
+
+ return 0;
+}
+
+void rdt_pseudo_lock_release(void)
+{
+ class_unregister(&pseudo_lock_class);
+ unregister_chrdev(pseudo_lock_major, "pseudo_lock");
+ pseudo_lock_major = 0;
+}
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index e69de29bb2d1..936fc6e47386 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -0,0 +1,4013 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * User interface for Resource Allocation in Resource Director Technology (RDT)
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Fenghua Yu
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include "internal.h"
+
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
+static struct kernfs_root *rdt_root;
+struct rdtgroup rdtgroup_default;
+LIST_HEAD(rdt_all_groups);
+
+/* list of entries for the schemata file */
+LIST_HEAD(resctrl_schema_all);
+
+/* The filesystem can only be mounted once. */
+bool resctrl_mounted;
+
+/* Kernel fs node for "info" directory under root */
+static struct kernfs_node *kn_info;
+
+/* Kernel fs node for "mon_groups" directory under root */
+static struct kernfs_node *kn_mongrp;
+
+/* Kernel fs node for "mon_data" directory under root */
+static struct kernfs_node *kn_mondata;
+
+/*
+ * Used to store the max resource name width and max resource data width
+ * to display the schemata in a tabular format
+ */
+int max_name_width, max_data_width;
+
+static struct seq_buf last_cmd_status;
+static char last_cmd_status_buf[512];
+
+static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
+static void rdtgroup_destroy_root(void);
+
+struct dentry *debugfs_resctrl;
+
+static bool resctrl_debug;
+
+void rdt_last_cmd_clear(void)
+{
+ lockdep_assert_held(&rdtgroup_mutex);
+ seq_buf_clear(&last_cmd_status);
+}
+
+void rdt_last_cmd_puts(const char *s)
+{
+ lockdep_assert_held(&rdtgroup_mutex);
+ seq_buf_puts(&last_cmd_status, s);
+}
+
+void rdt_last_cmd_printf(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ lockdep_assert_held(&rdtgroup_mutex);
+ seq_buf_vprintf(&last_cmd_status, fmt, ap);
+ va_end(ap);
+}
+
+void rdt_staged_configs_clear(void)
+{
+ enum resctrl_res_level i;
+ struct rdt_resource *r;
+ struct rdt_domain *dom;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+ r = resctrl_arch_get_resource(i);
+ if (!r->alloc_capable)
+ continue;
+
+ list_for_each_entry(dom, &r->domains, list)
+ memset(dom->staged_config, 0, sizeof(dom->staged_config));
+ }
+}
+
+static bool resctrl_is_mbm_enabled(void)
+{
+ return (resctrl_arch_is_mbm_total_enabled() ||
+ resctrl_arch_is_mbm_local_enabled());
+}
+
+static bool resctrl_is_mbm_event(int e)
+{
+ return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+ e <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
+/*
+ * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
+ * we can keep a bitmap of free CLOSIDs in a single integer.
+ *
+ * Using a global CLOSID across all resources has some advantages and
+ * some drawbacks:
+ * + We can simply set current's closid to assign a task to a resource
+ * group.
+ * + Context switch code can avoid extra memory references deciding which
+ * CLOSID to load into the PQR_ASSOC MSR
+ * - We give up some options in configuring resource groups across multi-socket
+ * systems.
+ * - Our choices on how to configure each resource become progressively more
+ * limited as the number of resources grows.
+ */
+static unsigned long closid_free_map;
+static int closid_free_map_len;
+
+int closids_supported(void)
+{
+ return closid_free_map_len;
+}
+
+static void closid_init(void)
+{
+ struct resctrl_schema *s;
+ u32 rdt_min_closid = 32;
+
+ /* Compute rdt_min_closid across all resources */
+ list_for_each_entry(s, &resctrl_schema_all, list)
+ rdt_min_closid = min(rdt_min_closid, s->num_closid);
+
+ closid_free_map = BIT_MASK(rdt_min_closid) - 1;
+
+ /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
+ __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map);
+ closid_free_map_len = rdt_min_closid;
+}
+
+static int closid_alloc(void)
+{
+ int cleanest_closid;
+ u32 closid;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
+ resctrl_arch_is_llc_occupancy_enabled()) {
+ cleanest_closid = resctrl_find_cleanest_closid();
+ if (cleanest_closid < 0)
+ return cleanest_closid;
+ closid = cleanest_closid;
+ } else {
+ closid = ffs(closid_free_map);
+ if (closid == 0)
+ return -ENOSPC;
+ closid--;
+ }
+ __clear_bit(closid, &closid_free_map);
+
+ return closid;
+}
+
+void closid_free(int closid)
+{
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ __set_bit(closid, &closid_free_map);
+}
+
+/**
+ * closid_allocated - test if provided closid is in use
+ * @closid: closid to be tested
+ *
+ * Return: true if @closid is currently associated with a resource group,
+ * false if @closid is free
+ */
+bool closid_allocated(unsigned int closid)
+{
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ return !test_bit(closid, &closid_free_map);
+}
+
+/**
+ * rdtgroup_mode_by_closid - Return mode of resource group with closid
+ * @closid: closid of the resource group
+ *
+ * Each resource group is associated with a @closid. Here the mode
+ * of a resource group can be queried by searching for it using its closid.
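+ * If no resource group is using @closid, RDT_NUM_MODES is returned.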
+ *
+ * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
+ */
+enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
+{
+ struct rdtgroup *rdtgrp;
+
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (rdtgrp->closid == closid)
+ return rdtgrp->mode;
+ }
+
+ return RDT_NUM_MODES;
+}
+
+static const char * const rdt_mode_str[] = {
+ [RDT_MODE_SHAREABLE] = "shareable",
+ [RDT_MODE_EXCLUSIVE] = "exclusive",
+ [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
+ [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
+};
+
+/**
+ * rdtgroup_mode_str - Return the string representation of mode
+ * @mode: the resource group mode as &enum rdtgrp_mode
+ *
+ * Return: string representation of valid mode, "unknown" otherwise
+ */
+static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
+{
+ if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
+ return "unknown";
+
+ return rdt_mode_str[mode];
+}
+
+/* set uid and gid of rdtgroup dirs and files to that of the creator */
+static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
+{
+ struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
+ .ia_uid = current_fsuid(),
+ .ia_gid = current_fsgid(), };
+
+ if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
+ gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
+ return 0;
+
+ return kernfs_setattr(kn, &iattr);
+}
+
+static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
+{
+ struct kernfs_node *kn;
+ int ret;
+
+ kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ 0, rft->kf_ops, rft, NULL, NULL);
+ if (IS_ERR(kn))
+ return PTR_ERR(kn);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret) {
+ kernfs_remove(kn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+ struct kernfs_open_file *of = m->private;
+ struct rftype *rft = of->kn->priv;
+
+ if (rft->seq_show)
+ return rft->seq_show(of, m, arg);
+ return 0;
+}
+
+static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct rftype *rft = of->kn->priv;
+
+ if (rft->write)
+ return rft->write(of, buf, nbytes, off);
+
+ return -EINVAL;
+}
+
+static const struct kernfs_ops rdtgroup_kf_single_ops = {
+ .atomic_write_len = PAGE_SIZE,
+ .write = rdtgroup_file_write,
+ .seq_show = rdtgroup_seqfile_show,
+};
+
+static const struct kernfs_ops kf_mondata_ops = {
+ .atomic_write_len = PAGE_SIZE,
+ .seq_show = rdtgroup_mondata_show,
+};
+
+static bool is_cpu_list(struct kernfs_open_file *of)
+{
+ struct rftype *rft = of->kn->priv;
+
+ return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
+}
+
+static int rdtgroup_cpus_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ struct cpumask *mask;
+ int ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+ if (rdtgrp) {
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ if (!rdtgrp->plr->d) {
+ rdt_last_cmd_clear();
+ rdt_last_cmd_puts("Cache domain offline\n");
+ ret = -ENODEV;
+ } else {
+ mask = &rdtgrp->plr->d->cpu_mask;
+ seq_printf(s, is_cpu_list(of) ?
+ "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(mask));
+ }
+ } else {
+ seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(&rdtgrp->cpu_mask));
+ }
+ } else {
+ ret = -ENOENT;
+ }
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret;
+}
+
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids/rmids must have been set up before calling this function.
+ * @r may be NULL.
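+ * When @r is NULL, resctrl_arch_sync_cpu_defaults() is invoked with a
+ * NULL argument on each CPU: no new defaults are supplied and the
+ * architecture code is expected to resynchronise from the task that is
+ * current there.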
+ */ +static void +update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) +{ + struct resctrl_cpu_sync defaults; + struct resctrl_cpu_sync *defaults_p = NULL; + + if (r) { + defaults.closid = r->closid; + defaults.rmid = r->mon.rmid; + defaults_p = &defaults; + } + + on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, + 1); +} + +static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask) +{ + struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; + struct list_head *head; + + /* Check whether cpus belong to parent ctrl group */ + cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + return -EINVAL; + } + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Give any dropped cpus to parent rdtgroup */ + cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); + update_closid_rmid(tmpmask, prgrp); + } + + /* + * If we added cpus, remove them from previous group that owned them + * and update per-cpu rmid + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + if (crgrp == rdtgrp) + continue; + cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, + tmpmask); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + return 0; +} + +static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) +{ + struct rdtgroup *crgrp; + + cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); + /* update the child mon group masks as well*/ + list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) + cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); +} + +static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask, cpumask_var_t tmpmask1) +{ + struct rdtgroup *r, *crgrp; + struct list_head *head; + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Can't drop from default group */ + if (rdtgrp == &rdtgroup_default) { + rdt_last_cmd_puts("Can't drop CPUs from default group\n"); + return -EINVAL; + } + + /* Give any dropped cpus to rdtgroup_default */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, tmpmask); + update_closid_rmid(tmpmask, &rdtgroup_default); + } + + /* + * If we added cpus, remove them from previous group and + * the prev group's child groups that owned them + * and update per-cpu closid/rmid. + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { + if (r == rdtgrp) + continue; + cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); + if (!cpumask_empty(tmpmask1)) + cpumask_rdtgrp_clear(r, tmpmask1); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + /* + * Clear child mon group masks since there is a new parent mask + * now and update the rmid for the cpus the child lost. 
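+ * After this loop every child monitor group is left with an empty CPU
+ * mask, and the CPUs it covered monitor with the parent's RMID until
+ * they are explicitly reassigned.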
+ */ + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); + update_closid_rmid(tmpmask, rdtgrp); + cpumask_clear(&crgrp->cpu_mask); + } + + return 0; +} + +static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + cpumask_var_t tmpmask, newmask, tmpmask1; + struct rdtgroup *rdtgrp; + int ret; + + if (!buf) + return -EINVAL; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + return -ENOMEM; + } + if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + return -ENOMEM; + } + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto unlock; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + if (is_cpu_list(of)) + ret = cpulist_parse(buf, newmask); + else + ret = cpumask_parse(buf, newmask); + + if (ret) { + rdt_last_cmd_puts("Bad CPU list/mask\n"); + goto unlock; + } + + /* check that user didn't specify any offline cpus */ + cpumask_andnot(tmpmask, newmask, cpu_online_mask); + if (!cpumask_empty(tmpmask)) { + ret = -EINVAL; + rdt_last_cmd_puts("Can only assign online CPUs\n"); + goto unlock; + } + + if (rdtgrp->type == RDTCTRL_GROUP) + ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); + else if (rdtgrp->type == RDTMON_GROUP) + ret = cpus_mon_write(rdtgrp, newmask, tmpmask); + else + ret = -EINVAL; + +unlock: + rdtgroup_kn_unlock(of->kn); + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + free_cpumask_var(tmpmask1); + + return ret ?: nbytes; +} + +/** + * rdtgroup_remove - the helper to remove resource group safely + * @rdtgrp: resource group to remove + * + * On resource group creation via a mkdir, an extra kernfs_node reference is + * taken to ensure that the rdtgroup structure remains accessible for the + * rdtgroup_kn_unlock() calls where it is removed. + * + * Drop the extra reference here, then free the rdtgroup structure. + * + * Return: void + */ +static void rdtgroup_remove(struct rdtgroup *rdtgrp) +{ + kernfs_put(rdtgrp->kn); + kfree(rdtgrp); +} + +static void _update_task_closid_rmid(void *task) +{ + /* + * If the task is still current on this CPU, update PQR_ASSOC MSR. + * Otherwise, the MSR is updated when the task is scheduled in. + */ + if (task == current) + resctrl_arch_sched_in(task); +} + +static void update_task_closid_rmid(struct task_struct *t) +{ + if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) + smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); + else + _update_task_closid_rmid(t); +} + +static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) +{ + u32 closid, rmid = rdtgrp->mon.rmid; + + if (rdtgrp->type == RDTCTRL_GROUP) + closid = rdtgrp->closid; + else if (rdtgrp->type == RDTMON_GROUP) + closid = rdtgrp->mon.parent->closid; + else + return false; + + return resctrl_arch_match_closid(tsk, closid) && + resctrl_arch_match_rmid(tsk, closid, rmid); +} + +static int __rdtgroup_move_task(struct task_struct *tsk, + struct rdtgroup *rdtgrp) +{ + /* If the task is already in rdtgrp, no need to move the task. */ + if (task_in_rdtgroup(tsk, rdtgrp)) + return 0; + + /* + * Set the task's closid/rmid before the PQR_ASSOC MSR can be + * updated by them. 
+ * + * For ctrl_mon groups, move both closid and rmid. + * For monitor groups, can move the tasks only from + * their parent CTRL group. + */ + if (rdtgrp->type == RDTMON_GROUP && + !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { + rdt_last_cmd_puts("Can't move task to different control group\n"); + return -EINVAL; + } + + if (rdtgrp->type == RDTMON_GROUP) + resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, + rdtgrp->mon.rmid); + else + resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, + rdtgrp->mon.rmid); + + /* + * Ensure the task's closid and rmid are written before determining if + * the task is current that will decide if it will be interrupted. + * This pairs with the full barrier between the rq->curr update and + * resctrl_arch_sched_in() during context switch. + */ + smp_mb(); + + /* + * By now, the task's closid and rmid are set. If the task is current + * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource + * group go into effect. If the task is not current, the MSR will be + * updated when the task is scheduled in. + */ + update_task_closid_rmid(tsk); + + return 0; +} + +static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && + resctrl_arch_match_closid(t, r->closid)); +} + +static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && + resctrl_arch_match_rmid(t, r->mon.parent->closid, + r->mon.rmid)); +} + +/** + * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group + * @r: Resource group + * + * Return: 1 if tasks have been assigned to @r, 0 otherwise + */ +int rdtgroup_tasks_assigned(struct rdtgroup *r) +{ + struct task_struct *p, *t; + int ret = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + ret = 1; + break; + } + } + rcu_read_unlock(); + + return ret; +} + +static int rdtgroup_task_write_permission(struct task_struct *task, + struct kernfs_open_file *of) +{ + const struct cred *tcred = get_task_cred(task); + const struct cred *cred = current_cred(); + int ret = 0; + + /* + * Even if we're attaching all tasks in the thread group, we only + * need to check permissions on one of them. 
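+ * Concretely, a non-root caller may only move tasks whose real or saved
+ * uid matches the caller's effective uid, as checked below.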
+ */ + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && + !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) { + rdt_last_cmd_printf("No permission to move task %d\n", task->pid); + ret = -EPERM; + } + + put_cred(tcred); + return ret; +} + +static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, + struct kernfs_open_file *of) +{ + struct task_struct *tsk; + int ret; + + rcu_read_lock(); + if (pid) { + tsk = find_task_by_vpid(pid); + if (!tsk) { + rcu_read_unlock(); + rdt_last_cmd_printf("No task %d\n", pid); + return -ESRCH; + } + } else { + tsk = current; + } + + get_task_struct(tsk); + rcu_read_unlock(); + + ret = rdtgroup_task_write_permission(tsk, of); + if (!ret) + ret = __rdtgroup_move_task(tsk, rdtgrp); + + put_task_struct(tsk); + return ret; +} + +static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + char *pid_str; + int ret = 0; + pid_t pid; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + while (buf && buf[0] != '\0' && buf[0] != '\n') { + pid_str = strim(strsep(&buf, ",")); + + if (kstrtoint(pid_str, 0, &pid)) { + rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); + ret = -EINVAL; + break; + } + + if (pid < 0) { + rdt_last_cmd_printf("Invalid pid %d\n", pid); + ret = -EINVAL; + break; + } + + ret = rdtgroup_move_task(pid, rdtgrp, of); + if (ret) { + rdt_last_cmd_printf("Error while processing task %d\n", pid); + break; + } + } + +unlock: + rdtgroup_kn_unlock(of->kn); + + return ret ?: nbytes; +} + +static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) +{ + struct task_struct *p, *t; + pid_t pid; + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + pid = task_pid_vnr(t); + if (pid) + seq_printf(s, "%d\n", pid); + } + } + rcu_read_unlock(); +} + +static int rdtgroup_tasks_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + show_rdt_tasks(rdtgrp, s); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_closid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->closid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_rmid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->mon.rmid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +#ifdef CONFIG_PROC_CPU_RESCTRL + +/* + * A task can only be part of one resctrl control group and of one monitor + * group which is associated to that control group. + * + * 1) res: + * mon: + * + * resctrl is not available. + * + * 2) res:/ + * mon: + * + * Task is part of the root resctrl control group, and it is not associated + * to any monitor group. + * + * 3) res:/ + * mon:mon0 + * + * Task is part of the root resctrl control group and monitor group mon0. 
+ * + * 4) res:group0 + * mon: + * + * Task is part of resctrl control group group0, and it is not associated + * to any monitor group. + * + * 5) res:group0 + * mon:mon1 + * + * Task is part of resctrl control group group0 and monitor group mon1. + */ +int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk) +{ + struct rdtgroup *rdtg; + int ret = 0; + + mutex_lock(&rdtgroup_mutex); + + /* Return empty if resctrl has not been mounted. */ + if (!resctrl_mounted) { + seq_puts(s, "res:\nmon:\n"); + goto unlock; + } + + list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { + struct rdtgroup *crg; + + /* + * Task information is only relevant for shareable + * and exclusive groups. + */ + if (rdtg->mode != RDT_MODE_SHAREABLE && + rdtg->mode != RDT_MODE_EXCLUSIVE) + continue; + + if (!resctrl_arch_match_closid(tsk, rdtg->closid)) + continue; + + seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", + rdtg->kn->name); + seq_puts(s, "mon:"); + list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, + mon.crdtgrp_list) { + if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, + crg->mon.rmid)) + continue; + seq_printf(s, "%s", crg->kn->name); + break; + } + seq_putc(s, '\n'); + goto unlock; + } + /* + * The above search should succeed. Otherwise return + * with an error. + */ + ret = -ENOENT; +unlock: + mutex_unlock(&rdtgroup_mutex); + + return ret; +} +#endif + +static int rdt_last_cmd_status_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + int len; + + mutex_lock(&rdtgroup_mutex); + len = seq_buf_used(&last_cmd_status); + if (len) + seq_printf(seq, "%.*s", len, last_cmd_status_buf); + else + seq_puts(seq, "ok\n"); + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static int rdt_num_closids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + + seq_printf(seq, "%u\n", s->num_closid); + return 0; +} + +static int rdt_default_ctrl_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->default_ctrl); + return 0; +} + +static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->cache.min_cbm_bits); + return 0; +} + +static int rdt_shareable_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->cache.shareable_bits); + return 0; +} + +/* + * rdt_bit_usage_show - Display current usage of resources + * + * A domain is a shared resource that can now be allocated differently. Here + * we display the current regions of the domain as an annotated bitmask. 
+ * For each domain of this resource its allocation bitmask + * is annotated as below to indicate the current usage of the corresponding bit: + * 0 - currently unused + * X - currently available for sharing and used by software and hardware + * H - currently used by hardware only but available for software use + * S - currently used and shareable by software only + * E - currently used exclusively by one resource group + * P - currently pseudo-locked by one resource group + */ +static int rdt_bit_usage_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + /* + * Use unsigned long even though only 32 bits are used to ensure + * test_bit() is used safely. + */ + unsigned long sw_shareable = 0, hw_shareable = 0; + unsigned long exclusive = 0, pseudo_locked = 0; + struct rdt_resource *r = s->res; + struct rdt_domain *dom; + int i, hwb, swb, excl, psl; + enum rdtgrp_mode mode; + bool sep = false; + u32 ctrl_val; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + hw_shareable = r->cache.shareable_bits; + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_putc(seq, ';'); + sw_shareable = 0; + exclusive = 0; + seq_printf(seq, "%d=", dom->id); + for (i = 0; i < closids_supported(); i++) { + if (!closid_allocated(i)) + continue; + ctrl_val = resctrl_arch_get_config(r, dom, i, + s->conf_type); + mode = rdtgroup_mode_by_closid(i); + switch (mode) { + case RDT_MODE_SHAREABLE: + sw_shareable |= ctrl_val; + break; + case RDT_MODE_EXCLUSIVE: + exclusive |= ctrl_val; + break; + case RDT_MODE_PSEUDO_LOCKSETUP: + /* + * RDT_MODE_PSEUDO_LOCKSETUP is possible + * here but not included since the CBM + * associated with this CLOSID in this mode + * is not initialized and no task or cpu can be + * assigned this CLOSID. + */ + break; + case RDT_MODE_PSEUDO_LOCKED: + case RDT_NUM_MODES: + WARN(1, + "invalid mode for closid %d\n", i); + break; + } + } + for (i = r->cache.cbm_len - 1; i >= 0; i--) { + pseudo_locked = dom->plr ? 
dom->plr->cbm : 0; + hwb = test_bit(i, &hw_shareable); + swb = test_bit(i, &sw_shareable); + excl = test_bit(i, &exclusive); + psl = test_bit(i, &pseudo_locked); + if (hwb && swb) + seq_putc(seq, 'X'); + else if (hwb && !swb) + seq_putc(seq, 'H'); + else if (!hwb && swb) + seq_putc(seq, 'S'); + else if (excl) + seq_putc(seq, 'E'); + else if (psl) + seq_putc(seq, 'P'); + else /* Unused bits remain */ + seq_putc(seq, '0'); + } + sep = true; + } + seq_putc(seq, '\n'); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return 0; +} + +static int rdt_min_bw_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.min_bw); + return 0; +} + +static int rdt_num_rmids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%d\n", r->num_rmid); + + return 0; +} + +static int rdt_mon_features_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + struct mon_evt *mevt; + + list_for_each_entry(mevt, &r->evt_list, list) { + seq_printf(seq, "%s\n", mevt->name); + if (mevt->configurable) + seq_printf(seq, "%s_config\n", mevt->name); + } + + return 0; +} + +static int rdt_bw_gran_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.bw_gran); + return 0; +} + +static int rdt_delay_linear_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.delay_linear); + return 0; +} + +static int max_threshold_occ_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); + + return 0; +} + +static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) + seq_puts(seq, "per-thread\n"); + else + seq_puts(seq, "max\n"); + + return 0; +} + +static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + unsigned int bytes; + int ret; + + ret = kstrtouint(buf, 0, &bytes); + if (ret) + return ret; + + if (bytes > resctrl_rmid_realloc_limit) + return -EINVAL; + + resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); + + return nbytes; +} + +/* + * rdtgroup_mode_show - Display mode of this resource group + */ +static int rdtgroup_mode_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); + + rdtgroup_kn_unlock(of->kn); + return 0; +} + +static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) +{ + switch (my_type) { + case CDP_CODE: + return CDP_DATA; + case CDP_DATA: + return CDP_CODE; + default: + case CDP_NONE: + return CDP_NONE; + } +} + +static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource 
*r = s->res;
+
+	seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
+
+	return 0;
+}
+
+/**
+ * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @type: CDP type of @r.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Checks if provided @cbm intended to be used for @closid on domain
+ * @d overlaps with any other closids or other hardware usage associated
+ * with this domain. If @exclusive is true then only overlaps with
+ * resource groups in exclusive mode will be considered. If @exclusive
+ * is false then overlaps with any resource group or hardware entities
+ * will be considered.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: false if CBM does not overlap, true if it does.
+ */
+static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+				    unsigned long cbm, int closid,
+				    enum resctrl_conf_type type, bool exclusive)
+{
+	enum rdtgrp_mode mode;
+	unsigned long ctrl_b;
+	int i;
+
+	/* Check for any overlap with regions used by hardware directly */
+	if (!exclusive) {
+		ctrl_b = r->cache.shareable_bits;
+		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
+			return true;
+	}
+
+	/* Check for overlap with other resource groups */
+	for (i = 0; i < closids_supported(); i++) {
+		ctrl_b = resctrl_arch_get_config(r, d, i, type);
+		mode = rdtgroup_mode_by_closid(i);
+		if (closid_allocated(i) && i != closid &&
+		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
+			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
+				if (exclusive) {
+					if (mode == RDT_MODE_EXCLUSIVE)
+						return true;
+					continue;
+				}
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/**
+ * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
+ * @s: Schema for the resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Resources that can be allocated using a CBM can use the CBM to control
+ * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
+ * for overlap. Overlap test is not limited to the specific resource for
+ * which the CBM is intended though - when dealing with CDP resources that
+ * share the underlying hardware the overlap check should be performed on
+ * the CDP resource sharing the hardware also.
+ *
+ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
+ * overlap test.
+ *
+ * Return: true if CBM overlap detected, false if there is no overlap
+ */
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
+			   unsigned long cbm, int closid, bool exclusive)
+{
+	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+	struct rdt_resource *r = s->res;
+
+	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
+				    exclusive))
+		return true;
+
+	if (!resctrl_arch_get_cdp_enabled(r->rid))
+		return false;
+	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
+}
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ * @rdtgrp: Resource group identified through its closid.
+ * + * An exclusive resource group implies that there should be no sharing of + * its allocated resources. At the time this group is considered to be + * exclusive this test can determine if its current schemata supports this + * setting by testing for overlap with all other resource groups. + * + * Return: true if resource group can be exclusive, false if there is overlap + * with allocations of other resource groups and thus this resource group + * cannot be exclusive. + */ +static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) +{ + int closid = rdtgrp->closid; + struct resctrl_schema *s; + struct rdt_resource *r; + bool has_cache = false; + struct rdt_domain *d; + u32 ctrl; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) + continue; + has_cache = true; + list_for_each_entry(d, &r->domains, list) { + ctrl = resctrl_arch_get_config(r, d, closid, + s->conf_type); + if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { + rdt_last_cmd_puts("Schemata overlaps\n"); + return false; + } + } + } + + if (!has_cache) { + rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); + return false; + } + + return true; +} + +/* + * rdtgroup_mode_write - Modify the resource group's mode + */ +static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + enum rdtgrp_mode mode; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + rdt_last_cmd_clear(); + + mode = rdtgrp->mode; + + if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || + (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || + (!strcmp(buf, "pseudo-locksetup") && + mode == RDT_MODE_PSEUDO_LOCKSETUP) || + (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) + goto out; + + if (mode == RDT_MODE_PSEUDO_LOCKED) { + rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); + ret = -EINVAL; + goto out; + } + + if (!strcmp(buf, "shareable")) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = RDT_MODE_SHAREABLE; + } else if (!strcmp(buf, "exclusive")) { + if (!rdtgroup_mode_test_exclusive(rdtgrp)) { + ret = -EINVAL; + goto out; + } + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = RDT_MODE_EXCLUSIVE; + } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && + !strcmp(buf, "pseudo-locksetup")) { + ret = rdtgroup_locksetup_enter(rdtgrp); + if (ret) + goto out; + rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; + } else { + rdt_last_cmd_puts("Unknown or unsupported mode\n"); + ret = -EINVAL; + } + +out: + rdtgroup_kn_unlock(of->kn); + return ret ?: nbytes; +} + +/** + * rdtgroup_cbm_to_size - Translate CBM to size in bytes + * @r: RDT resource to which @d belongs. + * @d: RDT domain instance. + * @cbm: bitmask for which the size should be computed. + * + * The bitmask provided associated with the RDT domain instance @d will be + * translated into how many bytes it represents. 
The size in bytes is + * computed by first dividing the total cache size by the CBM length to + * determine how many bytes each bit in the bitmask represents. The result + * is multiplied with the number of bits set in the bitmask. + * + * @cbm is unsigned long, even if only 32 bits are used to make the + * bitmap functions work correctly. + */ +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, + struct rdt_domain *d, unsigned long cbm) +{ + struct cpu_cacheinfo *ci; + unsigned int size = 0; + int num_b, i; + + num_b = bitmap_weight(&cbm, r->cache.cbm_len); + ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == r->cache_level) { + size = ci->info_list[i].size / r->cache.cbm_len * num_b; + break; + } + } + + return size; +} + +/* + * rdtgroup_size_show - Display size in bytes of allocated regions + * + * The "size" file mirrors the layout of the "schemata" file, printing the + * size in bytes of each region instead of the capacity bitmask. + */ +static int rdtgroup_size_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct resctrl_schema *schema; + enum resctrl_conf_type type; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + struct rdt_domain *d; + unsigned int size; + int ret = 0; + u32 closid; + bool sep; + u32 ctrl; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%*s:", max_name_width, + rdtgrp->plr->s->name); + size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, + rdtgrp->plr->d, + rdtgrp->plr->cbm); + seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); + } + goto out; + } + + closid = rdtgrp->closid; + + list_for_each_entry(schema, &resctrl_schema_all, list) { + r = schema->res; + type = schema->conf_type; + sep = false; + seq_printf(s, "%*s:", max_name_width, schema->name); + list_for_each_entry(d, &r->domains, list) { + if (sep) + seq_putc(s, ';'); + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + size = 0; + } else { + if (is_mba_sc(r)) + ctrl = d->mbps_val[closid]; + else + ctrl = resctrl_arch_get_config(r, d, + closid, + type); + if (r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) + size = ctrl; + else + size = rdtgroup_cbm_to_size(r, d, ctrl); + } + seq_printf(s, "%d=%u", d->id, size); + sep = true; + } + seq_putc(s, '\n'); + } + +out: + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static void mondata_config_read(struct resctrl_mon_config_info *mon_info) +{ + smp_call_function_any(&mon_info->d->cpu_mask, + resctrl_arch_mon_event_config_read, mon_info, 1); +} + +static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) +{ + struct resctrl_mon_config_info mon_info = {0}; + struct rdt_domain *dom; + bool sep = false; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_puts(s, ";"); + + memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info)); + mon_info.r = r; + mon_info.d = dom; + mon_info.evtid = evtid; + mondata_config_read(&mon_info); + + seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); + sep = true; + } + seq_puts(s, "\n"); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return 0; +} + +static int mbm_total_bytes_config_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) 
+{ + struct rdt_resource *r = of->kn->parent->priv; + + mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); + + return 0; +} + +static int mbm_local_bytes_config_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); + + return 0; +} + +static int mbm_config_write_domain(struct rdt_resource *r, + struct rdt_domain *d, u32 evtid, u32 val) +{ + struct resctrl_mon_config_info mon_info = {0}; + + /* + * Read the current config value first. If both are the same then + * no need to write it again. + */ + mon_info.r = r; + mon_info.d = d; + mon_info.evtid = evtid; + mondata_config_read(&mon_info); + if (mon_info.mon_config == val) + return 0; + + mon_info.mon_config = val; + + /* + * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the + * domain. The MSRs offset from MSR MSR_IA32_EVT_CFG_BASE + * are scoped at the domain level. Writing any of these MSRs + * on one CPU is observed by all the CPUs in the domain. + */ + smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, + &mon_info, 1); + if (mon_info.err) { + rdt_last_cmd_puts("Invalid event configuration\n"); + return mon_info.err; + } + + /* + * When an Event Configuration is changed, the bandwidth counters + * for all RMIDs and Events will be cleared by the hardware. The + * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for + * every RMID on the next read to any event for every RMID. + * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62) + * cleared while it is tracked by the hardware. Clear the + * mbm_local and mbm_total counts for all the RMIDs. + */ + resctrl_arch_reset_rmid_all(r, d); + + return 0; +} + +static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) +{ + char *dom_str = NULL, *id_str; + unsigned long dom_id, val; + struct rdt_domain *d; + int err; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + +next: + if (!tok || tok[0] == '\0') + return 0; + + /* Start processing the strings for each domain */ + dom_str = strim(strsep(&tok, ";")); + id_str = strsep(&dom_str, "="); + + if (!id_str || kstrtoul(id_str, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); + return -EINVAL; + } + + if (!dom_str || kstrtoul(dom_str, 16, &val)) { + rdt_last_cmd_puts("Non-numeric event configuration value\n"); + return -EINVAL; + } + + /* Value from user cannot be more than the supported set of events */ + if ((val & r->mbm_cfg_mask) != val) { + rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", + r->mbm_cfg_mask); + return -EINVAL; + } + + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + err = mbm_config_write_domain(r, d, evtid, val); + if (err) + return err; + goto next; + } + } + + return -EINVAL; +} + +static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, + loff_t off) +{ + struct rdt_resource *r = of->kn->parent->priv; + int ret; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + buf[nbytes - 1] = '\0'; + + ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return ret ?: nbytes; +} + +static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, 
+ loff_t off) +{ + struct rdt_resource *r = of->kn->parent->priv; + int ret; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + buf[nbytes - 1] = '\0'; + + ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return ret ?: nbytes; +} + +/* rdtgroup information files for one cache resource. */ +static struct rftype res_common_files[] = { + { + .name = "last_cmd_status", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_last_cmd_status_show, + .fflags = RFTYPE_TOP_INFO, + }, + { + .name = "num_closids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_closids_show, + .fflags = RFTYPE_CTRL_INFO, + }, + { + .name = "mon_features", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_mon_features_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "num_rmids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_rmids_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "cbm_mask", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_default_ctrl_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_cbm_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_cbm_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "shareable_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_shareable_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "bit_usage", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_bit_usage_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_bandwidth", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_bw_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "bandwidth_gran", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_bw_gran_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "delay_linear", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_delay_linear_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + /* + * Platform specific which (if any) capabilities are provided by + * thread_throttle_mode. Defer "fflags" initialization to platform + * discovery. 
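+ * (rdt_thread_throttle_mode_show() above reports "max" or "per-thread"
+ * once the file is exposed.)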
+ */ + { + .name = "thread_throttle_mode", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_thread_throttle_mode_show, + }, + { + .name = "max_threshold_occupancy", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = max_threshold_occ_write, + .seq_show = max_threshold_occ_show, + .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "mbm_total_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_total_bytes_config_show, + .write = mbm_total_bytes_config_write, + }, + { + .name = "mbm_local_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_local_bytes_config_show, + .write = mbm_local_bytes_config_write, + }, + { + .name = "cpus", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "cpus_list", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .flags = RFTYPE_FLAGS_CPUS_LIST, + .fflags = RFTYPE_BASE, + }, + { + .name = "tasks", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_tasks_write, + .seq_show = rdtgroup_tasks_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "mon_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_rmid_show, + .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, + }, + { + .name = "schemata", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_schemata_write, + .seq_show = rdtgroup_schemata_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "mode", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_mode_write, + .seq_show = rdtgroup_mode_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "size", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_size_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "sparse_masks", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_has_sparse_bitmasks_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "ctrl_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_closid_show, + .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, + }, + +}; + +static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) +{ + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + lockdep_assert_held(&rdtgroup_mutex); + + if (resctrl_debug) + fflags |= RFTYPE_DEBUG; + + for (rft = rfts; rft < rfts + len; rft++) { + if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { + ret = rdtgroup_add_file(kn, rft); + if (ret) + goto error; + } + } + + return 0; +error: + pr_warn("Failed to add %s, err=%d\n", rft->name, ret); + while (--rft >= rfts) { + if ((fflags & rft->fflags) == rft->fflags) + kernfs_remove_by_name(kn, rft->name); + } + return ret; +} + +static struct rftype *rdtgroup_get_rftype_by_name(const char *name) +{ + struct rftype *rfts, *rft; + int len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + return rft; + } + + return NULL; +} + +static void thread_throttle_mode_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + struct rftype *rft; + + if (!r->alloc_capable || + r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) + return; + + rft = 
rdtgroup_get_rftype_by_name("thread_throttle_mode"); + if (!rft) + return; + + rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; +} + +void mbm_config_rftype_init(const char *config) +{ + struct rftype *rft; + + rft = rdtgroup_get_rftype_by_name(config); + if (rft) + rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; +} + +/** + * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * + * The permissions of named resctrl file, directory, or link are modified + * to not allow read, write, or execute by any user. + * + * WARNING: This function is intended to communicate to the user that the + * resctrl file has been locked down - that it is not relevant to the + * particular state the system finds itself in. It should not be relied + * on to protect from user access because after the file's permissions + * are restricted the user can still change the permissions using chmod + * from the command line. + * + * Return: 0 on success, <0 on failure. + */ +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn; + int ret = 0; + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + iattr.ia_mode = S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode = S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode = S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +/** + * rdtgroup_kn_mode_restore - Restore user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * @mask: Mask of permissions that should be restored + * + * Restore the permissions of the named file. If @name is a directory the + * permissions of its parent will be used. + * + * Return: 0 on success, <0 on failure. 
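+ *
+ * For example (illustrative values only): calling
+ * rdtgroup_kn_mode_restore(r, "tasks", 0777) re-applies the 0644 mode
+ * that "tasks" carries in res_common_files.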
+ */ +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn, *parent; + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + iattr.ia_mode = rft->mode & mask; + } + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + parent = kernfs_get_parent(kn); + if (parent) { + iattr.ia_mode |= parent->mode; + kernfs_put(parent); + } + iattr.ia_mode |= S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode |= S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode |= S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +static int rdtgroup_mkdir_info_resdir(void *priv, char *name, + unsigned long fflags) +{ + struct kernfs_node *kn_subdir; + int ret; + + kn_subdir = kernfs_create_dir(kn_info, name, + kn_info->mode, priv); + if (IS_ERR(kn_subdir)) + return PTR_ERR(kn_subdir); + + ret = rdtgroup_kn_set_ugid(kn_subdir); + if (ret) + return ret; + + ret = rdtgroup_add_files(kn_subdir, fflags); + if (!ret) + kernfs_activate(kn_subdir); + + return ret; +} + +static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) +{ + enum resctrl_res_level i; + struct resctrl_schema *s; + struct rdt_resource *r; + unsigned long fflags; + char name[32]; + int ret; + + /* create the directory */ + kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); + if (IS_ERR(kn_info)) + return PTR_ERR(kn_info); + + ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); + if (ret) + goto out_destroy; + + /* loop over enabled controls, these are all alloc_capable */ + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + fflags = r->fflags | RFTYPE_CTRL_INFO; + ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); + if (ret) + goto out_destroy; + } + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + fflags = r->fflags | RFTYPE_MON_INFO; + sprintf(name, "%s_MON", r->name); + ret = rdtgroup_mkdir_info_resdir(r, name, fflags); + if (ret) + goto out_destroy; + } + + ret = rdtgroup_kn_set_ugid(kn_info); + if (ret) + goto out_destroy; + + kernfs_activate(kn_info); + + return 0; + +out_destroy: + kernfs_remove(kn_info); + return ret; +} + +static int +mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, + char *name, struct kernfs_node **dest_kn) +{ + struct kernfs_node *kn; + int ret; + + /* create the directory */ + kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + if (dest_kn) + *dest_kn = kn; + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) + goto out_destroy; + + kernfs_activate(kn); + + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +static inline bool is_mba_linear(void) +{ + return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; +} + +static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 num_closid = resctrl_arch_get_num_closid(r); + int cpu = cpumask_any(&d->cpu_mask); + int i; + + d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), + GFP_KERNEL, cpu_to_node(cpu)); + if (!d->mbps_val) + return -ENOMEM; + + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + + return 0; +} + +static void 
mba_sc_domain_destroy(struct rdt_resource *r, + struct rdt_domain *d) +{ + kfree(d->mbps_val); + d->mbps_val = NULL; +} + +/* + * MBA software controller is supported only if + * MBM is supported and MBA is in linear scale. + */ +static bool supports_mba_mbps(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + return (resctrl_arch_is_mbm_local_enabled() && + r->alloc_capable && is_mba_linear()); +} + +/* + * Enable or disable the MBA software controller + * which helps user specify bandwidth in MBps. + */ +static int set_mba_sc(bool mba_sc) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rdt_domain *d; + int i; + + if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) + return -EINVAL; + + r->membw.mba_sc = mba_sc; + + list_for_each_entry(d, &r->domains, list) { + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + } + + return 0; +} + +/* + * We don't allow rdtgroup directories to be created anywhere + * except the root directory. Thus when looking for the rdtgroup + * structure for a kernfs node we are either looking at a directory, + * in which case the rdtgroup structure is pointed at by the "priv" + * field, otherwise we have a file, and need only look to the parent + * to find the rdtgroup. + */ +static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) +{ + if (kernfs_type(kn) == KERNFS_DIR) { + /* + * All the resource directories use "kn->priv" + * to point to the "struct rdtgroup" for the + * resource. "info" and its subdirectories don't + * have rdtgroup structures, so return NULL here. + */ + if (kn == kn_info || kn->parent == kn_info) + return NULL; + else + return kn->priv; + } else { + return kn->parent->priv; + } +} + +static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + atomic_inc(&rdtgrp->waitcount); + kernfs_break_active_protection(kn); +} + +static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + if (atomic_dec_and_test(&rdtgrp->waitcount) && + (rdtgrp->flags & RDT_DELETED)) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) + rdtgroup_pseudo_lock_remove(rdtgrp); + kernfs_unbreak_active_protection(kn); + rdtgroup_remove(rdtgrp); + } else { + kernfs_unbreak_active_protection(kn); + } +} + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return NULL; + + rdtgroup_kn_get(rdtgrp, kn); + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* Was this group deleted while we waited? 
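+	 * rdtgroup_kn_get() above dropped kernfs active protection, so an
+	 * rmdir may have removed the group while we slept on the mutex.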
*/ + if (rdtgrp->flags & RDT_DELETED) + return NULL; + + return rdtgrp; +} + +void rdtgroup_kn_unlock(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return; + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + rdtgroup_kn_put(rdtgrp, kn); +} + +static int mkdir_mondata_all(struct kernfs_node *parent_kn, + struct rdtgroup *prgrp, + struct kernfs_node **mon_data_kn); + +static void rdt_disable_ctx(void) +{ + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); + set_mba_sc(false); + + resctrl_debug = false; +} + +static int rdt_enable_ctx(struct rdt_fs_context *ctx) +{ + int ret = 0; + + if (ctx->enable_cdpl2) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); + if (ret) + goto out_done; + } + + if (ctx->enable_cdpl3) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); + if (ret) + goto out_cdpl2; + } + + if (ctx->enable_mba_mbps) { + ret = set_mba_sc(true); + if (ret) + goto out_cdpl3; + } + + if (ctx->enable_debug) + resctrl_debug = true; + + return 0; + +out_cdpl3: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); +out_cdpl2: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); +out_done: + return ret; +} + +static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) +{ + struct resctrl_schema *s; + const char *suffix = ""; + int ret, cl; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->res = r; + s->num_closid = resctrl_arch_get_num_closid(r); + if (resctrl_arch_get_cdp_enabled(r->rid)) + s->num_closid /= 2; + + s->conf_type = type; + switch (type) { + case CDP_CODE: + suffix = "CODE"; + break; + case CDP_DATA: + suffix = "DATA"; + break; + case CDP_NONE: + suffix = ""; + break; + } + + ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); + if (ret >= sizeof(s->name)) { + kfree(s); + return -EINVAL; + } + + cl = strlen(s->name); + + /* + * If CDP is supported by this resource, but not enabled, + * include the suffix. This ensures the tabular format of the + * schemata file does not change between mounts of the filesystem. + */ + if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) + cl += 4; + + if (cl > max_name_width) + max_name_width = cl; + + /* + * Choose a width for the resource data based on the resource that has + * widest name and cbm. 
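+	 * (The 4 characters added above when CDP is capable but disabled
+	 * reserve room for the "CODE"/"DATA" suffix for the same reason:
+	 * the columns stay aligned across remounts.)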
+ */ + max_data_width = max(max_data_width, r->data_width); + + INIT_LIST_HEAD(&s->list); + list_add(&s->list, &resctrl_schema_all); + + return 0; +} + +static int schemata_list_create(void) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + int ret = 0; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + if (resctrl_arch_get_cdp_enabled(r->rid)) { + ret = schemata_list_add(r, CDP_CODE); + if (ret) + break; + + ret = schemata_list_add(r, CDP_DATA); + } else { + ret = schemata_list_add(r, CDP_NONE); + } + + if (ret) + break; + } + + return ret; +} + +static void schemata_list_destroy(void) +{ + struct resctrl_schema *s, *tmp; + + list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { + list_del(&s->list); + kfree(s); + } +} + +static int rdt_get_tree(struct fs_context *fc) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_fs_context *ctx = rdt_fc2context(fc); + unsigned long flags = RFTYPE_CTRL_BASE; + struct rdt_domain *dom; + int ret; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + /* + * resctrl file system can only be mounted once. + */ + if (resctrl_mounted) { + ret = -EBUSY; + goto out; + } + + ret = rdtgroup_setup_root(ctx); + if (ret) + goto out; + + ret = rdt_enable_ctx(ctx); + if (ret) + goto out_root; + + ret = schemata_list_create(); + if (ret) { + schemata_list_destroy(); + goto out_ctx; + } + + closid_init(); + + if (resctrl_arch_mon_capable()) + flags |= RFTYPE_MON; + + ret = rdtgroup_add_files(rdtgroup_default.kn, flags); + if (ret) + goto out_schemata_free; + + kernfs_activate(rdtgroup_default.kn); + + ret = rdtgroup_create_info_dir(rdtgroup_default.kn); + if (ret < 0) + goto out_schemata_free; + + if (resctrl_arch_mon_capable()) { + ret = mongroup_create_dir(rdtgroup_default.kn, + &rdtgroup_default, "mon_groups", + &kn_mongrp); + if (ret < 0) + goto out_info; + + ret = mkdir_mondata_all(rdtgroup_default.kn, + &rdtgroup_default, &kn_mondata); + if (ret < 0) + goto out_mongrp; + rdtgroup_default.mon.mon_data_kn = kn_mondata; + } + + ret = rdt_pseudo_lock_init(); + if (ret) + goto out_mondata; + + ret = kernfs_get_tree(fc); + if (ret < 0) + goto out_psl; + + if (resctrl_arch_alloc_capable()) + resctrl_arch_enable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_enable_mon(); + + if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) + resctrl_mounted = true; + + if (resctrl_is_mbm_enabled()) { + list_for_each_entry(dom, &l3->domains, list) + mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + goto out; + +out_psl: + rdt_pseudo_lock_release(); +out_mondata: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mondata); +out_mongrp: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mongrp); +out_info: + kernfs_remove(kn_info); +out_schemata_free: + schemata_list_destroy(); +out_ctx: + rdt_disable_ctx(); +out_root: + rdtgroup_destroy_root(); +out: + rdt_last_cmd_clear(); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +enum rdt_param { + Opt_cdp, + Opt_cdpl2, + Opt_mba_mbps, + Opt_debug, + nr__rdt_params +}; + +static const struct fs_parameter_spec rdt_fs_parameters[] = { + fsparam_flag("cdp", Opt_cdp), + fsparam_flag("cdpl2", Opt_cdpl2), + fsparam_flag("mba_MBps", Opt_mba_mbps), + fsparam_flag("debug", Opt_debug), + {} +}; + +static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + struct 
fs_parse_result result;
+	int opt;
+
+	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
+	if (opt < 0)
+		return opt;
+
+	switch (opt) {
+	case Opt_cdp:
+		ctx->enable_cdpl3 = true;
+		return 0;
+	case Opt_cdpl2:
+		ctx->enable_cdpl2 = true;
+		return 0;
+	case Opt_mba_mbps:
+		if (!supports_mba_mbps())
+			return -EINVAL;
+		ctx->enable_mba_mbps = true;
+		return 0;
+	case Opt_debug:
+		ctx->enable_debug = true;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void rdt_fs_context_free(struct fs_context *fc)
+{
+	struct rdt_fs_context *ctx = rdt_fc2context(fc);
+
+	kernfs_free_fs_context(fc);
+	kfree(ctx);
+}
+
+static const struct fs_context_operations rdt_fs_context_ops = {
+	.free		= rdt_fs_context_free,
+	.parse_param	= rdt_parse_param,
+	.get_tree	= rdt_get_tree,
+};
+
+static int rdt_init_fs_context(struct fs_context *fc)
+{
+	struct rdt_fs_context *ctx;
+
+	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
+	fc->fs_private = &ctx->kfc;
+	fc->ops = &rdt_fs_context_ops;
+	put_user_ns(fc->user_ns);
+	fc->user_ns = get_user_ns(&init_user_ns);
+	fc->global = true;
+	return 0;
+}
+
+/*
+ * Move tasks from one to the other group. If @from is NULL, then all tasks
+ * in the system are moved unconditionally (used for teardown).
+ *
+ * If @mask is not NULL the cpus on which moved tasks are running are set
+ * in that mask so the update smp function call is restricted to affected
+ * cpus.
+ */
+static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+				 struct cpumask *mask)
+{
+	struct task_struct *p, *t;
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(p, t) {
+		if (!from || is_closid_match(t, from) ||
+		    is_rmid_match(t, from)) {
+			resctrl_arch_set_closid_rmid(t, to->closid,
+						     to->mon.rmid);
+
+			/*
+			 * Order the closid/rmid stores above before the loads
+			 * in task_curr(). This pairs with the full barrier
+			 * between the rq->curr update and
+			 * resctrl_arch_sched_in() during context switch.
+			 */
+			smp_mb();
+
+			/*
+			 * If the task is on a CPU, set the CPU in the mask.
+			 * The detection is inaccurate as tasks might move or
+			 * schedule before the smp function call takes place.
+			 * In such a case the function call is pointless, but
+			 * there is no other side effect.
+			 */
+			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
+				cpumask_set_cpu(task_cpu(t), mask);
+		}
+	}
+	read_unlock(&tasklist_lock);
+}
+
+static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
+{
+	struct rdtgroup *sentry, *stmp;
+	struct list_head *head;
+
+	head = &rdtgrp->mon.crdtgrp_list;
+	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
+		free_rmid(sentry->closid, sentry->mon.rmid);
+		list_del(&sentry->mon.crdtgrp_list);
+
+		if (atomic_read(&sentry->waitcount) != 0)
+			sentry->flags = RDT_DELETED;
+		else
+			rdtgroup_remove(sentry);
+	}
+}
+
+/*
+ * Forcibly remove all subdirectories under root.
+ */
+static void rmdir_all_sub(void)
+{
+	struct rdtgroup *rdtgrp, *tmp;
+
+	/* Move all tasks to the default resource group */
+	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
+
+	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
+		/* Free any child rmids */
+		free_all_child_rdtgrp(rdtgrp);
+
+		/* Remove each rdtgroup other than root */
+		if (rdtgrp == &rdtgroup_default)
+			continue;
+
+		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
+		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+			rdtgroup_pseudo_lock_remove(rdtgrp);
+
+		/*
+		 * Give any CPUs back to the default group.
We cannot copy + * cpu_online_mask because a CPU might have executed the + * offline callback already, but is still marked online. + */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + kernfs_remove(rdtgrp->kn); + list_del(&rdtgrp->rdtgroup_list); + + if (atomic_read(&rdtgrp->waitcount) != 0) + rdtgrp->flags = RDT_DELETED; + else + rdtgroup_remove(rdtgrp); + } + /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ + update_closid_rmid(cpu_online_mask, &rdtgroup_default); + + kernfs_remove(kn_info); + kernfs_remove(kn_mongrp); + kernfs_remove(kn_mondata); +} + +static void rdt_kill_sb(struct super_block *sb) +{ + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_disable_ctx(); + + /* Put everything back to default values. */ + resctrl_arch_reset_resources(); + + rmdir_all_sub(); + rdt_pseudo_lock_release(); + rdtgroup_default.mode = RDT_MODE_SHAREABLE; + schemata_list_destroy(); + rdtgroup_destroy_root(); + if (resctrl_arch_alloc_capable()) + resctrl_arch_disable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_disable_mon(); + resctrl_mounted = false; + kernfs_kill_sb(sb); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +static struct file_system_type rdt_fs_type = { + .name = "resctrl", + .init_fs_context = rdt_init_fs_context, + .parameters = rdt_fs_parameters, + .kill_sb = rdt_kill_sb, +}; + +static int mon_addfile(struct kernfs_node *parent_kn, const char *name, + void *priv) +{ + struct kernfs_node *kn; + int ret = 0; + + kn = __kernfs_create_file(parent_kn, name, 0444, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, + &kf_mondata_ops, priv, NULL, NULL); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + kernfs_remove(kn); + return ret; + } + + return ret; +} + +/* + * Remove all subdirectories of mon_data of ctrl_mon groups + * and monitor groups with given domain id. + */ +static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, + unsigned int dom_id) +{ + struct rdtgroup *prgrp, *crgrp; + char name[32]; + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + sprintf(name, "mon_%s_%02d", r->name, dom_id); + kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); + + list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) + kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); + } +} + +static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, + struct rdt_domain *d, + struct rdt_resource *r, struct rdtgroup *prgrp) +{ + union mon_data_bits priv; + struct kernfs_node *kn; + struct mon_evt *mevt; + struct rmid_read rr; + char name[32]; + int ret; + + sprintf(name, "mon_%s_%02d", r->name, d->id); + /* create the directory */ + kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) + goto out_destroy; + + if (WARN_ON(list_empty(&r->evt_list))) { + ret = -EPERM; + goto out_destroy; + } + + priv.u.rid = r->rid; + priv.u.domid = d->id; + list_for_each_entry(mevt, &r->evt_list, list) { + priv.u.evtid = mevt->evtid; + ret = mon_addfile(kn, mevt->name, priv.priv); + if (ret) + goto out_destroy; + + if (resctrl_is_mbm_event(mevt->evtid)) + mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); + } + kernfs_activate(kn); + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +/* + * Add all subdirectories of mon_data for "ctrl_mon" groups + * and "monitor" groups with given domain id. 
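+ * For example, when domain 2 of an L3 resource comes online this adds a
+ * "mon_L3_02" directory under mon_data in every group.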
+ */
+static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+					   struct rdt_domain *d)
+{
+	struct kernfs_node *parent_kn;
+	struct rdtgroup *prgrp, *crgrp;
+	struct list_head *head;
+
+	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
+		parent_kn = prgrp->mon.mon_data_kn;
+		mkdir_mondata_subdir(parent_kn, d, r, prgrp);
+
+		head = &prgrp->mon.crdtgrp_list;
+		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
+			parent_kn = crgrp->mon.mon_data_kn;
+			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
+		}
+	}
+}
+
+static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
+				       struct rdt_resource *r,
+				       struct rdtgroup *prgrp)
+{
+	struct rdt_domain *dom;
+	int ret;
+
+	/* Walking r->domains, ensure it can't race with cpuhp */
+	lockdep_assert_cpus_held();
+
+	list_for_each_entry(dom, &r->domains, list) {
+		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * This creates a directory mon_data which contains the monitored data.
+ *
+ * mon_data has one directory for each domain, named in the format
+ * mon_<domain_name>_<domain_id>. For example, a mon_data directory on a
+ * system with L3 domains looks as below:
+ * ./mon_data:
+ * mon_L3_00
+ * mon_L3_01
+ * mon_L3_02
+ * ...
+ *
+ * Each domain directory has one file per event:
+ * ./mon_L3_00/:
+ * llc_occupancy
+ *
+ */
+static int mkdir_mondata_all(struct kernfs_node *parent_kn,
+			     struct rdtgroup *prgrp,
+			     struct kernfs_node **dest_kn)
+{
+	enum resctrl_res_level i;
+	struct rdt_resource *r;
+	struct kernfs_node *kn;
+	int ret;
+
+	/*
+	 * Create the mon_data directory first.
+	 */
+	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
+	if (ret)
+		return ret;
+
+	if (dest_kn)
+		*dest_kn = kn;
+
+	/*
+	 * Create the subdirectories for each domain. Note that all events
+	 * in a domain like L3 are grouped into a resource whose domain is L3
+	 */
+	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+		r = resctrl_arch_get_resource(i);
+		if (!r->mon_capable)
+			continue;
+
+		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
+		if (ret)
+			goto out_destroy;
+	}
+
+	return 0;
+
+out_destroy:
+	kernfs_remove(kn);
+	return ret;
+}
+
+/**
+ * cbm_ensure_valid - Enforce validity on provided CBM
+ * @_val: Candidate CBM
+ * @r: RDT resource to which the CBM belongs
+ *
+ * The provided CBM represents all cache portions available for use. This
+ * may be represented by a bitmap that does not consist of contiguous ones
+ * and thus be an invalid CBM.
+ * Here the provided CBM is forced to be a valid CBM by only considering
+ * the first set of contiguous bits as valid and clearing all remaining bits.
+ * The intention here is to provide a valid default CBM with which a new
+ * resource group is initialized. The user can follow this with a
+ * modification to the CBM if the default does not satisfy the
+ * requirements.
+ */
+static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
+{
+	unsigned int cbm_len = r->cache.cbm_len;
+	unsigned long first_bit, zero_bit;
+	unsigned long val = _val;
+
+	if (!val)
+		return 0;
+
+	first_bit = find_first_bit(&val, cbm_len);
+	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
+
+	/* Clear any remaining bits to ensure contiguous region */
+	bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
+	return (u32)val;
+}
+
+/*
+ * Initialize cache resources per RDT domain
+ *
+ * Set the RDT domain up to start off with all usable allocations. That is,
+ * all shareable and unused bits. All-zero CBM is invalid.
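+ * cbm_ensure_valid() above guarantees the contiguity requirement: for
+ * example, a candidate mask of 0b1011 keeps only its first contiguous
+ * run of ones and becomes 0b0011.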
+ */ +static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, + u32 closid) +{ + enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 used_b = 0, unused_b = 0; + unsigned long tmp_cbm; + enum rdtgrp_mode mode; + u32 peer_ctl, ctrl_val; + int i; + + cfg = &d->staged_config[t]; + cfg->have_new_ctrl = false; + cfg->new_ctrl = r->cache.shareable_bits; + used_b = r->cache.shareable_bits; + for (i = 0; i < closids_supported(); i++) { + if (closid_allocated(i) && i != closid) { + mode = rdtgroup_mode_by_closid(i); + if (mode == RDT_MODE_PSEUDO_LOCKSETUP) + /* + * ctrl values for locksetup aren't relevant + * until the schemata is written, and the mode + * becomes RDT_MODE_PSEUDO_LOCKED. + */ + continue; + /* + * If CDP is active include peer domain's + * usage to ensure there is no overlap + * with an exclusive group. + */ + if (resctrl_arch_get_cdp_enabled(r->rid)) + peer_ctl = resctrl_arch_get_config(r, d, i, + peer_type); + else + peer_ctl = 0; + ctrl_val = resctrl_arch_get_config(r, d, i, + s->conf_type); + used_b |= ctrl_val | peer_ctl; + if (mode == RDT_MODE_SHAREABLE) + cfg->new_ctrl |= ctrl_val | peer_ctl; + } + } + if (d->plr && d->plr->cbm > 0) + used_b |= d->plr->cbm; + unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); + unused_b &= BIT_MASK(r->cache.cbm_len) - 1; + cfg->new_ctrl |= unused_b; + /* + * Force the initial CBM to be valid, user can + * modify the CBM based on system availability. + */ + cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); + /* + * Assign the u32 CBM to an unsigned long to ensure that + * bitmap_weight() does not access out-of-bound memory. + */ + tmp_cbm = cfg->new_ctrl; + if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); + return -ENOSPC; + } + cfg->have_new_ctrl = true; + + return 0; +} + +/* + * Initialize cache resources with default values. + * + * A new RDT group is being created on an allocation capable (CAT) + * supporting system. Set this group up to start off with all usable + * allocations. + * + * If there are no more shareable bits available on any domain then + * the entire allocation will fail. + */ +static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) +{ + struct rdt_domain *d; + int ret; + + list_for_each_entry(d, &s->res->domains, list) { + ret = __init_one_rdt_domain(d, s, closid); + if (ret < 0) + return ret; + } + + return 0; +} + +/* Initialize MBA resource with default values. */ +static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) +{ + struct resctrl_staged_config *cfg; + struct rdt_domain *d; + + list_for_each_entry(d, &r->domains, list) { + if (is_mba_sc(r)) { + d->mbps_val[closid] = MBA_MAX_MBPS; + continue; + } + + cfg = &d->staged_config[CDP_NONE]; + cfg->new_ctrl = r->default_ctrl; + cfg->have_new_ctrl = true; + } +} + +/* Initialize the RDT group's allocations. 
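+ * Cache resources get the CBMs computed above; MBA domains start from
+ * r->default_ctrl, or MBA_MAX_MBPS when the software controller is active.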
*/ +static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + struct rdt_resource *r; + int ret = 0; + + rdt_staged_configs_clear(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) { + rdtgroup_init_mba(r, rdtgrp->closid); + if (is_mba_sc(r)) + continue; + } else { + ret = rdtgroup_init_cat(s, rdtgrp->closid); + if (ret < 0) + goto out; + } + + ret = resctrl_arch_update_domains(r, rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Failed to initialize allocations\n"); + goto out; + } + + } + + rdtgrp->mode = RDT_MODE_SHAREABLE; + +out: + rdt_staged_configs_clear(); + return ret; +} + +static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) +{ + int ret; + + if (!resctrl_arch_mon_capable()) + return 0; + + ret = alloc_rmid(rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + + ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + return ret; + } + + return 0; +} + +static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) +{ + if (resctrl_arch_mon_capable()) + free_rmid(rgrp->closid, rgrp->mon.rmid); +} + +static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, + const char *name, umode_t mode, + enum rdt_group_type rtype, struct rdtgroup **r) +{ + struct rdtgroup *prdtgrp, *rdtgrp; + unsigned long files = 0; + struct kernfs_node *kn; + int ret; + + prdtgrp = rdtgroup_kn_lock_live(parent_kn); + if (!prdtgrp) { + ret = -ENODEV; + goto out_unlock; + } + + if (rtype == RDTMON_GROUP && + (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto out_unlock; + } + + /* allocate the rdtgroup. */ + rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); + if (!rdtgrp) { + ret = -ENOSPC; + rdt_last_cmd_puts("Kernel out of memory\n"); + goto out_unlock; + } + *r = rdtgrp; + rdtgrp->mon.parent = prdtgrp; + rdtgrp->type = rtype; + INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); + + /* kernfs creates the directory for rdtgrp */ + kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); + if (IS_ERR(kn)) { + ret = PTR_ERR(kn); + rdt_last_cmd_puts("kernfs create error\n"); + goto out_free_rgrp; + } + rdtgrp->kn = kn; + + /* + * kernfs_remove() will drop the reference count on "kn" which + * will free it. But we still need it to stick around for the + * rdtgroup_kn_unlock(kn) call. Take one extra reference here, + * which will be dropped by kernfs_put() in rdtgroup_remove(). + */ + kernfs_get(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + rdt_last_cmd_puts("kernfs perm error\n"); + goto out_destroy; + } + + if (rtype == RDTCTRL_GROUP) { + files = RFTYPE_BASE | RFTYPE_CTRL; + if (resctrl_arch_mon_capable()) + files |= RFTYPE_MON; + } else { + files = RFTYPE_BASE | RFTYPE_MON; + } + + ret = rdtgroup_add_files(kn, files); + if (ret) { + rdt_last_cmd_puts("kernfs fill error\n"); + goto out_destroy; + } + + /* + * The caller unlocks the parent_kn upon success. 
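+	 * The error paths below unlock parent_kn themselves via
+	 * rdtgroup_kn_unlock().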
+	 */
+	return 0;
+
+out_destroy:
+	kernfs_put(rdtgrp->kn);
+	kernfs_remove(rdtgrp->kn);
+out_free_rgrp:
+	kfree(rdtgrp);
+out_unlock:
+	rdtgroup_kn_unlock(parent_kn);
+	return ret;
+}
+
+static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
+{
+	kernfs_remove(rgrp->kn);
+	rdtgroup_remove(rgrp);
+}
+
+/*
+ * Create a monitor group under "mon_groups" directory of a control
+ * and monitor group (ctrl_mon). This is a resource group
+ * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
+ */
+static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
+			      const char *name, umode_t mode)
+{
+	struct rdtgroup *rdtgrp, *prgrp;
+	int ret;
+
+	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
+	if (ret)
+		return ret;
+
+	prgrp = rdtgrp->mon.parent;
+	rdtgrp->closid = prgrp->closid;
+
+	ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
+	if (ret) {
+		mkdir_rdt_prepare_clean(rdtgrp);
+		goto out_unlock;
+	}
+
+	kernfs_activate(rdtgrp->kn);
+
+	/*
+	 * Add the rdtgrp to the list of rdtgrps the parent
+	 * ctrl_mon group has to track.
+	 */
+	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
+
+out_unlock:
+	rdtgroup_kn_unlock(parent_kn);
+	return ret;
+}
+
+/*
+ * These are rdtgroups created under the root directory. Can be used
+ * to allocate and monitor resources.
+ */
+static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
+				   const char *name, umode_t mode)
+{
+	struct rdtgroup *rdtgrp;
+	struct kernfs_node *kn;
+	u32 closid;
+	int ret;
+
+	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
+	if (ret)
+		return ret;
+
+	kn = rdtgrp->kn;
+	ret = closid_alloc();
+	if (ret < 0) {
+		rdt_last_cmd_puts("Out of CLOSIDs\n");
+		goto out_common_fail;
+	}
+	closid = ret;
+	ret = 0;
+
+	rdtgrp->closid = closid;
+
+	ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
+	if (ret)
+		goto out_closid_free;
+
+	kernfs_activate(rdtgrp->kn);
+
+	ret = rdtgroup_init_alloc(rdtgrp);
+	if (ret < 0)
+		goto out_rmid_free;
+
+	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
+
+	if (resctrl_arch_mon_capable()) {
+		/*
+		 * Create an empty mon_groups directory to hold the subset
+		 * of tasks and cpus to monitor.
+		 */
+		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
+		if (ret) {
+			rdt_last_cmd_puts("kernfs subdir error\n");
+			goto out_del_list;
+		}
+	}
+
+	goto out_unlock;
+
+out_del_list:
+	list_del(&rdtgrp->rdtgroup_list);
+out_rmid_free:
+	mkdir_rdt_prepare_rmid_free(rdtgrp);
+out_closid_free:
+	closid_free(closid);
+out_common_fail:
+	mkdir_rdt_prepare_clean(rdtgrp);
+out_unlock:
+	rdtgroup_kn_unlock(parent_kn);
+	return ret;
+}
+
+/*
+ * We allow creating mon groups only within a directory called "mon_groups",
+ * which is present in every ctrl_mon group. Check if this is a valid
+ * "mon_groups" directory.
+ *
+ * 1. The directory should be named "mon_groups".
+ * 2. The mon group itself should "not" be named "mon_groups".
+ *    This makes sure "mon_groups" directory always has a ctrl_mon group
+ *    as parent.
+ */
+static bool is_mon_groups(struct kernfs_node *kn, const char *name)
+{
+	return (!strcmp(kn->name, "mon_groups") &&
+		strcmp(name, "mon_groups"));
+}
+
+static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+			  umode_t mode)
+{
+	/* Do not accept '\n' to avoid an unparsable situation.
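+	 * Group names are echoed back verbatim, e.g. in the "res:" line of
+	 * /proc/<pid>/resctrl (see proc_resctrl_show() above), where an
+	 * embedded newline would corrupt the output.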
*/ + if (strchr(name, '\n')) + return -EINVAL; + + /* + * If the parent directory is the root directory and RDT + * allocation is supported, add a control and monitoring + * subdirectory + */ + if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) + return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); + + /* + * If RDT monitoring is supported and the parent directory is a valid + * "mon_groups" directory, add a monitoring subdirectory. + */ + if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name)) + return rdtgroup_mkdir_mon(parent_kn, name, mode); + + return -EPERM; +} + +static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the parent group */ + rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); + + /* Update per cpu rmid of the moved CPUs first */ + closid = rdtgrp->closid; + rmid = prdtgrp->mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. + */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + rdtgrp->flags = RDT_DELETED; + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + /* + * Remove the rdtgrp from the parent ctrl_mon group's list + */ + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_del(&rdtgrp->mon.crdtgrp_list); + + kernfs_remove(rdtgrp->kn); + + return 0; +} + +static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) +{ + rdtgrp->flags = RDT_DELETED; + list_del(&rdtgrp->rdtgroup_list); + + kernfs_remove(rdtgrp->kn); + return 0; +} + +static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the default group */ + rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); + + /* Give any CPUs back to the default group */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + /* Update per cpu closid and rmid of the moved CPUs first */ + closid = rdtgroup_default.closid; + rmid = rdtgroup_default.mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. + */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + closid_free(rdtgrp->closid); + + rdtgroup_ctrl_remove(rdtgrp); + + /* + * Free all the child monitor group rmids. + */ + free_all_child_rdtgrp(rdtgrp); + + return 0; +} + +static int rdtgroup_rmdir(struct kernfs_node *kn) +{ + struct kernfs_node *parent_kn = kn->parent; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret = 0; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + + rdtgrp = rdtgroup_kn_lock_live(kn); + if (!rdtgrp) { + ret = -EPERM; + goto out; + } + + /* + * If the rdtgroup is a ctrl_mon group and parent directory + * is the root directory, remove the ctrl_mon group. + * + * If the rdtgroup is a mon group and parent directory + * is a valid "mon_groups" directory, remove the mon group. 
+ */ + if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && + rdtgrp != &rdtgroup_default) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = rdtgroup_ctrl_remove(rdtgrp); + } else { + ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); + } + } else if (rdtgrp->type == RDTMON_GROUP && + is_mon_groups(parent_kn, kn->name)) { + ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); + } else { + ret = -EPERM; + } + +out: + rdtgroup_kn_unlock(kn); + free_cpumask_var(tmpmask); + return ret; +} + +/** + * mongrp_reparent() - replace parent CTRL_MON group of a MON group + * @rdtgrp: the MON group whose parent should be replaced + * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp + * @cpus: cpumask provided by the caller for use during this call + * + * Replaces the parent CTRL_MON group for a MON group, resulting in all member + * tasks' CLOSID immediately changing to that of the new parent group. + * Monitoring data for the group is unaffected by this operation. + */ +static void mongrp_reparent(struct rdtgroup *rdtgrp, + struct rdtgroup *new_prdtgrp, + cpumask_var_t cpus) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + + WARN_ON(rdtgrp->type != RDTMON_GROUP); + WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); + + /* Nothing to do when simply renaming a MON group. */ + if (prdtgrp == new_prdtgrp) + return; + + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_move_tail(&rdtgrp->mon.crdtgrp_list, + &new_prdtgrp->mon.crdtgrp_list); + + rdtgrp->mon.parent = new_prdtgrp; + rdtgrp->closid = new_prdtgrp->closid; + + /* Propagate updated closid to all tasks in this group. */ + rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); + + update_closid_rmid(cpus, NULL); +} + +static int rdtgroup_rename(struct kernfs_node *kn, + struct kernfs_node *new_parent, const char *new_name) +{ + struct rdtgroup *new_prdtgrp; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret; + + rdtgrp = kernfs_to_rdtgroup(kn); + new_prdtgrp = kernfs_to_rdtgroup(new_parent); + if (!rdtgrp || !new_prdtgrp) + return -ENOENT; + + /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ + rdtgroup_kn_get(rdtgrp, kn); + rdtgroup_kn_get(new_prdtgrp, new_parent); + + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + /* + * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if + * either kernfs_node is a file. + */ + if (kernfs_type(kn) != KERNFS_DIR || + kernfs_type(new_parent) != KERNFS_DIR) { + rdt_last_cmd_puts("Source and destination must be directories"); + ret = -EPERM; + goto out; + } + + if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { + ret = -ENOENT; + goto out; + } + + if (rdtgrp->type != RDTMON_GROUP || !kn->parent || + !is_mon_groups(kn->parent, kn->name)) { + rdt_last_cmd_puts("Source must be a MON group\n"); + ret = -EPERM; + goto out; + } + + if (!is_mon_groups(new_parent, new_name)) { + rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); + ret = -EPERM; + goto out; + } + + /* + * If the MON group is monitoring CPUs, the CPUs must be assigned to the + * current parent CTRL_MON group and therefore cannot be assigned to + * the new parent, making the move illegal. 
+ */ + if (!cpumask_empty(&rdtgrp->cpu_mask) && + rdtgrp->mon.parent != new_prdtgrp) { + rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); + ret = -EPERM; + goto out; + } + + /* + * Allocate the cpumask for use in mongrp_reparent() to avoid the + * possibility of failing to allocate it after kernfs_rename() has + * succeeded. + */ + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + /* + * Perform all input validation and allocations needed to ensure + * mongrp_reparent() will succeed before calling kernfs_rename(), + * otherwise it would be necessary to revert this call if + * mongrp_reparent() failed. + */ + ret = kernfs_rename(kn, new_parent, new_name); + if (!ret) + mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); + + free_cpumask_var(tmpmask); + +out: + mutex_unlock(&rdtgroup_mutex); + rdtgroup_kn_put(rdtgrp, kn); + rdtgroup_kn_put(new_prdtgrp, new_parent); + return ret; +} + +static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) +{ + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) + seq_puts(seq, ",cdp"); + + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) + seq_puts(seq, ",cdpl2"); + + if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) + seq_puts(seq, ",mba_MBps"); + + if (resctrl_debug) + seq_puts(seq, ",debug"); + + return 0; +} + +static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { + .mkdir = rdtgroup_mkdir, + .rmdir = rdtgroup_rmdir, + .rename = rdtgroup_rename, + .show_options = rdtgroup_show_options, +}; + +static int rdtgroup_setup_root(struct rdt_fs_context *ctx) +{ + rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, + KERNFS_ROOT_CREATE_DEACTIVATED | + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, + &rdtgroup_default); + if (IS_ERR(rdt_root)) + return PTR_ERR(rdt_root); + + ctx->kfc.root = rdt_root; + rdtgroup_default.kn = kernfs_root_to_node(rdt_root); + + return 0; +} + +static void rdtgroup_destroy_root(void) +{ + kernfs_destroy_root(rdt_root); + rdtgroup_default.kn = NULL; +} + +static void rdtgroup_setup_default(void) +{ + mutex_lock(&rdtgroup_mutex); + + rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; + rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; + rdtgroup_default.type = RDTCTRL_GROUP; + INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); + + list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); + + mutex_unlock(&rdtgroup_mutex); +} + +static void domain_destroy_mon_state(struct rdt_domain *d) +{ + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + kfree(d->mbm_local); +} + +void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) + mba_sc_domain_destroy(r, d); + + if (!r->mon_capable) + goto out_unlock; + + /* + * If resctrl is mounted, remove all the + * per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + rmdir_mondata_subdir_allrdtgrp(r, d->id); + + if (resctrl_is_mbm_enabled()) + cancel_delayed_work(&d->mbm_over); + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { + /* + * When a package is going down, forcefully + * decrement rmid->ebusy. There is no way to know + * that the L3 was flushed and hence may lead to + * incorrect counts in rare scenarios, but leaving + * the RMID as busy creates RMID leaks if the + * package never comes back. 
+ */ + __check_limbo(d, true); + cancel_delayed_work(&d->cqm_limbo); + } + + domain_destroy_mon_state(d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + size_t tsize; + + if (resctrl_arch_is_llc_occupancy_enabled()) { + d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); + if (!d->rmid_busy_llc) + return -ENOMEM; + } + if (resctrl_arch_is_mbm_total_enabled()) { + tsize = sizeof(*d->mbm_total); + d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_total) { + bitmap_free(d->rmid_busy_llc); + return -ENOMEM; + } + } + if (resctrl_arch_is_mbm_local_enabled()) { + tsize = sizeof(*d->mbm_local); + d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_local) { + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + return -ENOMEM; + } + } + + return 0; +} + +int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + int err = 0; + + mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { + /* RDT_RESOURCE_MBA is never mon_capable */ + err = mba_sc_domain_allocate(r, d); + goto out_unlock; + } + + if (!r->mon_capable) + goto out_unlock; + + err = domain_setup_mon_state(r, d); + if (err) + goto out_unlock; + + if (resctrl_is_mbm_enabled()) { + INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); + mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + if (resctrl_arch_is_llc_occupancy_enabled()) + INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); + + /* + * If the filesystem is not mounted then only the default resource group + * exists. Creation of its directories is deferred until mount time + * by rdt_get_tree() calling mkdir_mondata_all(). + * If resctrl is mounted, add per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + mkdir_mondata_subdir_allrdtgrp(r, d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +void resctrl_online_cpu(unsigned int cpu) +{ + mutex_lock(&rdtgroup_mutex); + /* The CPU is set in default rdtgroup after online. 
*/ + cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&rdtgroup_mutex); +} + +static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +{ + struct rdtgroup *cr; + + list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { + if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) + break; + } +} + +void resctrl_offline_cpu(unsigned int cpu) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdtgroup *rdtgrp; + struct rdt_domain *d; + + mutex_lock(&rdtgroup_mutex); + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { + clear_childcpus(rdtgrp, cpu); + break; + } + } + + if (!l3->mon_capable) + goto out_unlock; + + d = resctrl_get_domain_from_cpu(cpu, l3); + if (d) { + if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { + cancel_delayed_work(&d->mbm_over); + mbm_setup_overflow_handler(d, 0, cpu); + } + if (resctrl_arch_is_llc_occupancy_enabled() && + cpu == d->cqm_work_cpu && has_busy_rmid(d)) { + cancel_delayed_work(&d->cqm_limbo); + cqm_setup_limbo_handler(d, 0, cpu); + } + } + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +/* + * resctrl_init - resctrl filesystem initialization + * + * Setup resctrl file system including set up root, create mount point, + * register resctrl filesystem, and initialize files under root directory. + * + * Return: 0 on success or -errno + */ +int resctrl_init(void) +{ + int ret = 0; + + seq_buf_init(&last_cmd_status, last_cmd_status_buf, + sizeof(last_cmd_status_buf)); + + rdtgroup_setup_default(); + + thread_throttle_mode_init(); + + ret = resctrl_mon_resource_init(); + if (ret) + return ret; + + ret = sysfs_create_mount_point(fs_kobj, "resctrl"); + if (ret) + return ret; + + ret = register_filesystem(&rdt_fs_type); + if (ret) + goto cleanup_mountpoint; + + /* + * Adding the resctrl debugfs directory here may not be ideal since + * it would let the resctrl debugfs directory appear on the debugfs + * filesystem before the resctrl filesystem is mounted. + * It may also be ok since that would enable debugging of RDT before + * resctrl is mounted. + * The reason why the debugfs directory is created here and not in + * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and + * during the debugfs directory creation also &sb->s_type->i_mutex_key + * (the lockdep class of inode->i_rwsem). Other filesystem + * interactions (eg. SyS_getdents) have the lock ordering: + * &sb->s_type->i_mutex_key --> &mm->mmap_lock + * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex + * is taken, thus creating dependency: + * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause + * issues considering the other two lock dependencies. + * By creating the debugfs directory here we avoid a dependency + * that may cause deadlock (even though file operations cannot + * occur until the filesystem is mounted, but I do not know how to + * tell lockdep that). 
+ */ + debugfs_resctrl = debugfs_create_dir("resctrl", NULL); + + return 0; + +cleanup_mountpoint: + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + return ret; +} + +void resctrl_exit(void) +{ + debugfs_remove_recursive(debugfs_resctrl); + unregister_filesystem(&rdt_fs_type); + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + resctrl_mon_resource_exit(); +} -- Gitee From a28f40d2421793919c5de6cc794e9b3b384fa7a8 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 2 Jul 2018 11:15:31 +0100 Subject: [PATCH 594/953] arm64: head.S: Initialise MPAM EL2 registers and disable traps ANBZ: #8686 commit 2dd87f04e14aaade03e43874d4134c76e00a4d92 morse-linux. Add code to head.S's el2_setup to detect MPAM and disable any EL2 traps. This register resets to an unknown value, setting it to the default parititons/pmg before we enable the MMU is the best thing to do. Kexec/kdump will depend on this if the previous kernel left the CPU configured with a restrictive configuration. If linux is booted at the highest implemented exception level el2_setup will clear the enable bit, disabling MPAM. This code can't be enabled until a subsequent patch adds the Kconfig and cpufeature boiler plate. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/include/asm/el2_setup.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b7afaa026842..1e2181820a0a 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -208,6 +208,21 @@ msr spsr_el2, x0 .endm +.macro __init_el2_mpam +#ifdef CONFIG_ARM64_MPAM + /* Memory Partioning And Monitoring: disable EL2 traps */ + mrs x1, id_aa64pfr0_el1 + ubfx x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4 + cbz x0, .Lskip_mpam_\@ // skip if no MPAM + msr_s SYS_MPAM2_EL2, xzr // use the default partition + // and disable lower traps + mrs_s x0, SYS_MPAMIDR_EL1 + tbz x0, #17, .Lskip_mpam_\@ // skip if no MPAMHCR reg + msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2 +.Lskip_mpam_\@: +#endif /* CONFIG_ARM64_MPAM */ +.endm + /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2. Note that everything gets initialised as @@ -225,6 +240,7 @@ __init_el2_stage2 __init_el2_gicv3 __init_el2_hstr + __init_el2_mpam __init_el2_nvhe_idregs __init_el2_cptr __init_el2_fgt -- Gitee From 9cd0be0eb1f787ce20032595a222e31f655adf79 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 2 Jul 2018 11:15:31 +0100 Subject: [PATCH 595/953] arm64: cpufeature: discover CPU support for MPAM ANBZ: #8686 commit a59cda5f355ee9fea8cfcb08f0266f0f1353143b morse-linux. ARMv8.4 adds support for 'Memory Partitioning And Monitoring' (MPAM) which describes an interface to cache and bandwidth controls wherever they appear in the system. Add support to detect MPAM. Like SVE, MPAM has an extra id register that describes the virtualisation support, which is optional. Detect this separately so we can detect mismatched/insane systems, but still use MPAM on the host even if the virtualisation support is missing. MPAM needs enabling at the highest implemented exception level, otherwise the register accesses trap. The 'enabled' flag is accessible to lower exception levels, but its in a register that traps when MPAM isn't enabled. 
The cpufeature 'matches' hook is extended to test this on one of the
CPUs, so that firmware can emulate MPAM as disabled if it is reserved
for use by secure world.

(If you have a boot failure that bisects here, it's likely your CPUs
advertise MPAM in the id registers, but firmware failed to either
enable MPAM, or emulate the trap as if it were disabled.)

Signed-off-by: James Morse
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ]
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3010
---
 .../arch/arm64/cpu-feature-registers.rst |  2 +
 arch/arm64/Kconfig                       | 19 ++++-
 arch/arm64/include/asm/cpu.h             |  1 +
 arch/arm64/include/asm/cpufeature.h      | 13 ++++
 arch/arm64/include/asm/mpam.h            | 76 +++++++++++++++++++
 arch/arm64/include/asm/sysreg.h          |  8 ++
 arch/arm64/kernel/Makefile               |  2 +
 arch/arm64/kernel/cpufeature.c           | 69 +++++++++++++++++
 arch/arm64/kernel/cpuinfo.c              |  4 +
 arch/arm64/kernel/mpam.c                 |  8 ++
 arch/arm64/tools/cpucaps                 |  1 +
 arch/arm64/tools/sysreg                  | 33 ++++++++
 12 files changed, 235 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/include/asm/mpam.h
 create mode 100644 arch/arm64/kernel/mpam.c

diff --git a/Documentation/arch/arm64/cpu-feature-registers.rst b/Documentation/arch/arm64/cpu-feature-registers.rst
index de6d8a4790e2..14ea68bcf196 100644
--- a/Documentation/arch/arm64/cpu-feature-registers.rst
+++ b/Documentation/arch/arm64/cpu-feature-registers.rst
@@ -152,6 +152,8 @@ infrastructure:
      +------------------------------+---------+---------+
      | DIT                          | [51-48] |    y    |
      +------------------------------+---------+---------+
+     | MPAM                         | [43-40] |    n    |
+     +------------------------------+---------+---------+
      | SVE                          | [35-32] |    y    |
      +------------------------------+---------+---------+
      | GIC                          | [27-24] |    n    |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f9777ce2ccb2..543396333e11 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1974,7 +1974,24 @@ config ARM64_TLB_RANGE
 	  The feature introduces new assembly instructions, and they
 	  were support when binutils >= 2.30.

-endmenu # "ARMv8.4 architectural features"
+config ARM64_MPAM
+	bool "Enable support for MPAM"
+	help
+	  Memory Partitioning and Monitoring is an optional extension
+	  that allows the CPUs to mark load and store transactions with
+	  labels for partition-id and performance-monitoring-group.
+	  System components, such as the caches, can use the partition-id
+	  to apply a performance policy. MPAM monitors can use the
+	  partition-id and performance-monitoring-group to measure the
+	  cache occupancy or data throughput.
+
+	  Use of this extension requires CPU support, support in the
+	  memory system components (MSC), and a description from firmware
+	  of where the MSC are in the address space.
+
+	  MPAM is exposed to user-space via the resctrl pseudo filesystem.
+ +endmenu menu "ARMv8.5 architectural features" diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index e749838b9c5d..1cb5bafd9238 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -47,6 +47,7 @@ struct cpuinfo_arm64 { u64 reg_revidr; u64 reg_gmid; u64 reg_smidr; + u64 reg_mpamidr; u64 reg_id_aa64dfr0; u64 reg_id_aa64dfr1; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 5bba39376055..e873848ad9d9 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -619,6 +619,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1) return val > 0; } +static inline bool id_aa64pfr0_mpam(u64 pfr0) +{ + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + + return val > 0; +} + static inline bool id_aa64pfr1_mte(u64 pfr1) { u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT); @@ -831,6 +838,12 @@ static inline bool system_supports_tlb_range(void) cpus_have_const_cap(ARM64_HAS_TLB_RANGE); } +static inline bool cpus_support_mpam(void) +{ + return IS_ENABLED(CONFIG_ARM64_MPAM) && + cpus_have_final_cap(ARM64_MPAM); +} + int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); bool try_emulate_mrs(struct pt_regs *regs, u32 isn); diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h new file mode 100644 index 000000000000..a4a969be233a --- /dev/null +++ b/arch/arm64/include/asm/mpam.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd. */ + +#ifndef __ASM__MPAM_H +#define __ASM__MPAM_H + +#include +#include +#include + +#include +#include +#include + +/* CPU Registers */ +#define MPAM_SYSREG_EN BIT_ULL(63) +#define MPAM_SYSREG_TRAP_IDR BIT_ULL(58) +#define MPAM_SYSREG_TRAP_MPAM0_EL1 BIT_ULL(49) +#define MPAM_SYSREG_TRAP_MPAM1_EL1 BIT_ULL(48) +#define MPAM_SYSREG_PMG_D GENMASK(47, 40) +#define MPAM_SYSREG_PMG_I GENMASK(39, 32) +#define MPAM_SYSREG_PARTID_D GENMASK(31, 16) +#define MPAM_SYSREG_PARTID_I GENMASK(15, 0) + +#define MPAMIDR_PMG_MAX GENMASK(40, 32) +#define MPAMIDR_PMG_MAX_SHIFT 32 +#define MPAMIDR_PMG_MAX_LEN 8 +#define MPAMIDR_VPMR_MAX GENMASK(20, 18) +#define MPAMIDR_VPMR_MAX_SHIFT 18 +#define MPAMIDR_VPMR_MAX_LEN 3 +#define MPAMIDR_HAS_HCR BIT(17) +#define MPAMIDR_HAS_HCR_SHIFT 17 +#define MPAMIDR_PARTID_MAX GENMASK(15, 0) +#define MPAMIDR_PARTID_MAX_SHIFT 0 +#define MPAMIDR_PARTID_MAX_LEN 15 + +#define MPAMHCR_EL0_VPMEN BIT_ULL(0) +#define MPAMHCR_EL1_VPMEN BIT_ULL(1) +#define MPAMHCR_GSTAPP_PLK BIT_ULL(8) +#define MPAMHCR_TRAP_MPAMIDR BIT_ULL(31) + +/* Properties of the VPM registers */ +#define MPAM_VPM_NUM_REGS 8 +#define MPAM_VPM_PARTID_LEN 16 +#define MPAM_VPM_PARTID_MASK 0xffff +#define MPAM_VPM_REG_LEN 64 +#define MPAM_VPM_PARTIDS_PER_REG (MPAM_VPM_REG_LEN / MPAM_VPM_PARTID_LEN) +#define MPAM_VPM_MAX_PARTID (MPAM_VPM_NUM_REGS * MPAM_VPM_PARTIDS_PER_REG) + + +DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); + +/* check whether all CPUs have MPAM support */ +static inline bool mpam_cpus_have_feature(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return cpus_have_final_cap(ARM64_MPAM); + return false; +} + +/* check whether all CPUs have MPAM virtualisation support */ +static inline bool mpam_cpus_have_mpam_hcr(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return static_branch_unlikely(&arm64_mpam_has_hcr); + return false; +} + +/* enable MPAM virtualisation support */ +static inline void __init __enable_mpam_hcr(void) +{ + if 
(IS_ENABLED(CONFIG_ARM64_MPAM)) + static_branch_enable(&arm64_mpam_has_hcr); +} + +#endif /* __ASM__MPAM_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 38296579a4fd..94633246d311 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -515,6 +515,13 @@ #define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0) #define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0) +#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0) +#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1) +#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0) + +#define __VPMn_op2(n) ((n) & 0x7) +#define SYS_MPAM_VPMn_EL2(n) sys_reg(3, 4, 10, 6, __VPMn_op2(n)) + #define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0) #define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1) #define SYS_RMR_EL2 sys_reg(3, 4, 12, 0, 2) @@ -579,6 +586,7 @@ #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) +#define SYS_MPAM1_EL12 sys_reg(3, 5, 10, 5, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d48aa807dcce..7c67e2f29206 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -65,11 +65,13 @@ obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o obj-$(CONFIG_SDEI_WATCHDOG) += watchdog_sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o +obj-$(CONFIG_ARM64_MPAM) += mpam.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 444a73c2e638..d3fbceaa6369 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -84,6 +84,7 @@ #include #include #include +#include #include #include #include @@ -623,6 +624,18 @@ static const struct arm64_ftr_bits ftr_smcr[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_mpamidr[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + MPAMIDR_PMG_MAX_SHIFT, MPAMIDR_PMG_MAX_LEN, 0), /* PMG_MAX */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + MPAMIDR_VPMR_MAX_SHIFT, MPAMIDR_VPMR_MAX_LEN, 0), /* VPMR_MAX */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, + MPAMIDR_HAS_HCR_SHIFT, 1, 0), /* HAS_HCR */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + MPAMIDR_PARTID_MAX_SHIFT, MPAMIDR_PARTID_MAX_LEN, 0), /* PARTID_MAX */ + ARM64_FTR_END, +}; + /* * Common ftr bits for a 32bit register with all hidden, strict * attributes, with 4bit feature fields and a default safe value of @@ -739,6 +752,9 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr), + /* Op1 = 0, CRn = 10, CRm = 4 */ + ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr), + /* Op1 = 1, CRn = 0, CRm = 0 */ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), @@ -1058,6 +1074,9 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) vec_init_vq_map(ARM64_VEC_SME); } + if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) + init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr); + if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) 
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); @@ -1317,6 +1336,11 @@ void update_cpu_features(int cpu, vec_update_vq_map(ARM64_VEC_SME); } + if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) { + taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu, + info->reg_mpamidr, boot->reg_mpamidr); + } + /* * The kernel uses the LDGM/STGM instructions and the number of tags * they read/write depends on the GMID_EL1.BS field. Check that the @@ -2239,6 +2263,39 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); } +static bool __maybe_unused +test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope) +{ + if (!has_cpuid_feature(entry, scope)) + return false; + + /* Check firmware actually enabled MPAM on this cpu. */ + return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM_SYSREG_EN); +} + +static void __maybe_unused +cpu_enable_mpam(const struct arm64_cpu_capabilities *entry) +{ + /* + * Access by the kernel (at EL1) should use the reserved PARTID + * which is configured unrestricted. This avoids priority-inversion + * where latency sensitive tasks have to wait for a task that has + * been throttled to release the lock. + */ + write_sysreg_s(0, SYS_MPAM1_EL1); +} + +static void mpam_extra_caps(void) +{ + u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); + + if (!IS_ENABLED(CONFIG_ARM64_MPAM)) + return; + + if (idr & MPAMIDR_HAS_HCR) + __enable_mpam_hcr(); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .capability = ARM64_ALWAYS_BOOT, @@ -2719,6 +2776,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) }, +#ifdef CONFIG_ARM64_MPAM + { + .desc = "Memory Partitioning And Monitoring", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_MPAM, + .matches = test_has_mpam, + .cpu_enable = cpu_enable_mpam, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1) + }, +#endif {}, }; @@ -3366,6 +3433,8 @@ void __init setup_cpu_features(void) if (!cwg) pr_warn("No Cache Writeback Granule information, assuming %d\n", ARCH_DMA_MINALIGN); + + mpam_extra_caps(); } static int enable_mismatched_32bit_el0(unsigned int cpu) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 98fda8500535..1b1fe0f58a86 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -460,6 +460,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) __cpuinfo_store_cpu_32bit(&info->aarch32); + if (IS_ENABLED(CONFIG_ARM64_MPAM) && + id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) + info->reg_mpamidr = read_cpuid(MPAMIDR_EL1); + cpuinfo_detect_icache_policy(info); } diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c new file mode 100644 index 000000000000..ff29b666e025 --- /dev/null +++ b/arch/arm64/kernel/mpam.c @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd. 
*/ + +#include + +#include + +DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 5511bee15603..a7a5b67cf553 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -56,6 +56,7 @@ HW_DBM KVM_HVHE KVM_PROTECTED_MODE MISMATCHED_CACHE_TYPE +MPAM MTE MTE_ASYMM SME diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 76ce150e7347..0e7d7f327410 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2530,6 +2530,22 @@ Res0 1 Field 0 EN EndSysreg +Sysreg MPAMIDR_EL1 3 0 10 4 4 +Res0 63:62 +Field 61 HAS_SDEFLT +Field 60 HAS_FORCE_NS +Field 59 SP4 +Field 58 HAS_TIDR +Field 57 HAS_ALTSP +Res0 56:40 +Field 39:32 PMG_MAX +Res0 31:21 +Field 20:18 VPMR_MAX +Field 17 HAS_HCR +Res0 16 +Field 15:0 PARTID_MAX +EndSysreg + Sysreg LORID_EL1 3 0 10 4 7 Res0 63:24 Field 23:16 LD @@ -2537,6 +2553,22 @@ Res0 15:8 Field 7:0 LR EndSysreg +Sysreg MPAM1_EL1 3 0 10 5 0 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg + +Sysreg MPAM0_EL1 3 0 10 5 1 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg + Sysreg ISR_EL1 3 0 12 1 0 Res0 63:11 Field 10 IS @@ -2550,6 +2582,7 @@ EndSysreg Sysreg ICC_NMIAR1_EL1 3 0 12 9 5 Res0 63:24 Field 23:0 INTID + EndSysreg Sysreg TRBLIMITR_EL1 3 0 9 11 0 -- Gitee From e2669393367d75f1624944cd7a0086995e648ae6 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 21 Mar 2019 15:59:41 +0000 Subject: [PATCH 596/953] KVM: arm64: Fix missing traps of guest accesses to the MPAM registers ANBZ: #8686 commit 055409b8aafcca81a2dfa43052142a3b0f416816 morse-linux. commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to guests, but didn't add trap handling. If you are unlucky, this results in an MPAM aware guest being delivered an undef during boot. The host prints: | kvm [97]: Unsupported guest sys_reg access at: ffff800080024c64 [00000005] | { Op0( 3), Op1( 0), CRn(10), CRm( 5), Op2( 0), func_read }, Which results in: | Internal error: Oops - Undefined instruction: 0000000002000000 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 6.6.0-rc7-00559-gd89c186d50b2 #14616 | Hardware name: linux,dummy-virt (DT) | pstate: 00000005 (nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : test_has_mpam+0x18/0x30 | lr : test_has_mpam+0x10/0x30 | sp : ffff80008000bd90 ... | Call trace: | test_has_mpam+0x18/0x30 | update_cpu_capabilities+0x7c/0x11c | setup_cpu_features+0x14/0xd8 | smp_cpus_done+0x24/0xb8 | smp_init+0x7c/0x8c | kernel_init_freeable+0xf8/0x280 | kernel_init+0x24/0x1e0 | ret_from_fork+0x10/0x20 | Code: 910003fd 97ffffde 72001c00 54000080 (d538a500) | ---[ end trace 0000000000000000 ]--- | Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b | ---[ end Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b ]--- Add the support to enable the traps, and handle the three guest accessible registers as RAZ/WI. This allows guests to keep the invariant id-register value, while advertising that MPAM isn't really supported. With MPAM v1.0 we can trap the MPAMIDR_EL1 register only if ARM64_HAS_MPAM_HCR, with v1.1 an additional MPAM2_EL2.TIDR bit traps MPAMIDR_EL1 on platforms that don't have MPAMHCR_EL2. Enable one of these if either is supported. 
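
As a sketch, the EL2 trap setup on guest entry is roughly the
following; it mirrors the __activate_traps_mpam() hunk below, and the
MPAM_SYSREG_* and MPAMHCR_TRAP_MPAMIDR definitions come from the
asm/mpam.h added earlier in this series:

	u64 r = MPAM_SYSREG_TRAP_MPAM0_EL1 | MPAM_SYSREG_TRAP_MPAM1_EL1;

	if (mpam_cpus_have_mpam_hcr()) {
		/* MPAMHCR_EL2 exists: use it to trap MPAMIDR_EL1 */
		write_sysreg_s(MPAMHCR_TRAP_MPAMIDR, SYS_MPAMHCR_EL2);
	} else {
		/* From v1.1, MPAM2_EL2.TIDR traps MPAMIDR_EL1 instead */
		r |= MPAM_SYSREG_TRAP_IDR;
	}

	write_sysreg_s(r, SYS_MPAM2_EL2);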
If neither is supported, the guest can discover that the CPU has MPAM
support, and how many PARTID etc the host has ... but it can't
influence anything, so it's harmless.

Full support for the feature would only expose MPAM to the guest if a
pseudo-device has been created to describe the virt->phys partid
mapping the VMM expects. This will depend on ARM64_HAS_MPAM_HCR.

Fixes: 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register")
CC: Anshuman Khandual
Link: https://lore.kernel.org/linux-arm-kernel/20200925160102.118858-1-james.morse@arm.com/
Signed-off-by: James Morse
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ]
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3010
---
 arch/arm64/include/asm/kvm_arm.h        |  1 +
 arch/arm64/include/asm/mpam.h           |  4 ++--
 arch/arm64/kernel/image-vars.h          |  5 ++++
 arch/arm64/kvm/hyp/include/hyp/switch.h | 32 +++++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c               | 20 ++++++++++++++++
 5 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 1095c6647e96..2d8b243a86cd 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -104,6 +104,7 @@
 #define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
 #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
+#define MPAMHCR_HOST_FLAGS	0

 /* TCR_EL2 Registers bits */
 #define TCR_EL2_RES1		((1U << 31) | (1 << 23))
diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h
index a4a969be233a..576102d510ad 100644
--- a/arch/arm64/include/asm/mpam.h
+++ b/arch/arm64/include/asm/mpam.h
@@ -51,7 +51,7 @@
 DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr);

 /* check whether all CPUs have MPAM support */
-static inline bool mpam_cpus_have_feature(void)
+static __always_inline bool mpam_cpus_have_feature(void)
 {
 	if (IS_ENABLED(CONFIG_ARM64_MPAM))
 		return cpus_have_final_cap(ARM64_MPAM);
@@ -59,7 +59,7 @@ static inline bool mpam_cpus_have_feature(void)
 }

 /* check whether all CPUs have MPAM virtualisation support */
-static inline bool mpam_cpus_have_mpam_hcr(void)
+static __always_inline bool mpam_cpus_have_mpam_hcr(void)
 {
 	if (IS_ENABLED(CONFIG_ARM64_MPAM))
 		return static_branch_unlikely(&arm64_mpam_has_hcr);
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 35f3c7959513..d10d3fed31d9 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -64,6 +64,11 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler);
 /* Vectors installed by hyp-init on reset HVC. */
 KVM_NVHE_ALIAS(__hyp_stub_vectors);

+/* Additional static keys for cpufeatures */
+#ifdef CONFIG_ARM64_MPAM
+KVM_NVHE_ALIAS(arm64_mpam_has_hcr);
+#endif
+
 /* Static keys which are set if a vGIC trap should be handled in hyp.
*/ KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 9cfe6bd1dbe4..657320f453e6 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -172,6 +173,35 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2); } +static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) +{ + u64 r = MPAM_SYSREG_TRAP_MPAM0_EL1 | MPAM_SYSREG_TRAP_MPAM1_EL1; + + if (!mpam_cpus_have_feature()) + return; + + /* trap guest access to MPAMIDR_EL1 */ + if (mpam_cpus_have_mpam_hcr()) { + write_sysreg_s(MPAMHCR_TRAP_MPAMIDR, SYS_MPAMHCR_EL2); + } else { + /* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */ + r |= MPAM_SYSREG_TRAP_IDR; + } + + write_sysreg_s(r, SYS_MPAM2_EL2); +} + +static inline void __deactivate_traps_mpam(void) +{ + if (!mpam_cpus_have_feature()) + return; + + write_sysreg_s(0, SYS_MPAM2_EL2); + + if (mpam_cpus_have_mpam_hcr()) + write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2); +} + static inline void __activate_traps_common(struct kvm_vcpu *vcpu) { /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */ @@ -212,6 +242,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) } __activate_traps_hfgxtr(vcpu); + __activate_traps_mpam(vcpu); } static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) @@ -231,6 +262,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); __deactivate_traps_hfgxtr(vcpu); + __deactivate_traps_mpam(); } static inline void ___activate_traps(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 0afd6136e275..0a6e5386107e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -411,6 +411,23 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu, return true; } +static bool workaround_bad_mpam_abi(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + /* + * The ID register can't be removed without breaking migration, + * but MPAMIDR_EL1 can advertise all-zeroes, indicating there are zero + * PARTID/PMG supported by the CPU, allowing the other two trapped + * registers (MPAM1_EL1 and MPAM0_EL1) to be treated as RAZ/WI. + * Emulating MPAM1_EL1 as RAZ/WI means the guest sees the MPAMEN bit + * as clear, and realises MPAM isn't usable on this CPU. 
+ */
+	p->regval = 0;
+
+	return true;
+}
+
 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
@@ -2130,8 +2147,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
 	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
 	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
+	{ SYS_DESC(SYS_MPAMIDR_EL1), workaround_bad_mpam_abi },
 	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

+	{ SYS_DESC(SYS_MPAM1_EL1), workaround_bad_mpam_abi },
+	{ SYS_DESC(SYS_MPAM0_EL1), workaround_bad_mpam_abi },
 	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
-- 
Gitee


From 98feaf38dd953e3172ba5f783757d1e3d1d7ced2 Mon Sep 17 00:00:00 2001
From: James Morse
Date: Thu, 23 Nov 2023 16:22:12 +0000
Subject: [PATCH 597/953] KVM: arm64: Disable MPAM visibility by default, and
 handle traps

ANBZ: #8686

commit 9c4f586b73e6dbb1d6ad1e3eeb9ee75befeab4c1 morse-linux.

Currently KVM only allows writeable ID registers to be downgraded in
the 'safe' direction, as determined by the cpufeature 'lower safe'
flags.

commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in
ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
guests, but didn't add trap handling. A previous patch supplied the
missing trap handling.

Existing VMs that have the MPAM field of AA64PFR0_EL1 need to be
migratable, but there is little point enabling the MPAM CPU interface
on new VMs until there is something a guest can do with it.

Clear the MPAM field from the guest's AA64PFR0_EL1 by default, but
allow user-space to set it again if the host supports MPAM.

Add a helper to return the maximum permitted value for an ID register.
For most this is the reset value. To allow the MPAM field to be written
as supported, check if the host sanitised value is '1' and upgrade the
reset value.

Finally, change the trap handling to inject an undef if MPAM was not
advertised to the guest.

Full support will depend on a pseudo-device being created that
describes the virt->phys PARTID mapping the VMM expects. Migration
would be expected to fail if this pseudo-device can't be created on the
remote end. This ID bit isn't needed to block migration.

Signed-off-by: James Morse
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ]
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3010
---
 arch/arm64/kvm/sys_regs.c | 74 +++++++++++++++++++++++++++++++--------
 1 file changed, 60 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0a6e5386107e..9424fa7351bf 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -411,21 +411,29 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
 	return true;
 }

-static bool workaround_bad_mpam_abi(struct kvm_vcpu *vcpu,
-				    struct sys_reg_params *p,
-				    const struct sys_reg_desc *r)
+static bool trap_mpam(struct kvm_vcpu *vcpu,
+		      struct sys_reg_params *p,
+		      const struct sys_reg_desc *r)
 {
+	u64 aa64pfr0_el1 = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
+
 	/*
-	 * The ID register can't be removed without breaking migration,
-	 * but MPAMIDR_EL1 can advertise all-zeroes, indicating there are zero
-	 * PARTID/PMG supported by the CPU, allowing the other two trapped
-	 * registers (MPAM1_EL1 and MPAM0_EL1) to be treated as RAZ/WI.
+	 * What did we expose to the guest?
+ * Earlier guests may have seen the ID bits, which can't be removed + * without breaking migration, but MPAMIDR_EL1 can advertise all-zeroes, + * indicating there are zero PARTID/PMG supported by the CPU, allowing + * the other two trapped registers (MPAM1_EL1 and MPAM0_EL1) to be + * treated as RAZ/WI. * Emulating MPAM1_EL1 as RAZ/WI means the guest sees the MPAMEN bit * as clear, and realises MPAM isn't usable on this CPU. */ - p->regval = 0; + if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, aa64pfr0_el1)) { + p->regval = 0; + return true; + } - return true; + kvm_inject_undefined(vcpu); + return false; } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, @@ -1245,6 +1253,36 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, return arm64_ftr_safe_value(&kvm_ftr, new, cur); } +static u64 kvm_arm64_ftr_max(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + u64 pfr0, val = rd->reset(vcpu, rd); + u32 field, id = reg_to_encoding(rd); + + /* + * Some values may reset to a lower value than can be supported, + * get the maximum feature value. + */ + switch (id) { + case SYS_ID_AA64PFR0_EL1: + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + /* + * MPAM resets to 0, but migration of MPAM=1 guests is needed. + * See trap_mpam() for more. + */ + field = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + if (field == ID_AA64PFR0_EL1_MPAM_1) { + val &= ~ID_AA64PFR0_EL1_MPAM_MASK; + val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, ID_AA64PFR0_EL1_MPAM_1); + } + + break; + } + + return val; +} + /** * arm64_check_features() - Check if a feature register value constitutes * a subset of features indicated by the idreg's KVM sanitised limit. @@ -1265,8 +1303,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, const struct arm64_ftr_bits *ftrp = NULL; u32 id = reg_to_encoding(rd); u64 writable_mask = rd->val; - u64 limit = rd->reset(vcpu, rd); - u64 mask = 0; + u64 limit, mask = 0; /* * Hidden and unallocated ID registers may not have a corresponding @@ -1280,6 +1317,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, if (!ftr_reg) return -EINVAL; + limit = kvm_arm64_ftr_max(vcpu, rd); ftrp = ftr_reg->ftr_bits; for (; ftrp && ftrp->width; ftrp++) { @@ -1483,6 +1521,14 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, val &= ~ID_AA64PFR0_EL1_AMU_MASK; + /* + * MPAM is disabled by default as KVM also needs a set of PARTID to + * program the MPAMVPMx_EL2 PARTID remapping registers with. But some + * older kernels let the guest see the ID bit. Turning it on causes + * the registers to be emulated as RAZ/WI. See trap_mpam() for more. 
+ */ + val &= ~ID_AA64PFR0_EL1_MPAM_MASK; + return val; } @@ -2147,11 +2193,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, { SYS_DESC(SYS_LORN_EL1), trap_loregion }, { SYS_DESC(SYS_LORC_EL1), trap_loregion }, - { SYS_DESC(SYS_MPAMIDR_EL1), workaround_bad_mpam_abi }, + { SYS_DESC(SYS_MPAMIDR_EL1), trap_mpam }, { SYS_DESC(SYS_LORID_EL1), trap_loregion }, - { SYS_DESC(SYS_MPAM1_EL1), workaround_bad_mpam_abi }, - { SYS_DESC(SYS_MPAM0_EL1), workaround_bad_mpam_abi }, + { SYS_DESC(SYS_MPAM1_EL1), trap_mpam }, + { SYS_DESC(SYS_MPAM0_EL1), trap_mpam }, { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, -- Gitee From 6b8e5a90c2db30da24a9397a752ae27373cd7e37 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Dec 2023 12:49:18 +0000 Subject: [PATCH 598/953] arm64: mpam: Context switch the MPAM registers ANBZ: #8686 commit d3220b56803a826f5f9702e27fe7d24eb2be0c39 morse-linux. MPAM has a system register that is used to hold the partid and pmg values that traffic generated by EL0 will use. This can be set per-task by the resctrl file system. Add a helper to switch this. resctrl expects a 'default' value to be used in preference if the default partid and pmg are selected. struct task_struct's separate closid and rmid fields are insufficient to implement resctrl using MPAM, as resctrl can change the partid (closid) and pmg (sort of like the rmid) separately. On x86, the rmid is an independent number, so a race that writes a mismatched closid and rmid into hardware is benign. On arm64, the pmg bits extend the partid. (i.e. partid-5 has a pmg-0 that is not the same as partid-6's pmg-0). In this case, mismatching the values will 'dirty' a pmg value that resctrl believes is clean, and is not tracking with its 'limbo' code. To avoid this, the partid and pmg are always read and written as a pair. Instead of making struct task_struct's closid and rmid fields an endian-unsafe union, add the value to struct thread_info and always use READ_ONCE()/WRITE_ONCE() when accessing this field. CC: Amit Singh Tomar Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/include/asm/mpam.h | 46 ++++++++++++++++++++++++++++ arch/arm64/include/asm/thread_info.h | 3 ++ arch/arm64/kernel/mpam.c | 4 +++ arch/arm64/kernel/process.c | 7 +++++ 4 files changed, 60 insertions(+) diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h index 576102d510ad..1d81a6f26acd 100644 --- a/arch/arm64/include/asm/mpam.h +++ b/arch/arm64/include/asm/mpam.h @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include @@ -49,6 +51,9 @@ DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DECLARE_STATIC_KEY_FALSE(mpam_enabled); +DECLARE_PER_CPU(u64, arm64_mpam_default); +DECLARE_PER_CPU(u64, arm64_mpam_current); /* check whether all CPUs have MPAM support */ static __always_inline bool mpam_cpus_have_feature(void) @@ -73,4 +78,45 @@ static inline void __init __enable_mpam_hcr(void) static_branch_enable(&arm64_mpam_has_hcr); } +/* + * The resctrl filesystem writes to the partid/pmg values for threads and CPUs, + * which may race with reads in __mpam_sched_in(). Ensure only one of the old + * or new values are used. 
Particular care should be taken with the pmg field + * as __mpam_sched_in() may read a partid and pmg that don't match, causing + * this value to be stored with cache allocations, despite being considered + * 'free' by resctrl. + * + * A value in struct thread_info is used instead of struct task_struct as the + * cpu's u64 register format is used, but struct task_struct has two u32'. + */ +static inline u64 mpam_get_regval(struct task_struct *tsk) +{ +#ifdef CONFIG_ARM64_MPAM + return READ_ONCE(task_thread_info(tsk)->mpam_partid_pmg); +#else + return 0; +#endif +} + +static inline void mpam_thread_switch(struct task_struct *tsk) +{ + u64 oldregval; + int cpu = smp_processor_id(); + u64 regval = mpam_get_regval(tsk); + + if (!IS_ENABLED(CONFIG_ARM64_MPAM) || + !static_branch_likely(&mpam_enabled)) + return; + + if (!regval) + regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu)); + + oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu)); + if (oldregval == regval) + return; + + /* Synchronising this write is left until the ERET to EL0 */ + write_sysreg_s(regval, SYS_MPAM0_EL1); + WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval); +} #endif /* __ASM__MPAM_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 553d1bc559c6..c57b33de0ed1 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -41,6 +41,9 @@ struct thread_info { #ifdef CONFIG_SHADOW_CALL_STACK void *scs_base; void *scs_sp; +#endif +#ifdef CONFIG_ARM64_MPAM + u64 mpam_partid_pmg; #endif u32 cpu; }; diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c index ff29b666e025..346f0273b2c5 100644 --- a/arch/arm64/kernel/mpam.c +++ b/arch/arm64/kernel/mpam.c @@ -4,5 +4,9 @@ #include #include +#include DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DEFINE_STATIC_KEY_FALSE(mpam_enabled); +DEFINE_PER_CPU(u64, arm64_mpam_default); +DEFINE_PER_CPU(u64, arm64_mpam_current); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 0fcc4eb1a7ab..34f39e4aa4d1 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -552,6 +553,12 @@ struct task_struct *__switch_to(struct task_struct *prev, if (prev->thread.sctlr_user != next->thread.sctlr_user) update_sctlr_el1(next->thread.sctlr_user); + /* + * MPAM thread switch happens after the DSB to ensure prev's accesses + * use prev's MPAM settings. + */ + mpam_thread_switch(next); + /* the actual thread switch */ last = cpu_switch_to(prev, next); -- Gitee From 580b716efab5b38497f51dab668272a75888253a Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 11 Dec 2018 17:04:48 +0000 Subject: [PATCH 599/953] untested: KVM: arm64: Force guest EL1 to use user-space's partid configuration ANBZ: #8686 commit 0b21ba1c38b70420a9d7819012938d99273b8a13 morse-linux. While we trap the guest's attempts to read/write the MPAM control registers, these remain in effect. guest-EL0 uses KVM's user-space's configuration, as the value is left in the register, and guest-EL1 uses either the host kernel's configuration, or in the case of VHE, the unknown reset value of MPAM1_EL1. On nVHE systems, EL2 continues to use partid-0 for world-switch, even when the host may have configured its kernel threads to use a different partid. 0 may have been assigned to another task. We want to force the guest-EL1 to use KVM's user-space's MPAM configuration. On a nVHE system, copy the EL1 MPAM register to EL2. 
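
As a rough sketch, the nVHE run loop then looks like the following (the
names are the helpers added by the hunks below):

	__mpam_copy_el1_to_el2();	/* world-switch runs with the host thread's partid */
	...
	__mpam_guest_load();		/* guest-EL1 gets the partid/pmg KVM's user-space left in MPAM0_EL1 */
	/* enter the guest */
	...
	__mpam_guest_put();		/* restore EL1 from the value parked in EL2 */

The EL1-to-EL2 copy is the step just described.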
This ensures world-switch uses the same partid as the kernel thread does on the host. When loading the guests EL1 registers, copy the VMM's EL0 partid to the EL1 register. When restoring the hosts registers, the partid previously copied to EL2 can be used to restore EL1. For VHE systems, we can skip restoring the EL1 register for the host, as it is out-of-context once HCR_EL2.TGE is set. This is done outside the usual sysreg save/restore as the values can change behind KVMs back, so should not be stored in the guest context. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 27 ++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/switch.c | 11 +++++++++ arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 1 + 3 files changed, 39 insertions(+) diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index bb6b571ec627..c8767abd693e 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -15,6 +15,7 @@ #include #include #include +#include static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) { @@ -243,4 +244,30 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu) write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2); } +/* + * The _EL0 value was written by the host's context switch, copy this into the + * guest's EL1. + */ +static inline void __mpam_guest_load(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + write_sysreg_el1(read_sysreg_s(SYS_MPAM0_EL1), SYS_MPAM1); +} + +/* + * Copy the _EL2 register back to _EL1, clearing any trap bits EL2 may have set. + * nVHE world-switch copies the _EL1 register to _EL2. A VHE host writes to the + * _EL2 register as it is aliased by the hardware when TGE is set. + */ +static inline void __mpam_guest_put(void) +{ + u64 val, mask = MPAM_SYSREG_PMG_D | MPAM_SYSREG_PMG_I | + MPAM_SYSREG_PARTID_D | MPAM_SYSREG_PARTID_I; + + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) { + val = FIELD_GET(mask, read_sysreg_s(SYS_MPAM2_EL2)); + write_sysreg_el1(val, SYS_MPAM1); + } +} + #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c353a06ee7e6..c2118f658e22 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -242,6 +242,13 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) } } +/* Use the host thread's partid and pmg for world switch */ +static void __mpam_copy_el1_to_el2(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + write_sysreg_s(read_sysreg_s(SYS_MPAM1_EL1), SYS_MPAM2_EL2); +} + /* Switch to the guest for legacy non-VHE systems */ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) { @@ -251,6 +258,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) bool pmu_switch_needed; u64 exit_code; + __mpam_copy_el1_to_el2(); + /* * Having IRQs masked via PMR when entering the guest means the GIC * will not signal the CPU of interrupts of lower priority, and the @@ -310,6 +319,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __timer_enable_traps(vcpu); __debug_switch_to_guest(vcpu); + __mpam_guest_load(); do { /* Jump in the fire! 
 		/* Jump in the fire! */
@@ -320,6 +330,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
+	__mpam_guest_put();
 	__timer_disable_traps(vcpu);
 	__hyp_vgic_save_state(vcpu);
 
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index b35a178e7e0d..6b407cd3230d 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -90,6 +90,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_user_state(guest_ctxt);
 	__sysreg_restore_el1_state(guest_ctxt);
+	__mpam_guest_load();
 
 	vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
 
--
Gitee

From 3fed85679c1911e21287df21e99ea8f2b1092cec Mon Sep 17 00:00:00 2001
From: James Morse
Date: Mon, 4 Dec 2023 14:33:19 +0000
Subject: [PATCH 600/953] ACPI / PPTT: Provide a helper to walk processor
 containers

ANBZ: #8686

commit 140a693dd7a214179f67cb3dcf36f8fd9467e5e6 morse-linux.

The PPTT describes CPUs and caches, as well as processor containers.
To enable PPI partitions, the irqchip driver needs to know how many
partitions the platform has, and which CPUs belong to which partition.

When a percpu interrupt is registered, the partition is provided to
allow a different driver to request the same percpu interrupt intid,
one per partition.

The acpi_id of the Processor Container is the natural way to do this
with ACPI, but the DSDT AML interpreter is not available early enough
for the irqchip driver. Fortunately, the same information can be
described in the PPTT.

Add a helper to count the number of Processor Containers in the PPTT.
This is structured as a walker/callback as the irqchip driver will also
use this to configure each partition.

Only Processor entries in the PPTT that have a valid acpi id are
considered as containers. To identify a particular Processor Container,
it must have an id.

Signed-off-by: James Morse
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ]
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3010
---
 drivers/acpi/pptt.c | 58 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..f3279bcb78ff 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -21,6 +21,8 @@
 #include
 #include
 
+typedef int (*acpi_pptt_cpu_callback_t)(struct acpi_pptt_processor *, void *);
+
 static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr,
 							u32 pptt_ref)
 {
@@ -293,6 +295,62 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
 	return NULL;
 }
 
+/**
+ * acpi_pptt_for_each_container() - Iterate over all processor containers
+ *
+ * Not all 'Processor' entries in the PPTT are either a CPU or a Processor
+ * Container, they may exist purely to describe a Private resource. CPUs
+ * have to be leaves, so a Processor Container is a non-leaf that has the
+ * 'ACPI Processor ID valid' flag set.
+ *
+ * Return: 0 for a complete walk, or the first non-zero value from the callback
+ * that stopped the walk.
+ */
+int acpi_pptt_for_each_container(acpi_pptt_cpu_callback_t callback, void *arg)
+{
+	struct acpi_pptt_processor *cpu_node;
+	struct acpi_table_header *table_hdr;
+	struct acpi_subtable_header *entry;
+	bool leaf_flag, has_leaf_flag = false;
+	unsigned long table_end;
+	acpi_status status;
+	u32 proc_sz;
+	int ret = 0;
+
+	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr);
+	if (ACPI_FAILURE(status))
+		return 0;
+
+	if (table_hdr->revision > 1)
+		has_leaf_flag = true;
+
+	table_end = (unsigned long)table_hdr + table_hdr->length;
+	entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+			     sizeof(struct acpi_table_pptt));
+	proc_sz = sizeof(struct acpi_pptt_processor);
+	while ((unsigned long)entry + proc_sz < table_end) {
+		cpu_node = (struct acpi_pptt_processor *)entry;
+		if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
+		    cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
+		{
+			leaf_flag = cpu_node->flags & ACPI_PPTT_ACPI_LEAF_NODE;
+			if ((has_leaf_flag && !leaf_flag) ||
+			    (!has_leaf_flag && !acpi_pptt_leaf_node(table_hdr, cpu_node)))
+			{
+				ret = callback(cpu_node, arg);
+				if (ret)
+					break;
+			}
+		}
+		entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+				     entry->length);
+	}
+
+	acpi_put_table(table_hdr);
+
+	return ret;
+}
+
 static u8 acpi_cache_type(enum cache_type type)
 {
 	switch (type) {
--
Gitee

From ed91f33b51f17008378b27ee5ddb589bce718d69 Mon Sep 17 00:00:00 2001
From: James Morse
Date: Mon, 30 Nov 2020 13:29:56 +0000
Subject: [PATCH 601/953] ACPI / PPTT: Find PPTT cache level by ID

ANBZ: #8686

commit 7bf596309dc800428c5ca5bfabe5053e45e47cfb morse-linux.

The MPAM table identifies caches by id, but the driver also wants to
know the cache level, without having to wait for whichever core has
that cache to come online.

Add a helper that walks every possible cache, until it finds the one
identified by id, then returns the level.

Signed-off-by: James Morse
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ]
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3010
---
 drivers/acpi/pptt.c  | 74 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/acpi.h |  5 +++
 2 files changed, 79 insertions(+)

diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index f3279bcb78ff..6c1f668d63ba 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -870,3 +870,77 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
 	return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
 					  ACPI_PPTT_ACPI_IDENTICAL);
 }
+
+
+/**
+ * find_acpi_cache_level_from_id() - Get the level of the specified cache
+ * @cache_id: The id field of the unified cache
+ *
+ * Determine the level relative to any CPU for the unified cache identified by
+ * cache_id. This allows the property to be found even if the CPUs are offline.
+ *
+ * The returned level can be used to group unified caches that are peers.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * If one CPU's L2 is shared with another as L3, this function will return
+ * an unpredictable value.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found.
+ * Otherwise returns a value which represents the level of the specified cache.
+ */ +int find_acpi_cache_level_from_id(u32 cache_id) +{ + u32 acpi_cpu_id; + acpi_status status; + int level, cpu, num_levels; + struct acpi_pptt_cache *cache; + struct acpi_table_header *table; + struct acpi_pptt_cache_v1* cache_v1; + struct acpi_pptt_processor *cpu_node; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + /* + * If we found the cache first, we'd still need to walk from each CPU + * to find the level... + */ + for_each_possible_cpu(cpu) { + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + acpi_put_table(table); + return level; + } + } + } + + acpi_put_table(table); + return -ENOENT; +} diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ba2f96c9a574..4304299d5218 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1495,6 +1495,7 @@ int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); +int find_acpi_cache_level_from_id(u32 cache_id); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1516,6 +1517,10 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } +static inline int find_acpi_cache_level_from_id(u32 cache_id) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 -- Gitee From 8f7ab09a0e597f10bb580527334fe5350e304195 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Dec 2023 14:40:25 +0000 Subject: [PATCH 602/953] ACPI / PPTT: Add a helper to fill a cpumask from a processor container ANBZ: #8686 commit 3c920715e3e3bb5cd28b697db9103e482359861a morse-linux. The ACPI table for MPAM describes a set of CPUs with the UID of a processor container. These exist both in the namespace and the PPTT. Using the existing for-each helpers, provide a helper to find the specified processor container in the PPTT, and fill a cpumask with the CPUs that belong to it. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/acpi/pptt.c | 67 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 6 ++++ 2 files changed, 73 insertions(+) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 6c1f668d63ba..9fbeed61cd87 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -295,6 +295,38 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } +/* parent_node points into the table, but the table isn't provided. 
+ */
+static void acpi_pptt_get_child_cpus(struct acpi_pptt_processor *parent_node,
+				     cpumask_t *cpus)
+{
+	struct acpi_pptt_processor *cpu_node;
+	struct acpi_table_header *table_hdr;
+	acpi_status status;
+	u32 acpi_id;
+	int cpu;
+
+	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr);
+	if (ACPI_FAILURE(status))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		acpi_id = get_acpi_id_for_cpu(cpu);
+		cpu_node = acpi_find_processor_node(table_hdr, acpi_id);
+
+		while (cpu_node) {
+			if (cpu_node == parent_node) {
+				cpumask_set_cpu(cpu, cpus);
+				break;
+			}
+			cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
+		}
+	}
+
+	acpi_put_table(table_hdr);
+
+	return;
+}
+
 /**
  * acpi_pptt_for_each_container() - Iterate over all processor containers
  *
@@ -351,6 +383,41 @@ int acpi_pptt_for_each_container(acpi_pptt_cpu_callback_t callback, void *arg)
 	return ret;
 }
 
+struct __cpus_from_container_arg {
+	u32 acpi_cpu_id;
+	cpumask_t *cpus;
+};
+
+static int __cpus_from_container(struct acpi_pptt_processor *container, void *arg)
+{
+	struct __cpus_from_container_arg *params = arg;
+
+	if (container->acpi_processor_id == params->acpi_cpu_id)
+		acpi_pptt_get_child_cpus(container, params->cpus);
+
+	return 0;
+}
+
+/**
+ * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a
+ *					 processor container
+ *
+ * Find the specified Processor Container, and fill cpus with all the cpus
+ * below it.
+ *
+ * Return: 0 for a complete walk, or an error if the mask is incomplete.
+ */
+int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus)
+{
+	struct __cpus_from_container_arg params;
+
+	params.acpi_cpu_id = acpi_cpu_id;
+	params.cpus = cpus;
+
+	cpumask_clear(cpus);
+	return acpi_pptt_for_each_container(&__cpus_from_container, &params);
+}
+
 static u8 acpi_cache_type(enum cache_type type)
 {
 	switch (type) {
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4304299d5218..133b753235b7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1496,6 +1496,7 @@ int find_acpi_cpu_topology_cluster(unsigned int cpu);
 int find_acpi_cpu_topology_package(unsigned int cpu);
 int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
 int find_acpi_cache_level_from_id(u32 cache_id);
+int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus);
 #else
 static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
 {
@@ -1521,6 +1522,11 @@ static inline int find_acpi_cache_level_from_id(u32 cache_id)
 {
 	return -EINVAL;
 }
+static inline int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id,
+						    cpumask_t *cpus)
+{
+	return -EINVAL;
+}
 #endif
 
 #ifdef CONFIG_ARM64
--
Gitee

From 3a8a60974e0da3d137f4accd161e9f8c5bc866dc Mon Sep 17 00:00:00 2001
From: James Morse
Date: Wed, 19 May 2021 15:16:28 +0100
Subject: [PATCH 603/953] ACPI / PPTT: Add a helper to fill a cpumask from a
 cache_id

ANBZ: #8686

commit 90e73e8b3b6219b34e875b0c866f94aae84c8952 morse-linux.

MPAM identifies CPUs by the cache_id in the PPTT cache structure. The
driver needs to know which CPUs are associated with the cache; the CPUs
may not all be online, so cacheinfo does not have the information.

Add a helper to pull this information out of the PPTT.
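As an illustration of the intended use, a minimal sketch of a caller
follows. Only acpi_pptt_get_cpumask_from_cache_id() comes from this
patch; the mpam_example_* wrapper and its print are invented for the
example:

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* Hypothetical caller: find every possible CPU below a given cache. */
static int mpam_example_affinity_from_cache(u32 cache_id)
{
	cpumask_var_t affinity;
	int err;

	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
		return -ENOMEM;

	/* Walks the PPTT, so offline CPUs are included too */
	err = acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
	if (!err)
		pr_info("cache 0x%x spans CPUs %*pbl\n", cache_id,
			cpumask_pr_args(affinity));

	free_cpumask_var(affinity);
	return err;
}
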
CC: Rohit Mathew
Signed-off-by: James Morse
[ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ]
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3010
---
 drivers/acpi/pptt.c  | 79 ++++++++++++++++++++++++++++++++++++++++++--
 include/linux/acpi.h |  6 ++++
 2 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 9fbeed61cd87..201df60e4a79 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -183,9 +183,10 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
  * levels and split cache levels (data/instruction).
  * @table_hdr: Pointer to the head of the PPTT table
  * @cpu_node: processor node we wish to count caches for
- * @levels: Number of levels if success.
+ * @levels: Number of levels if success. (*levels) should be initialized by
+ *	    the caller with the value to be used as the starting level.
  * @split_levels: Number of split cache levels (data/instruction) if
- *		  success. Can by NULL.
+ *		  success. Can be NULL.
  *
  * Given a processor node containing a processing unit, walk into it and count
  * how many levels exist solely for it, and then walk up each level until we hit
@@ -982,6 +983,8 @@ int find_acpi_cache_level_from_id(u32 cache_id)
 	 * to find the level...
 	 */
 	for_each_possible_cpu(cpu) {
+
+		num_levels = 0;
 		acpi_cpu_id = get_acpi_id_for_cpu(cpu);
 		cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
 		if (!cpu_node)
@@ -1011,3 +1014,75 @@ int find_acpi_cache_level_from_id(u32 cache_id)
 	acpi_put_table(table);
 	return -ENOENT;
 }
+
+/**
+ * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the
+ *					   specified cache
+ * @cache_id: The id field of the unified cache
+ * @cpus: Where to build the cpumask
+ *
+ * Determine which CPUs are below this cache in the PPTT. This allows the
+ * property to be found even if the CPUs are offline.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found.
+ * Otherwise returns 0 and sets the cpus in the provided cpumask.
+ */
+int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus)
+{
+	u32 acpi_cpu_id;
+	acpi_status status;
+	int level, cpu, num_levels;
+	struct acpi_pptt_cache *cache;
+	struct acpi_table_header *table;
+	struct acpi_pptt_cache_v1* cache_v1;
+	struct acpi_pptt_processor *cpu_node;
+
+	cpumask_clear(cpus);
+
+	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+	if (ACPI_FAILURE(status)) {
+		acpi_pptt_warn_missing();
+		return -ENOENT;
+	}
+
+	if (table->revision < 3) {
+		acpi_put_table(table);
+		return -ENOENT;
+	}
+
+	/*
+	 * If we found the cache first, we'd still need to walk from each cpu.
+ */ + for_each_possible_cpu(cpu) { + + num_levels = 0; + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + cpumask_set_cpu(cpu, cpus); + } + } + } + + acpi_put_table(table); + return 0; +} diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 133b753235b7..b14c81759fd8 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1497,6 +1497,7 @@ int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cache_level_from_id(u32 cache_id); int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1527,6 +1528,11 @@ static inline int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, { return -EINVAL; } +static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, + cpumask_t *cpus) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 -- Gitee From 65bf070ff2a4c1043182c21c3cf22add81d72f88 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 4 Dec 2023 19:09:38 +0000 Subject: [PATCH 604/953] cacheinfo: Allow for >32-bit cache 'id' ANBZ: #8686 commit 48ce26a34bccf6aff649ce9e3318b6d6aca94a03 morse-linux. In preparation to set the cache 'id' based on the CPU h/w ids, allow for 64-bit bit 'id' value. The only case that needs this is arm64, so unsigned long is sufficient. Cc: Greg Kroah-Hartman Cc: "Rafael J. Wysocki" Signed-off-by: Rob Herring [ Update get_cpu_cacheinfo_id() too. Use UL instead of ULL. 
] Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/base/cacheinfo.c | 8 +++++++- include/linux/cacheinfo.h | 8 ++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cbae8be1fe52..16df072b1477 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -620,13 +620,19 @@ static ssize_t file_name##_show(struct device *dev, \ return sysfs_emit(buf, "%u\n", this_leaf->object); \ } -show_one(id, id); show_one(level, level); show_one(coherency_line_size, coherency_line_size); show_one(number_of_sets, number_of_sets); show_one(physical_line_partition, physical_line_partition); show_one(ways_of_associativity, ways_of_associativity); +static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", this_leaf->id); +} + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index a5cfd44fab45..7aa8da587f92 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -47,7 +47,7 @@ extern unsigned int coherency_max_size; * keeping, the remaining members form the core properties of the cache */ struct cacheinfo { - unsigned int id; + unsigned long id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; @@ -115,7 +115,7 @@ const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); * Get the id of the cache associated with @cpu at level @level. * cpuhp lock must be held. */ -static inline int get_cpu_cacheinfo_id(int cpu, int level) +static inline unsigned long get_cpu_cacheinfo_id(int cpu, int level) { struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); int i; @@ -124,11 +124,11 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level) if (ci->info_list[i].level == level) { if (ci->info_list[i].attributes & CACHE_ID) return ci->info_list[i].id; - return -1; + return ~0UL; } } - return -1; + return ~0UL; } #ifdef CONFIG_ARM64 -- Gitee From cb0bf24ed757a49cc06c92b66432e5b3dd2a0c7e Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 6 Oct 2021 11:43:32 -0500 Subject: [PATCH 605/953] cacheinfo: Set cache 'id' based on DT data ANBZ: #8686 commit 594097510201724732eb99aaf68d838fcfcc3809 morse-linux. Use the minimum CPU h/w id of the CPUs associated with the cache for the cache 'id'. This will provide a stable id value for a given system. As we need to check all possible CPUs, we can't use the shared_cpu_map which is just online CPUs. As there's not a cache to CPUs mapping in DT, we have to walk all CPU nodes and then walk cache levels. Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Signed-off-by: Rob Herring Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/base/cacheinfo.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 16df072b1477..d4c0309e24d6 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -183,6 +183,31 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf, return of_property_read_bool(np, "cache-unified"); } +static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +{ + struct device_node *cpu; + unsigned long min_id = ~0UL; + + for_each_of_cpu_node(cpu) { + struct device_node *cache_node = cpu; + u64 id = of_get_cpu_hwid(cache_node, 0); + + while ((cache_node = of_find_next_cache_node(cache_node))) { + if ((cache_node == np) && (id < min_id)) { + min_id = id; + of_node_put(cache_node); + break; + } + of_node_put(cache_node); + } + } + + if (min_id != ~0UL) { + this_leaf->id = min_id; + this_leaf->attributes |= CACHE_ID; + } +} + static void cache_of_set_props(struct cacheinfo *this_leaf, struct device_node *np) { @@ -198,6 +223,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf, cache_get_line_size(this_leaf, np); cache_nr_sets(this_leaf, np); cache_associativity(this_leaf); + cache_of_set_id(this_leaf, np); } static int cache_setup_of_node(unsigned int cpu) -- Gitee From 1e1c5caf8f2175db89429b4d8f8f7838abd69dc9 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Oct 2021 16:04:55 +0100 Subject: [PATCH 606/953] cacheinfo: Expose the code to generate a cache-id from a device_node ANBZ: #8686 commit 6ade1c87b850595b164455a76a569a67bbf57f97 morse-linux. The MPAM driver identifies caches by id for use with resctrl. It needs to know the cache-id when probeing, but the value isn't set in cacheinfo until device_initcall(). Expose the code that generates the cache-id. The parts of the MPAM driver that run early can use this to set up the resctrl structures. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/base/cacheinfo.c | 13 ++++++++++--- include/linux/cacheinfo.h | 1 + 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index d4c0309e24d6..6f9087c6aef7 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -183,7 +183,7 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf, return of_property_read_bool(np, "cache-unified"); } -static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +unsigned long cache_of_get_id(struct device_node *np) { struct device_node *cpu; unsigned long min_id = ~0UL; @@ -202,8 +202,15 @@ static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) } } - if (min_id != ~0UL) { - this_leaf->id = min_id; + return min_id; +} + +static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +{ + unsigned long id = cache_of_get_id(np); + + if (id != ~0UL) { + this_leaf->id = id; this_leaf->attributes |= CACHE_ID; } } diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 7aa8da587f92..d343754aacfb 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -110,6 +110,7 @@ int acpi_get_cache_info(unsigned int cpu, #endif const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); +unsigned long cache_of_get_id(struct device_node *np); /* * Get the id of the cache associated with @cpu at level @level. -- Gitee From e4c83ff2d65838add9e80bb33de349d616d9c143 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 18:57:05 +0100 Subject: [PATCH 607/953] drivers: base: cacheinfo: Add helper to find the cache size from cpu+level ANBZ: #8686 commit 3a64a1d7d94c23c46bdea89071c9f1681d6167d1 morse-linux. MPAM needs to know the size of a cache associated with a particular CPU. The DT/ACPI agnostic way of doing this is to ask cacheinfo. Add a helper to do this. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- include/linux/cacheinfo.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index d343754aacfb..467ec6a76567 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -132,6 +132,27 @@ static inline unsigned long get_cpu_cacheinfo_id(int cpu, int level) return ~0UL; } +/* + * Get the size of the cache associated with @cpu at level @level. + * cpuhp lock must be held. + */ +static inline unsigned int get_cpu_cacheinfo_size(int cpu, int level) +{ + struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); + int i; + + if (!ci->info_list) + return 0; + + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == level) { + return ci->info_list[i].size; + } + } + + return 0; +} + #ifdef CONFIG_ARM64 #define use_arch_cache_info() (true) #else -- Gitee From aee8331eb33b199ebfbe5c092dea2c61a3b91bfa Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Dec 2023 19:11:19 +0000 Subject: [PATCH 608/953] ACPI / MPAM: Parse the MPAM table ANBZ: #8686 commit 27b4ec98977590b385ce4128dcaf95df802ebead morse-linux. 
Add code to parse the arm64 specific MPAM table, looking up the cache level from the PPTT and feeding the end result into the MPAM driver. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/Kconfig | 1 + drivers/acpi/arm64/Kconfig | 3 + drivers/acpi/arm64/Makefile | 1 + drivers/acpi/arm64/mpam.c | 369 ++++++++++++++++++++++++++++++++++++ drivers/acpi/tables.c | 2 +- include/linux/arm_mpam.h | 43 +++++ 6 files changed, 418 insertions(+), 1 deletion(-) create mode 100644 drivers/acpi/arm64/mpam.c create mode 100644 include/linux/arm_mpam.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 543396333e11..66b4475c1972 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1976,6 +1976,7 @@ config ARM64_TLB_RANGE config ARM64_MPAM bool "Enable support for MPAM" + select ACPI_MPAM if ACPI help Memory Partitioning and Monitoring is an optional extension that allows the CPUs to mark load and store transactions with diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index b3ed6212244c..f2fd79f22e7d 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -21,3 +21,6 @@ config ACPI_AGDI config ACPI_APMT bool + +config ACPI_MPAM + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 143debc1ba4a..a55d16c01c50 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -4,4 +4,5 @@ obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_APMT) += apmt.o obj-$(CONFIG_ARM_AMBA) += amba.o +obj-$(CONFIG_ACPI_MPAM) += mpam.o obj-y += dma.o init.o diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c new file mode 100644 index 000000000000..8a63449f27b5 --- /dev/null +++ b/drivers/acpi/arm64/mpam.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 Arm Ltd. + +/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */ + +#define pr_fmt(fmt) "ACPI MPAM: " fmt + +#include +#include +#include +#include +#include + +#include + +#include + +/* Flags for acpi_table_mpam_msc.*_interrupt_flags */ +#define ACPI_MPAM_MSC_IRQ_MODE_EDGE 1 +#define ACPI_MPAM_MSC_IRQ_TYPE_MASK (3<<1) +#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0 +#define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER (1<<3) +#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID (1<<4) + +static bool frob_irq(struct platform_device *pdev, int intid, u32 flags, + int *irq, u32 processor_container_uid) +{ + int sense; + + if (!intid) + return false; + + /* 0 in this field indicates a wired interrupt */ + if (flags & ACPI_MPAM_MSC_IRQ_TYPE_MASK) + return false; + + if (flags & ACPI_MPAM_MSC_IRQ_MODE_EDGE) + sense = ACPI_EDGE_SENSITIVE; + else + sense = ACPI_LEVEL_SENSITIVE; + + /* + * If the GSI is in the GIC's PPI range, try and create a partitioned + * percpu interrupt. 
+ */ + if (16 <= intid && intid < 32 && processor_container_uid != ~0) { + pr_err_once("Partitioned interrupts not supported\n"); + return false; + } else { + *irq = acpi_register_gsi(&pdev->dev, intid, sense, + ACPI_ACTIVE_HIGH); + } + if (*irq <= 0) { + pr_err_once("Failed to register interrupt 0x%x with ACPI\n", + intid); + return false; + } + + return true; +} + +static void acpi_mpam_parse_irqs(struct platform_device *pdev, + struct acpi_mpam_msc_node *tbl_msc, + struct resource *res, int *res_idx) +{ + u32 flags, aff = ~0; + int irq; + + flags = tbl_msc->overflow_interrupt_flags; + if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->overflow_interrupt_affinity; + if (frob_irq(pdev, tbl_msc->overflow_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "overflow"; + + (*res_idx)++; + } + + flags = tbl_msc->error_interrupt_flags; + if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->error_interrupt_affinity; + else + aff = ~0; + if (frob_irq(pdev, tbl_msc->error_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "error"; + + (*res_idx)++; + } +} + +static int acpi_mpam_parse_resource(struct mpam_msc *msc, + struct acpi_mpam_resource_node *res) +{ + u32 cache_id; + int level; + + switch (res->locator_type) { + case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE: + cache_id = res->locator.cache_locator.cache_reference; + level = find_acpi_cache_level_from_id(cache_id); + if (level < 0) { + pr_err_once("Bad level for cache with id %u\n", cache_id); + return level; + } + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, + level, cache_id); + case ACPI_MPAM_LOCATION_TYPE_MEMORY: + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY, + 255, res->locator.memory_locator.proximity_domain); + default: + /* These get discovered later and treated as unknown */ + return 0; + } +} + +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + int i, err; + struct acpi_mpam_resource_node *resources; + + resources = (struct acpi_mpam_resource_node *)(tbl_msc + 1); + for (i = 0; i < tbl_msc->num_resouce_nodes; i++) { + err = acpi_mpam_parse_resource(msc, &resources[i]); + if (err) + return err; + } + + return 0; +} + +static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc, + struct platform_device *pdev, + u32 *acpi_id) +{ + bool acpi_id_valid = false; + struct acpi_device *buddy; + char hid[16], uid[16]; + int err; + + memset(&hid, 0, sizeof(hid)); + memcpy(hid, &tbl_msc->hardware_id_linked_device, + sizeof(tbl_msc->hardware_id_linked_device)); + + if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) { + *acpi_id = tbl_msc->instance_id_linked_device; + acpi_id_valid = true; + } + + err = snprintf(uid, sizeof(uid), "%u", + tbl_msc->instance_id_linked_device); + if (err < 0 || err >= sizeof(uid)) + return acpi_id_valid; + + buddy = acpi_dev_get_first_match_dev(hid, uid, -1); + if (buddy) { + device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS); + } + + return acpi_id_valid; +} + +static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc, + enum mpam_msc_iface *iface) +{ + switch (tbl_msc->interface_type){ + case 0: + *iface = MPAM_IFACE_MMIO; + return 0; + case 1: + *iface = MPAM_IFACE_PCC; + 
return 0; + default: + return -EINVAL; + } +} + +static int __init _parse_table(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct property_entry props[4]; /* needs a sentinel */ + struct acpi_mpam_msc_node *tbl_msc; + int next_res, next_prop, err = 0; + struct acpi_device *companion; + struct platform_device *pdev; + enum mpam_msc_iface iface; + struct resource res[3]; + char uid[16]; + u32 acpi_id; + + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_offset += tbl_msc->length; + + /* + * If any of the reserved fields are set, make no attempt to + * parse the msc structure. This will prevent the driver from + * probing all the MSC, meaning it can't discover the system + * wide supported partid and pmg ranges. This avoids whatever + * this MSC is truncating the partids and creating a screaming + * error interrupt. + */ + if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) + continue; + + if (decode_interface_type(tbl_msc, &iface)) + continue; + + next_res = 0; + next_prop = 0; + memset(res, 0, sizeof(res)); + memset(props, 0, sizeof(props)); + + pdev = platform_device_alloc("mpam_msc", tbl_msc->identifier); + if (IS_ERR(pdev)) { + err = PTR_ERR(pdev); + break; + } + + if (tbl_msc->length < sizeof(*tbl_msc)) { + err = -EINVAL; + break; + } + + /* Some power management is described in the namespace: */ + err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier); + if (err > 0 && err < sizeof(uid)) { + companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1); + if (companion) + ACPI_COMPANION_SET(&pdev->dev, companion); + } + + if (iface == MPAM_IFACE_MMIO) { + res[next_res].name = "MPAM:MSC"; + res[next_res].start = tbl_msc->base_address; + res[next_res].end = tbl_msc->base_address + tbl_msc->mmio_size - 1; + res[next_res].flags = IORESOURCE_MEM; + next_res++; + } else if (iface == MPAM_IFACE_PCC) { + props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel", + tbl_msc->base_address); + next_prop++; + } + + acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res); + err = platform_device_add_resources(pdev, res, next_res); + if (err) + break; + + props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us", + tbl_msc->max_nrdy_usec); + + /* + * The MSC's CPU affinity is described via its linked power + * management device, but only if it points at a Processor or + * Processor Container. 
+ */ + if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id)) { + props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", + acpi_id); + } + + err = device_create_managed_software_node(&pdev->dev, props, + NULL); + if (err) + break; + + /* Come back later if you want the RIS too */ + err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length); + if (err) + break; + + platform_device_add(pdev); + } + + if (err) + platform_device_put(pdev); + + return err; +} + +static struct acpi_table_header *get_table(void) +{ + struct acpi_table_header *table; + acpi_status status; + + if (acpi_disabled || !mpam_cpus_have_feature()) + return NULL; + + status = acpi_get_table(ACPI_SIG_MPAM, 0, &table); + if (ACPI_FAILURE(status)) + return NULL; + + if (table->revision != 1) + return NULL; + + return table; +} + + + +static int __init acpi_mpam_parse(void) +{ + struct acpi_table_header *mpam; + int err; + + mpam = get_table(); + if (!mpam) + return 0; + + err = _parse_table(mpam); + acpi_put_table(mpam); + + return err; +} + +static int _count_msc(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct acpi_mpam_msc_node *tbl_msc; + int ret = 0; + + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + if (tbl_msc->length < sizeof(*tbl_msc)) + return -EINVAL; + + ret++; + + table_offset += tbl_msc->length; + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + } + + return ret; +} + + +int acpi_mpam_count_msc(void) +{ + struct acpi_table_header *mpam; + int ret; + + mpam = get_table(); + if (!mpam) + return 0; + + ret = _count_msc(mpam); + acpi_put_table(mpam); + + return ret; +} + +/* + * Call after ACPI devices have been created, which happens behind acpi_scan_init() + * called from subsys_initcall(). PCC requires the mailbox driver, which is + * initialised from postcore_initcall(). + */ +subsys_initcall_sync(acpi_mpam_parse); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 8ab0a82b4da4..94cb47d740c9 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -566,7 +566,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = { ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI, - ACPI_SIG_NBFT }; + ACPI_SIG_NBFT, ACPI_SIG_MPAM }; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h new file mode 100644 index 000000000000..0f1d3f07e789 --- /dev/null +++ b/include/linux/arm_mpam.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#ifndef __LINUX_ARM_MPAM_H +#define __LINUX_ARM_MPAM_H + +#include +#include + +struct mpam_msc; + +enum mpam_msc_iface { + MPAM_IFACE_MMIO, /* a real MPAM MSC */ + MPAM_IFACE_PCC, /* a fake MPAM MSC */ +}; + +enum mpam_class_types { + MPAM_CLASS_CACHE, /* Well known caches, e.g. L2 */ + MPAM_CLASS_MEMORY, /* Main memory */ + MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */ +}; + +#ifdef CONFIG_ACPI_MPAM +/* Parse the ACPI description of resources entries for this MSC. 
*/ +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc); +int acpi_mpam_count_msc(void); +#else +static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + return -EINVAL; +} +static inline int acpi_mpam_count_msc(void) { return -EINVAL; } +#endif + +static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id) +{ + return -EINVAL; +} + +#endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 931a6af7f73d0a25bfb0b8ea80e498f50e21cc95 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 12 Nov 2021 13:24:35 -0600 Subject: [PATCH 609/953] dt-bindings: arm: Add MPAM MSC binding ANBZ: #8686 commit ce1052e983a1edec65defad8d94af1e263bd80de morse-linux. The binding is designed around the assumption that an MSC will be a sub-block of something else such as a memory controller, cache controller, or IOMMU. However, it's certainly possible a design does not have that association or has a mixture of both, so the binding illustrates how we can support that with RIS child nodes. A key part of MPAM is we need to know about all of the MSCs in the system before it can be enabled. This drives the need for the genericish 'arm,mpam-msc' compatible. Though we can't assume an MSC is accessible until a h/w specific driver potentially enables the h/w. Cc: James Morse Signed-off-by: Rob Herring Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- .../devicetree/bindings/arm/arm,mpam-msc.yaml | 227 ++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml diff --git a/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml new file mode 100644 index 000000000000..9d542ecb1a7d --- /dev/null +++ b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml @@ -0,0 +1,227 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/arm,mpam-msc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm Memory System Resource Partitioning and Monitoring (MPAM) + +description: | + The Arm MPAM specification can be found here: + + https://developer.arm.com/documentation/ddi0598/latest + +maintainers: + - Rob Herring + +properties: + compatible: + items: + - const: arm,mpam-msc # Further details are discoverable + - const: arm,mpam-memory-controller-msc + + reg: + maxItems: 1 + description: A memory region containing registers as defined in the MPAM + specification. + + interrupts: + minItems: 1 + items: + - description: error (optional) + - description: overflow (optional, only for monitoring) + + interrupt-names: + oneOf: + - items: + - enum: [ error, overflow ] + - items: + - const: error + - const: overflow + + arm,not-ready-us: + description: The maximum time in microseconds for monitoring data to be + accurate after a settings change. For more information, see the + Not-Ready (NRDY) bit description in the MPAM specification. + + numa-node-id: true # see NUMA binding + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + '^ris@[0-9a-f]$': + type: object + additionalProperties: false + description: | + RIS nodes for each RIS in an MSC. 
These nodes are required for each RIS + implementing known MPAM controls + + properties: + compatible: + enum: + # Bulk storage for cache + - arm,mpam-cache + # Memory bandwidth + - arm,mpam-memory + + reg: + minimum: 0 + maximum: 0xf + + cpus: + $ref: '/schemas/types.yaml#/definitions/phandle-array' + description: + Phandle(s) to the CPU node(s) this RIS belongs to. By default, the parent + device's affinity is used. + + arm,mpam-device: + $ref: '/schemas/types.yaml#/definitions/phandle' + description: + By default, the MPAM enabled device associated with a RIS is the MSC's + parent node. It is possible for each RIS to be associated with different + devices in which case 'arm,mpam-device' should be used. + + required: + - compatible + - reg + +required: + - compatible + - reg + +dependencies: + interrupts: [ interrupt-names ] + +additionalProperties: false + +examples: + - | + /* + cpus { + cpu@0 { + next-level-cache = <&L2_0>; + }; + cpu@100 { + next-level-cache = <&L2_1>; + }; + }; + */ + L2_0: cache-controller-0 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L2_1: cache-controller-1 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L3: cache-controller@30000000 { + compatible = "arm,dsu-l3-cache", "cache"; + cache-level = <3>; + cache-unified; + + ranges = <0x0 0x30000000 0x800000>; + #address-cells = <1>; + #size-cells = <1>; + + msc@10000 { + compatible = "arm,mpam-msc"; + + /* CPU affinity implied by parent cache node's */ + reg = <0x10000 0x2000>; + interrupts = <1>, <2>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + }; + }; + + mem: memory-controller@20000 { + compatible = "foo,a-memory-controller"; + reg = <0x20000 0x1000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + msc@21000 { + compatible = "arm,mpam-memory-controller-msc", "arm,mpam-msc"; + reg = <0x21000 0x1000>; + interrupts = <3>; + interrupt-names = "error"; + arm,not-ready-us = <1>; + numa-node-id = <1>; + }; + }; + + iommu@40000 { + reg = <0x40000 0x1000>; + + ranges; + #address-cells = <1>; + #size-cells = <1>; + + msc@41000 { + compatible = "arm,mpam-msc"; + reg = <0 0x1000>; + interrupts = <5>, <6>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + ris@2 { + compatible = "arm,mpam-cache"; + reg = <0>; + // TODO: How to map to device(s)? + }; + }; + }; + + msc@80000 { + compatible = "foo,a-standalone-msc"; + reg = <0x80000 0x1000>; + + clocks = <&clks 123>; + + ranges; + #address-cells = <1>; + #size-cells = <1>; + + msc@10000 { + compatible = "arm,mpam-msc"; + + reg = <0x10000 0x2000>; + interrupts = <7>; + interrupt-names = "overflow"; + arm,not-ready-us = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + ris@0 { + compatible = "arm,mpam-cache"; + reg = <0>; + arm,mpam-device = <&L2_0>; + }; + + ris@1 { + compatible = "arm,mpam-memory"; + reg = <1>; + arm,mpam-device = <&mem>; + }; + }; + }; + +... -- Gitee From 21265246218b1b152b60f52e06c1d6a3d39aafef Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 14 Aug 2018 15:03:34 +0100 Subject: [PATCH 610/953] arm_mpam: Add probe/remove for mpam msc driver and kbuild boiler plate ANBZ: #8686 commit 05b457d3d8f70861448547ea67ecaed9d2f44e46 morse-linux. Probing MPAM is convoluted. MSCs that are integrated with a CPU may only be accessible from those CPUs, and they may not be online. 
Touching the hardware early is pointless as MPAM can't be used until the system-wide common values for num_partid and num_pmg have been discovered. Start with driver probe/remove and mapping the MSC. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/Kconfig | 1 + drivers/platform/Kconfig | 2 + drivers/platform/Makefile | 1 + drivers/platform/mpam/Kconfig | 6 + drivers/platform/mpam/Makefile | 1 + drivers/platform/mpam/mpam_devices.c | 355 ++++++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 48 ++++ 7 files changed, 414 insertions(+) create mode 100644 drivers/platform/mpam/Kconfig create mode 100644 drivers/platform/mpam/Makefile create mode 100644 drivers/platform/mpam/mpam_devices.c create mode 100644 drivers/platform/mpam/mpam_internal.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 66b4475c1972..7c09c2580869 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1977,6 +1977,7 @@ config ARM64_TLB_RANGE config ARM64_MPAM bool "Enable support for MPAM" select ACPI_MPAM if ACPI + select ARM_CPU_RESCTRL help Memory Partitioning and Monitoring is an optional extension that allows the CPUs to mark load and store transactions with diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 868b20361769..f26534a4a83b 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -9,6 +9,8 @@ source "drivers/platform/chrome/Kconfig" source "drivers/platform/mellanox/Kconfig" +source "drivers/platform/mpam/Kconfig" + source "drivers/platform/olpc/Kconfig" source "drivers/platform/surface/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 8296d4c41eb7..54ee16e4e4d8 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam/ diff --git a/drivers/platform/mpam/Kconfig b/drivers/platform/mpam/Kconfig new file mode 100644 index 000000000000..13bd86fc5e58 --- /dev/null +++ b/drivers/platform/mpam/Kconfig @@ -0,0 +1,6 @@ +# Confusingly, this is everything but the CPU bits of MPAM. CPU here means +# CPU resources, not containers or cgroups etc. +config ARM_CPU_RESCTRL + bool + depends on ARM64 + select RESCTRL_RMID_DEPENDS_ON_CLOSID diff --git a/drivers/platform/mpam/Makefile b/drivers/platform/mpam/Makefile new file mode 100644 index 000000000000..8ad69bfa2aa2 --- /dev/null +++ b/drivers/platform/mpam/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c new file mode 100644 index 000000000000..885f8b61cb65 --- /dev/null +++ b/drivers/platform/mpam/mpam_devices.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 Arm Ltd. + +#define pr_fmt(fmt) "mpam: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "mpam_internal.h" + +/* + * mpam_list_lock protects the SRCU lists when writing. 
Once the + * mpam_enabled key is enabled these lists are read-only, + * unless the error interrupt disables the driver. + */ +static DEFINE_MUTEX(mpam_list_lock); +static LIST_HEAD(mpam_all_msc); + +static struct srcu_struct mpam_srcu; + +/* MPAM isn't available until all the MSC have been probed. */ +static u32 mpam_num_msc; + +static void mpam_discovery_complete(void) +{ + pr_err("Discovered all MSC\n"); +} + +static int mpam_dt_count_msc(void) +{ + int count = 0; + struct device_node *np; + + for_each_compatible_node(np, NULL, "arm,mpam-msc") + count++; + + return count; +} + +static int mpam_dt_parse_resource(struct mpam_msc *msc, struct device_node *np, + u32 ris_idx) +{ + int err = 0; + u32 level = 0; + unsigned long cache_id; + struct device_node *cache; + + do { + if (of_device_is_compatible(np, "arm,mpam-cache")) { + cache = of_parse_phandle(np, "arm,mpam-device", 0); + if (!cache) { + pr_err("Failed to read phandle\n"); + break; + } + } else if (of_device_is_compatible(np->parent, "cache")) { + cache = np->parent; + } else { + /* For now, only caches are supported */ + cache = NULL; + break; + } + + err = of_property_read_u32(cache, "cache-level", &level); + if (err) { + pr_err("Failed to read cache-level\n"); + break; + } + + cache_id = cache_of_get_id(cache); + if (cache_id == ~0UL) { + err = -ENOENT; + break; + } + + err = mpam_ris_create(msc, ris_idx, MPAM_CLASS_CACHE, level, + cache_id); + } while (0); + of_node_put(cache); + + return err; +} + + +static int mpam_dt_parse_resources(struct mpam_msc *msc, void *ignored) +{ + int err, num_ris = 0; + const u32 *ris_idx_p; + struct device_node *iter, *np; + + np = msc->pdev->dev.of_node; + for_each_child_of_node(np, iter) { + ris_idx_p = of_get_property(iter, "reg", NULL); + if (ris_idx_p) { + num_ris++; + err = mpam_dt_parse_resource(msc, iter, *ris_idx_p); + if (err) { + of_node_put(iter); + return err; + } + } + } + + if (!num_ris) + mpam_dt_parse_resource(msc, np, 0); + + return err; +} + +static int get_msc_affinity(struct mpam_msc *msc) +{ + struct device_node *parent; + u32 affinity_id; + int err; + + if (!acpi_disabled) { + err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity", + &affinity_id); + if (err) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else { + err = acpi_pptt_get_cpus_from_container(affinity_id, + &msc->accessibility); + } + + return err; + } + + /* This depends on the path to of_node */ + parent = of_get_parent(msc->pdev->dev.of_node); + if (parent == of_root) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else { + err = -EINVAL; + pr_err("Cannot determine CPU accessibility of MSC\n"); + } + of_node_put(parent); + + return err; +} + +static int fw_num_msc; + +static void mpam_pcc_rx_callback(struct mbox_client *cl, void *msg) +{ + /* TODO: wake up tasks blocked on this MSC's PCC channel */ +} + +static int mpam_msc_drv_probe(struct platform_device *pdev) +{ + int err; + pgprot_t prot; + void * __iomem io; + struct mpam_msc *msc; + struct resource *msc_res; + void *plat_data = pdev->dev.platform_data; + + mutex_lock(&mpam_list_lock); + do { + msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL); + if (!msc) { + err = -ENOMEM; + break; + } + + INIT_LIST_HEAD_RCU(&msc->glbl_list); + msc->pdev = pdev; + + err = device_property_read_u32(&pdev->dev, "arm,not-ready-us", + &msc->nrdy_usec); + if (err) { + /* This will prevent CSU monitors being usable */ + msc->nrdy_usec = 0; + } + + err = get_msc_affinity(msc); + if (err) + break; + if 
(cpumask_empty(&msc->accessibility)) { + pr_err_once("msc:%u is not accessible from any CPU!", + msc->id); + err = -EINVAL; + break; + } + + mutex_init(&msc->lock); + msc->id = mpam_num_msc++; + INIT_LIST_HEAD_RCU(&msc->ris); + spin_lock_init(&msc->part_sel_lock); + spin_lock_init(&msc->mon_sel_lock); + + if (device_property_read_u32(&pdev->dev, "pcc-channel", + &msc->pcc_subspace_id)) + msc->iface = MPAM_IFACE_MMIO; + else + msc->iface = MPAM_IFACE_PCC; + + if (msc->iface == MPAM_IFACE_MMIO) { + io = devm_platform_get_and_ioremap_resource(pdev, 0, + &msc_res); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + devm_kfree(&pdev->dev, msc); + err = PTR_ERR(io); + break; + } + msc->mapped_hwpage_sz = msc_res->end - msc_res->start; + msc->mapped_hwpage = io; + } else if (msc->iface == MPAM_IFACE_PCC) { + msc->pcc_cl.dev = &pdev->dev; + msc->pcc_cl.rx_callback = mpam_pcc_rx_callback; + msc->pcc_cl.tx_block = false; + msc->pcc_cl.tx_tout = 1000; /* 1s */ + msc->pcc_cl.knows_txdone = false; + + msc->pcc_chan = pcc_mbox_request_channel(&msc->pcc_cl, + msc->pcc_subspace_id); + if (IS_ERR(msc->pcc_chan)) { + pr_err("Failed to request MSC PCC channel\n"); + devm_kfree(&pdev->dev, msc); + err = PTR_ERR(msc->pcc_chan); + break; + } + + prot = __acpi_get_mem_attribute(msc->pcc_chan->shmem_base_addr); + io = ioremap_prot(msc->pcc_chan->shmem_base_addr, + msc->pcc_chan->shmem_size, pgprot_val(prot)); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + pcc_mbox_free_channel(msc->pcc_chan); + devm_kfree(&pdev->dev, msc); + err = PTR_ERR(io); + break; + } + + /* TODO: issue a read to update the registers */ + + msc->mapped_hwpage_sz = msc->pcc_chan->shmem_size; + msc->mapped_hwpage = io + sizeof(struct acpi_pcct_shared_memory); + } + + list_add_rcu(&msc->glbl_list, &mpam_all_msc); + platform_set_drvdata(pdev, msc); + } while (0); + mutex_unlock(&mpam_list_lock); + + if (!err) { + /* Create RIS entries described by firmware */ + if (!acpi_disabled) + err = acpi_mpam_parse_resources(msc, plat_data); + else + err = mpam_dt_parse_resources(msc, plat_data); + } + + if (!err && fw_num_msc == mpam_num_msc) + mpam_discovery_complete(); + + return err; +} + +static int mpam_msc_drv_remove(struct platform_device *pdev) +{ + struct mpam_msc *msc = platform_get_drvdata(pdev); + + if (!msc) + return 0; + + mutex_lock(&mpam_list_lock); + mpam_num_msc--; + platform_set_drvdata(pdev, NULL); + list_del_rcu(&msc->glbl_list); + synchronize_srcu(&mpam_srcu); + mutex_unlock(&mpam_list_lock); + + return 0; +} + +static const struct of_device_id mpam_of_match[] = { + { .compatible = "arm,mpam-msc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mpam_of_match); + +static struct platform_driver mpam_msc_driver = { + .driver = { + .name = "mpam_msc", + .of_match_table = of_match_ptr(mpam_of_match), + }, + .probe = mpam_msc_drv_probe, + .remove = mpam_msc_drv_remove, +}; + +/* + * MSC that are hidden under caches are not created as platform devices + * as there is no cache driver. Caches are also special-cased in + * get_msc_affinity(). 
+ */ +static void mpam_dt_create_foundling_msc(void) +{ + int err; + struct device_node *cache; + + for_each_compatible_node(cache, NULL, "cache") { + err = of_platform_populate(cache, mpam_of_match, NULL, NULL); + if (err) { + pr_err("Failed to create MSC devices under caches\n"); + } + } +} + +static int __init mpam_msc_driver_init(void) +{ + if (!mpam_cpus_have_feature()) + return -EOPNOTSUPP; + + init_srcu_struct(&mpam_srcu); + + if (!acpi_disabled) + fw_num_msc = acpi_mpam_count_msc(); + else + fw_num_msc = mpam_dt_count_msc(); + + if (fw_num_msc <= 0) { + pr_err("No MSC devices found in firmware\n"); + return -EINVAL; + } + + if (acpi_disabled) + mpam_dt_create_foundling_msc(); + + return platform_driver_register(&mpam_msc_driver); +} +subsys_initcall(mpam_msc_driver_init); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h new file mode 100644 index 000000000000..affd7999fcad --- /dev/null +++ b/drivers/platform/mpam/mpam_internal.h @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#ifndef MPAM_INTERNAL_H +#define MPAM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include + +struct mpam_msc +{ + /* member of mpam_all_msc */ + struct list_head glbl_list; + + int id; + struct platform_device *pdev; + + /* Not modified after mpam_is_enabled() becomes true */ + enum mpam_msc_iface iface; + u32 pcc_subspace_id; + struct mbox_client pcc_cl; + struct pcc_mbox_chan *pcc_chan; + u32 nrdy_usec; + cpumask_t accessibility; + + struct mutex lock; + unsigned long ris_idxs[128 / BITS_PER_LONG]; + u32 ris_max; + + /* mpam_msc_ris of this component */ + struct list_head ris; + + /* + * part_sel_lock protects access to the MSC hardware registers that are + * affected by MPAMCFG_PART_SEL. (including the ID registers) + * If needed, take msc->lock first. + */ + spinlock_t part_sel_lock; + void __iomem * mapped_hwpage; + size_t mapped_hwpage_sz; +}; + +#endif /* MPAM_INTERNAL_H */ -- Gitee From fe8bae8778cdee6ff784e83ade5cbe311eabfee8 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 5 May 2021 17:18:41 +0100 Subject: [PATCH 611/953] arm_mpam: Add the class and component structures for ris firmware described ANBZ: #8686 commit 5a666c3b177c179161ce86a695d62d3019f83a58 morse-linux. An MSC is a container of resources, each identified by their RIS index. Some RIS are described by firmware to provide their position in the system. Others are discovered when the driver probes the hardware. To configure a resource it needs to be found by its class, e.g. 'L2'. There are two kinds of grouping, a class is a set of components, which are visible as there are likely to be multiple instances of the L2 cache. struct mpam_components are a set of struct mpam_msc_ris, which are not visible as each L2 cache may be composed of individual slices which need to be configured the same as the hardware is not able to distribute the configuration. Add support for creating and destroying these structures. A gfp is passed as the structure for 'unknown' may need creating if a new RIS entry is discovered when probing the MSC. 
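The lookup path this creates can be sketched as follows; the wrapper is
invented for the example, while mpam_class_get() and
mpam_component_get() are the helpers this patch adds to mpam_devices.c,
called with 'alloc' set so that missing structures are created:

/*
 * Hypothetical wrapper: find or create the component a newly discovered
 * RIS belongs to, e.g. class 'L2 cache', component 'the L2 with id 42'.
 * Callers hold mpam_list_lock, matching the lockdep assertions in the
 * helpers.
 */
static struct mpam_component *
mpam_example_get_comp(u8 level_idx, enum mpam_class_types type,
		      int comp_id, gfp_t gfp)
{
	struct mpam_class *class;

	class = mpam_class_get(level_idx, type, true, gfp);
	if (IS_ERR(class))
		return ERR_CAST(class);

	/* Creates the component on first sight, finds it afterwards */
	return mpam_component_get(class, comp_id, true, gfp);
}
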
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 362 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 52 ++++ include/linux/arm_mpam.h | 7 +- 3 files changed, 412 insertions(+), 9 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 885f8b61cb65..599bd4ee5b00 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -37,11 +36,359 @@ static DEFINE_MUTEX(mpam_list_lock); static LIST_HEAD(mpam_all_msc); -static struct srcu_struct mpam_srcu; +struct srcu_struct mpam_srcu; /* MPAM isn't available until all the MSC have been probed. */ static u32 mpam_num_msc; +/* + * An MSC is a container for resources, each identified by their RIS index. + * Components are a group of RIS that control the same thing. + * Classes are the set components of the same type. + * + * e.g. The set of RIS that make up the L2 are a component. These are sometimes + * termed slices. They should be configured as if they were one MSC. + * + * e.g. The SoC probably has more than one L2, each attached to a distinct set + * of CPUs. All the L2 components are grouped as a class. + * + * When creating an MSC, struct mpam_msc is added to the all mpam_all_msc list, + * then linked via struct mpam_ris to a component and a class. + * The same MSC may exist under different class->component paths, but the RIS + * index will be unique. + */ +LIST_HEAD(mpam_classes); + +static struct mpam_component * +mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp) +{ + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + comp = kzalloc(sizeof(*comp), gfp); + if (!comp) + return ERR_PTR(-ENOMEM); + + comp->comp_id = id; + INIT_LIST_HEAD_RCU(&comp->ris); + /* affinity is updated when ris are added */ + INIT_LIST_HEAD_RCU(&comp->class_list); + comp->class = class; + + list_add_rcu(&comp->class_list, &class->components); + + return comp; +} + +static struct mpam_component * +mpam_component_get(struct mpam_class *class, int id, bool alloc, gfp_t gfp) +{ + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(comp, &class->components, class_list) { + if (comp->comp_id == id) + return comp; + } + + if (!alloc) + return ERR_PTR(-ENOENT); + + return mpam_component_alloc(class, id, gfp); +} + +static struct mpam_class * +mpam_class_alloc(u8 level_idx, enum mpam_class_types type, gfp_t gfp) +{ + struct mpam_class *class; + + lockdep_assert_held(&mpam_list_lock); + + class = kzalloc(sizeof(*class), gfp); + if (!class) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD_RCU(&class->components); + /* affinity is updated when ris are added */ + class->level = level_idx; + class->type = type; + INIT_LIST_HEAD_RCU(&class->classes_list); + + list_add_rcu(&class->classes_list, &mpam_classes); + + return class; +} + +static struct mpam_class * +mpam_class_get(u8 level_idx, enum mpam_class_types type, bool alloc, gfp_t gfp) +{ + bool found = false; + struct mpam_class *class; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + if (class->type == type && class->level == level_idx) { + found = true; + break; + } + } + + if 
(found) + return class; + + if (!alloc) + return ERR_PTR(-ENOENT); + + return mpam_class_alloc(level_idx, type, gfp); +} + +static void mpam_class_destroy(struct mpam_class *class) +{ + lockdep_assert_held(&mpam_list_lock); + + list_del_rcu(&class->classes_list); + synchronize_srcu(&mpam_srcu); + kfree(class); +} + +static void mpam_comp_destroy(struct mpam_component *comp) +{ + struct mpam_class *class = comp->class; + + lockdep_assert_held(&mpam_list_lock); + + list_del_rcu(&comp->class_list); + synchronize_srcu(&mpam_srcu); + kfree(comp); + + if (list_empty(&class->components)) + mpam_class_destroy(class); +} + +/* synchronise_srcu() before freeing ris */ +static void mpam_ris_destroy(struct mpam_msc_ris *ris) +{ + struct mpam_component *comp = ris->comp; + struct mpam_class *class = comp->class; + struct mpam_msc *msc = ris->msc; + + lockdep_assert_held(&mpam_list_lock); + lockdep_assert_preemption_enabled(); + + clear_bit(ris->ris_idx, msc->ris_idxs); + list_del_rcu(&ris->comp_list); + list_del_rcu(&ris->msc_list); + + cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity); + cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity); + + if (list_empty(&comp->ris)) + mpam_comp_destroy(comp); +} + +/* + * There are two ways of reaching a struct mpam_msc_ris. Via the + * class->component->ris, or via the msc. + * When destroying the msc, the other side needs unlinking and cleaning up too. + * synchronise_srcu() before freeing msc. + */ +static void mpam_msc_destroy(struct mpam_msc *msc) +{ + struct mpam_msc_ris *ris, *tmp; + + lockdep_assert_held(&mpam_list_lock); + lockdep_assert_preemption_enabled(); + + list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list) + mpam_ris_destroy(ris); +} + +/* + * The cacheinfo structures are only populated when CPUs are online. + * This helper walks the device tree to include offline CPUs too. + */ +static int get_cpumask_from_cache_id(u32 cache_id, u32 cache_level, + cpumask_t *affinity) +{ + int cpu, err; + u32 iter_level; + int iter_cache_id; + struct device_node *iter; + + if (!acpi_disabled) + return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity); + + for_each_possible_cpu(cpu) { + iter = of_get_cpu_node(cpu, NULL); + if (!iter) { + pr_err("Failed to find cpu%d device node\n", cpu); + return -ENOENT; + } + + while ((iter = of_find_next_cache_node(iter))) { + err = of_property_read_u32(iter, "cache-level", + &iter_level); + if (err || (iter_level != cache_level)) { + of_node_put(iter); + continue; + } + + /* + * get_cpu_cacheinfo_id() isn't ready until sometime + * during device_initcall(). Use cache_of_get_id(). + */ + iter_cache_id = cache_of_get_id(iter); + if (cache_id == ~0UL) { + of_node_put(iter); + continue; + } + + if (iter_cache_id == cache_id) + cpumask_set_cpu(cpu, affinity); + + of_node_put(iter); + } + } + + return 0; +} + + +/* + * cpumask_of_node() only knows about online CPUs. This can't tell us whether + * a class is represented on all possible CPUs. 
+ */ +static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity) +{ + int cpu; + + for_each_possible_cpu(cpu) { + if (node_id == cpu_to_node(cpu)) + cpumask_set_cpu(cpu, affinity); + } +} + +static int get_cpumask_from_cache(struct device_node *cache, + cpumask_t *affinity) +{ + int err; + u32 cache_level; + int cache_id; + + err = of_property_read_u32(cache, "cache-level", &cache_level); + if (err) { + pr_err("Failed to read cache-level from cache node\n"); + return -ENOENT; + } + + cache_id = cache_of_get_id(cache); + if (cache_id == ~0UL) { + pr_err("Failed to calculate cache-id from cache node\n"); + return -ENOENT; + } + + return get_cpumask_from_cache_id(cache_id, cache_level, affinity); +} + +static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity, + enum mpam_class_types type, + struct mpam_class *class, + struct mpam_component *comp) +{ + int err; + + switch (type) { + case MPAM_CLASS_CACHE: + err = get_cpumask_from_cache_id(comp->comp_id, class->level, + affinity); + if (err) + return err; + + if (cpumask_empty(affinity)) + pr_warn_once("%s no CPUs associated with cache node", + dev_name(&msc->pdev->dev)); + + break; + case MPAM_CLASS_MEMORY: + get_cpumask_from_node_id(comp->comp_id, affinity); + if (cpumask_empty(affinity)) + pr_warn_once("%s no CPUs associated with memory node", + dev_name(&msc->pdev->dev)); + break; + case MPAM_CLASS_UNKNOWN: + return 0; + } + + cpumask_and(affinity, affinity, &msc->accessibility); + + return 0; +} + +static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, + int component_id, gfp_t gfp) +{ + int err; + struct mpam_msc_ris *ris; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + if (test_and_set_bit(ris_idx, msc->ris_idxs)) + return -EBUSY; + + ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), gfp); + if (!ris) + return -ENOMEM; + + class = mpam_class_get(class_id, type, true, gfp); + if (IS_ERR(class)) + return PTR_ERR(class); + + comp = mpam_component_get(class, component_id, true, gfp); + if (IS_ERR(comp)) { + if (list_empty(&class->components)) + mpam_class_destroy(class); + return PTR_ERR(comp); + } + + err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp); + if (err) { + if (list_empty(&class->components)) + mpam_class_destroy(class); + return err; + } + + ris->ris_idx = ris_idx; + INIT_LIST_HEAD_RCU(&ris->comp_list); + INIT_LIST_HEAD_RCU(&ris->msc_list); + ris->msc = msc; + ris->comp = comp; + + cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity); + cpumask_or(&class->affinity, &class->affinity, &ris->affinity); + list_add_rcu(&ris->comp_list, &comp->ris); + + return 0; +} + +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id) +{ + int err; + + mutex_lock(&mpam_list_lock); + err = mpam_ris_create_locked(msc, ris_idx, type, class_id, + component_id, GFP_KERNEL); + mutex_unlock(&mpam_list_lock); + + return err; +} + static void mpam_discovery_complete(void) { pr_err("Discovered all MSC\n"); @@ -153,8 +500,14 @@ static int get_msc_affinity(struct mpam_msc *msc) cpumask_copy(&msc->accessibility, cpu_possible_mask); err = 0; } else { - err = -EINVAL; - pr_err("Cannot determine CPU accessibility of MSC\n"); + if (of_device_is_compatible(parent, "cache")) { + err = get_cpumask_from_cache(parent, + &msc->accessibility); + } else { + err = -EINVAL; + pr_err("Cannot determine accessibility of MSC: %s\n", + 
dev_name(&msc->pdev->dev)); + } } of_node_put(parent); @@ -291,6 +644,7 @@ static int mpam_msc_drv_remove(struct platform_device *pdev) mpam_num_msc--; platform_set_drvdata(pdev, NULL); list_del_rcu(&msc->glbl_list); + mpam_msc_destroy(msc); synchronize_srcu(&mpam_srcu); mutex_unlock(&mpam_list_lock); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index affd7999fcad..07d9c70bf1e6 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -11,6 +11,7 @@ #include #include #include +#include struct mpam_msc { @@ -45,4 +46,55 @@ struct mpam_msc size_t mapped_hwpage_sz; }; +struct mpam_class +{ + /* mpam_components in this class */ + struct list_head components; + + cpumask_t affinity; + + u8 level; + enum mpam_class_types type; + + /* member of mpam_classes */ + struct list_head classes_list; +}; + +struct mpam_component +{ + u32 comp_id; + + /* mpam_msc_ris in this component */ + struct list_head ris; + + cpumask_t affinity; + + /* member of mpam_class:components */ + struct list_head class_list; + + /* parent: */ + struct mpam_class *class; +}; + +struct mpam_msc_ris +{ + u8 ris_idx; + + cpumask_t affinity; + + /* member of mpam_component:ris */ + struct list_head comp_list; + + /* member of mpam_msc:ris */ + struct list_head msc_list; + + /* parents: */ + struct mpam_msc *msc; + struct mpam_component *comp; +}; + +/* List of all classes */ +extern struct list_head mpam_classes; +extern struct srcu_struct mpam_srcu; + #endif /* MPAM_INTERNAL_H */ diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 0f1d3f07e789..950ea7049d53 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -34,10 +34,7 @@ static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, static inline int acpi_mpam_count_msc(void) { return -EINVAL; } #endif -static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, - enum mpam_class_types type, u8 class_id, int component_id) -{ - return -EINVAL; -} +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id); #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 7dce62dfc6f6bda548dcfb7d28b016ac63447817 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 13 Dec 2018 11:41:37 +0000 Subject: [PATCH 612/953] arm_mpam: Add MPAM MSC register layout definitions ANBZ: #8686 commit eb95c932eec969d1597429391e526dce2052a3cb morse-linux. Memory Partitioning and Monitoring (MPAM) has memory mapped devices (MSCs) with an identity/configuration page. Add the definitions for these registers as offset within the page(s). Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_internal.h | 254 ++++++++++++++++++++++++++ 1 file changed, 254 insertions(+) diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 07d9c70bf1e6..5d339e4375e2 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -97,4 +97,258 @@ struct mpam_msc_ris extern struct list_head mpam_classes; extern struct srcu_struct mpam_srcu; + +/* + * MPAM MSCs have the following register layout. See: + * Arm Architecture Reference Manual Supplement - Memory System Resource + * Partitioning and Monitoring (MPAM), for Armv8-A. 
DDI 0598A.a + */ +#define MPAM_ARCHITECTURE_V1 0x10 + +/* Memory mapped control pages: */ +/* ID Register offsets in the memory mapped page */ +#define MPAMF_IDR 0x0000 /* features id register */ +#define MPAMF_MSMON_IDR 0x0080 /* performance monitoring features */ +#define MPAMF_IMPL_IDR 0x0028 /* imp-def partitioning */ +#define MPAMF_CPOR_IDR 0x0030 /* cache-portion partitioning */ +#define MPAMF_CCAP_IDR 0x0038 /* cache-capacity partitioning */ +#define MPAMF_MBW_IDR 0x0040 /* mem-bw partitioning */ +#define MPAMF_PRI_IDR 0x0048 /* priority partitioning */ +#define MPAMF_CSUMON_IDR 0x0088 /* cache-usage monitor */ +#define MPAMF_MBWUMON_IDR 0x0090 /* mem-bw usage monitor */ +#define MPAMF_PARTID_NRW_IDR 0x0050 /* partid-narrowing */ +#define MPAMF_IIDR 0x0018 /* implementer id register */ +#define MPAMF_AIDR 0x0020 /* architectural id register */ + +/* Configuration and Status Register offsets in the memory mapped page */ +#define MPAMCFG_PART_SEL 0x0100 /* partid to configure: */ +#define MPAMCFG_CPBM 0x1000 /* cache-portion config */ +#define MPAMCFG_CMAX 0x0108 /* cache-capacity config */ +#define MPAMCFG_MBW_MIN 0x0200 /* min mem-bw config */ +#define MPAMCFG_MBW_MAX 0x0208 /* max mem-bw config */ +#define MPAMCFG_MBW_WINWD 0x0220 /* mem-bw accounting window config */ +#define MPAMCFG_MBW_PBM 0x2000 /* mem-bw portion bitmap config */ +#define MPAMCFG_PRI 0x0400 /* priority partitioning config */ +#define MPAMCFG_MBW_PROP 0x0500 /* mem-bw stride config */ +#define MPAMCFG_INTPARTID 0x0600 /* partid-narrowing config */ + +#define MSMON_CFG_MON_SEL 0x0800 /* monitor selector */ +#define MSMON_CFG_CSU_FLT 0x0810 /* cache-usage monitor filter */ +#define MSMON_CFG_CSU_CTL 0x0818 /* cache-usage monitor config */ +#define MSMON_CFG_MBWU_FLT 0x0820 /* mem-bw monitor filter */ +#define MSMON_CFG_MBWU_CTL 0x0828 /* mem-bw monitor config */ +#define MSMON_CSU 0x0840 /* current cache-usage */ +#define MSMON_CSU_CAPTURE 0x0848 /* last cache-usage value captured */ +#define MSMON_MBWU 0x0860 /* current mem-bw usage value */ +#define MSMON_MBWU_CAPTURE 0x0868 /* last mem-bw value captured */ +#define MSMON_CAPT_EVNT 0x0808 /* signal a capture event */ +#define MPAMF_ESR 0x00F8 /* error status register */ +#define MPAMF_ECR 0x00F0 /* error control register */ + +/* MPAMF_IDR - MPAM features ID register */ +#define MPAMF_IDR_PARTID_MAX GENMASK(15, 0) +#define MPAMF_IDR_PMG_MAX GENMASK(23, 16) +#define MPAMF_IDR_HAS_CCAP_PART BIT(24) +#define MPAMF_IDR_HAS_CPOR_PART BIT(25) +#define MPAMF_IDR_HAS_MBW_PART BIT(26) +#define MPAMF_IDR_HAS_PRI_PART BIT(27) +#define MPAMF_IDR_HAS_EXT BIT(28) +#define MPAMF_IDR_HAS_IMPL_IDR BIT(29) +#define MPAMF_IDR_HAS_MSMON BIT(30) +#define MPAMF_IDR_HAS_PARTID_NRW BIT(31) +#define MPAMF_IDR_HAS_RIS BIT(32) +#define MPAMF_IDR_HAS_EXT_ESR BIT(38) +#define MPAMF_IDR_HAS_ESR BIT(39) +#define MPAMF_IDR_RIS_MAX GENMASK(59, 56) + + +/* MPAMF_MSMON_IDR - MPAM performance monitoring ID register */ +#define MPAMF_MSMON_IDR_MSMON_CSU BIT(16) +#define MPAMF_MSMON_IDR_MSMON_MBWU BIT(17) +#define MPAMF_MSMON_IDR_HAS_LOCAL_CAPT_EVNT BIT(31) + +/* MPAMF_CPOR_IDR - MPAM features cache portion partitioning ID register */ +#define MPAMF_CPOR_IDR_CPBM_WD GENMASK(15, 0) + +/* MPAMF_CCAP_IDR - MPAM features cache capacity partitioning ID register */ +#define MPAMF_CCAP_IDR_CMAX_WD GENMASK(5, 0) + +/* MPAMF_MBW_IDR - MPAM features memory bandwidth partitioning ID register */ +#define MPAMF_MBW_IDR_BWA_WD GENMASK(5, 0) +#define MPAMF_MBW_IDR_HAS_MIN BIT(10) +#define MPAMF_MBW_IDR_HAS_MAX 
BIT(11) +#define MPAMF_MBW_IDR_HAS_PBM BIT(12) +#define MPAMF_MBW_IDR_HAS_PROP BIT(13) +#define MPAMF_MBW_IDR_WINDWR BIT(14) +#define MPAMF_MBW_IDR_BWPBM_WD GENMASK(28, 16) + +/* MPAMF_PRI_IDR - MPAM features priority partitioning ID register */ +#define MPAMF_PRI_IDR_HAS_INTPRI BIT(0) +#define MPAMF_PRI_IDR_INTPRI_0_IS_LOW BIT(1) +#define MPAMF_PRI_IDR_INTPRI_WD GENMASK(9, 4) +#define MPAMF_PRI_IDR_HAS_DSPRI BIT(16) +#define MPAMF_PRI_IDR_DSPRI_0_IS_LOW BIT(17) +#define MPAMF_PRI_IDR_DSPRI_WD GENMASK(25, 20) + +/* MPAMF_CSUMON_IDR - MPAM cache storage usage monitor ID register */ +#define MPAMF_CSUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_CSUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_MBWUMON_IDR - MPAM memory bandwidth usage monitor ID register */ +#define MPAMF_MBWUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_MBWUMON_IDR_RWBW BIT(28) +#define MPAMF_MBWUMON_IDR_LWD BIT(29) +#define MPAMF_MBWUMON_IDR_HAS_LONG BIT(30) +#define MPAMF_MBWUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_PARTID_NRW_IDR - MPAM PARTID narrowing ID register */ +#define MPAMF_PARTID_NRW_IDR_INTPARTID_MAX GENMASK(15, 0) + +/* MPAMF_IIDR - MPAM implementation ID register */ +#define MPAMF_IIDR_PRODUCTID GENMASK(31, 20) +#define MPAMF_IIDR_PRODUCTID_SHIFT 20 +#define MPAMF_IIDR_VARIANT GENMASK(19, 16) +#define MPAMF_IIDR_VARIANT_SHIFT 16 +#define MPAMF_IIDR_REVISON GENMASK(15, 12) +#define MPAMF_IIDR_REVISON_SHIFT 12 +#define MPAMF_IIDR_IMPLEMENTER GENMASK(11, 0) +#define MPAMF_IIDR_IMPLEMENTER_SHIFT 0 + +/* MPAMF_AIDR - MPAM architecture ID register */ +#define MPAMF_AIDR_ARCH_MAJOR_REV GENMASK(7, 4) +#define MPAMF_AIDR_ARCH_MINOR_REV GENMASK(3, 0) + +/* MPAMCFG_PART_SEL - MPAM partition configuration selection register */ +#define MPAMCFG_PART_SEL_PARTID_SEL GENMASK(15, 0) +#define MPAMCFG_PART_SEL_INTERNAL BIT(16) +#define MPAMCFG_PART_SEL_RIS GENMASK(27, 24) + +/* MPAMCFG_CMAX - MPAM cache portion bitmap partition configuration register */ +#define MPAMCFG_CMAX_CMAX GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MIN - MPAM memory minimum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MIN_MIN GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MAX - MPAM memory maximum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MAX_MAX GENMASK(15, 0) +#define MPAMCFG_MBW_MAX_HARDLIM BIT(31) + +/* + * MPAMCFG_MBW_WINWD - MPAM memory bandwidth partitioning window width + * register + */ +#define MPAMCFG_MBW_WINWD_US_FRAC GENMASK(7, 0) +#define MPAMCFG_MBW_WINWD_US_INT GENMASK(23, 8) + + +/* MPAMCFG_PRI - MPAM priority partitioning configuration register */ +#define MPAMCFG_PRI_INTPRI GENMASK(15, 0) +#define MPAMCFG_PRI_DSPRI GENMASK(31, 16) + +/* + * MPAMCFG_MBW_PROP - Memory bandwidth proportional stride partitioning + * configuration register + */ +#define MPAMCFG_MBW_PROP_STRIDEM1 GENMASK(15, 0) +#define MPAMCFG_MBW_PROP_EN BIT(31) + +/* + * MPAMCFG_INTPARTID - MPAM internal partition narrowing configuration register + */ +#define MPAMCFG_INTPARTID_INTPARTID GENMASK(15, 0) +#define MPAMCFG_INTPARTID_INTERNAL BIT(16) + +/* MSMON_CFG_MON_SEL - Memory system performance monitor selection register */ +#define MSMON_CFG_MON_SEL_MON_SEL GENMASK(7, 0) +#define MSMON_CFG_MON_SEL_RIS GENMASK(27, 24) + +/* MPAMF_ESR - MPAM Error Status Register */ +#define MPAMF_ESR_PARTID_OR_MON GENMASK(15, 0) +#define MPAMF_ESR_PMG GENMASK(23, 16) +#define MPAMF_ESR_ERRCODE GENMASK(27, 24) +#define MPAMF_ESR_OVRWR BIT(31) +#define MPAMF_ESR_RIS GENMASK(35, 32) + +/* MPAMF_ECR - MPAM Error Control Register */ +#define 
MPAMF_ECR_INTEN			BIT(0)
+
+/* Error conditions in accessing memory mapped registers */
+#define MPAM_ERRCODE_NONE		0
+#define MPAM_ERRCODE_PARTID_SEL_RANGE	1
+#define MPAM_ERRCODE_REQ_PARTID_RANGE	2
+#define MPAM_ERRCODE_MSMONCFG_ID_RANGE	3
+#define MPAM_ERRCODE_REQ_PMG_RANGE	4
+#define MPAM_ERRCODE_MONITOR_RANGE	5
+#define MPAM_ERRCODE_INTPARTID_RANGE	6
+#define MPAM_ERRCODE_UNEXPECTED_INTERNAL	7
+
+/*
+ * MSMON_CFG_CSU_FLT - Memory system performance monitor configure cache storage
+ *                     usage monitor filter register
+ */
+#define MSMON_CFG_CSU_FLT_PARTID	GENMASK(15, 0)
+#define MSMON_CFG_CSU_FLT_PMG		GENMASK(23, 16)
+
+/*
+ * MSMON_CFG_CSU_CTL - Memory system performance monitor configure cache storage
+ *                     usage monitor control register
+ * MSMON_CFG_MBWU_CTL - Memory system performance monitor configure memory
+ *                      bandwidth usage monitor control register
+ */
+#define MSMON_CFG_x_CTL_TYPE		GENMASK(7, 0)
+#define MSMON_CFG_x_CTL_MATCH_PARTID	BIT(16)
+#define MSMON_CFG_x_CTL_MATCH_PMG	BIT(17)
+#define MSMON_CFG_x_CTL_SCLEN		BIT(19)
+#define MSMON_CFG_x_CTL_SUBTYPE		GENMASK(23, 20)
+#define MSMON_CFG_x_CTL_OFLOW_FRZ	BIT(24)
+#define MSMON_CFG_x_CTL_OFLOW_INTR	BIT(25)
+#define MSMON_CFG_x_CTL_OFLOW_STATUS	BIT(26)
+#define MSMON_CFG_x_CTL_CAPT_RESET	BIT(27)
+#define MSMON_CFG_x_CTL_CAPT_EVNT	GENMASK(30, 28)
+#define MSMON_CFG_x_CTL_EN		BIT(31)
+
+#define MSMON_CFG_MBWU_CTL_TYPE_MBWU	0x42
+
+#define MSMON_CFG_MBWU_CTL_TYPE_CSU	0x43
+
+#define MSMON_CFG_MBWU_CTL_SUBTYPE_NONE		0
+#define MSMON_CFG_MBWU_CTL_SUBTYPE_READ		1
+#define MSMON_CFG_MBWU_CTL_SUBTYPE_WRITE	2
+#define MSMON_CFG_MBWU_CTL_SUBTYPE_BOTH		3
+
+#define MSMON_CFG_MBWU_CTL_SUBTYPE_MAX	3
+#define MSMON_CFG_MBWU_CTL_SUBTYPE_MASK	0x3
+
+/*
+ * MSMON_CFG_MBWU_FLT - Memory system performance monitor configure memory
+ *                      bandwidth usage monitor filter register
+ */
+#define MSMON_CFG_MBWU_FLT_PARTID	GENMASK(15, 0)
+#define MSMON_CFG_MBWU_FLT_PMG		GENMASK(23, 16)
+#define MSMON_CFG_MBWU_FLT_RWBW		GENMASK(31, 30)
+
+/*
+ * MSMON_CSU - Memory system performance monitor cache storage usage monitor
+ *             register
+ * MSMON_CSU_CAPTURE - Memory system performance monitor cache storage usage
+ *                     capture register
+ * MSMON_MBWU - Memory system performance monitor memory bandwidth usage
+ *              monitor register
+ * MSMON_MBWU_CAPTURE - Memory system performance monitor memory bandwidth usage
+ *                      capture register
+ */
+#define MSMON___VALUE		GENMASK(30, 0)
+#define MSMON___NRDY		BIT(31)
+#define MSMON_MBWU_L_VALUE	GENMASK(62, 0)
+/*
+ * MSMON_CAPT_EVNT - Memory system performance monitoring capture event
+ *                   generation register
+ */
+#define MSMON_CAPT_EVNT_NOW	BIT(0)
+
 #endif /* MPAM_INTERNAL_H */
-- 
Gitee

From 93dd604118b85a1d42873a1ea6bafe57c22cec9b Mon Sep 17 00:00:00 2001
From: James Morse <james.morse@arm.com>
Date: Tue, 4 May 2021 18:12:42 +0100
Subject: [PATCH 613/953] arm_mpam: Add cpuhp callbacks to probe MSC hardware

ANBZ: #8686

commit f4d4d6b4ee651ec7573b4bc521ed6ff101ab934f morse-linux.

Because an MSC can only be accessed from the CPUs in its cpu-affinity
set, we need to be running on one of those CPUs to probe the MSC
hardware. Do this work in the cpuhp callback.

Probing the hardware will only happen before MPAM is enabled; walk all
the MSCs and probe those we can reach that haven't already been probed.

Later, enabling MPAM will enable a static key which will allow
mpam_discovery_cpu_online() and its mutex to be skipped.

Enabling a static key will also take the cpuhp lock, so this can't be
done from the cpuhp callback.
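As a sketch (not part of this patch), the dynamic cpuhp registration used
here has this shape; the online callback then runs on each CPU as it comes
up, probing any MSC that CPU can reach:

	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mpam:online",
				      mpam_discovery_cpu_online,
				      mpam_cpu_offline);
	/* state > 0: the dynamically allocated state; state < 0: error */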
Whenever a new MSC has been probed schedule work to test if all the MSCs have now been probed. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 148 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 5 +- 2 files changed, 149 insertions(+), 4 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 599bd4ee5b00..48811c90b78a 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -4,6 +4,7 @@ #define pr_fmt(fmt) "mpam: " fmt #include +#include #include #include #include @@ -21,6 +22,7 @@ #include #include #include +#include #include @@ -41,6 +43,16 @@ struct srcu_struct mpam_srcu; /* MPAM isn't available until all the MSC have been probed. */ static u32 mpam_num_msc; +static int mpam_cpuhp_state; +static DEFINE_MUTEX(mpam_cpuhp_state_lock); + +/* + * mpam is enabled once all devices have been probed from CPU online callbacks, + * scheduled via this work_struct. If access to an MSC depends on a CPU that + * was not brought online at boot, this can happen surprisingly late. + */ +static DECLARE_WORK(mpam_enable_work, &mpam_enable); + /* * An MSC is a container for resources, each identified by their RIS index. * Components are a group of RIS that control the same thing. @@ -59,6 +71,24 @@ static u32 mpam_num_msc; */ LIST_HEAD(mpam_classes); +static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) +{ + WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + return readl_relaxed(msc->mapped_hwpage + reg); +} + +#define mpam_read_partsel_reg(msc, reg) \ +({ \ + u32 ____ret; \ + \ + lockdep_assert_held_once(&msc->part_sel_lock); \ + ____ret = __mpam_read_reg(msc, MPAMF_##reg); \ + \ + ____ret; \ +}) + static struct mpam_component * mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp) { @@ -389,9 +419,81 @@ int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, return err; } -static void mpam_discovery_complete(void) +static int mpam_msc_hw_probe(struct mpam_msc *msc) +{ + u64 idr; + int err; + + lockdep_assert_held(&msc->lock); + + spin_lock(&msc->part_sel_lock); + idr = mpam_read_partsel_reg(msc, AIDR); + if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) { + pr_err_once("%s does not match MPAM architecture v1.0\n", + dev_name(&msc->pdev->dev)); + err = -EIO; + } else { + msc->probed = true; + err = 0; + } + spin_unlock(&msc->part_sel_lock); + + return err; +} + +static int mpam_cpu_online(unsigned int cpu) { - pr_err("Discovered all MSC\n"); + return 0; +} + +/* Before mpam is enabled, try to probe new MSC */ +static int mpam_discovery_cpu_online(unsigned int cpu) +{ + int err = 0; + struct mpam_msc *msc; + bool new_device_probed = false; + + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + mutex_lock(&msc->lock); + if (!msc->probed) + err = mpam_msc_hw_probe(msc); + mutex_unlock(&msc->lock); + + if (!err) + new_device_probed = true; + else + break; // mpam_broken + } + mutex_unlock(&mpam_list_lock); + + if (new_device_probed && !err) + schedule_work(&mpam_enable_work); + + if (err < 0) + return err; + + return mpam_cpu_online(cpu); +} + +static int 
mpam_cpu_offline(unsigned int cpu)
+{
+	return 0;
+}
+
+static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online))
+{
+	mutex_lock(&mpam_cpuhp_state_lock);
+	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mpam:online",
+					     online, mpam_cpu_offline);
+	if (mpam_cpuhp_state <= 0) {
+		pr_err("Failed to register cpuhp callbacks\n");
+		mpam_cpuhp_state = 0;
+	}
+	mutex_unlock(&mpam_cpuhp_state_lock);
+}
+
 static int mpam_dt_count_msc(void)
@@ -628,11 +730,51 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 	}
 
 	if (!err && fw_num_msc == mpam_num_msc)
-		mpam_discovery_complete();
+		mpam_register_cpuhp_callbacks(&mpam_discovery_cpu_online);
 
 	return err;
 }
 
+static void mpam_enable_once(void)
+{
+	mutex_lock(&mpam_cpuhp_state_lock);
+	cpuhp_remove_state(mpam_cpuhp_state);
+	mpam_cpuhp_state = 0;
+	mutex_unlock(&mpam_cpuhp_state_lock);
+
+	mpam_register_cpuhp_callbacks(mpam_cpu_online);
+
+	pr_info("MPAM enabled\n");
+}
+
+/*
+ * Enable mpam once all devices have been probed.
+ * Scheduled by mpam_discovery_cpu_online() once all devices have been created.
+ * Also scheduled when new devices are probed when new CPUs come online.
+ */
+void mpam_enable(struct work_struct *work)
+{
+	static atomic_t once;
+	struct mpam_msc *msc;
+	bool all_devices_probed = true;
+
+	/* Have we probed all the hw devices? */
+	mutex_lock(&mpam_list_lock);
+	list_for_each_entry(msc, &mpam_all_msc, glbl_list) {
+		mutex_lock(&msc->lock);
+		if (!msc->probed)
+			all_devices_probed = false;
+		mutex_unlock(&msc->lock);
+
+		if (!all_devices_probed)
+			break;
+	}
+	mutex_unlock(&mpam_list_lock);
+
+	if (all_devices_probed && !atomic_fetch_inc(&once))
+		mpam_enable_once();
+}
+
 static int mpam_msc_drv_remove(struct platform_device *pdev)
 {
 	struct mpam_msc *msc = platform_get_drvdata(pdev);
diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h
index 5d339e4375e2..d5d567fe57ed 100644
--- a/drivers/platform/mpam/mpam_internal.h
+++ b/drivers/platform/mpam/mpam_internal.h
@@ -30,6 +30,7 @@ struct mpam_msc
 	cpumask_t		accessibility;
 
 	struct mutex		lock;
+	bool			probed;
 	unsigned long		ris_idxs[128 / BITS_PER_LONG];
 	u32			ris_max;
 
@@ -97,6 +98,8 @@ struct mpam_msc_ris
 extern struct list_head mpam_classes;
 extern struct srcu_struct mpam_srcu;
+/* Scheduled work callback to enable mpam once all MSC have been probed */
+void mpam_enable(struct work_struct *work);
 
 /*
  * MPAM MSCs have the following register layout. See:
@@ -196,7 +199,7 @@ extern struct srcu_struct mpam_srcu;
 
 /* MPAMF_MBWUMON_IDR - MPAM memory bandwidth usage monitor ID register */
 #define MPAMF_MBWUMON_IDR_NUM_MON	GENMASK(15, 0)
-#define MPAMF_MBWUMON_IDR_RWBW		BIT(28)
+#define MPAMF_MBWUMON_IDR_HAS_RWBW	BIT(28)
 #define MPAMF_MBWUMON_IDR_LWD		BIT(29)
 #define MPAMF_MBWUMON_IDR_HAS_LONG	BIT(30)
 #define MPAMF_MBWUMON_IDR_HAS_CAPTURE	BIT(31)
-- 
Gitee

From 42e191ea518d01da756a8659b1e7c55c3213c47a Mon Sep 17 00:00:00 2001
From: James Morse <james.morse@arm.com>
Date: Tue, 5 Dec 2023 14:04:33 +0000
Subject: [PATCH 614/953] arm_mpam: Probe MSCs to find the supported
 partid/pmg values

ANBZ: #8686

commit 77115dd523dedacef5a4d3504cc5f41a97dd053c morse-linux.

CPUs can generate traffic with a range of PARTID and PMG values, but each
MSC may have its own maximum size for these fields. Before MPAM can be
used, the driver needs to probe each RIS on each MSC to find the
system-wide smallest value that can be used.

While doing this, RIS entries that firmware didn't describe are created
under MPAM_CLASS_UNKNOWN.
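To illustrate (not part of this patch): each RIS reports its limits in
MPAMF_IDR, and the system-wide values can only shrink as more RIS are
probed:

	u64 idr = mpam_msc_read_idr(msc);
	u16 partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
	u8 pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);

	mpam_partid_max = min(mpam_partid_max, partid_max);
	mpam_pmg_max = min(mpam_pmg_max, pmg_max);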
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/kernel/mpam.c | 11 ++ drivers/platform/mpam/mpam_devices.c | 166 ++++++++++++++++++++++++-- drivers/platform/mpam/mpam_internal.h | 6 + include/linux/arm_mpam.h | 2 + 4 files changed, 178 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c index 346f0273b2c5..02f43334f078 100644 --- a/arch/arm64/kernel/mpam.c +++ b/arch/arm64/kernel/mpam.c @@ -3,6 +3,7 @@ #include +#include #include #include @@ -10,3 +11,13 @@ DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); DEFINE_STATIC_KEY_FALSE(mpam_enabled); DEFINE_PER_CPU(u64, arm64_mpam_default); DEFINE_PER_CPU(u64, arm64_mpam_current); + +static int __init arm64_mpam_register_cpus(void) +{ + u64 mpamidr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); + u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); + u8 pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); + + return mpam_register_requestor(partid_max, pmg_max); +} +arch_initcall(arm64_mpam_register_cpus) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 48811c90b78a..ec22c279cfe9 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +47,15 @@ static u32 mpam_num_msc; static int mpam_cpuhp_state; static DEFINE_MUTEX(mpam_cpuhp_state_lock); +/* + * The smallest common values for any CPU or MSC in the system. + * Generating traffic outside this range will result in screaming interrupts. + */ +u16 mpam_partid_max; +u8 mpam_pmg_max; +static bool partid_max_init, partid_max_published; +static DEFINE_SPINLOCK(partid_max_lock); + /* * mpam is enabled once all devices have been probed from CPU online callbacks, * scheduled via this work_struct. 
If access to an MSC depends on a CPU that @@ -79,6 +89,14 @@ static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) return readl_relaxed(msc->mapped_hwpage + reg); } +static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val) +{ + WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + writel_relaxed(val, msc->mapped_hwpage + reg); +} + #define mpam_read_partsel_reg(msc, reg) \ ({ \ u32 ____ret; \ @@ -89,6 +107,59 @@ static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) ____ret; \ }) +#define mpam_write_partsel_reg(msc, reg, val) \ +({ \ + lockdep_assert_held_once(&msc->part_sel_lock); \ + __mpam_write_reg(msc, MPAMCFG_##reg, val); \ +}) + +static u64 mpam_msc_read_idr(struct mpam_msc *msc) +{ + u64 idr_high = 0, idr_low; + + lockdep_assert_held(&msc->part_sel_lock); + + idr_low = mpam_read_partsel_reg(msc, IDR); + if (FIELD_GET(MPAMF_IDR_HAS_EXT, idr_low)) + idr_high = mpam_read_partsel_reg(msc, IDR + 4); + + return (idr_high << 32) | idr_low; +} + +static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc) +{ + u32 partsel; + + lockdep_assert_held(&msc->part_sel_lock); + + partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) | + FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid); + mpam_write_partsel_reg(msc, PART_SEL, partsel); +} + +int mpam_register_requestor(u16 partid_max, u8 pmg_max) +{ + int err = 0; + + spin_lock(&partid_max_lock); + if (!partid_max_init) { + mpam_partid_max = partid_max; + mpam_pmg_max = pmg_max; + partid_max_init = true; + } else if (!partid_max_published) { + mpam_partid_max = min(mpam_partid_max, partid_max); + mpam_pmg_max = min(mpam_pmg_max, pmg_max); + } else { + /* New requestors can't lower the values */ + if ((partid_max < mpam_partid_max) || (pmg_max < mpam_pmg_max)) + err = -EBUSY; + } + spin_unlock(&partid_max_lock); + + return err; +} +EXPORT_SYMBOL(mpam_register_requestor); + static struct mpam_component * mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp) { @@ -402,6 +473,7 @@ static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx, cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity); cpumask_or(&class->affinity, &class->affinity, &ris->affinity); list_add_rcu(&ris->comp_list, &comp->ris); + list_add_rcu(&ris->msc_list, &msc->ris); return 0; } @@ -419,10 +491,37 @@ int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, return err; } +static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc, + u8 ris_idx) +{ + int err; + struct mpam_msc_ris *ris, *found = ERR_PTR(-ENOENT); + + lockdep_assert_held(&mpam_list_lock); + + if (!test_bit(ris_idx, msc->ris_idxs)) { + err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN, + 0, 0, GFP_ATOMIC); + if (err) + return ERR_PTR(err); + } + + list_for_each_entry(ris, &msc->ris, msc_list) { + if (ris->ris_idx == ris_idx) { + found = ris; + break; + } + } + + return found; +} + static int mpam_msc_hw_probe(struct mpam_msc *msc) { u64 idr; - int err; + u16 partid_max; + u8 ris_idx, pmg_max; + struct mpam_msc_ris *ris; lockdep_assert_held(&msc->lock); @@ -431,14 +530,43 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) { pr_err_once("%s does not match MPAM architecture v1.0\n", dev_name(&msc->pdev->dev)); - err = -EIO; - } else { - msc->probed = true; - err = 0; + spin_unlock(&msc->part_sel_lock); + return -EIO; } + + idr = mpam_msc_read_idr(msc); spin_unlock(&msc->part_sel_lock); + msc->ris_max = 
FIELD_GET(MPAMF_IDR_RIS_MAX, idr); + + /* Use these values so partid/pmg always starts with a valid value */ + msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); + msc->pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); + + for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) { + spin_lock(&msc->part_sel_lock); + __mpam_part_sel(ris_idx, 0, msc); + idr = mpam_msc_read_idr(msc); + spin_unlock(&msc->part_sel_lock); + + partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); + pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); + msc->partid_max = min(msc->partid_max, partid_max); + msc->pmg_max = min(msc->pmg_max, pmg_max); + + ris = mpam_get_or_create_ris(msc, ris_idx); + if (IS_ERR(ris)) { + return PTR_ERR(ris); + } + } - return err; + spin_lock(&partid_max_lock); + mpam_partid_max = min(mpam_partid_max, msc->partid_max); + mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max); + spin_unlock(&partid_max_lock); + + msc->probed = true; + + return 0; } static int mpam_cpu_online(unsigned int cpu) @@ -742,9 +870,18 @@ static void mpam_enable_once(void) mpam_cpuhp_state = 0; mutex_unlock(&mpam_cpuhp_state_lock); + /* + * Once the cpuhp callbacks have been changed, mpam_partid_max can no + * longer change. + */ + spin_lock(&partid_max_lock); + partid_max_published = true; + spin_unlock(&partid_max_lock); + mpam_register_cpuhp_callbacks(mpam_cpu_online); - pr_info("MPAM enabled\n"); + pr_info("MPAM enabled with %u partid and %u pmg\n", + mpam_partid_max + 1, mpam_pmg_max + 1); } /* @@ -828,11 +965,25 @@ static void mpam_dt_create_foundling_msc(void) static int __init mpam_msc_driver_init(void) { + bool mpam_not_available = false; + if (!mpam_cpus_have_feature()) return -EOPNOTSUPP; init_srcu_struct(&mpam_srcu); + /* + * If the MPAM CPU interface is not implemented, or reserved by + * firmware, there is no point touching the rest of the hardware. 
+ */ + spin_lock(&partid_max_lock); + if (!partid_max_init || (!mpam_partid_max && !mpam_pmg_max)) + mpam_not_available = true; + spin_unlock(&partid_max_lock); + + if (mpam_not_available) + return 0; + if (!acpi_disabled) fw_num_msc = acpi_mpam_count_msc(); else @@ -848,4 +999,5 @@ static int __init mpam_msc_driver_init(void) return platform_driver_register(&mpam_msc_driver); } +/* Must occur after arm64_mpam_register_cpus() from arch_initcall() */ subsys_initcall(mpam_msc_driver_init); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index d5d567fe57ed..a7de4a69b9f8 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -31,6 +31,8 @@ struct mpam_msc struct mutex lock; bool probed; + u16 partid_max; + u8 pmg_max; unsigned long ris_idxs[128 / BITS_PER_LONG]; u32 ris_max; @@ -98,6 +100,10 @@ struct mpam_msc_ris extern struct list_head mpam_classes; extern struct srcu_struct mpam_srcu; +/* System wide partid/pmg values */ +extern u16 mpam_partid_max; +extern u8 mpam_pmg_max; + /* Scheduled work callback to enable mpam once all MSC have been probed */ void mpam_enable(struct work_struct *work); diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 950ea7049d53..40e09b4d236b 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -34,6 +34,8 @@ static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, static inline int acpi_mpam_count_msc(void) { return -EINVAL; } #endif +int mpam_register_requestor(u16 partid_max, u8 pmg_max); + int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, enum mpam_class_types type, u8 class_id, int component_id); -- Gitee From bfbe671c653684e760525f53b130c7a01c746fc3 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 26 Jan 2021 17:10:44 +0000 Subject: [PATCH 615/953] arm_mpam: Probe the hardware features resctrl supports ANBZ: #8686 commit f7c52f94545ccc4c0f7882e68985efefa1b8e2ed morse-linux. Expand the probing support with the control and monitor types we can use with resctrl. 
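For illustration only (not part of this patch): each discovered control or
monitor becomes a feature bit in the RIS properties, which later decides
what can be offered to resctrl. Roughly:

	struct mpam_props *props = &ris->props;

	if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) {
		u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR);

		props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features);
		if (props->cpbm_wd)
			mpam_set_feature(mpam_feat_cpor_part, props);
	}

	/* ...later, when building resctrl resources: */
	if (mpam_has_feature(mpam_feat_cpor_part, props))
		/* the class can offer a cache portion bitmap control */;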
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 79 ++++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 50 +++++++++++++++++ 2 files changed, 127 insertions(+), 2 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index ec22c279cfe9..7eeeb44506b1 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -83,7 +83,7 @@ LIST_HEAD(mpam_classes); static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) { - WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz); WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); return readl_relaxed(msc->mapped_hwpage + reg); @@ -91,7 +91,7 @@ static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val) { - WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz); WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); writel_relaxed(val, msc->mapped_hwpage + reg); @@ -516,6 +516,74 @@ static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc, return found; } +static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) +{ + int err; + struct mpam_msc *msc = ris->msc; + struct mpam_props *props = &ris->props; + + lockdep_assert_held(&msc->lock); + lockdep_assert_held(&msc->part_sel_lock); + + /* Cache Portion partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) { + u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR); + + props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features); + if (props->cpbm_wd) + mpam_set_feature(mpam_feat_cpor_part, props); + } + + /* Memory bandwidth partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) { + u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR); + + /* portion bitmap resolution */ + props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features); + if (props->mbw_pbm_bits && + FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features)) + mpam_set_feature(mpam_feat_mbw_part, props); + + props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features); + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features)) + mpam_set_feature(mpam_feat_mbw_max, props); + } + + /* Performance Monitoring */ + if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) { + u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR); + + if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) { + u32 csumonidr, discard; + + /* + * If the firmware max-nrdy-us property is missing, the + * CSU counters can't be used. Should we wait forever? 
+			 */
+			err = device_property_read_u32(&msc->pdev->dev,
+						       "arm,not-ready-us",
+						       &discard);
+
+			csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR);
+			props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr);
+			if (props->num_csu_mon && !err)
+				mpam_set_feature(mpam_feat_msmon_csu, props);
+			else if (props->num_csu_mon)
+				pr_err_once("Counters are not usable because not-ready timeout was not provided by firmware.\n");
+		}
+		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) {
+			u32 mbwumonidr = mpam_read_partsel_reg(msc, MBWUMON_IDR);
+
+			props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumonidr);
+			if (props->num_mbwu_mon)
+				mpam_set_feature(mpam_feat_msmon_mbwu, props);
+
+			if (FIELD_GET(MPAMF_MBWUMON_IDR_HAS_RWBW, mbwumonidr))
+				mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props);
+		}
+	}
+}
+
 static int mpam_msc_hw_probe(struct mpam_msc *msc)
 {
 	u64 idr;
@@ -536,6 +604,7 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc)
 
 	idr = mpam_msc_read_idr(msc);
 	spin_unlock(&msc->part_sel_lock);
+
 	msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr);
 
 	/* Use these values so partid/pmg always starts with a valid value */
@@ -557,6 +626,12 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc)
 		if (IS_ERR(ris)) {
 			return PTR_ERR(ris);
 		}
+		ris->idr = idr;
+
+		spin_lock(&msc->part_sel_lock);
+		__mpam_part_sel(ris_idx, 0, msc);
+		mpam_ris_hw_probe(ris);
+		spin_unlock(&msc->part_sel_lock);
 	}
 
 	spin_lock(&partid_max_lock);
diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h
index a7de4a69b9f8..71e62594876d 100644
--- a/drivers/platform/mpam/mpam_internal.h
+++ b/drivers/platform/mpam/mpam_internal.h
@@ -49,6 +49,54 @@ struct mpam_msc
 	size_t			mapped_hwpage_sz;
 };
 
+/*
+ * When we compact the supported features, we don't care what they are.
+ * Storing them as a bitmap makes life easy.
+ */
+typedef u16 mpam_features_t;
+
+/* Bits for mpam_features_t */
+enum mpam_device_features {
+	mpam_feat_ccap_part = 0,
+	mpam_feat_cpor_part,
+	mpam_feat_mbw_part,
+	mpam_feat_mbw_min,
+	mpam_feat_mbw_max,
+	mpam_feat_mbw_prop,
+	mpam_feat_msmon,
+	mpam_feat_msmon_csu,
+	mpam_feat_msmon_csu_capture,
+	mpam_feat_msmon_mbwu,
+	mpam_feat_msmon_mbwu_capture,
+	mpam_feat_msmon_mbwu_rwbw,
+	mpam_feat_msmon_capt,
+	MPAM_FEATURE_LAST,
+};
+#define MPAM_ALL_FEATURES	((1 << MPAM_FEATURE_LAST) - 1)
+
+struct mpam_props {
+	mpam_features_t		features;
+
+	u16			cpbm_wd;
+	u16			mbw_pbm_bits;
+	u16			bwa_wd;
+	u16			num_csu_mon;
+	u16			num_mbwu_mon;
+};
+
+static inline bool mpam_has_feature(enum mpam_device_features feat,
+				    struct mpam_props *props)
+{
+	return (1 << feat) & props->features;
+}
+
+static inline void mpam_set_feature(enum mpam_device_features feat,
+				    struct mpam_props *props)
+{
+	props->features |= (1 << feat);
+}
+
 struct mpam_class
 {
 	/* mpam_components in this class */
@@ -80,6 +128,8 @@ struct mpam_msc_ris
 {
 	u8	ris_idx;
+	u64	idr;
+	struct mpam_props	props;
 
 	cpumask_t	affinity;
 
 	/* member of mpam_component:ris */
-- 
Gitee

From … Mon Sep 17 00:00:00 2001
From: James Morse <james.morse@arm.com>
Date: Fri, 7 May 2021 12:45:15 +0100
Subject: [PATCH 616/953] arm_mpam: Merge supported features during
 mpam_enable() into mpam_class

ANBZ: #8686

commit b5d7e31197b22fe33c513a3e267e4e1dde94ae18 morse-linux.

To make a decision about whether to expose an mpam class as a resctrl
resource we need to know its overall supported features and properties.

Once we've probed all the resources, we can walk the tree and produce
overall values by merging the bitmaps. This eliminates features that are
only supported by some MSC that make up a component or class.

If bitmap properties are mismatched within a component we cannot support
the mismatched feature.
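As a sketch (not part of this patch), merging one RIS into its class is
effectively:

	cprops->features &= rprops->features;	/* AND the feature bitmaps */

	/* mismatched bitmap widths can't be configured, drop the feature */
	if (cprops->cpbm_wd != rprops->cpbm_wd)
		mpam_clear_feature(mpam_feat_cpor_part, &cprops->features);

	/* counters take the minimum the whole class can guarantee */
	cprops->num_csu_mon = min(cprops->num_csu_mon, rprops->num_csu_mon);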
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 87 +++++++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 8 +++ 2 files changed, 95 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 7eeeb44506b1..f1574f393e1d 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -938,8 +938,95 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) return err; } +/* + * If a resource doesn't match class feature/configuration, do the right thing. + * For 'num' properties we can just take the minimum. + * For properties where the mismatched unused bits would make a difference, we + * nobble the class feature, as we can't configure all the resources. + * e.g. The L3 cache is composed of two resources with 13 and 17 portion + * bitmaps respectively. + */ +static void +__resource_props_mismatch(struct mpam_msc_ris *ris, struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + struct mpam_props *rprops = &ris->props; + + lockdep_assert_held(&mpam_list_lock); /* we modify class */ + + /* Clear missing features */ + cprops->features &= rprops->features; + + /* Clear incompatible features */ + if (cprops->cpbm_wd != rprops->cpbm_wd) + mpam_clear_feature(mpam_feat_cpor_part, &cprops->features); + if (cprops->mbw_pbm_bits != rprops->mbw_pbm_bits) + mpam_clear_feature(mpam_feat_mbw_part, &cprops->features); + + /* bwa_wd is a count of bits, fewer bits means less precision */ + if (cprops->bwa_wd != rprops->bwa_wd) + cprops->bwa_wd = min(cprops->bwa_wd, rprops->bwa_wd); + + /* For num properties, take the minimum */ + if (cprops->num_csu_mon != rprops->num_csu_mon) + cprops->num_csu_mon = min(cprops->num_csu_mon, rprops->num_csu_mon); + if (cprops->num_mbwu_mon != rprops->num_mbwu_mon) + cprops->num_mbwu_mon = min(cprops->num_mbwu_mon, rprops->num_mbwu_mon); +} + +/* + * Copy the first component's first resources's properties and features to the + * class. __resource_props_mismatch() will remove conflicts. + * It is not possible to have a class with no components, or a component with + * no resources. + */ +static void mpam_enable_init_class_features(struct mpam_class *class) +{ + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + comp = list_first_entry_or_null(&class->components, + struct mpam_component, class_list); + if (WARN_ON(!comp)) + return; + + ris = list_first_entry_or_null(&comp->ris, + struct mpam_msc_ris, comp_list); + if (WARN_ON(!ris)) + return; + + class->props = ris->props; +} + +/* Merge all the common resource features into class. 
 */
+static void mpam_enable_merge_features(void)
+{
+	struct mpam_msc_ris *ris;
+	struct mpam_class *class;
+	struct mpam_component *comp;
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	list_for_each_entry(class, &mpam_classes, classes_list) {
+		mpam_enable_init_class_features(class);
+
+		list_for_each_entry(comp, &class->components, class_list) {
+			list_for_each_entry(ris, &comp->ris, comp_list) {
+				__resource_props_mismatch(ris, class);
+
+				class->nrdy_usec = max(class->nrdy_usec,
+						       ris->msc->nrdy_usec);
+			}
+		}
+	}
+}
+
 static void mpam_enable_once(void)
 {
+	mutex_lock(&mpam_list_lock);
+	mpam_enable_merge_features();
+	mutex_unlock(&mpam_list_lock);
+
 	mutex_lock(&mpam_cpuhp_state_lock);
 	cpuhp_remove_state(mpam_cpuhp_state);
 	mpam_cpuhp_state = 0;
diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h
index 71e62594876d..db50f5e40d98 100644
--- a/drivers/platform/mpam/mpam_internal.h
+++ b/drivers/platform/mpam/mpam_internal.h
@@ -97,6 +97,12 @@ static inline void mpam_set_feature(enum mpam_device_features feat,
 	props->features |= (1 << feat);
 }
 
+static inline void mpam_clear_feature(enum mpam_device_features feat,
+				      mpam_features_t *supported)
+{
+	*supported &= ~(1 << feat);
+}
+
 struct mpam_class
 {
 	/* mpam_components in this class */
@@ -105,6 +111,8 @@ struct mpam_class
 
 	cpumask_t	affinity;
 
+	struct mpam_props	props;
+	u32	nrdy_usec;
 	u8	level;
 	enum mpam_class_types	type;
-- 
Gitee

From … Mon Sep 17 00:00:00 2001
From: James Morse <james.morse@arm.com>
Date: Thu, 28 Feb 2019 18:06:57 +0000
Subject: [PATCH 617/953] arm_mpam: Reset MSC controls from cpu hp callbacks

ANBZ: #8686

commit 9216311dfa1ea9b3c8c4a0dc04657392e3b151b0 morse-linux.

When a CPU comes online, it may bring a newly accessible MSC with it.
Only the default partid has its value reset by hardware, and even then
the MSC might not have been reset since its config was previously
dirtied, e.g. by kexec.

Any in-use partid must have its configuration restored, or reset.
In-use partids may be held in caches and evicted later.

MSC are also reset when CPUs are taken offline to cover cases where
firmware doesn't reset the MSC over reboot using UEFI, or kexec where
there is no firmware involvement.

If the configuration for a RIS has not been touched since it was brought
online, it does not need resetting again.

To reset, write the maximum values for all discovered controls.

CC: Rohit Mathew <rohit.mathew@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[ cherry-picked from
  https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ]
Signed-off-by: Shawn Wang <shawnwang@linux.alibaba.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/3010
---
 drivers/platform/mpam/mpam_devices.c  | 126 +++++++++++++++++++++++++-
 drivers/platform/mpam/mpam_internal.h |   3 +
 2 files changed, 128 insertions(+), 1 deletion(-)

diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index f1574f393e1d..e281e6e910e0 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -644,8 +645,115 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc)
 	return 0;
 }
 
+static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd)
+{
+	u32 num_words, msb;
+	u32 bm = ~0;
+	int i;
+
+	lockdep_assert_held(&msc->part_sel_lock);
+
+	/*
+	 * Write all ~0 to all but the last 32bit-word, which may
+	 * have fewer bits...
+	 */
+	num_words = DIV_ROUND_UP(wd, 32);
+	for (i = 0; i < num_words - 1; i++, reg += sizeof(bm))
+		__mpam_write_reg(msc, reg, bm);
+
+	/*
+	 * ...and then the last (maybe) partial 32bit word. When wd is a
+	 * multiple of 32, msb should be 31 to write a full 32bit word.
+ */ + msb = (wd - 1) % 32; + bm = GENMASK(msb , 0); + if (bm) + __mpam_write_reg(msc, reg, bm); +} + +static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid) +{ + struct mpam_msc *msc = ris->msc; + u16 bwa_fract = MPAMCFG_MBW_MAX_MAX; + struct mpam_props *rprops = &ris->props; + + lockdep_assert_held(&msc->lock); + + spin_lock(&msc->part_sel_lock); + __mpam_part_sel(ris->ris_idx, partid, msc); + + if (mpam_has_feature(mpam_feat_cpor_part, rprops)) + mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd); + + if (mpam_has_feature(mpam_feat_mbw_part, rprops)) + mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits); + + if (mpam_has_feature(mpam_feat_mbw_min, rprops)) + mpam_write_partsel_reg(msc, MBW_MIN, 0); + + if (mpam_has_feature(mpam_feat_mbw_max, rprops)) + mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract); + + if (mpam_has_feature(mpam_feat_mbw_prop, rprops)) + mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract); + spin_unlock(&msc->part_sel_lock); +} + +static void mpam_reset_ris(struct mpam_msc_ris *ris) +{ + u16 partid, partid_max; + struct mpam_msc *msc = ris->msc; + + lockdep_assert_held(&msc->lock); + + if (ris->in_reset_state) + return; + + spin_lock(&partid_max_lock); + partid_max = mpam_partid_max; + spin_unlock(&partid_max_lock); + for (partid = 0; partid < partid_max; partid++) + mpam_reset_ris_partid(ris, partid); +} + +static void mpam_reset_msc(struct mpam_msc *msc, bool online) +{ + int idx; + struct mpam_msc_ris *ris; + + lockdep_assert_held(&msc->lock); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &msc->ris, msc_list) { + mpam_reset_ris(ris); + + /* + * Set in_reset_state when coming online. The reset state + * for non-zero partid may be lost while the CPUs are offline. + */ + ris->in_reset_state = online; + } + srcu_read_unlock(&mpam_srcu, idx); +} + static int mpam_cpu_online(unsigned int cpu) { + int idx; + struct mpam_msc *msc; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + if (atomic_fetch_inc(&msc->online_refs) == 0) { + mutex_lock(&msc->lock); + mpam_reset_msc(msc, true); + mutex_unlock(&msc->lock); + } + } + srcu_read_unlock(&mpam_srcu, idx); + return 0; } @@ -684,6 +792,22 @@ static int mpam_discovery_cpu_online(unsigned int cpu) static int mpam_cpu_offline(unsigned int cpu) { + int idx; + struct mpam_msc *msc; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + if (atomic_dec_and_test(&msc->online_refs)) { + mutex_lock(&msc->lock); + mpam_reset_msc(msc, false); + mutex_unlock(&msc->lock); + } + } + srcu_read_unlock(&mpam_srcu, idx); + return 0; } @@ -1043,7 +1167,7 @@ static void mpam_enable_once(void) mpam_register_cpuhp_callbacks(mpam_cpu_online); pr_info("MPAM enabled with %u partid and %u pmg\n", - mpam_partid_max + 1, mpam_pmg_max + 1); + READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1); } /* diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index db50f5e40d98..228d3c286f98 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -5,6 +5,7 @@ #define MPAM_INTERNAL_H #include +#include #include #include #include @@ -28,6 +29,7 @@ struct mpam_msc struct pcc_mbox_chan *pcc_chan; u32 nrdy_usec; cpumask_t accessibility; + atomic_t online_refs; struct mutex lock; bool probed; @@ -140,6 +142,7 @@ struct 
mpam_msc_ris u8 ris_idx; u64 idr; struct mpam_props props; + bool in_reset_state; cpumask_t affinity; -- Gitee From c620f87683aad9a860b5bbe05afa0be827af1044 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 11 May 2021 12:45:16 +0100 Subject: [PATCH 618/953] arm_mpam: Add a helper to touch an MSC from any CPU ANBZ: #8686 commit 92706dc754cdb62ddc9b6366140ec8ba28834f1d morse-linux. Resetting RIS entries from the cpuhp callback is easy as the callback occurs on the correct CPU. This won't be true for any other caller that wants to reset or configure an MSC. Add a helper that schedules the provided function if necessary. Prevent the cpuhp callbacks from changing the MSC state by taking the cpuhp lock. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 40 +++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index e281e6e910e0..9156591fc4b9 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -699,21 +699,49 @@ static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid) spin_unlock(&msc->part_sel_lock); } -static void mpam_reset_ris(struct mpam_msc_ris *ris) +/* + * Called via smp_call_on_cpu() to prevent migration, while still being + * pre-emptible. + */ +static int mpam_reset_ris(void *arg) { u16 partid, partid_max; - struct mpam_msc *msc = ris->msc; - - lockdep_assert_held(&msc->lock); + struct mpam_msc_ris *ris = arg; if (ris->in_reset_state) - return; + return 0; spin_lock(&partid_max_lock); partid_max = mpam_partid_max; spin_unlock(&partid_max_lock); for (partid = 0; partid < partid_max; partid++) mpam_reset_ris_partid(ris, partid); + + return 0; +} + +/* + * Get the preferred CPU for this MSC. If it is accessible from this CPU, + * this CPU is preferred. This can be preempted/migrated, it will only result + * in more work. + */ +static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc) +{ + int cpu = raw_smp_processor_id(); + + if (cpumask_test_cpu(cpu, &msc->accessibility)) + return cpu; + + return cpumask_first_and(&msc->accessibility, cpu_online_mask); +} + +static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg) +{ + lockdep_assert_irqs_enabled(); + lockdep_assert_cpus_held(); + lockdep_assert_held(&msc->lock); + + return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true); } static void mpam_reset_msc(struct mpam_msc *msc, bool online) @@ -725,7 +753,7 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(ris, &msc->ris, msc_list) { - mpam_reset_ris(ris); + mpam_touch_msc(msc, &mpam_reset_ris, ris); /* * Set in_reset_state when coming online. The reset state -- Gitee From 5b126cf67e0e07342d0b2b50d5996050cb24fd1b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 9 Feb 2021 13:46:35 +0000 Subject: [PATCH 619/953] arm_mpam: Extend reset logic to allow devices to be reset any time ANBZ: #8686 commit b213a7cb016cee3b229f33fc16a486a06d43b499 morse-linux. cpuhp callbacks aren't the only time the MSC configuration may need to be reset. Resctrl has an API call to reset a class. If an MPAM error interrupt arrives it indicates the driver has misprogrammed an MSC. 
The safest thing to do is reset all the MSCs and disable MPAM. Add a helper to reset RIS via their class. Call this from mpam_disable(), which can be scheduled from the error interrupt handler. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 9156591fc4b9..6091e87308ba 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1198,6 +1198,40 @@ static void mpam_enable_once(void) READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1); } +static void mpam_reset_class(struct mpam_class *class) +{ + int idx; + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(comp, &class->components, class_list) { + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, mpam_reset_ris, ris); + mutex_unlock(&ris->msc->lock); + ris->in_reset_state = true; + } + } + srcu_read_unlock(&mpam_srcu, idx); +} + +/* + * Called in response to an error IRQ. + * All of MPAMs errors indicate a software bug, restore any modified + * controls to their reset values. + */ +void mpam_disable(void) +{ + int idx; + struct mpam_class *class; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); +} + /* * Enable mpam once all devices have been probed. * Scheduled by mpam_discovery_cpu_online() once all devices have been created. -- Gitee From 5ba3fa8aaef59b2bb6cb782fd800737cb79eea6a Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 8 Feb 2021 13:09:09 +0000 Subject: [PATCH 620/953] arm_mpam: Register and enable IRQs ANBZ: #8686 commit 7da1c7f9d9ef723f829bf44ed96e1fc4a46ef29f morse-linux. Register and enable error IRQs. All the MPAM error interrupts indicate a software bug, e.g. out of range partid. If the error interrupt is ever signalled, attempt to disable MPAM. Only the irq handler accesses the ESR register, so no locking is needed. The work to disable MPAM after an error needs to happen at process context, use a threaded interrupt. There is no support for percpu threaded interrupts, for now schedule the work to be done from the irq handler. Enabling the IRQs in the MSC may involve cross calling to a CPU that can access the MSC. 
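As a rough sketch, the SPI case uses the stock hard-irq/threaded-irq split: the hard handler only touches registers and clears the level-triggered source, and the sleeping teardown runs in the thread. The demo_* names below are illustrative assumptions, not this patch's code:

  #include <linux/device.h>
  #include <linux/interrupt.h>
  #include <linux/io.h>

  #define DEMO_ESR 0xf8 /* assumed error-status register offset */

  struct demo_msc {
          void __iomem *base;
  };

  static irqreturn_t demo_hw_handler(int irq, void *dev_id)
  {
          struct demo_msc *msc = dev_id;
          u32 esr = readl_relaxed(msc->base + DEMO_ESR);

          if (!esr)
                  return IRQ_NONE; /* shared line: not our interrupt */

          /* Clear the level-triggered source, defer the sleeping work */
          writel_relaxed(0, msc->base + DEMO_ESR);
          return IRQ_WAKE_THREAD;
  }

  static irqreturn_t demo_thread_handler(int irq, void *dev_id)
  {
          /* Process context: may take mutexes and disable the driver */
          return IRQ_HANDLED;
  }

  static int demo_register(struct device *dev, int irq, struct demo_msc *msc)
  {
          return devm_request_threaded_irq(dev, irq, demo_hw_handler,
                                           demo_thread_handler, IRQF_SHARED,
                                           "demo:msc:error", msc);
  }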
CC: Rohit Mathew Tested-by: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 311 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 8 + 2 files changed, 309 insertions(+), 10 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 6091e87308ba..ac82c041999d 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -14,6 +14,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -64,6 +67,12 @@ static DEFINE_SPINLOCK(partid_max_lock); */ static DECLARE_WORK(mpam_enable_work, &mpam_enable); +/* + * All mpam error interrupts indicate a software bug. On receipt, disable the + * driver. + */ +static DECLARE_WORK(mpam_broken_work, &mpam_disable); + /* * An MSC is a container for resources, each identified by their RIS index. * Components are a group of RIS that control the same thing. @@ -127,6 +136,24 @@ static u64 mpam_msc_read_idr(struct mpam_msc *msc) return (idr_high << 32) | idr_low; } +static void mpam_msc_zero_esr(struct mpam_msc *msc) +{ + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR); + if (msc->has_extd_esr) + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR + 4); +} + +static u64 mpam_msc_read_esr(struct mpam_msc *msc) +{ + u64 esr_high = 0, esr_low; + + esr_low = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR); + if (msc->has_extd_esr) + esr_high = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR + 4); + + return (esr_high << 32) | esr_low; +} + static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc) { u32 partsel; @@ -622,6 +649,7 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); msc->partid_max = min(msc->partid_max, partid_max); msc->pmg_max = min(msc->pmg_max, pmg_max); + msc->has_extd_esr = FIELD_GET(MPAMF_IDR_HAS_EXT_ESR, idr); ris = mpam_get_or_create_ris(msc, ris_idx); if (IS_ERR(ris)) { @@ -764,6 +792,12 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) srcu_read_unlock(&mpam_srcu, idx); } +static void _enable_percpu_irq(void *_irq) +{ + int *irq = _irq; + enable_percpu_irq(*irq, IRQ_TYPE_NONE); +} + static int mpam_cpu_online(unsigned int cpu) { int idx; @@ -774,11 +808,13 @@ static int mpam_cpu_online(unsigned int cpu) if (!cpumask_test_cpu(cpu, &msc->accessibility)) continue; - if (atomic_fetch_inc(&msc->online_refs) == 0) { - mutex_lock(&msc->lock); + mutex_lock(&msc->lock); + if (msc->reenable_error_ppi) + _enable_percpu_irq(&msc->reenable_error_ppi); + + if (atomic_fetch_inc(&msc->online_refs) == 0) mpam_reset_msc(msc, true); - mutex_unlock(&msc->lock); - } + mutex_unlock(&msc->lock); } srcu_read_unlock(&mpam_srcu, idx); @@ -828,11 +864,13 @@ static int mpam_cpu_offline(unsigned int cpu) if (!cpumask_test_cpu(cpu, &msc->accessibility)) continue; - if (atomic_dec_and_test(&msc->online_refs)) { - mutex_lock(&msc->lock); + mutex_lock(&msc->lock); + if (msc->reenable_error_ppi) + disable_percpu_irq(msc->reenable_error_ppi); + + if (atomic_dec_and_test(&msc->online_refs)) mpam_reset_msc(msc, false); - mutex_unlock(&msc->lock); - } + mutex_unlock(&msc->lock); } srcu_read_unlock(&mpam_srcu, idx); @@ -851,6 +889,50 @@ static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online)) 
mutex_unlock(&mpam_cpuhp_state_lock); } +static int __setup_ppi(struct mpam_msc *msc) +{ + int cpu; + + msc->error_dev_id = alloc_percpu_gfp(struct mpam_msc *, GFP_KERNEL); + if (!msc->error_dev_id) + return -ENOMEM; + + for_each_cpu(cpu, &msc->accessibility) { + struct mpam_msc *empty = *per_cpu_ptr(msc->error_dev_id, cpu); + if (empty != NULL) { + pr_err_once("%s shares PPI with %s!\n", dev_name(&msc->pdev->dev), + dev_name(&empty->pdev->dev)); + return -EBUSY; + } + *per_cpu_ptr(msc->error_dev_id, cpu) = msc; + } + + return 0; +} + +static int mpam_msc_setup_error_irq(struct mpam_msc *msc) +{ + int irq; + + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + return 0; + + /* Allocate and initialise the percpu device pointer for PPI */ + if (irq_is_percpu(irq)) + return __setup_ppi(msc); + + /* sanity check: shared interrupts can be routed anywhere? */ + if (!cpumask_equal(&msc->accessibility, cpu_possible_mask)) { + pr_err_once("msc:%u is a private resource with a shared error interrupt", + msc->id); + return -EINVAL; + } + + return 0; +} + static int mpam_dt_count_msc(void) { int count = 0; @@ -1021,6 +1103,13 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) spin_lock_init(&msc->part_sel_lock); spin_lock_init(&msc->mon_sel_lock); + err = mpam_msc_setup_error_irq(msc); + if (err) { + devm_kfree(&pdev->dev, msc); + msc = ERR_PTR(err); + break; + } + if (device_property_read_u32(&pdev->dev, "pcc-channel", &msc->pcc_subspace_id)) msc->iface = MPAM_IFACE_MMIO; @@ -1173,11 +1262,198 @@ static void mpam_enable_merge_features(void) } } +static char *mpam_errcode_names[16] = { + [0] = "No error", + [1] = "PARTID_SEL_Range", + [2] = "Req_PARTID_Range", + [3] = "MSMONCFG_ID_RANGE", + [4] = "Req_PMG_Range", + [5] = "Monitor_Range", + [6] = "intPARTID_Range", + [7] = "Unexpected_INTERNAL", + [8] = "Undefined_RIS_PART_SEL", + [9] = "RIS_No_Control", + [10] = "Undefined_RIS_MON_SEL", + [11] = "RIS_No_Monitor", + [12 ...
15] = "Reserved" +}; + +static int mpam_enable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(1, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static int mpam_disable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc) +{ + u64 reg; + u16 partid; + u8 errcode, pmg, ris; + + if (WARN_ON_ONCE(!msc) || + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), + &msc->accessibility))) + return IRQ_NONE; + + reg = mpam_msc_read_esr(msc); + + errcode = FIELD_GET(MPAMF_ESR_ERRCODE, reg); + if (!errcode) + return IRQ_NONE; + + /* Clear level triggered irq */ + mpam_msc_zero_esr(msc); + + partid = FIELD_GET(MPAMF_ESR_PARTID_OR_MON, reg); + pmg = FIELD_GET(MPAMF_ESR_PMG, reg); + ris = FIELD_GET(MPAMF_ESR_RIS, reg); + + pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n", + msc->id, mpam_errcode_names[errcode], partid, pmg, ris); + + if (irq_is_percpu(irq)) { + mpam_disable_msc_ecr(msc); + schedule_work(&mpam_broken_work); + return IRQ_HANDLED; + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t mpam_ppi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = *(struct mpam_msc **)dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static irqreturn_t mpam_spi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static irqreturn_t mpam_disable_thread(int irq, void *dev_id); + +static int mpam_register_irqs(void) +{ + int err, irq; + struct mpam_msc *msc; + + lockdep_assert_cpus_held(); + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + /* The MPAM spec says the interrupt can be SPI, PPI or LPI */ + /* We anticipate sharing the interrupt with other MSCs */ + if (irq_is_percpu(irq)) { + err = request_percpu_irq(irq, &mpam_ppi_handler, + "mpam:msc:error", + msc->error_dev_id); + if (err) + return err; + + mutex_lock(&msc->lock); + msc->reenable_error_ppi = irq; + smp_call_function_many(&msc->accessibility, + &_enable_percpu_irq, &irq, + true); + mutex_unlock(&msc->lock); + } else { + err = devm_request_threaded_irq(&msc->pdev->dev, irq, + &mpam_spi_handler, + &mpam_disable_thread, + IRQF_SHARED, + "mpam:msc:error", msc); + if (err) + return err; + } + + mutex_lock(&msc->lock); + msc->error_irq_requested = true; + mpam_touch_msc(msc, mpam_enable_msc_ecr, msc); + msc->error_irq_hw_enabled = true; + mutex_unlock(&msc->lock); + } + + return 0; +} + +static void mpam_unregister_irqs(void) +{ + int irq; + struct mpam_msc *msc; + + cpus_read_lock(); + /* take the lock as free_irq() can sleep */ + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + mutex_lock(&msc->lock); + if (msc->error_irq_hw_enabled) { + mpam_touch_msc(msc, mpam_disable_msc_ecr, msc); + msc->error_irq_hw_enabled = false; + } + + if (msc->error_irq_requested) { + if (irq_is_percpu(irq)) { + msc->reenable_error_ppi = 0; + free_percpu_irq(irq, msc->error_dev_id); + } else { + devm_free_irq(&msc->pdev->dev, irq, msc); + } + msc->error_irq_requested = false; + } + mutex_unlock(&msc->lock); + } + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); +} + static void mpam_enable_once(void) { + int err; + + /* + * If all the MSC have
been probed, enabling the IRQs happens next. + * That involves cross-calling to a CPU that can reach the MSC, and + * the locks must be taken in this order: + */ + cpus_read_lock(); mutex_lock(&mpam_list_lock); mpam_enable_merge_features(); + + err = mpam_register_irqs(); + if (err) + pr_warn("Failed to register irqs: %d\n", err); + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); + + if (err) { + schedule_work(&mpam_broken_work); + return; + } mutex_lock(&mpam_cpuhp_state_lock); cpuhp_remove_state(mpam_cpuhp_state); @@ -1221,15 +1497,31 @@ static void mpam_reset_class(struct mpam_class *class) * All of MPAMs errors indicate a software bug, restore any modified * controls to their reset values. */ -void mpam_disable(void) +static irqreturn_t mpam_disable_thread(int irq, void *dev_id) { int idx; struct mpam_class *class; + mutex_lock(&mpam_cpuhp_state_lock); + if (mpam_cpuhp_state) { + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + } + mutex_unlock(&mpam_cpuhp_state_lock); + + mpam_unregister_irqs(); + idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(class, &mpam_classes, classes_list) mpam_reset_class(class); srcu_read_unlock(&mpam_srcu, idx); + + return IRQ_HANDLED; +} + +void mpam_disable(struct work_struct *ignored) +{ + mpam_disable_thread(0, NULL); } /* @@ -1243,7 +1535,6 @@ void mpam_enable(struct work_struct *work) struct mpam_msc *msc; bool all_devices_probed = true; - /* Have we probed all the hw devices? */ mutex_lock(&mpam_list_lock); list_for_each_entry(msc, &mpam_all_msc, glbl_list) { mutex_lock(&msc->lock); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 228d3c286f98..b58d031d34e9 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -29,10 +29,17 @@ struct mpam_msc struct pcc_mbox_chan *pcc_chan; u32 nrdy_usec; cpumask_t accessibility; + bool has_extd_esr; + + int reenable_error_ppi; + struct mpam_msc * __percpu *error_dev_id; + atomic_t online_refs; struct mutex lock; bool probed; + bool error_irq_requested; + bool error_irq_hw_enabled; u16 partid_max; u8 pmg_max; unsigned long ris_idxs[128 / BITS_PER_LONG]; @@ -167,6 +174,7 @@ extern u8 mpam_pmg_max; /* Scheduled work callback to enable mpam once all MSC have been probed */ void mpam_enable(struct work_struct *work); +void mpam_disable(struct work_struct *work); /* * MPAM MSCs have the following register layout. See: -- Gitee From e066f1ab8a633dac4644aaf15e152b25beef939e Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 13 May 2021 15:21:13 +0100 Subject: [PATCH 621/953] arm_mpam: Use the arch static key to indicate when mpam is enabled ANBZ: #8686 commit 08d5c2c036ad3116c897722813c80f522e652334 morse-linux. Once all the MSC have been probed, the system wide usable number of PARTID is known and the configuration arrays can be allocated. After this point, checking all the MSC have been probed is pointless, and the cpuhp callbacks should restore the configuration, instead of just resetting the MSC. Enable the architecture's static key that indicates whether mpam is enabled and use this to skip the discovery work on cpu hotplug. 
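For illustration, the enable-once pattern with a static key is roughly the sketch below; the demo_* names are placeholders (mpam_enabled is the real key this patch declares). The branch is patched at runtime, so the hotplug path pays nothing for the check once the key is flipped:

  #include <linux/jump_label.h>

  DEFINE_STATIC_KEY_FALSE(demo_enabled);

  static inline bool demo_is_enabled(void)
  {
          return static_branch_likely(&demo_enabled);
  }

  static int demo_discovery_cpu_online(unsigned int cpu)
  {
          if (demo_is_enabled())
                  return 0; /* discovery already done, skip it */

          /* ... one-time device discovery ... */
          return 0;
  }

  static void demo_enable_once(void)
  {
          /* ... all devices probed, global limits known ... */
          static_branch_enable(&demo_enabled);
  }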
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 6 ++++++ drivers/platform/mpam/mpam_internal.h | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index ac82c041999d..04e1dfd1c5e3 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -828,6 +828,9 @@ static int mpam_discovery_cpu_online(unsigned int cpu) struct mpam_msc *msc; bool new_device_probed = false; + if (mpam_is_enabled()) + return 0; + mutex_lock(&mpam_list_lock); list_for_each_entry(msc, &mpam_all_msc, glbl_list) { if (!cpumask_test_cpu(cpu, &msc->accessibility)) @@ -1468,6 +1471,7 @@ static void mpam_enable_once(void) partid_max_published = true; spin_unlock(&partid_max_lock); + static_branch_enable(&mpam_enabled); mpam_register_cpuhp_callbacks(mpam_cpu_online); pr_info("MPAM enabled with %u partid and %u pmg\n", @@ -1509,6 +1513,8 @@ static irqreturn_t mpam_disable_thread(int irq, void *dev_id) } mutex_unlock(&mpam_cpuhp_state_lock); + static_branch_disable(&mpam_enabled); + mpam_unregister_irqs(); idx = srcu_read_lock(&mpam_srcu); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index b58d031d34e9..bc97d569a2bd 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -8,12 +8,20 @@ #include #include #include +#include #include #include #include #include #include +DECLARE_STATIC_KEY_FALSE(mpam_enabled); + +static inline bool mpam_is_enabled(void) +{ + return static_branch_likely(&mpam_enabled); +} + struct mpam_msc { /* member of mpam_all_msc */ -- Gitee From 4ca1716d49fa79d6d0a034a2bb9026e78ad6f5b2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 10 Feb 2021 18:11:20 +0000 Subject: [PATCH 622/953] arm_mpam: Allow configuration to be applied and restored during cpu online ANBZ: #8686 commit 7b3c212b6f49f3f9011a8870604f95716d6b1f22 morse-linux. When CPUs come online the original configuration should be restored. Once the maximum partid is known, allocate a configuration array for each component, and reprogram each RIS configuration from this. The MPAM spec describes how multiple controls can interact. To prevent this happening by accident, always reset controls that don't have a valid configuration. This allows the same helper to be used for configuration and reset.
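A minimal sketch of that valid-or-reset rule follows, with invented demo_* names: one entry per partid, and an empty features bitmap means nothing is valid, so the reset value gets programmed:

  #include <linux/bits.h>
  #include <linux/slab.h>

  #define DEMO_FEAT_CPBM BIT(0) /* illustrative feature bit */

  struct demo_config {
          unsigned long features; /* which fields below are valid */
          u32 cpbm;
  };

  struct demo_component {
          struct demo_config *cfg; /* indexed by partid */
  };

  static int demo_alloc_cfg(struct demo_component *comp, u16 num_partid)
  {
          comp->cfg = kcalloc(num_partid, sizeof(*comp->cfg), GFP_KERNEL);
          return comp->cfg ? 0 : -ENOMEM;
  }

  /* One helper serves both configuration and reset: a partid with no
   * valid configuration is programmed with the reset value instead. */
  static u32 demo_value_for_partid(struct demo_component *comp, u16 partid,
                                   u32 reset_cpbm)
  {
          struct demo_config *cfg = &comp->cfg[partid];

          if (cfg->features & DEMO_FEAT_CPBM)
                  return cfg->cpbm;
          return reset_cpbm;
  }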
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 193 +++++++++++++++++++++++--- drivers/platform/mpam/mpam_internal.h | 24 +++- 2 files changed, 192 insertions(+), 25 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 04e1dfd1c5e3..7da57310d52c 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -699,51 +699,89 @@ static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) __mpam_write_reg(msc, reg, bm); } -static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid) +static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, + struct mpam_config *cfg) { struct mpam_msc *msc = ris->msc; u16 bwa_fract = MPAMCFG_MBW_MAX_MAX; struct mpam_props *rprops = &ris->props; - lockdep_assert_held(&msc->lock); - spin_lock(&msc->part_sel_lock); __mpam_part_sel(ris->ris_idx, partid, msc); - if (mpam_has_feature(mpam_feat_cpor_part, rprops)) - mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd); + if (mpam_has_feature(mpam_feat_cpor_part, rprops)) { + if (mpam_has_feature(mpam_feat_cpor_part, cfg)) + mpam_write_partsel_reg(msc, CPBM, cfg->cpbm); + else + mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, + rprops->cpbm_wd); + } - if (mpam_has_feature(mpam_feat_mbw_part, rprops)) - mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits); + if (mpam_has_feature(mpam_feat_mbw_part, rprops)) { + if (mpam_has_feature(mpam_feat_mbw_part, cfg)) + mpam_write_partsel_reg(msc, MBW_PBM, cfg->mbw_pbm); + else + mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, + rprops->mbw_pbm_bits); + } if (mpam_has_feature(mpam_feat_mbw_min, rprops)) mpam_write_partsel_reg(msc, MBW_MIN, 0); - if (mpam_has_feature(mpam_feat_mbw_max, rprops)) - mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract); + if (mpam_has_feature(mpam_feat_mbw_max, rprops)) { + if (mpam_has_feature(mpam_feat_mbw_max, cfg)) + mpam_write_partsel_reg(msc, MBW_MAX, cfg->mbw_max); + else + mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract); + } if (mpam_has_feature(mpam_feat_mbw_prop, rprops)) mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract); spin_unlock(&msc->part_sel_lock); } +struct reprogram_ris { + struct mpam_msc_ris *ris; + struct mpam_config *cfg; +}; + +/* Call with MSC lock held */ +static int mpam_reprogram_ris(void *_arg) +{ + u16 partid, partid_max; + struct reprogram_ris *arg = _arg; + struct mpam_msc_ris *ris = arg->ris; + struct mpam_config *cfg = arg->cfg; + + if (ris->in_reset_state) + return 0; + + spin_lock(&partid_max_lock); + partid_max = mpam_partid_max; + spin_unlock(&partid_max_lock); + for (partid = 0; partid < partid_max; partid++) + mpam_reprogram_ris_partid(ris, partid, cfg); + + return 0; +} + /* * Called via smp_call_on_cpu() to prevent migration, while still being * pre-emptible. 
*/ static int mpam_reset_ris(void *arg) { - u16 partid, partid_max; struct mpam_msc_ris *ris = arg; + struct reprogram_ris reprogram_arg; + struct mpam_config empty_cfg = { 0 }; if (ris->in_reset_state) return 0; - spin_lock(&partid_max_lock); - partid_max = mpam_partid_max; - spin_unlock(&partid_max_lock); - for (partid = 0; partid < partid_max; partid++) - mpam_reset_ris_partid(ris, partid); + reprogram_arg.ris = ris; + reprogram_arg.cfg = &empty_cfg; + + mpam_reprogram_ris(&reprogram_arg); return 0; } @@ -792,6 +830,37 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) srcu_read_unlock(&mpam_srcu, idx); } +static void mpam_reprogram_msc(struct mpam_msc *msc) +{ + int idx; + u16 partid; + bool reset; + struct mpam_config *cfg; + struct mpam_msc_ris *ris; + + lockdep_assert_held(&msc->lock); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &msc->ris, msc_list) { + if (!mpam_is_enabled() && !ris->in_reset_state) { + mpam_touch_msc(msc, &mpam_reset_ris, ris); + ris->in_reset_state = true; + continue; + } + + reset = true; + for (partid = 0; partid < mpam_partid_max; partid++) { + cfg = &ris->comp->cfg[partid]; + if (cfg->features) + reset = false; + + mpam_reprogram_ris_partid(ris, partid, cfg); + } + ris->in_reset_state = reset; + } + srcu_read_unlock(&mpam_srcu, idx); +} + static void _enable_percpu_irq(void *_irq) { int *irq = _irq; @@ -813,7 +882,7 @@ static int mpam_cpu_online(unsigned int cpu) _enable_percpu_irq(&msc->reenable_error_ppi); if (atomic_fetch_inc(&msc->online_refs) == 0) - mpam_reset_msc(msc, true); + mpam_reprogram_msc(msc); mutex_unlock(&msc->lock); } srcu_read_unlock(&mpam_srcu, idx); @@ -1433,6 +1502,37 @@ static void mpam_unregister_irqs(void) cpus_read_unlock(); } +static int __allocate_component_cfg(struct mpam_component *comp) +{ + if (comp->cfg) + return 0; + + comp->cfg = kcalloc(mpam_partid_max, sizeof(*comp->cfg), GFP_KERNEL); + if (!comp->cfg) + return -ENOMEM; + + return 0; +} + +static int mpam_allocate_config(void) +{ + int err = 0; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + list_for_each_entry(comp, &class->components, class_list) { + err = __allocate_component_cfg(comp); + if (err) + return err; + } + } + + return 0; +} + static void mpam_enable_once(void) { int err; @@ -1444,12 +1544,21 @@ static void mpam_enable_once(void) */ cpus_read_lock(); mutex_lock(&mpam_list_lock); - mpam_enable_merge_features(); + do { + mpam_enable_merge_features(); - err = mpam_register_irqs(); - if (err) - pr_warn("Failed to register irqs: %d\n", err); + err = mpam_allocate_config(); + if (err) { + pr_err("Failed to allocate configuration arrays.\n"); + break; + } + err = mpam_register_irqs(); + if (err) { + pr_warn("Failed to register irqs: %d\n", err); + break; + } + } while (0); mutex_unlock(&mpam_list_lock); cpus_read_unlock(); @@ -1486,6 +1595,8 @@ static void mpam_reset_class(struct mpam_class *class) idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(comp, &class->components, class_list) { + memset(comp->cfg, 0, (mpam_partid_max * sizeof(*comp->cfg))); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { mutex_lock(&ris->msc->lock); mpam_touch_msc(ris->msc, mpam_reset_ris, ris); @@ -1575,6 +1686,48 @@ static int mpam_msc_drv_remove(struct platform_device *pdev) return 0; } +struct mpam_write_config_arg { + struct mpam_msc_ris *ris; + struct mpam_component *comp; + u16 partid; +}; + +static int 
__write_config(void *arg) +{ + struct mpam_write_config_arg *c = arg; + + mpam_reprogram_ris_partid(c->ris, c->partid, &c->comp->cfg[c->partid]); + + return 0; +} + +/* TODO: split into write_config/sync_config */ +/* TODO: add config_dirty bitmap to drive sync_config */ +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg) +{ + struct mpam_write_config_arg arg; + struct mpam_msc_ris *ris; + int idx; + + lockdep_assert_cpus_held(); + + comp->cfg[partid] = *cfg; + arg.comp = comp; + arg.partid = partid; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + arg.ris = ris; + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, __write_config, &arg); + mutex_unlock(&ris->msc->lock); + } + srcu_read_unlock(&mpam_srcu, idx); + + return 0; +} + static const struct of_device_id mpam_of_match[] = { { .compatible = "arm,mpam-msc", }, {}, diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index bc97d569a2bd..fa9386263dd5 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -102,11 +102,7 @@ struct mpam_props u16 num_mbwu_mon; }; -static inline bool mpam_has_feature(enum mpam_device_features feat, - struct mpam_props *props) -{ - return (1<<feat) & props->features; -} +#define mpam_has_feature(_feat, x) ((1<<_feat) & (x)->features) static inline void mpam_set_feature(enum mpam_device_features feat, struct mpam_props *props) @@ -136,6 +132,15 @@ struct mpam_class struct list_head classes_list; }; +struct mpam_config { + /* Which configuration values are valid. 0 is used for reset */ + mpam_features_t features; + + u32 cpbm; + u32 mbw_pbm; + u16 mbw_max; +}; + struct mpam_component { u32 comp_id; @@ -145,6 +150,12 @@ struct mpam_component cpumask_t affinity; + /* + * Array of configuration values, indexed by partid. + * Read from cpuhp callbacks, hold the cpuhp lock when writing. + */ + struct mpam_config *cfg; + /* member of mpam_class:components */ struct list_head class_list; @@ -184,6 +195,9 @@ extern u8 mpam_pmg_max; void mpam_enable(struct work_struct *work); void mpam_disable(struct work_struct *work); +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg); + /* * MPAM MSCs have the following register layout. See: * Arm Architecture Reference Manual Supplement - Memory System Resource -- Gitee From b035382f8ce68ed444af3978773f9a7b3a8ed77e Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 28 Feb 2019 18:57:21 +0000 Subject: [PATCH 623/953] arm_mpam: Probe and reset the rest of the features ANBZ: #8686 commit 137b0cba6abe9f249f25b0d5f32a693dffc89b9a morse-linux. MPAM supports more features than are going to be exposed to resctrl. For partid other than 0, the reset values of these controls aren't known. Discover the rest of the features so they can be reset to avoid any side effects when resctrl is in use. PARTID narrowing allows MSC/RIS to support less configuration space than is usable. If this feature is found on a class of device we are likely to use, then reduce the partid_max to make it usable. This allows us to map a PARTID to itself.
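The clamp itself is tiny; roughly, with invented demo_* names:

  #include <linux/minmax.h>
  #include <linux/types.h>

  /* If a RIS only implements intpartid_max internal IDs, clamping the
   * MSC's partid_max to it lets every PARTID map 1:1 onto an internal
   * PARTID, so no narrowing table needs to be managed at runtime. */
  static u16 demo_clamp_partid_max(u16 msc_partid_max, bool has_nrw,
                                   u16 intpartid_max)
  {
          if (has_nrw)
                  return min(msc_partid_max, intpartid_max);
          return msc_partid_max;
  }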
CC: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 91 +++++++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 10 ++- 2 files changed, 100 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 7da57310d52c..612c4b97d373 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -549,10 +549,20 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) int err; struct mpam_msc *msc = ris->msc; struct mpam_props *props = &ris->props; + struct mpam_class *class = ris->comp->class; lockdep_assert_held(&msc->lock); lockdep_assert_held(&msc->part_sel_lock); + /* Cache Capacity Partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_CCAP_PART, ris->idr)) { + u32 ccap_features = mpam_read_partsel_reg(msc, CCAP_IDR); + + props->cmax_wd = FIELD_GET(MPAMF_CCAP_IDR_CMAX_WD, ccap_features); + if (props->cmax_wd) + mpam_set_feature(mpam_feat_ccap_part, props); + } + /* Cache Portion partitioning */ if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) { u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR); @@ -575,6 +585,31 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features); if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features)) mpam_set_feature(mpam_feat_mbw_max, props); + + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MIN, mbw_features)) + mpam_set_feature(mpam_feat_mbw_min, props); + + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_PROP, mbw_features)) + mpam_set_feature(mpam_feat_mbw_prop, props); + } + + /* Priority partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_PRI_PART, ris->idr)) { + u32 pri_features = mpam_read_partsel_reg(msc, PRI_IDR); + + props->intpri_wd = FIELD_GET(MPAMF_PRI_IDR_INTPRI_WD, pri_features); + if (props->intpri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_INTPRI, pri_features)) { + mpam_set_feature(mpam_feat_intpri_part, props); + if (FIELD_GET(MPAMF_PRI_IDR_INTPRI_0_IS_LOW, pri_features)) + mpam_set_feature(mpam_feat_intpri_part_0_low, props); + } + + props->dspri_wd = FIELD_GET(MPAMF_PRI_IDR_DSPRI_WD, pri_features); + if (props->dspri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_DSPRI, pri_features)) { + mpam_set_feature(mpam_feat_dspri_part, props); + if (FIELD_GET(MPAMF_PRI_IDR_DSPRI_0_IS_LOW, pri_features)) + mpam_set_feature(mpam_feat_dspri_part_0_low, props); + } } /* Performance Monitoring */ @@ -610,6 +645,21 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props); } } + + /* + * RIS with PARTID narrowing don't have enough storage for one + * configuration per PARTID. If these are in a class we could use, + * reduce the supported partid_max to match the number of intpartid. + * If the class is unknown, just ignore it.
+ */ + if (FIELD_GET(MPAMF_IDR_HAS_PARTID_NRW, ris->idr) && + class->type != MPAM_CLASS_UNKNOWN) { + u32 nrwidr = mpam_read_partsel_reg(msc, PARTID_NRW_IDR); + u16 partid_max = FIELD_GET(MPAMF_PARTID_NRW_IDR_INTPARTID_MAX, nrwidr); + + mpam_set_feature(mpam_feat_partid_nrw, props); + msc->partid_max = min(msc->partid_max, partid_max); + } } static int mpam_msc_hw_probe(struct mpam_msc *msc) @@ -702,13 +752,21 @@ static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, struct mpam_config *cfg) { + u32 pri_val = 0; + u16 cmax = MPAMCFG_CMAX_CMAX; struct mpam_msc *msc = ris->msc; u16 bwa_fract = MPAMCFG_MBW_MAX_MAX; struct mpam_props *rprops = &ris->props; + u16 dspri = GENMASK(rprops->dspri_wd, 0); + u16 intpri = GENMASK(rprops->intpri_wd, 0); spin_lock(&msc->part_sel_lock); __mpam_part_sel(ris->ris_idx, partid, msc); + if (mpam_has_feature(mpam_feat_partid_nrw, rprops)) + mpam_write_partsel_reg(msc, INTPARTID, + (MPAMCFG_PART_SEL_INTERNAL | partid)); + if (mpam_has_feature(mpam_feat_cpor_part, rprops)) { if (mpam_has_feature(mpam_feat_cpor_part, cfg)) mpam_write_partsel_reg(msc, CPBM, cfg->cpbm); @@ -737,6 +795,26 @@ static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, if (mpam_has_feature(mpam_feat_mbw_prop, rprops)) mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract); + + if (mpam_has_feature(mpam_feat_ccap_part, rprops)) + mpam_write_partsel_reg(msc, CMAX, cmax); + + if (mpam_has_feature(mpam_feat_intpri_part, rprops) || + mpam_has_feature(mpam_feat_dspri_part, rprops)) { + /* aces high? */ + if (!mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) + intpri = 0; + if (!mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) + dspri = 0; + + if (mpam_has_feature(mpam_feat_intpri_part, rprops)) + pri_val |= FIELD_PREP(MPAMCFG_PRI_INTPRI, intpri); + if (mpam_has_feature(mpam_feat_dspri_part, rprops)) + pri_val |= FIELD_PREP(MPAMCFG_PRI_DSPRI, dspri); + + mpam_write_partsel_reg(msc, PRI, pri_val); + } + spin_unlock(&msc->part_sel_lock); } @@ -1285,6 +1363,19 @@ __resource_props_mismatch(struct mpam_msc_ris *ris, struct mpam_class *class) cprops->num_csu_mon = min(cprops->num_csu_mon, rprops->num_csu_mon); if (cprops->num_mbwu_mon != rprops->num_mbwu_mon) cprops->num_mbwu_mon = min(cprops->num_mbwu_mon, rprops->num_mbwu_mon); + + if (cprops->intpri_wd != rprops->intpri_wd) + cprops->intpri_wd = min(cprops->intpri_wd, rprops->intpri_wd); + if (cprops->dspri_wd != rprops->dspri_wd) + cprops->dspri_wd = min(cprops->dspri_wd, rprops->dspri_wd); + + /* {int,ds}pri may not have differing 0-low behaviour */ + if (mpam_has_feature(mpam_feat_intpri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_intpri_part, &cprops->features); + if (mpam_has_feature(mpam_feat_dspri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_dspri_part, &cprops->features); } /* diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index fa9386263dd5..d2319eefa565 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -70,7 +70,7 @@ struct mpam_msc * When we compact the supported features, we don't care what they are. * Storing them as a bitmap makes life easy.
*/ -typedef u16 mpam_features_t; +typedef u32 mpam_features_t; /* Bits for mpam_features_t */ enum mpam_device_features { @@ -80,6 +80,10 @@ enum mpam_device_features { mpam_feat_mbw_min, mpam_feat_mbw_max, mpam_feat_mbw_prop, + mpam_feat_intpri_part, + mpam_feat_intpri_part_0_low, + mpam_feat_dspri_part, + mpam_feat_dspri_part_0_low, mpam_feat_msmon, mpam_feat_msmon_csu, mpam_feat_msmon_csu_capture, @@ -87,6 +91,7 @@ enum mpam_device_features { mpam_feat_msmon_mbwu_capture, mpam_feat_msmon_mbwu_rwbw, mpam_feat_msmon_capt, + mpam_feat_partid_nrw, MPAM_FEATURE_LAST, }; #define MPAM_ALL_FEATURES ((1< Date: Fri, 25 Jun 2021 12:53:12 +0100 Subject: [PATCH 624/953] arm_mpam: Add helpers to allocate monitors ANBZ: #8686 commit e7b9d1fbfb0478c896b4fb96c9b214d4acc3d49e morse-linux. MPAM's MSC support a number of monitors, each of which supports bandwidth counters, or cache-storage-utilisation counters. To use a counter, a monitor needs to be configured. Add helpers to allocate and free CSU or MBWU monitors. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 2 ++ drivers/platform/mpam/mpam_internal.h | 35 +++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 612c4b97d373..82dffe8c03ef 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -244,6 +244,8 @@ mpam_class_alloc(u8 level_idx, enum mpam_class_types type, gfp_t gfp) class->level = level_idx; class->type = type; INIT_LIST_HEAD_RCU(&class->classes_list); + ida_init(&class->ida_csu_mon); + ida_init(&class->ida_mbwu_mon); list_add_rcu(&class->classes_list, &mpam_classes); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index d2319eefa565..fd792f9fba86 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -138,6 +138,9 @@ struct mpam_class /* member of mpam_classes */ struct list_head classes_list; + + struct ida ida_csu_mon; + struct ida ida_mbwu_mon; }; struct mpam_config { @@ -191,6 +194,38 @@ struct mpam_msc_ris struct mpam_component *comp; }; +static inline int mpam_alloc_csu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return -EOPNOTSUPP; + + return ida_alloc_range(&class->ida_csu_mon, 0, cprops->num_csu_mon - 1, + GFP_KERNEL); +} + +static inline void mpam_free_csu_mon(struct mpam_class *class, int csu_mon) +{ + ida_free(&class->ida_csu_mon, csu_mon); +} + +static inline int mpam_alloc_mbwu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return -EOPNOTSUPP; + + return ida_alloc_range(&class->ida_mbwu_mon, 0, + cprops->num_mbwu_mon - 1, GFP_KERNEL); +} + +static inline void mpam_free_mbwu_mon(struct mpam_class *class, int mbwu_mon) +{ + ida_free(&class->ida_mbwu_mon, mbwu_mon); +} + /* List of all classes */ extern struct list_head mpam_classes; extern struct srcu_struct mpam_srcu; -- Gitee From 48b977e41150eaec329e62b9b8fcd23712fb1ab5 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 24 Jun 2021 16:49:50 +0100 Subject: [PATCH 625/953] arm_mpam: Add mpam_msmon_read() to read monitor value ANBZ: 
#8686 commit 8a3a9a2ba3085adade0d7c7075c508a12c93e143 morse-linux. Reading a monitor involves configuring what you want to monitor, and reading the value. Components made up of multiple MSC may need values from each MSC. MSCs may take time to configure, returning 'not ready'. The maximum 'not ready' time should have been provided by firmware. Add mpam_msmon_read() to hide all this. If (one of) the MSC returns not ready, then wait the full timeout value before trying again. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 219 ++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 19 +++ 2 files changed, 238 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 82dffe8c03ef..3516528f2e14 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -123,6 +123,22 @@ static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val) __mpam_write_reg(msc, MPAMCFG_##reg, val); \ }) +#define mpam_read_monsel_reg(msc, reg) \ +({ \ + u32 ____ret; \ + \ + lockdep_assert_held_once(&msc->mon_sel_lock); \ + ____ret = __mpam_read_reg(msc, MSMON_##reg); \ + \ + ____ret; \ +}) + +#define mpam_write_monsel_reg(msc, reg, val) \ +({ \ + lockdep_assert_held_once(&msc->mon_sel_lock); \ + __mpam_write_reg(msc, MSMON_##reg, val); \ +}) + static u64 mpam_msc_read_idr(struct mpam_msc *msc) { u64 idr_high = 0, idr_low; @@ -725,6 +741,209 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) return 0; } +struct mon_read +{ + struct mpam_msc_ris *ris; + struct mon_cfg *ctx; + enum mpam_device_features type; + u64 *val; + int err; +}; + +static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, + u32 *flt_val) +{ + struct mon_cfg *ctx = m->ctx; + + switch (m->type) { + case mpam_feat_msmon_csu: + *ctl_val = MSMON_CFG_MBWU_CTL_TYPE_CSU; + break; + case mpam_feat_msmon_mbwu: + *ctl_val = MSMON_CFG_MBWU_CTL_TYPE_MBWU; + break; + default: + return; + } + + /* + * For CSU counters it's implementation-defined what happens when not + * filtering by partid. + */ + *ctl_val |= MSMON_CFG_x_CTL_MATCH_PARTID; + + *flt_val = FIELD_PREP(MSMON_CFG_MBWU_FLT_PARTID, ctx->partid); + if (m->ctx->match_pmg) { + *ctl_val |= MSMON_CFG_x_CTL_MATCH_PMG; + *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_PMG, ctx->pmg); + } + + if (mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, &m->ris->props)) + *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_RWBW, ctx->opts); +} + +static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, + u32 *flt_val) +{ + struct mpam_msc *msc = m->ris->msc; + + switch (m->type) { + case mpam_feat_msmon_csu: + *ctl_val = mpam_read_monsel_reg(msc, CFG_CSU_CTL); + *flt_val = mpam_read_monsel_reg(msc, CFG_CSU_FLT); + break; + case mpam_feat_msmon_mbwu: + *ctl_val = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); + *flt_val = mpam_read_monsel_reg(msc, CFG_MBWU_FLT); + break; + default: + return; + } +} + +static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, + u32 flt_val) +{ + struct mpam_msc *msc = m->ris->msc; + + /* + * Write the ctl_val with the enable bit cleared, reset the counter, + * then enable counter.
+ */ + switch (m->type) { + case mpam_feat_msmon_csu: + mpam_write_monsel_reg(msc, CFG_CSU_FLT, flt_val); + mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val); + mpam_write_monsel_reg(msc, CSU, 0); + mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); + break; + case mpam_feat_msmon_mbwu: + mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val); + mpam_write_monsel_reg(msc, MBWU, 0); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); + break; + default: + return; + } +} + +static void __ris_msmon_read(void *arg) +{ + u64 now; + bool nrdy = false; + unsigned long flags; + struct mon_read *m = arg; + struct mon_cfg *ctx = m->ctx; + struct mpam_msc_ris *ris = m->ris; + struct mpam_msc *msc = m->ris->msc; + u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt; + + lockdep_assert_held(&msc->lock); + + spin_lock_irqsave(&msc->mon_sel_lock, flags); + mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, ctx->mon) | + FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); + mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel); + + /* + * Read the existing configuration to avoid re-writing the same values. + * This saves waiting for 'nrdy' on subsequent reads. + */ + read_msmon_ctl_flt_vals(m, &cur_ctl, &cur_flt); + gen_msmon_ctl_flt_vals(m, &ctl_val, &flt_val); + if (cur_flt != flt_val || cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN)) + write_msmon_ctl_flt_vals(m, ctl_val, flt_val); + + switch (m->type) { + case mpam_feat_msmon_csu: + now = mpam_read_monsel_reg(msc, CSU); + break; + case mpam_feat_msmon_mbwu: + now = mpam_read_monsel_reg(msc, MBWU); + break; + default: + return; + } + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + + nrdy = now & MSMON___NRDY; + if (nrdy) { + m->err = -EBUSY; + return; + } + + now = FIELD_GET(MSMON___VALUE, now); + *(m->val) += now; +} + +static int _msmon_read(struct mpam_component *comp, struct mon_read *arg) +{ + int err, idx; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + arg->ris = ris; + + msc = ris->msc; + mutex_lock(&msc->lock); + err = smp_call_function_any(&msc->accessibility, + __ris_msmon_read, arg, true); + mutex_unlock(&msc->lock); + if (!err && arg->err) + err = arg->err; + if (err) + break; + } + srcu_read_unlock(&mpam_srcu, idx); + + return err; +} + +int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, + enum mpam_device_features type, u64 *val) +{ + int err; + struct mon_read arg; + u64 wait_jiffies = 0; + struct mpam_props *cprops = &comp->class->props; + + might_sleep(); + + if (!mpam_is_enabled()) + return -EIO; + + if (!mpam_has_feature(type, cprops)) + return -EOPNOTSUPP; + + memset(&arg, 0, sizeof(arg)); + arg.ctx = ctx; + arg.type = type; + arg.val = val; + *val = 0; + + err = _msmon_read(comp, &arg); + if (err == -EBUSY) + wait_jiffies = usecs_to_jiffies(comp->class->nrdy_usec); + + while (wait_jiffies) + wait_jiffies = schedule_timeout_uninterruptible(wait_jiffies); + + if (err == -EBUSY) { + memset(&arg, 0, sizeof(arg)); + arg.ctx = ctx; + arg.type = type; + arg.val = val; + *val = 0; + + err = _msmon_read(comp, &arg); + } + + return err; +} + static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) { u32 num_words, msb; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index fd792f9fba86..06a31e5d9610 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ 
-62,6 +62,7 @@ struct mpam_msc * If needed, take msc->lock first. */ spinlock_t part_sel_lock; + spinlock_t mon_sel_lock; void __iomem * mapped_hwpage; size_t mapped_hwpage_sz; }; @@ -194,6 +195,21 @@ struct mpam_msc_ris struct mpam_component *comp; }; +/* The values for MSMON_CFG_MBWU_FLT.RWBW */ +enum mon_filter_options { + COUNT_BOTH = 0, + COUNT_WRITE = 1, + COUNT_READ = 2, +}; + +struct mon_cfg { + u16 mon; + u8 pmg; + bool match_pmg; + u32 partid; + enum mon_filter_options opts; +}; + static inline int mpam_alloc_csu_mon(struct mpam_class *class) { struct mpam_props *cprops = &class->props; @@ -241,6 +257,9 @@ void mpam_disable(struct work_struct *work); int mpam_apply_config(struct mpam_component *comp, u16 partid, struct mpam_config *cfg); +int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, + enum mpam_device_features, u64 *val); + /* * MPAM MSCs have the following register layout. See: * Arm Architecture Reference Manual Supplement - Memory System Resource -- Gitee From f475cf279c2bd9eeaa11d540abecdd39545772a2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 8 Sep 2021 12:23:40 +0100 Subject: [PATCH 626/953] arm_mpam: Track bandwidth counter state for overflow and power management ANBZ: #8686 commit 1120466a35068928977b61c0fb6e5734e664f1ba morse-linux. Bandwidth counters need to run continuously to correctly reflect the bandwidth. The value read may be lower than the previous value read in the case of overflow and when the hardware is reset due to CPU hotplug. Add struct mbwu_state to track the bandwidth counter to allow overflow and power management to be handled. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 148 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 52 ++++++--- 2 files changed, 181 insertions(+), 19 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 3516528f2e14..930e14471378 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -773,6 +773,7 @@ static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, *ctl_val |= MSMON_CFG_x_CTL_MATCH_PARTID; *flt_val = FIELD_PREP(MSMON_CFG_MBWU_FLT_PARTID, ctx->partid); + *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_RWBW, ctx->opts); if (m->ctx->match_pmg) { *ctl_val |= MSMON_CFG_x_CTL_MATCH_PMG; *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_PMG, ctx->pmg); @@ -805,6 +806,7 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, u32 flt_val) { struct mpam_msc *msc = m->ris->msc; + struct msmon_mbwu_state *mbwu_state; /* * Write the ctl_val with the enable bit cleared, reset the counter, @@ -822,21 +824,33 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val); mpam_write_monsel_reg(msc, MBWU, 0); mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); + + mbwu_state = &m->ris->mbwu_state[m->ctx->mon]; + if (mbwu_state) + mbwu_state->prev_val = 0; + break; default: return; } } +static u64 mpam_msmon_overflow_val(struct mpam_msc_ris *ris) +{ + /* TODO: scaling, and long counters */ + return GENMASK_ULL(30, 0); +} + static void __ris_msmon_read(void *arg) { - u64 now; bool nrdy = false; unsigned long flags; struct mon_read *m = arg; + u64 now, 
overflow_val = 0; struct mon_cfg *ctx = m->ctx; struct mpam_msc_ris *ris = m->ris; struct mpam_msc *msc = m->ris->msc; + struct msmon_mbwu_state *mbwu_state; u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt; lockdep_assert_held(&msc->lock); @@ -858,22 +872,41 @@ static void __ris_msmon_read(void *arg) switch (m->type) { case mpam_feat_msmon_csu: now = mpam_read_monsel_reg(msc, CSU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); break; case mpam_feat_msmon_mbwu: now = mpam_read_monsel_reg(msc, MBWU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); + + if (nrdy) + break; + + mbwu_state = &ris->mbwu_state[ctx->mon]; + if (!mbwu_state) + break; + + /* Add any pre-overflow value to the mbwu_state->val */ + if (mbwu_state->prev_val > now) + overflow_val = mpam_msmon_overflow_val(ris) - mbwu_state->prev_val; + + mbwu_state->prev_val = now; + mbwu_state->correction += overflow_val; + + /* Include bandwidth consumed before the last hardware reset */ + now += mbwu_state->correction; break; default: return; } spin_unlock_irqrestore(&msc->mon_sel_lock, flags); - nrdy = now & MSMON___NRDY; if (nrdy) { m->err = -EBUSY; return; } - now = FIELD_GET(MSMON___VALUE, now); *(m->val) += now; } @@ -1064,6 +1097,68 @@ static int mpam_reprogram_ris(void *_arg) return 0; } +static int mpam_restore_mbwu_state(void *_ris) +{ + int i; + struct mon_read mwbu_arg; + struct mpam_msc_ris *ris = _ris; + + lockdep_assert_held(&ris->msc->lock); + + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + if (ris->mbwu_state[i].enabled) { + mwbu_arg.ris = ris; + mwbu_arg.ctx = &ris->mbwu_state[i].cfg; + mwbu_arg.type = mpam_feat_msmon_mbwu; + + __ris_msmon_read(&mwbu_arg); + } + } + + return 0; +} + +static int mpam_save_mbwu_state(void *arg) +{ + int i; + u64 val; + struct mon_cfg *cfg; + unsigned long flags; + u32 cur_flt, cur_ctl, mon_sel; + struct mpam_msc_ris *ris = arg; + struct mpam_msc *msc = ris->msc; + struct msmon_mbwu_state *mbwu_state; + + lockdep_assert_held(&msc->lock); + + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + mbwu_state = &ris->mbwu_state[i]; + cfg = &mbwu_state->cfg; + + spin_lock_irqsave(&msc->mon_sel_lock, flags); + mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, i) | + FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); + mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel); + + cur_flt = mpam_read_monsel_reg(msc, CFG_MBWU_FLT); + cur_ctl = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, 0); + + val = mpam_read_monsel_reg(msc, MBWU); + mpam_write_monsel_reg(msc, MBWU, 0); + + cfg->mon = i; + cfg->pmg = FIELD_GET(MSMON_CFG_MBWU_FLT_PMG, cur_flt); + cfg->match_pmg = FIELD_GET(MSMON_CFG_x_CTL_MATCH_PMG, cur_ctl); + cfg->partid = FIELD_GET(MSMON_CFG_MBWU_FLT_PARTID, cur_flt); + mbwu_state->correction += val; + mbwu_state->enabled = FIELD_GET(MSMON_CFG_x_CTL_EN, cur_ctl); + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + + return 0; +} + /* * Called via smp_call_on_cpu() to prevent migration, while still being * pre-emptible. @@ -1125,6 +1220,9 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) * for non-zero partid may be lost while the CPUs are offline. 
*/ ris->in_reset_state = online; + + if (mpam_is_enabled() && !online) + mpam_touch_msc(msc, &mpam_save_mbwu_state, ris); } srcu_read_unlock(&mpam_srcu, idx); } @@ -1156,6 +1254,9 @@ static void mpam_reprogram_msc(struct mpam_msc *msc) mpam_reprogram_ris_partid(ris, partid, cfg); } ris->in_reset_state = reset; + + if (mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + mpam_touch_msc(msc, &mpam_restore_mbwu_state, ris); } srcu_read_unlock(&mpam_srcu, idx); } @@ -1814,8 +1915,31 @@ static void mpam_unregister_irqs(void) cpus_read_unlock(); } +static void __destroy_component_cfg(struct mpam_component *comp) +{ + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + + kfree(comp->cfg); + list_for_each_entry(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + mbwu_state = ris->mbwu_state; + ris->mbwu_state = NULL; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + mutex_unlock(&ris->msc->lock); + + kfree(mbwu_state); + } +} + static int __allocate_component_cfg(struct mpam_component *comp) { + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + if (comp->cfg) return 0; @@ -1823,6 +1947,24 @@ static int __allocate_component_cfg(struct mpam_component *comp) if (!comp->cfg) return -ENOMEM; + list_for_each_entry(ris, &comp->ris, comp_list) { + if (!ris->props.num_mbwu_mon) + continue; + + mbwu_state = kcalloc(ris->props.num_mbwu_mon, + sizeof(*ris->mbwu_state), GFP_KERNEL); + if (!mbwu_state) { + __destroy_component_cfg(comp); + return -ENOMEM; + } + + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + ris->mbwu_state = mbwu_state; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + mutex_unlock(&ris->msc->lock); + } + return 0; } diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 06a31e5d9610..e546a8612dab 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -175,8 +175,40 @@ struct mpam_component struct mpam_class *class; }; -struct mpam_msc_ris -{ +/* The values for MSMON_CFG_MBWU_FLT.RWBW */ +enum mon_filter_options { + COUNT_BOTH = 0, + COUNT_WRITE = 1, + COUNT_READ = 2, +}; + +struct mon_cfg { + u16 mon; + u8 pmg; + bool match_pmg; + u32 partid; + enum mon_filter_options opts; +}; + +/* + * Changes to enabled and cfg are protected by the msc->lock. + * Changes to prev_val and correction are protected by the msc's mon_sel_lock. + */ +struct msmon_mbwu_state { + bool enabled; + struct mon_cfg cfg; + + /* The value last read from the hardware. Used to detect overflow. */ + u64 prev_val; + + /* + * The value to add to the new reading to account for power management, + * and shifts to trigger the overflow interrupt. 
*/ + u64 correction; +}; + +struct mpam_msc_ris { u8 ris_idx; u64 idr; struct mpam_props props; @@ -193,21 +225,9 @@ struct mpam_msc_ris /* parents: */ struct mpam_msc *msc; struct mpam_component *comp; -}; -/* The values for MSMON_CFG_MBWU_FLT.RWBW */ -enum mon_filter_options { - COUNT_BOTH = 0, - COUNT_WRITE = 1, - COUNT_READ = 2, -}; - -struct mon_cfg { - u16 mon; - u8 pmg; - bool match_pmg; - u32 partid; - enum mon_filter_options opts; + /* msmon mbwu configuration is preserved over reset */ + struct msmon_mbwu_state *mbwu_state; }; static inline int mpam_alloc_csu_mon(struct mpam_class *class) -- Gitee From 66a8ab3a06096c9ef2f9798a02965124cdc985b4 Mon Sep 17 00:00:00 2001 From: Rohit Mathew Date: Tue, 7 Feb 2023 19:14:17 +0000 Subject: [PATCH 627/953] arm_mpam: Probe for long/lwd mbwu counters ANBZ: #8686 commit dde6e1a3721f18c9e955dd666afcd84eec30faef morse-linux. mpam v0.1 and versions above v1.0 support an optional long counter for memory bandwidth monitoring. The MPAMF_MBWUMON_IDR register has fields indicating support for long counters. As of now, a 44 bit counter represented by the HAS_LONG field (bit 30) and a 63 bit counter represented by LWD (bit 29) can be optionally integrated. Probe for these counters and set corresponding feature bits if any of these counters are present. Signed-off-by: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 22 ++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 7 +++++++ 2 files changed, 29 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 930e14471378..c2c5dfa77083 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -653,6 +653,7 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) pr_err_once("Counters are not usable because not-ready timeout was not provided by firmware."); } if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) { + bool has_long; u32 mbwumonidr = mpam_read_partsel_reg(msc, MBWUMON_IDR); props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumonidr); @@ -661,6 +662,27 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) if (FIELD_GET(MPAMF_MBWUMON_IDR_HAS_RWBW, mbwumonidr)) mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props); + + /* + * Treat long counter and its extension, lwd as mutually + * exclusive feature bits. Though these are dependent + * fields at the implementation level, there would never + * be a need for mpam_feat_msmon_mbwu_44counter (long + * counter) and mpam_feat_msmon_mbwu_63counter (lwd) + * bits to be set together. + * + * mpam_feat_msmon_mbwu isn't treated as an exclusive + * bit as this feature bit would be used as the "front + * facing feature bit" for any checks related to mbwu
+ */ + has_long = FIELD_GET(MPAMF_MBWUMON_IDR_HAS_LONG, mbwumonidr); + if (props->num_mbwu_mon && has_long) { + if (FIELD_GET(MPAMF_MBWUMON_IDR_LWD, mbwumonidr)) + mpam_set_feature(mpam_feat_msmon_mbwu_63counter, props); + else + mpam_set_feature(mpam_feat_msmon_mbwu_44counter, props); + } } } diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index e546a8612dab..99790ba74768 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -88,7 +88,14 @@ enum mpam_device_features { mpam_feat_msmon, mpam_feat_msmon_csu, mpam_feat_msmon_csu_capture, + /* + * Having mpam_feat_msmon_mbwu set doesn't mean the regular 31 bit MBWU + * counter would be used. The exact counter used is decided based on the + * status of mpam_feat_msmon_mbwu_44counter/mpam_feat_msmon_mbwu_63counter + * as well. + */ mpam_feat_msmon_mbwu, + mpam_feat_msmon_mbwu_44counter, + mpam_feat_msmon_mbwu_63counter, mpam_feat_msmon_mbwu_capture, mpam_feat_msmon_mbwu_rwbw, mpam_feat_msmon_capt, -- Gitee From dd47022c2f7bd50c5b478a273af69872e614b868 Mon Sep 17 00:00:00 2001 From: Rohit Mathew Date: Mon, 20 Feb 2023 16:06:39 +0000 Subject: [PATCH 628/953] arm_mpam: Use long MBWU counters if supported ANBZ: #8686 commit abab48aae75632bac08e42751734c48c3a024a32 morse-linux. If the 44 bit (long) or 63 bit (LWD) counters are detected on probing the RIS, use the long/LWD counter instead of the regular 31 bit mbwu counter. Only 32bit accesses to the MSC are required to be supported by the spec, but these registers are 64bits. The lower half may overflow into the higher half between two 32bit reads. To avoid this, use a helper that reads the top half twice to check for overflow. Signed-off-by: Rohit Mathew [morse: merged multiple patches from Rohit] Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 86 ++++++++++++++++++++++++--- drivers/platform/mpam/mpam_internal.h | 7 ++- 2 files changed, 84 insertions(+), 9 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index c2c5dfa77083..7f1f848281be 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -772,6 +772,48 @@ struct mon_read int err; }; +static bool mpam_ris_has_mbwu_long_counter(struct mpam_msc_ris *ris) +{ + return (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props) || + mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props)); +} + +static u64 mpam_msc_read_mbwu_l(struct mpam_msc *msc) +{ + int retry = 3; + u32 mbwu_l_low; + u64 mbwu_l_high1, mbwu_l_high2; + + lockdep_assert_held_once(&msc->mon_sel_lock); + + WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4); + do { + mbwu_l_high1 = mbwu_l_high2; + mbwu_l_low = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L); + mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4); + + retry--; + } while (mbwu_l_high1 != mbwu_l_high2 && retry > 0); + + if (mbwu_l_high2 == mbwu_l_high1) + return (mbwu_l_high1 << 32) | mbwu_l_low; + return MSMON___NRDY_L; +} + +static void mpam_msc_zero_mbwu_l(struct mpam_msc *msc) +{ + lockdep_assert_held_once(&msc->mon_sel_lock); + +
WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L); + writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L + 4); +} + static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, u32 *flt_val) { @@ -844,7 +886,12 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, case mpam_feat_msmon_mbwu: mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val); mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val); - mpam_write_monsel_reg(msc, MBWU, 0); + + if (mpam_ris_has_mbwu_long_counter(m->ris)) + mpam_msc_zero_mbwu_l(m->ris->msc); + else + mpam_write_monsel_reg(msc, MBWU, 0); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); mbwu_state = &m->ris->mbwu_state[m->ctx->mon]; @@ -859,8 +906,13 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, static u64 mpam_msmon_overflow_val(struct mpam_msc_ris *ris) { - /* TODO: scaling, and long counters */ - return GENMASK_ULL(30, 0); + /* TODO: implement scaling counters */ + if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props)) + return GENMASK_ULL(62, 0); + else if (mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props)) + return GENMASK_ULL(43, 0); + else + return GENMASK_ULL(30, 0); } static void __ris_msmon_read(void *arg) @@ -898,9 +950,22 @@ static void __ris_msmon_read(void *arg) now = FIELD_GET(MSMON___VALUE, now); break; case mpam_feat_msmon_mbwu: - now = mpam_read_monsel_reg(msc, MBWU); - nrdy = now & MSMON___NRDY; - now = FIELD_GET(MSMON___VALUE, now); + /* + * If long or lwd counters are supported, use them, else revert + * to the 32 bit counter. + */ + if (mpam_ris_has_mbwu_long_counter(ris)) { + now = mpam_msc_read_mbwu_l(msc); + nrdy = now & MSMON___NRDY_L; + if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props)) + now = FIELD_GET(MSMON___LWD_VALUE, now); + else + now = FIELD_GET(MSMON___L_VALUE, now); + } else { + now = mpam_read_monsel_reg(msc, MBWU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); + } if (nrdy) break; @@ -1166,8 +1231,13 @@ static int mpam_save_mbwu_state(void *arg) cur_ctl = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); mpam_write_monsel_reg(msc, CFG_MBWU_CTL, 0); - val = mpam_read_monsel_reg(msc, MBWU); - mpam_write_monsel_reg(msc, MBWU, 0); + if (mpam_ris_has_mbwu_long_counter(ris)) { + val = mpam_msc_read_mbwu_l(msc); + mpam_msc_zero_mbwu_l(msc); + } else { + val = mpam_read_monsel_reg(msc, MBWU); + mpam_write_monsel_reg(msc, MBWU, 0); + } cfg->mon = i; cfg->pmg = FIELD_GET(MSMON_CFG_MBWU_FLT_PMG, cur_flt); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 99790ba74768..cafcce3e9efb 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -330,6 +330,8 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, #define MSMON_CSU_CAPTURE 0x0848 /* last cache-usage value captured */ #define MSMON_MBWU 0x0860 /* current mem-bw usage value */ #define MSMON_MBWU_CAPTURE 0x0868 /* last mem-bw value captured */ +#define MSMON_MBWU_L 0x0880 /* current long mem-bw usage value */ +#define MSMON_MBWU_CAPTURE_L 0x0890 /* last long mem-bw value captured */ #define MSMON_CAPT_EVNT 0x0808 /* signal a capture event */ #define MPAMF_ESR 0x00F8 /* error status register */ #define MPAMF_ECR 0x00F0 /* error control register */ @@ -533,7 +535,10 @@ int mpam_msmon_read(struct mpam_component 
*comp, struct mon_cfg *ctx, */ #define MSMON___VALUE GENMASK(30, 0) #define MSMON___NRDY BIT(31) -#define MSMON_MBWU_L_VALUE GENMASK(62, 0) +#define MSMON___NRDY_L BIT(63) +#define MSMON___L_VALUE GENMASK(43, 0) +#define MSMON___LWD_VALUE GENMASK(62, 0) + /* * MSMON_CAPT_EVNT - Memory system performance monitoring capture event * generation register -- Gitee From 874697737af110a7c38b3a43cac36d345a7478da Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 10 Sep 2021 12:00:01 +0100 Subject: [PATCH 629/953] arm_mpam: Add helper to reset saved mbwu state ANBZ: #8686 commit ca01e27727c44fc0deef570f3e5bc8e05e16e092 morse-linux. resctrl expects to reset the bandwidth counters when the filesystem is mounted. To allow this, add a helper that clears the saved mbwu state. Instead of cross calling to each CPU that can access the component MSC to write to the counter, set a flag that causes it to be zeroed on the next read. This is easily done by forcing a configuration update. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 44 +++++++++++++++++++++++---- drivers/platform/mpam/mpam_internal.h | 5 ++- 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 7f1f848281be..f9f22d1d698c 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -919,9 +919,11 @@ static void __ris_msmon_read(void *arg) { bool nrdy = false; unsigned long flags; + bool config_mismatch; struct mon_read *m = arg; u64 now, overflow_val = 0; struct mon_cfg *ctx = m->ctx; + bool reset_on_next_read = false; struct mpam_msc_ris *ris = m->ris; struct mpam_msc *msc = m->ris->msc; struct msmon_mbwu_state *mbwu_state; @@ -934,13 +936,24 @@ static void __ris_msmon_read(void *arg) FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel); + if (m->type == mpam_feat_msmon_mbwu) { + mbwu_state = &ris->mbwu_state[ctx->mon]; + if (mbwu_state) { + reset_on_next_read = mbwu_state->reset_on_next_read; + mbwu_state->reset_on_next_read = false; + } + } + /* * Read the existing configuration to avoid re-writing the same values. * This saves waiting for 'nrdy' on subsequent reads.
*/ read_msmon_ctl_flt_vals(m, &cur_ctl, &cur_flt); gen_msmon_ctl_flt_vals(m, &ctl_val, &flt_val); - if (cur_flt != flt_val || cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN)) + config_mismatch = cur_flt != flt_val || + cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN); + + if (config_mismatch || reset_on_next_read) write_msmon_ctl_flt_vals(m, ctl_val, flt_val); switch (m->type) { @@ -970,7 +983,6 @@ static void __ris_msmon_read(void *arg) if (nrdy) break; - mbwu_state = &ris->mbwu_state[ctx->mon]; if (!mbwu_state) break; @@ -1064,6 +1076,30 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, return err; } +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx) +{ + int idx; + unsigned long flags; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + if (!mpam_is_enabled()) + return; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + continue; + + msc = ris->msc; + spin_lock_irqsave(&msc->mon_sel_lock, flags); + ris->mbwu_state[ctx->mon].correction = 0; + ris->mbwu_state[ctx->mon].reset_on_next_read = true; + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + srcu_read_unlock(&mpam_srcu, idx); +} + static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) { u32 num_words, msb; @@ -1190,8 +1226,6 @@ static int mpam_restore_mbwu_state(void *_ris) struct mon_read mwbu_arg; struct mpam_msc_ris *ris = _ris; - lockdep_assert_held(&ris->msc->lock); - for (i = 0; i < ris->props.num_mbwu_mon; i++) { if (ris->mbwu_state[i].enabled) { mwbu_arg.ris = ris; @@ -1216,8 +1250,6 @@ static int mpam_save_mbwu_state(void *arg) struct mpam_msc *msc = ris->msc; struct msmon_mbwu_state *mbwu_state; - lockdep_assert_held(&msc->lock); - for (i = 0; i < ris->props.num_mbwu_mon; i++) { mbwu_state = &ris->mbwu_state[i]; cfg = &mbwu_state->cfg; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index cafcce3e9efb..3f9478c90faf 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -199,10 +199,12 @@ struct mon_cfg { /* * Changes to enabled and cfg are protected by the msc->lock. - * Changes to prev_val and correction are protected by the msc's mon_sel_lock. + * Changes to reset_on_next_read, prev_val and correction are protected by the + * msc's mon_sel_lock. */ struct msmon_mbwu_state { bool enabled; + bool reset_on_next_read; struct mon_cfg cfg; /* The value last read from the hardware. Used to detect overflow. */ @@ -286,6 +288,7 @@ int mpam_apply_config(struct mpam_component *comp, u16 partid, int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, enum mpam_device_features, u64 *val); +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); /* * MPAM MSCs have the following register layout. See: -- Gitee From 2f74a2bb3d8e82536a6224a38554435a5e8c7956 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 11 Jun 2019 17:02:09 +0100 Subject: [PATCH 630/953] arm_mpam: resctrl: Add boilerplate cpuhp and domain allocation ANBZ: #8686 commit 2d0f0357cb0a8c43d3d037ea9403a863dbb5fe9e morse-linux. resctrl has its own data structures to describe its resources. We can't use these directly as we play tricks with the 'MBA' resource, picking the MPAM controls or monitors that best apply. We may export the same component as both L3 and MBA. 
Add mpam_resctrl_exports[] as the array of class->resctrl mappings we are exporting, and add the cpuhp hooks that allocate and free the resctrl domain structures. While we're here, plumb in a few other obvious things. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/Makefile | 2 +- drivers/platform/mpam/mpam_devices.c | 12 ++ drivers/platform/mpam/mpam_internal.h | 15 ++ drivers/platform/mpam/mpam_resctrl.c | 237 ++++++++++++++++++++++++++ include/linux/arm_mpam.h | 4 + 5 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 drivers/platform/mpam/mpam_resctrl.c diff --git a/drivers/platform/mpam/Makefile b/drivers/platform/mpam/Makefile index 8ad69bfa2aa2..37693be531c3 100644 --- a/drivers/platform/mpam/Makefile +++ b/drivers/platform/mpam/Makefile @@ -1 +1 @@ -obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o mpam_resctrl.o diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index f9f22d1d698c..28f6010df6dc 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1411,6 +1411,9 @@ static int mpam_cpu_online(unsigned int cpu) } srcu_read_unlock(&mpam_srcu, idx); + if (mpam_is_enabled()) + mpam_resctrl_online_cpu(cpu); + return 0; } @@ -1470,6 +1473,9 @@ static int mpam_cpu_offline(unsigned int cpu) } srcu_read_unlock(&mpam_srcu, idx); + if (mpam_is_enabled()) + mpam_resctrl_offline_cpu(cpu); + return 0; } @@ -2140,6 +2146,12 @@ static void mpam_enable_once(void) mutex_unlock(&mpam_list_lock); cpus_read_unlock(); + if (!err) { + err = mpam_resctrl_setup(); + if (err) + pr_err("Failed to initialise resctrl: %d\n", err); + } + if (err) { schedule_work(&mpam_broken_work); return; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 3f9478c90faf..c9d9abb87cff 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -239,6 +239,16 @@ struct mpam_msc_ris { struct msmon_mbwu_state *mbwu_state; }; +struct mpam_resctrl_dom { + struct mpam_component *comp; + struct rdt_domain resctrl_dom; +}; + +struct mpam_resctrl_res { + struct mpam_class *class; + struct rdt_resource resctrl_res; +}; + static inline int mpam_alloc_csu_mon(struct mpam_class *class) { struct mpam_props *cprops = &class->props; @@ -290,6 +300,11 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, enum mpam_device_features, u64 *val); void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); +int mpam_resctrl_online_cpu(unsigned int cpu); +int mpam_resctrl_offline_cpu(unsigned int cpu); + +int mpam_resctrl_setup(void); + /* * MPAM MSCs have the following register layout. See: * Arm Architecture Reference Manual Supplement - Memory System Resource diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c new file mode 100644 index 000000000000..b9c1292ff630 --- /dev/null +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd.
+ +#define pr_fmt(fmt) "mpam: resctrl: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mpam_internal.h" + +/* + * The classes we've picked to map to resctrl resources. + * Class pointer may be NULL. + */ +static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; + +static bool exposed_alloc_capable; +static bool exposed_mon_capable; + +bool resctrl_arch_alloc_capable(void) +{ + return exposed_alloc_capable; +} + +bool resctrl_arch_mon_capable(void) +{ + return exposed_mon_capable; +} + +/* + * MSC may raise an error interrupt if it sees an out of range partid/pmg, + * and go on to truncate the value. Regardless of what the hardware supports, + * only the system wide safe value is safe to use. + */ +u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) +{ + return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); } + +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &mpam_resctrl_exports[l].resctrl_res; +} + +static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) +{ + /* TODO: initialise the resctrl resources */ + + return 0; +} + +/* Called with the mpam classes lock held */ +int mpam_resctrl_setup(void) +{ + int err = 0; + struct mpam_resctrl_res *res; + enum resctrl_res_level i; + + cpus_read_lock(); + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + INIT_LIST_HEAD(&res->resctrl_res.domains); + INIT_LIST_HEAD(&res->resctrl_res.evt_list); + res->resctrl_res.rid = i; + } + + /* TODO: pick MPAM classes to map to resctrl resources */ + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + if (!res->class) + continue; // dummy resource + + err = mpam_resctrl_resource_init(res); + if (err) + break; + } + cpus_read_unlock(); + + if (!err && !exposed_alloc_capable && !exposed_mon_capable) + err = -EOPNOTSUPP; + + if (!err) { + if (!is_power_of_2(mpam_pmg_max + 1)) { + /* + * If not all the partid*pmg values are valid indexes, + * resctrl may allocate pmg that don't exist. This + * should cause an error interrupt. + */ + pr_warn("Number of PMG is not a power of 2! resctrl may misbehave"); + } + err = resctrl_init(); + } + + return err; +} + +static struct mpam_resctrl_dom * +mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) +{ + struct mpam_resctrl_dom *dom; + struct mpam_class *class = res->class; + struct mpam_component *comp_iter, *comp; + + comp = NULL; + list_for_each_entry(comp_iter, &class->components, class_list) { + if (cpumask_test_cpu(cpu, &comp_iter->affinity)) { + comp = comp_iter; + break; + } + } + + /* cpu with unknown exported component?
*/ + if (WARN_ON_ONCE(!comp)) + return ERR_PTR(-EINVAL); + + dom = kzalloc_node(sizeof(*dom), GFP_KERNEL, cpu_to_node(cpu)); + if (!dom) + return ERR_PTR(-ENOMEM); + + dom->comp = comp; + INIT_LIST_HEAD(&dom->resctrl_dom.list); + dom->resctrl_dom.id = comp->comp_id; + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + + /* TODO: this list should be sorted */ + list_add_tail(&dom->resctrl_dom.list, &res->resctrl_res.domains); + + return dom; +} + +/* Like resctrl_get_domain_from_cpu(), but for offline CPUs */ +static struct mpam_resctrl_dom * +mpam_get_domain_from_cpu(int cpu, struct mpam_resctrl_res *res) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &res->resctrl_res.domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cpumask_test_cpu(cpu, &dom->comp->affinity)) + return dom; + } + + return NULL; +} + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &r->domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + if (dom->comp->comp_id == id) + return &dom->resctrl_dom; + } + + return NULL; +} + +int mpam_resctrl_online_cpu(unsigned int cpu) +{ + int i; + struct mpam_resctrl_dom *dom; + struct mpam_resctrl_res *res; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy_resource; + + dom = mpam_get_domain_from_cpu(cpu, res); + if (dom) { + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + continue; + } + + dom = mpam_resctrl_alloc_domain(cpu, res); + if (IS_ERR(dom)) + return PTR_ERR(dom); + } + + return 0; +} + +int mpam_resctrl_offline_cpu(unsigned int cpu) +{ + int i; + struct rdt_domain *d; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + d = resctrl_get_domain_from_cpu(cpu, &res->resctrl_res); + + /* The last one standing was ahead of us... */ + if (WARN_ON_ONCE(!d)) + continue; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cpumask_clear_cpu(cpu, &d->cpu_mask); + + if (!cpumask_empty(&d->cpu_mask)) + continue; + + list_del(&d->list); + kfree(dom); + } + + return 0; +} diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 40e09b4d236b..27c3ad9912ef 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -39,4 +39,8 @@ int mpam_register_requestor(u16 partid_max, u8 pmg_max); int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, enum mpam_class_types type, u8 class_id, int component_id); + +bool resctrl_arch_alloc_capable(void); +bool resctrl_arch_mon_capable(void); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From b9b3da9795a224286b8fe5a6e2d4d538cc483d6b Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 12 Jun 2019 13:51:30 +0100 Subject: [PATCH 631/953] arm_mpam: resctrl: Pick the caches we will use as resctrl resources ANBZ: #8686 commit 6e4fe28163a54f1426e3433c51c462f6424f35f2 morse-linux. Systems with MPAM support may have a variety of control types at any point of their system layout. We can only expose certain types of control, and only if they exist at particular locations. Start with the well-known caches. These have to be depth 2 or 3 and support MPAM's cache portion bitmap controls, with a number of portions fewer than resctrl's limit.
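To illustrate the constraints above as code, a hypothetical predicate (sketch only, not part of the patch; the type, feature and limit names are the ones this series uses, with RESCTRL_MAX_CBM being resctrl's bitmap width limit):

	/* Sketch: can this MPAM class back a resctrl cache resource? */
	static bool example_class_is_resctrl_cache(struct mpam_class *class)
	{
		if (class->type != MPAM_CLASS_CACHE)
			return false;
		/* resctrl only knows the well-known L2 and L3 caches */
		if (class->level != 2 && class->level != 3)
			return false;
		/* The cache portion bitmap must fit resctrl's schemata format */
		return mpam_has_feature(mpam_feat_cpor_part, &class->props) &&
		       class->props.cpbm_wd <= RESCTRL_MAX_CBM;
	}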
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 134 ++++++++++++++++++++++++++- include/linux/arm_mpam.h | 7 ++ 2 files changed, 137 insertions(+), 4 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index b9c1292ff630..d294db50a651 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -27,6 +27,7 @@ static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; static bool exposed_alloc_capable; static bool exposed_mon_capable; +static struct mpam_class *mbm_local_class; bool resctrl_arch_alloc_capable(void) { @@ -38,6 +39,11 @@ bool resctrl_arch_mon_capable(void) return exposed_mon_capable; } +bool resctrl_arch_is_mbm_local_enabled(void) +{ + return mbm_local_class; +} + /* * MSC may raise an error interrupt if it sees an out or range partid/pmg, * and go on to truncate the value. Regardless of what the hardware supports, @@ -56,14 +62,133 @@ struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) return &mpam_resctrl_exports[l].resctrl_res; } +static bool cache_has_usable_cpor(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_cpor_part, cprops)) + return false; + + /* TODO: Scaling is not yet supported */ + return (class->props.cpbm_wd <= RESCTRL_MAX_CBM); +} + +static bool cache_has_usable_csu(struct mpam_class *class) +{ + struct mpam_props *cprops; + + if (!class) + return false; + + cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return false; + + /* + * CSU counters settle on the value, so we can get away with + * having only one. + */ + if (!cprops->num_csu_mon) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + +bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); +} + +/* Test whether we can export MPAM_CLASS_CACHE:{2,3}? 
*/ +static void mpam_resctrl_pick_caches(void) +{ + int idx; + struct mpam_class *class; + struct mpam_resctrl_res *res; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + bool has_cpor = cache_has_usable_cpor(class); + + if (class->type != MPAM_CLASS_CACHE) { + pr_debug("pick_caches: Class is not a cache\n"); + continue; + } + + if (class->level != 2 && class->level != 3) { + pr_debug("pick_caches: not L2 or L3\n"); + continue; + } + + if (class->level == 2 && !has_cpor) { + pr_debug("pick_caches: L2 missing CPOR\n"); + continue; + } else if (!has_cpor && !cache_has_usable_csu(class)) { + pr_debug("pick_caches: Cache misses CPOR and CSU\n"); + continue; + } + + if (!cpumask_equal(&class->affinity, cpu_possible_mask)) { + pr_debug("pick_caches: Class has missing CPUs\n"); + continue; + } + + if (class->level == 2) { + res = &mpam_resctrl_exports[RDT_RESOURCE_L2]; + res->resctrl_res.name = "L2"; + } else { + res = &mpam_resctrl_exports[RDT_RESOURCE_L3]; + res->resctrl_res.name = "L3"; + } + res->class = class; + } + srcu_read_unlock(&mpam_srcu, idx); +} + static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { - /* TODO: initialise the resctrl resources */ + struct mpam_class *class = res->class; + struct rdt_resource *r = &res->resctrl_res; + + /* Is this one of the two well-known caches? */ + if (res->resctrl_res.rid == RDT_RESOURCE_L2 || + res->resctrl_res.rid == RDT_RESOURCE_L3) { + /* TODO: Scaling is not yet supported */ + r->cache.cbm_len = class->props.cpbm_wd; + r->cache.arch_has_sparse_bitmasks = true; + + /* mpam_devices will reject empty bitmaps */ + r->cache.min_cbm_bits = 1; + + /* TODO: kill these properties off as they are derivatives */ + r->format_str = "%d=%0*x"; + r->fflags = RFTYPE_RES_CACHE; + r->default_ctrl = BIT_MASK(class->props.cpbm_wd) - 1; + r->data_width = (class->props.cpbm_wd + 3) / 4; + + /* + * Which bits are shared with other ...things... + * Unknown devices use partid-0 which uses all the bitmap + * fields. Until we configured the SMMU and GIC not to do this + * 'all the bits' is the correct answer here. + */ + r->cache.shareable_bits = r->default_ctrl; + + if (mpam_has_feature(mpam_feat_cpor_part, &class->props)) { + r->alloc_capable = true; + exposed_alloc_capable = true; + } + + if (class->level == 3 && cache_has_usable_csu(class)) { + r->mon_capable = true; + exposed_mon_capable = true; + } + } return 0; } -/* Called with the mpam classes lock held */ int mpam_resctrl_setup(void) { int err = 0; @@ -78,7 +203,7 @@ int mpam_resctrl_setup(void) res->resctrl_res.rid = i; } - /* TODO: pick MPAM classes to map to resctrl resources */ + mpam_resctrl_pick_caches(); for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -103,7 +228,8 @@ int mpam_resctrl_setup(void) */ pr_warn("Number of PMG is not a power of 2! 
resctrl may misbehave"); } - err = resctrl_init(); + + /* TODO: call resctrl_init() */ } return err; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 27c3ad9912ef..576bb97fa552 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -42,5 +42,12 @@ int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); +bool resctrl_arch_is_llc_occupancy_enabled(void); +bool resctrl_arch_is_mbm_local_enabled(void); + +static inline bool resctrl_arch_is_mbm_total_enabled(void) +{ + return false; +} #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 1b3a0442011728ccc4bb89a38a41eb46625a4eaa Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 20 Aug 2021 15:28:42 +0100 Subject: [PATCH 632/953] arm_mpam: resctrl: Pick a value for num_rmid ANBZ: #8686 commit 8cf2449dc1536390ca1e875c5f39648cc711b0f4 morse-linux. After the changes to resctrl to support MPAM, num_rmid is only used as a value that is unfortunately exposed to user-space. For MPAM, this value doesn't mean anything, and whatever value we do expose will be wrong for some use cases. User-space may expect it can use this value to know how many 'extra' monitor groups it can create. e.g. on x86 if num_closid=4, num_rmid=8, then a total of 4 monitor groups can be created. If num_rmid were 2, then only 2 control groups could be created. For MPAM the number of pmg is very likely to be smaller than the number of partid, but this doesn't restrict the creation of control groups, as each control group has its own pmg space. Pick 1 if monitoring is supported. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index d294db50a651..f71f8c466817 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -180,10 +180,22 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) exposed_alloc_capable = true; } - if (class->level == 3 && cache_has_usable_csu(class)) { + if (class->level == 3 && cache_has_usable_csu(class)) r->mon_capable = true; - exposed_mon_capable = true; - } + } + + if (r->mon_capable) { + exposed_mon_capable = true; + + /* + * Unfortunately, num_rmid doesn't mean anything for + * mpam, and its exposed to user-space! + * num-rmid is supposed to mean the number of groups + * that can be created, both control or monitor groups. + * For mpam, each control group has its own pmg/rmid + * space. + */ + r->num_rmid = 1; } return 0; -- Gitee From 57d77cd7f266419ad24460629b907f8df7129ca2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Mar 2019 15:15:25 +0000 Subject: [PATCH 633/953] arm_mpam: resctrl: Implement resctrl_arch_reset_resources() ANBZ: #8686 commit 57dd4f86c8c2cc7797427e9cf4120f6a385bd05c morse-linux. We already have a helper for resetting an mpam class. Hook it up to resctrl_arch_reset_resources(). 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 2 +- drivers/platform/mpam/mpam_internal.h | 2 ++ drivers/platform/mpam/mpam_resctrl.c | 27 +++++++++++++++++++++++++++ include/linux/arm_mpam.h | 3 +++ 4 files changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 28f6010df6dc..13f48878c3d5 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2177,7 +2177,7 @@ static void mpam_enable_once(void) READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1); } -static void mpam_reset_class(struct mpam_class *class) +void mpam_reset_class(struct mpam_class *class) { int idx; struct mpam_msc_ris *ris; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index c9d9abb87cff..612d5f8c0568 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -293,6 +293,8 @@ extern u8 mpam_pmg_max; void mpam_enable(struct work_struct *work); void mpam_disable(struct work_struct *work); +void mpam_reset_class(struct mpam_class *class); + int mpam_apply_config(struct mpam_component *comp, u16 partid, struct mpam_config *cfg); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index f71f8c466817..82325956a938 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -247,6 +247,33 @@ int mpam_resctrl_setup(void) return err; } +void resctrl_arch_reset_resources(void) +{ + int i, idx; + struct mpam_class *class; + struct mpam_resctrl_res *res; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + if (!res->resctrl_res.alloc_capable) + continue; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); + } +} + static struct mpam_resctrl_dom * mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) { diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 576bb97fa552..97d4c8f076e4 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -50,4 +50,7 @@ static inline bool resctrl_arch_is_mbm_total_enabled(void) return false; } +/* reset cached configurations, then all devices */ +void resctrl_arch_reset_resources(void); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From ae24116f57c24252372fac5850b8719d42f752ab Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Mar 2019 14:34:44 +0000 Subject: [PATCH 634/953] arm_mpam: resctrl: Add resctrl_arch_get_config() ANBZ: #8686 commit dd4535f45deab7aeff4068d61963a9b2cecda7eb morse-linux. Implement resctrl_arch_get_config() by testing the configuration for a CPOR bitmap. For any other configuration type return the default. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 3 ++ drivers/platform/mpam/mpam_resctrl.c | 44 ++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 13f48878c3d5..5ebb944ad9fc 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2302,6 +2302,9 @@ int mpam_apply_config(struct mpam_component *comp, u16 partid, lockdep_assert_cpus_held(); + if (!memcmp(&comp->cfg[partid], cfg, sizeof(*cfg))) + return 0; + comp->cfg[partid] = *cfg; arg.comp = comp; arg.partid = partid; diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 82325956a938..cda8f08c0819 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -247,6 +247,50 @@ int mpam_resctrl_setup(void) return err; } +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type type) +{ + u32 partid; + struct mpam_config *cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + enum mpam_device_features configured_by; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return r->default_ctrl; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + partid = resctrl_get_config_index(closid, type); + cfg = &dom->comp->cfg[partid]; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + configured_by = mpam_feat_cpor_part; + break; + default: + return -EINVAL; + } + + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r) || + !mpam_has_feature(configured_by, cfg)) + return r->default_ctrl; + + switch (configured_by) { + case mpam_feat_cpor_part: + /* TODO: Scaling is not yet supported */ + return cfg->cpbm; + default: + return -EINVAL; + } +} + void resctrl_arch_reset_resources(void) { int i, idx; -- Gitee From 5b62cf9f11c32399b1fd16e29679d4b87c5c0f64 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 21 May 2021 12:19:36 +0100 Subject: [PATCH 635/953] arm_mpam: resctrl: Implement helpers to update configuration ANBZ: #8686 commit 72b1c4d7c4157d9d5ce7c20ea696630ff7cc5426 morse-linux. resctrl has two helpers for updating the configuration. resctrl_arch_update_one() updates a single value, and is used by the software-controller to apply feedback to the bandwidth controls; it has to be called on one of the CPUs in the resctrl:domain. resctrl_arch_update_domains() copies multiple staged configurations; it can be called from anywhere. Both helpers should update any changes to the underlying hardware. Implement resctrl_arch_update_domains() to use resctrl_arch_update_one(), which doesn't depend on being called on the right CPU.
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_internal.h | 7 +-- drivers/platform/mpam/mpam_resctrl.c | 65 +++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 612d5f8c0568..36c49cfb9271 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -119,12 +119,7 @@ struct mpam_props }; #define mpam_has_feature(_feat, x) ((1<<_feat) & (x)->features) - -static inline void mpam_set_feature(enum mpam_device_features feat, - struct mpam_props *props) -{ - props->features |= (1 << feat); -} +#define mpam_set_feature(_feat, x) ((x)->features |= (1<<_feat)) static inline void mpam_clear_feature(enum mpam_device_features feat, mpam_features_t *supported) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index cda8f08c0819..ebd8aef84679 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -291,6 +291,71 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, } } +int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type t, u32 cfg_val) +{ + u32 partid; + struct mpam_config cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + /* + * NOTE: don't check the CPU as mpam_apply_config() doesn't care, + * and resctrl_arch_update_domains() depends on this. + */ + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + partid = resctrl_get_config_index(closid, t); + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r)) + return -EINVAL; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + /* TODO: Scaling is not yet supported */ + cfg.cpbm = cfg_val; + mpam_set_feature(mpam_feat_cpor_part, &cfg); + break; + default: + return -EINVAL; + } + + return mpam_apply_config(dom->comp, partid, &cfg); +} + +/* TODO: this is IPI heavy */ +int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) +{ + int err = 0; + struct rdt_domain *d; + enum resctrl_conf_type t; + struct resctrl_staged_config *cfg; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + list_for_each_entry(d, &r->domains, list) { + for (t = 0; t < CDP_NUM_TYPES; t++) { + cfg = &d->staged_config[t]; + if (!cfg->have_new_ctrl) + continue; + + err = resctrl_arch_update_one(r, d, closid, t, + cfg->new_ctrl); + if (err) + return err; + } + } + + return err; +} + void resctrl_arch_reset_resources(void) { int i, idx; -- Gitee From a8c2c3184227063899c5929afd6e4fac055db03d Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 17:19:16 +0100 Subject: [PATCH 636/953] arm_mpam: resctrl: Add CDP emulation ANBZ: #8686 commit 72b4ce74ed8ed79999a91b088c926dddc0667941 morse-linux. Intel RDT's CDP feature allows the cache to use a different control value depending on whether the access was for instruction fetch or a data access.
MPAM's equivalent feature is the other way up: the CPU assigns a different partid label to traffic depending on whether it was instruction fetch or a data access, which causes the cache to use a different control value based solely on the partid. MPAM can emulate CDP, with the side effect that the alternative partid is seen by all MSCs; it can't be enabled per-MSC. Add the resctrl hooks to turn this on or off. Add the helpers that match a closid against a task, which need to be aware that the value written to hardware is not the same as the one resctrl is using. The context switch code needs to match the default resctrl group's value against a variable, as this value changes depending on whether CDP is in use. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- arch/arm64/include/asm/mpam.h | 3 +- drivers/platform/mpam/mpam_resctrl.c | 62 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 13 ++++++ 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h index 1d81a6f26acd..edae8c98fa28 100644 --- a/arch/arm64/include/asm/mpam.h +++ b/arch/arm64/include/asm/mpam.h @@ -4,6 +4,7 @@ #ifndef __ASM__MPAM_H #define __ASM__MPAM_H +#include #include #include #include @@ -108,7 +109,7 @@ static inline void mpam_thread_switch(struct task_struct *tsk) !static_branch_likely(&mpam_enabled)) return; - if (!regval) + if (regval == READ_ONCE(mpam_resctrl_default_group)) regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu)); oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu)); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index ebd8aef84679..64372f5bf380 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -19,6 +19,8 @@ #include "mpam_internal.h" +u64 mpam_resctrl_default_group; + /* * The classes we've picked to map to resctrl resources. * Class pointer may be NULL. @@ -29,6 +31,12 @@ static bool exposed_alloc_capable; static bool exposed_mon_capable; static struct mpam_class *mbm_local_class; +/* + * MPAM emulates CDP by setting different PARTID in the I/D fields of MPAM1_EL1. + * This applies globally to all traffic the CPU generates. + */ +static bool cdp_enabled; + bool resctrl_arch_alloc_capable(void) { return exposed_alloc_capable; @@ -44,6 +52,36 @@ bool resctrl_arch_mon_capable(void) return exposed_mon_capable; } +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored) +{ + return cdp_enabled; +} + +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable) +{ + u64 regval; + u32 partid, partid_i, partid_d; + + cdp_enabled = enable; + + partid = RESCTRL_RESERVED_CLOSID; + + if (enable) { + partid_d = resctrl_get_config_index(partid, CDP_CODE); + partid_i = resctrl_get_config_index(partid, CDP_DATA); + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d) | + FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + + } else { + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid) | + FIELD_PREP(MPAM_SYSREG_PARTID_I, partid); + } + + WRITE_ONCE(mpam_resctrl_default_group, regval); + + return 0; +} + /* * MSC may raise an error interrupt if it sees an out of range partid/pmg, * and go on to truncate the value.
Regardless of what the hardware supports, @@ -54,6 +92,30 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); } +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return tsk_closid == closid; +} + +/* The task's pmg is not unique, the partid must be considered too */ +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + u32 tsk_rmid = FIELD_GET(MPAM_SYSREG_PMG_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return (tsk_closid == closid) && (tsk_rmid == rmid); +} + struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) { if (l >= RDT_NUM_RESOURCES) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 97d4c8f076e4..e3921b0ab836 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -5,8 +5,17 @@ #define __LINUX_ARM_MPAM_H #include +#include #include +/* + * The value of the MPAM1_EL1 sysreg when a task is in the default group. + * This is used by the context switch code to use the resctrl CPU property + * instead. The value is modified when CDP is enabled/disabled by mounting + * the resctrl filesystem. + */ +extern u64 mpam_resctrl_default_group; + struct mpam_msc; enum mpam_msc_iface { @@ -53,4 +62,8 @@ static inline bool resctrl_arch_is_mbm_total_enabled(void) return false; } +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable); +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid); +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid); #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 344d849e0ccb537124dc5f96828df8977be5b6d2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 17:16:00 +0100 Subject: [PATCH 637/953] arm64: mpam: Add helpers to change a task's and cpu's mpam partid/pmg values ANBZ: #8686 commit e2951d7d49e760dc0c759775950ad9ae652cd59d morse-linux. Care must be taken when modifying the partid and pmg of a task, as writing these values may race with the task being scheduled in and reading the modified values. Add helpers to set the task properties and the cpu default value, and add the plumbing to the mpam driver that lets resctrl use them.
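A sketch of the single-copy-atomicity reasoning (illustrative only; example_set_task_mpam() is a hypothetical helper, the field and macro names are the ones this series uses):

	/*
	 * Publish partid and pmg with one 64-bit store. The context switch
	 * path performs a single READ_ONCE() of the packed value, so it
	 * observes either the old pair or the new pair, never a torn mix
	 * of an old partid with a new pmg.
	 */
	static void example_set_task_mpam(struct task_struct *tsk, u16 partid, u8 pmg)
	{
		u64 regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid) |
			     FIELD_PREP(MPAM_SYSREG_PMG_D, pmg);

		WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval);
	}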
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ use WARN_ON_ONCE() instead of BUG_ON() to check the validity of closid and rmid ] [ rename expired resctrl_sched_in() to resctrl_arch_sched_in() ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- arch/arm64/include/asm/mpam.h | 44 +++++++++++++++++++++ drivers/platform/mpam/mpam_resctrl.c | 58 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 7 ++++ 3 files changed, 109 insertions(+) diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h index edae8c98fa28..9abe1fe58c34 100644 --- a/arch/arm64/include/asm/mpam.h +++ b/arch/arm64/include/asm/mpam.h @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -90,6 +91,35 @@ static inline void __init __enable_mpam_hcr(void) * A value in struct thread_info is used instead of struct task_struct as the * cpu's u64 register format is used, but struct task_struct has two u32'. */ + static inline void mpam_set_cpu_defaults(int cpu, u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ + u64 default_val; + + default_val = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(per_cpu(arm64_mpam_default, cpu), default_val); +} + +static inline void mpam_set_task_partid_pmg(struct task_struct *tsk, + u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval; + + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + regval |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + static inline u64 mpam_get_regval(struct task_struct *tsk) { #ifdef CONFIG_ARM64_MPAM @@ -99,6 +129,20 @@ static inline u64 mpam_get_regval(struct task_struct *tsk) #endif } +static inline void resctrl_arch_set_rmid(struct task_struct *tsk, u32 rmid) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval = mpam_get_regval(tsk); + + regval &= ~MPAM_SYSREG_PMG_D; + regval &= ~MPAM_SYSREG_PMG_I; + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, rmid); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, rmid); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + static inline void mpam_thread_switch(struct task_struct *tsk) { u64 oldregval; diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 64372f5bf380..225565ec4b04 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -92,6 +93,63 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); } +void resctrl_arch_sched_in(struct task_struct *tsk) +{ + lockdep_assert_preemption_disabled(); + + mpam_thread_switch(tsk); +} + +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg) +{ + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(pmg > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_cpu_defaults(cpu, closid, closid, pmg, pmg); + } else { + /* + * When CDP is enabled, resctrl halves the closid range and we + * use odd/even partid for one closid. 
+ */ + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_cpu_defaults(cpu, partid_d, partid_i, pmg, pmg); + } +} + +void resctrl_arch_sync_cpu_defaults(void *info) +{ + struct resctrl_cpu_sync *r = info; + + lockdep_assert_preemption_disabled(); + + if (r) { + resctrl_arch_set_cpu_default_closid_rmid(smp_processor_id(), + r->closid, r->rmid); + } + + resctrl_arch_sched_in(current); +} + +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + + + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(rmid > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_task_partid_pmg(tsk, closid, closid, rmid, rmid); + } else { + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_task_partid_pmg(tsk, partid_d, partid_i, rmid, rmid); + } +} + bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) { u64 regval = mpam_get_regval(tsk); diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index e3921b0ab836..95a960b6f9d7 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -16,6 +16,8 @@ */ extern u64 mpam_resctrl_default_group; +#include + struct mpam_msc; enum mpam_msc_iface { @@ -66,4 +68,9 @@ bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored); int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable); bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid); bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid(int cpu, u32 closid); +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg); +void resctrl_arch_sched_in(struct task_struct *tsk); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 17da5a889c2242dacfa47c9ed61d94f2487b46c2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 18:45:14 +0100 Subject: [PATCH 638/953] arm_mpam: resctrl: Add rmid index helpers ANBZ: #8686 commit f018052a6be7a89ea54c955f696027ecd885b248 morse-linux. Because MPAM's pmg aren't identical to RDT's rmid, resctrl handles some data structures by index. This allows x86 to map indexes to RMID, and MPAM to map them to partid-and-pmg. Add the helpers to do this.
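A worked example with hypothetical numbers: if mpam_pmg_max is 3 then fls(3) is 2, so the low two bits of the index carry the pmg and the remaining bits carry the partid:

	idx = (closid << 2) | rmid;	/* closid=5, rmid=2 -> idx=22 */
	closid = idx >> 2;		/* 22 >> 2 == 5 */
	rmid = idx & GENMASK(1, 0);	/* 22 & 0x3 == 2 */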
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ use WARN_ON_ONCE() instead of BUG_ON() to check the validity of closid_shift ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 30 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 3 +++ 2 files changed, 33 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 225565ec4b04..1efefbf1313b 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -93,6 +93,36 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); } +u32 resctrl_arch_system_num_rmid_idx(void) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 num_partid = resctrl_arch_get_num_closid(NULL); + + return num_partid << closid_shift; +} + +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + + if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + return (closid << closid_shift) | rmid; +} + +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 pmg_mask = ~(~0 << closid_shift); + + if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + *closid = idx >> closid_shift; + *rmid = idx & pmg_mask; +} + void resctrl_arch_sched_in(struct task_struct *tsk) { lockdep_assert_preemption_disabled(); diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 95a960b6f9d7..d41891df56d4 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -72,5 +72,8 @@ void resctrl_arch_set_cpu_default_closid(int cpu, u32 closid); void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid); void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg); void resctrl_arch_sched_in(struct task_struct *tsk); +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid); +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid); +u32 resctrl_arch_system_num_rmid_idx(void); #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 3277fde1fd59dac90125d81feafe340424a69e2f Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 27 Jul 2021 18:09:13 +0100 Subject: [PATCH 639/953] untested: arm_mpam: resctrl: Add support for MB resource ANBZ: #8686 commit 62e0c4c1568edc593aeb130a3f83e628999df009 morse-linux. resctrl supports 'MB', as a percentage throttling of traffic somewhere after the L3. This is the control that mba_sc uses, so ideally the class chosen should be as close as possible to the counters used for mbm_local. MB's percentage control can be backed either with the fixed point fraction MBW_MAX or the bandwidth portion bitmap. Add helpers to convert to/from percentages. One problem here is the value written is not the same as the value read back. This is deliberately made visible to user-space. Another is the MBW_MAX fixed point fraction can't represent 100%. This is also exposed to user-space, as otherwise the values for a single-bit system are 100% and 0%, instead of 50% and 0%. The way CDP is emulated means MB controls need programming twice by the resctrl glue, as the bandwidth controls can be applied independently for data or instruction-fetch. This isn't how x86 behaves, and neither user-space nor resctrl support it.
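To make the fixed-point limit concrete: with bwa_wd implemented bits weighted 1/2, 1/4, ..., 2^-bwa_wd, the largest encodable fraction is

	max = 1/2 + 1/4 + ... + 2^-bwa_wd = 1 - 2^-bwa_wd

which is always short of 1.0. With a single implemented bit the only non-zero value is 1/2, which is where the 50%/0% pair above comes from.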
CC: Amit Singh Tomar Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 219 ++++++++++++++++++++++++++- 1 file changed, 216 insertions(+), 3 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 1efefbf1313b..b4be7f81e79b 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -53,9 +53,20 @@ bool resctrl_arch_is_mbm_local_enabled(void) return mbm_local_class; } -bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored) +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid) { - return cdp_enabled; + switch (rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + return cdp_enabled; + case RDT_RESOURCE_MBA: + default: + /* + * x86's MBA control doesn't support CDP, so user-space doesn't + * expect it. + */ + return false; + } } int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable) @@ -83,6 +94,11 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable) return 0; } +static bool mpam_resctrl_hide_cdp(enum resctrl_res_level rid) +{ + return cdp_enabled && !resctrl_arch_get_cdp_enabled(rid); +} + /* * MSC may raise an error interrupt if it sees an out or range partid/pmg, * and go on to truncate the value. Regardless of what the hardware supports, @@ -250,6 +266,102 @@ bool resctrl_arch_is_llc_occupancy_enabled(void) return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); } +static bool mba_class_use_mbw_part(struct mpam_props *cprops) +{ + /* TODO: Scaling is not yet supported */ + return (mpam_has_feature(mpam_feat_mbw_part, cprops) && + cprops->mbw_pbm_bits < MAX_MBA_BW); +} + +static bool class_has_usable_mba(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops) || + mpam_has_feature(mpam_feat_mbw_max, cprops)) + return true; + + return false; +} + +/* + * Calculate the percentage change from each implemented bit in the control + * This can return 0 when BWA_WD is greater than 6. (100 / (1<<7) == 0) + */ +static u32 get_mba_granularity(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops)) { + return MAX_MBA_BW / cprops->mbw_pbm_bits; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + /* + * bwa_wd is the number of bits implemented in the 0.xxx + * fixed point fraction. 1 bit is 50%, 2 is 25% etc. 
+ */ + return MAX_MBA_BW / (cprops->bwa_wd + 1); + } + + return 0; +} + +static u32 mbw_pbm_to_percent(unsigned long mbw_pbm, struct mpam_props *cprops) +{ + u32 bit, result = 0, granularity = get_mba_granularity(cprops); + + for_each_set_bit(bit, &mbw_pbm, cprops->mbw_pbm_bits % 32) { + result += granularity; + } + + return result; +} + +static u32 mbw_max_to_percent(u16 mbw_max, struct mpam_props *cprops) +{ + u8 bit; + u32 divisor = 2, value = 0; + + for (bit = 15; bit; bit--) { + if (mbw_max & BIT(bit)) + value += MAX_MBA_BW / divisor; + divisor <<= 1; + } + + return value; +} + +static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops) +{ + u32 granularity = get_mba_granularity(cprops); + u8 num_bits = pc / granularity; + + if (!num_bits) + return 0; + + /* TODO: pick bits at random to avoid contention */ + return (1 << num_bits) - 1; +} + +static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) +{ + u8 bit; + u32 divisor = 2, value = 0; + + if (WARN_ON_ONCE(cprops->bwa_wd > 15)) + return MAX_MBA_BW; + + for (bit = 15; bit; bit--) { + if (pc >= MAX_MBA_BW / divisor) { + pc -= MAX_MBA_BW / divisor; + value |= BIT(bit); + } + divisor <<= 1; + + if (!pc || !(MAX_MBA_BW / divisor)) + break; + } + + value &= GENMASK(15, 15 - cprops->bwa_wd); + + return value; +} + /* Test whether we can export MPAM_CLASS_CACHE:{2,3}? */ static void mpam_resctrl_pick_caches(void) { @@ -296,6 +408,44 @@ static void mpam_resctrl_pick_caches(void) srcu_read_unlock(&mpam_srcu, idx); } +static void mpam_resctrl_pick_mba(void) +{ + struct mpam_class *class, *candidate_class = NULL; + struct mpam_resctrl_res *res; + int idx; + + lockdep_assert_cpus_held(); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + struct mpam_props *cprops = &class->props; + + if (class->level < 3) + continue; + + if (!class_has_usable_mba(cprops)) + continue; + + if (!cpumask_equal(&class->affinity, cpu_possible_mask)) + continue; + + /* + * mba_sc reads the mbm_local counter, and waggles the MBA controls. + * mbm_local is implicitly part of the L3, pick a resource to be MBA + * that is as close as possible to the L3.
+ */ + if (!candidate_class || class->level < candidate_class->level) + candidate_class = class; + } + srcu_read_unlock(&mpam_srcu, idx); + + if (candidate_class) { + res = &mpam_resctrl_exports[RDT_RESOURCE_MBA]; + res->class = candidate_class; + res->resctrl_res.name = "MB"; + } +} + static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { struct mpam_class *class = res->class; @@ -332,6 +482,27 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) if (class->level == 3 && cache_has_usable_csu(class)) r->mon_capable = true; + } else if (res->resctrl_res.rid == RDT_RESOURCE_MBA) { + struct mpam_props *cprops = &class->props; + + /* TODO: kill these properties off as they are derivatives */ + r->format_str = "%d=%0*u"; + r->fflags = RFTYPE_RES_MB; + r->default_ctrl = MAX_MBA_BW; + r->data_width = 3; + + r->membw.delay_linear = true; + r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; + r->membw.bw_gran = get_mba_granularity(cprops); + + /* Round up to at least 1% */ + if (!r->membw.bw_gran) + r->membw.bw_gran = 1; + + if (class_has_usable_mba(cprops)) { + r->alloc_capable = true; + exposed_alloc_capable = true; + } } if (r->mon_capable) { @@ -366,6 +537,7 @@ int mpam_resctrl_setup(void) } mpam_resctrl_pick_caches(); + mpam_resctrl_pick_mba(); for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -424,6 +596,15 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, case RDT_RESOURCE_L3: configured_by = mpam_feat_cpor_part; break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + configured_by = mpam_feat_mbw_part; + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + configured_by = mpam_feat_mbw_max; + break; + } + fallthrough; default: return -EINVAL; } @@ -436,6 +617,11 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, case mpam_feat_cpor_part: /* TODO: Scaling is not yet supported */ return cfg->cpbm; + case mpam_feat_mbw_part: + /* TODO: Scaling is not yet supported */ + return mbw_pbm_to_percent(cfg->mbw_pbm, cprops); + case mpam_feat_mbw_max: + return mbw_max_to_percent(cfg->mbw_max, cprops); default: return -EINVAL; } @@ -444,6 +630,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type t, u32 cfg_val) { + int err; u32 partid; struct mpam_config cfg; struct mpam_props *cprops; @@ -472,11 +659,37 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, cfg.cpbm = cfg_val; mpam_set_feature(mpam_feat_cpor_part, &cfg); break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + cfg.mbw_pbm = percent_to_mbw_pbm(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_part, &cfg); + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + cfg.mbw_max = percent_to_mbw_max(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_max, &cfg); + break; + } + fallthrough; default: return -EINVAL; } - return mpam_apply_config(dom->comp, partid, &cfg); + /* + * When CDP is enabled, but the resource doesn't support it, we need to + * apply the same configuration to the other partid. 
+ */ + if (mpam_resctrl_hide_cdp(r->rid)) { + partid = resctrl_get_config_index(closid, CDP_CODE); + err = mpam_apply_config(dom->comp, partid, &cfg); + if (err) + return err; + + partid = resctrl_get_config_index(closid, CDP_DATA); + return mpam_apply_config(dom->comp, partid, &cfg); + + } else { + return mpam_apply_config(dom->comp, partid, &cfg); + } } /* TODO: this is IPI heavy */ -- Gitee From f49ed55dcfbd41a6b5f54b15704203ece469e51c Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 7 Sep 2021 17:21:42 +0100 Subject: [PATCH 640/953] untested: arm_mpam: resctrl: Add support for mbm counters ANBZ: #8686 commit a0ab3a6c26002494e8532b9e01859d36e89ca585 morse-linux. resctrl has two types of counters, NUMA-local and global. MPAM has only bandwidth counters, but the position of the MSC may mean it counts NUMA-local, or global traffic. But the topology information is not available. Apply a heuristic: if the L2 or L3 supports bandwidth monitors, these are probably NUMA-local. If the memory controller supports bandwidth monitors, they are probably global. This selection is made from mpam_resctrl_resource_init(), which implies resources that can be used for resctrl controls exist and also have counters. This would be a problem on a platform that only supports monitoring. TODO: Add an extra pass of all classes to find the classes to use as bandwidth counters. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 52 +++++++++++++++++++++++++++- include/linux/arm_mpam.h | 6 +--- 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index b4be7f81e79b..4786be5dc4b4 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -31,6 +31,7 @@ static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; static bool exposed_alloc_capable; static bool exposed_mon_capable; static struct mpam_class *mbm_local_class; +static struct mpam_class *mbm_total_class; /* * MPAM emulates CDP by setting different PARTID in the I/D fields of MPAM1_EL1. @@ -53,6 +54,11 @@ bool resctrl_arch_is_mbm_local_enabled(void) return mbm_local_class; } +bool resctrl_arch_is_mbm_total_enabled(void) +{ + return mbm_total_class; +} + bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid) { switch (rid) { @@ -266,6 +272,24 @@ bool resctrl_arch_is_llc_occupancy_enabled(void) return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); } +static bool class_has_usable_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return false; + + /* + * resctrl expects the bandwidth counters to be free running, + * which means we need as many monitors as resctrl has + * control/monitor groups.
+ */ + if (cprops->num_mbwu_mon < resctrl_arch_system_num_rmid_idx()) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + static bool mba_class_use_mbw_part(struct mpam_props *cprops) { /* TODO: Scaling is not yet supported */ @@ -450,10 +474,13 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { struct mpam_class *class = res->class; struct rdt_resource *r = &res->resctrl_res; + bool has_mbwu = class_has_usable_mbwu(class); /* Is this one of the two well-known caches? */ if (res->resctrl_res.rid == RDT_RESOURCE_L2 || res->resctrl_res.rid == RDT_RESOURCE_L3) { + bool has_csu = cache_has_usable_csu(class); + /* TODO: Scaling is not yet supported */ r->cache.cbm_len = class->props.cpbm_wd; r->cache.arch_has_sparse_bitmasks = true; @@ -480,8 +507,25 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) exposed_alloc_capable = true; } - if (class->level == 3 && cache_has_usable_csu(class)) + /* + * MBWU counters may be 'local' or 'total' depending on where + * they are in the topology. Counters on caches are assumed to + * be local. If it's on the memory controller, it's assumed to + * be global. + */ + if (has_mbwu && class->level >= 3) { + mbm_local_class = class; r->mon_capable = true; + } + + /* + * CSU counters only make sense on a cache. The file is called + * llc_occupancy, but it's expected to be on the L3. + */ + if (has_csu && class->type == MPAM_CLASS_CACHE && + class->level == 3) { + r->mon_capable = true; + } } else if (res->resctrl_res.rid == RDT_RESOURCE_MBA) { struct mpam_props *cprops = &class->props; @@ -503,6 +547,11 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) r->alloc_capable = true; exposed_alloc_capable = true; } + + if (has_mbwu && class->type == MPAM_CLASS_MEMORY) { + mbm_total_class = class; + r->mon_capable = true; + } } if (r->mon_capable) { @@ -538,6 +587,7 @@ int mpam_resctrl_setup(void) mpam_resctrl_pick_caches(); mpam_resctrl_pick_mba(); + /* TODO: mpam_resctrl_pick_counters(); */ for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index d41891df56d4..49416f22244a 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -55,11 +55,7 @@ bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); bool resctrl_arch_is_llc_occupancy_enabled(void); bool resctrl_arch_is_mbm_local_enabled(void); - -static inline bool resctrl_arch_is_mbm_total_enabled(void) -{ - return false; -} +bool resctrl_arch_is_mbm_total_enabled(void); /* reset cached configurations, then all devices */ void resctrl_arch_reset_resources(void); -- Gitee From cec8da5272a74c2e7b663ead012851c7bf510bba Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 13:29:39 +0100 Subject: [PATCH 641/953] arm_mpam: resctrl: Allow resctrl to allocate monitors ANBZ: #8686 commit 0e131acd37ebe32adab83e1d2200dd6205b7683f morse-linux. When resctrl wants to read a domain's 'QOS_L3_OCCUP', it needs to allocate a monitor on the corresponding resource. Monitors are allocated by class instead of component. Add helpers to do this. The MBM events depend on having their monitors allocated at init time so that they can be left running. The value USE_RMID_IDX is out of range for a monitor, and is used to indicate this behaviour. resctrl_arch_mon_ctx_alloc() is implemented to have a no_wait version and a waitqueue for callers that sleep.
The no_wait version will later become an interface for the resctrl_pmu to use. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_internal.h | 3 ++ drivers/platform/mpam/mpam_resctrl.c | 72 +++++++++++++++++++++++++++ include/linux/arm_mpam.h | 4 ++ 3 files changed, 79 insertions(+) diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 36c49cfb9271..8d4bcc1f5642 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -17,6 +17,9 @@ DECLARE_STATIC_KEY_FALSE(mpam_enabled); +/* Value to indicate the allocated monitor is derived from the RMID index. */ +#define USE_RMID_IDX (U16_MAX + 1) + static inline bool mpam_is_enabled(void) { return static_branch_likely(&mpam_enabled); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 4786be5dc4b4..8cb4707a4117 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -22,6 +22,8 @@ u64 mpam_resctrl_default_group; +DECLARE_WAIT_QUEUE_HEAD(resctrl_mon_ctx_waiters); + /* * The classes we've picked to map to resctrl resources. * Class pointer may be NULL. @@ -39,6 +41,10 @@ static struct mpam_class *mbm_total_class; */ static bool cdp_enabled; +/* A dummy mon context to use when the monitors were allocated up front */ +u32 __mon_is_rmid_idx = USE_RMID_IDX; +void *mon_is_rmid_idx = &__mon_is_rmid_idx; + bool resctrl_arch_alloc_capable(void) { return exposed_alloc_capable; @@ -234,6 +240,72 @@ struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) return &mpam_resctrl_exports[l].resctrl_res; } +static void *resctrl_arch_mon_ctx_alloc_no_wait(struct rdt_resource *r, + int evtid) +{ + struct mpam_resctrl_res *res; + u32 *ret = kmalloc(sizeof(*ret), GFP_KERNEL); + + if (!ret) + return ERR_PTR(-ENOMEM); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + *ret = mpam_alloc_csu_mon(res->class); + return ret; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + return mon_is_rmid_idx; + } + + return ERR_PTR(-EOPNOTSUPP); +} + +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + DEFINE_WAIT(wait); + void *ret; + + might_sleep(); + + do { + prepare_to_wait(&resctrl_mon_ctx_waiters, &wait, + TASK_INTERRUPTIBLE); + ret = resctrl_arch_mon_ctx_alloc_no_wait(r, evtid); + if (PTR_ERR(ret) == -ENOSPC) + schedule(); + } while (PTR_ERR(ret) == -ENOSPC && !signal_pending(current)); + finish_wait(&resctrl_mon_ctx_waiters, &wait); + + return ret; +} + +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *arch_mon_ctx) +{ + struct mpam_resctrl_res *res; + u32 mon = *(u32 *)arch_mon_ctx; + + if (mon == USE_RMID_IDX) + return; + kfree(arch_mon_ctx); + arch_mon_ctx = NULL; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + mpam_free_csu_mon(res->class, mon); + wake_up(&resctrl_mon_ctx_waiters); + return; + case QOS_L3_MBM_TOTAL_EVENT_ID: + case QOS_L3_MBM_LOCAL_EVENT_ID: + return; + } +} + static bool cache_has_usable_cpor(struct mpam_class *class) { struct mpam_props *cprops = &class->props; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 
49416f22244a..88000eb59c6f 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -72,4 +72,8 @@ u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid); void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid); u32 resctrl_arch_system_num_rmid_idx(void); +struct rdt_resource; +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid); +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From e1e0fa094b29433eca0d4acab02407100b6d251d Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 16:36:58 +0100 Subject: [PATCH 642/953] arm_mpam: resctrl: Add resctrl_arch_rmid_read() and resctrl_arch_reset_rmid() ANBZ: #8686 commit f77ded5f6670639c76c0941dc6943d9e1b261b95 morse-linux. resctrl uses resctrl_arch_rmid_read() to read counters. CDP emulation means the counter may need reading twice to get both the I and D side allocations. The same goes for reset. Add the rounding helper for checking monitor values while we're here. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 79 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 4 ++ 2 files changed, 83 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 8cb4707a4117..730ea6c6dffc 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -306,6 +306,85 @@ void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, } } +int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *arch_mon_ctx) +{ + int err; + u64 cdp_val; + struct mon_cfg cfg; + struct mpam_resctrl_dom *dom; + u32 mon = *(u32 *)arch_mon_ctx; + enum mpam_device_features type; + + resctrl_arch_rmid_read_context_check(); + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + switch (eventid) { + case QOS_L3_OCCUP_EVENT_ID: + type = mpam_feat_msmon_csu; + break; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + type = mpam_feat_msmon_mbwu; + break; + default: + return -EINVAL; + } + + cfg.mon = mon; + if (cfg.mon == USE_RMID_IDX) + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + + cfg.match_pmg = true; + cfg.pmg = rmid; + + if (cdp_enabled) { + cfg.partid = closid << 1; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + if (err) + return err; + + cfg.partid += 1; + err = mpam_msmon_read(dom->comp, &cfg, type, &cdp_val); + if (!err) + *val += cdp_val; + } else { + cfg.partid = closid; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + } + + return err; +} + +void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid) +{ + struct mon_cfg cfg; + struct mpam_resctrl_dom *dom; + + if (eventid != QOS_L3_MBM_LOCAL_EVENT_ID) + return; + + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + cfg.match_pmg = true; + cfg.pmg = rmid; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cdp_enabled) { + cfg.partid = closid << 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + + cfg.partid += 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } else { + cfg.partid = closid; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } +} + 
static bool cache_has_usable_cpor(struct mpam_class *class) { struct mpam_props *cprops = &class->props; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 88000eb59c6f..abadaba0085f 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -50,6 +50,10 @@ int mpam_register_requestor(u16 partid_max, u8 pmg_max); int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, enum mpam_class_types type, u8 class_id, int component_id); +static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) +{ + return val; +} bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); -- Gitee From 9bfe6e759a7eac740795e7bc943a9ebdcbc55c5b Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 9 Mar 2023 14:01:42 +0000 Subject: [PATCH 643/953] untested: arm_mpam: resctrl: Allow monitors to be configured ANBZ: #8686 commit db0ac51f60675b6c4a54ccd24fa7198ec321c56d morse-linux. MPAM MSCs may have support for filtering reads or writes when monitoring traffic. Resctrl has a configuration bitmap for which kind of accesses should be monitored. Bridge the gap where possible. MPAM only has a read/write bit, so not all the combinations can be supported. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 26 +++++++++++ drivers/platform/mpam/mpam_internal.h | 9 ++++ drivers/platform/mpam/mpam_resctrl.c | 62 +++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 5ebb944ad9fc..04e1e98e17cb 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1076,6 +1076,32 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, return err; } +void mpam_msmon_reset_all_mbwu(struct mpam_component *comp) +{ + int idx, i; + unsigned long flags; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + if (!mpam_is_enabled()) + return; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + continue; + + msc = ris->msc; + spin_lock_irqsave(&msc->mon_sel_lock, flags); + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + ris->mbwu_state[i].correction = 0; + ris->mbwu_state[i].reset_on_next_read = true; + } + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + srcu_read_unlock(&mpam_srcu, idx); +} + void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx) { int idx; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 8d4bcc1f5642..50d738d83047 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -20,6 +20,12 @@ DECLARE_STATIC_KEY_FALSE(mpam_enabled); /* Value to indicate the allocated monitor is derived from the RMID index. */ #define USE_RMID_IDX (U16_MAX + 1) +/* + * Only these event configuration bits are supported. MPAM can't know if + * data is being written back, these will show up as a write. 
+ */ +#define MPAM_RESTRL_EVT_CONFIG_VALID (READS_TO_LOCAL_MEM | NON_TEMP_WRITE_TO_LOCAL_MEM) + static inline bool mpam_is_enabled(void) { return static_branch_likely(&mpam_enabled); @@ -240,6 +246,8 @@ struct mpam_msc_ris { struct mpam_resctrl_dom { struct mpam_component *comp; struct rdt_domain resctrl_dom; + + u32 mbm_local_evt_cfg; }; struct mpam_resctrl_res { @@ -299,6 +307,7 @@ int mpam_apply_config(struct mpam_component *comp, u16 partid, int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, enum mpam_device_features, u64 *val); void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); +void mpam_msmon_reset_all_mbwu(struct mpam_component *comp); int mpam_resctrl_online_cpu(unsigned int cpu); int mpam_resctrl_offline_cpu(unsigned int cpu); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 730ea6c6dffc..328837867d60 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -306,6 +306,18 @@ void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, } } +static enum mon_filter_options resctrl_evt_config_to_mpam(u32 local_evt_cfg) +{ + switch (local_evt_cfg) { + case READS_TO_LOCAL_MEM: + return COUNT_READ; + case NON_TEMP_WRITE_TO_LOCAL_MEM: + return COUNT_WRITE; + default: + return COUNT_BOTH; + } +} + int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid, u64 *val, void *arch_mon_ctx) @@ -339,6 +351,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, cfg.match_pmg = true; cfg.pmg = rmid; + cfg.opts = resctrl_evt_config_to_mpam(dom->mbm_local_evt_cfg); if (cdp_enabled) { cfg.partid = closid << 1; @@ -621,6 +634,54 @@ static void mpam_resctrl_pick_mba(void) } } +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt) +{ + struct mpam_props *cprops; + + switch (evt) { + case QOS_L3_MBM_LOCAL_EVENT_ID: + if (!mbm_local_class) + return false; + cprops = &mbm_local_class->props; + + return mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, cprops); + default: + return false; + } +} + +void resctrl_arch_mon_event_config_read(void *info) +{ + struct mpam_resctrl_dom *dom; + struct resctrl_mon_config_info *mon_info = info; + + dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom); + mon_info->mon_config = dom->mbm_local_evt_cfg & MAX_EVT_CONFIG_BITS; +} + +void resctrl_arch_mon_event_config_write(void *info) +{ + struct mpam_resctrl_dom *dom; + struct resctrl_mon_config_info *mon_info = info; + + if (mon_info->mon_config & ~MPAM_RESTRL_EVT_CONFIG_VALID) { + mon_info->err = -EOPNOTSUPP; + return; + } + + dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom); + dom->mbm_local_evt_cfg = mon_info->mon_config & MPAM_RESTRL_EVT_CONFIG_VALID; +} + +void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d) +{ + struct mpam_resctrl_dom *dom; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID; + mpam_msmon_reset_all_mbwu(dom->comp); +} + static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { struct mpam_class *class = res->class; @@ -973,6 +1034,7 @@ mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) dom->comp = comp; INIT_LIST_HEAD(&dom->resctrl_dom.list); dom->resctrl_dom.id = comp->comp_id; + dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID; cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); /* TODO: this list 
should be sorted */ -- Gitee From 2fd493adebec88c98fb2c0cd35833be113f1740d Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 18:48:23 +0100 Subject: [PATCH 644/953] arm_mpam: resctrl: Add empty definitions for pseudo lock ANBZ: #8686 commit 12329dd3015ff84e06b3044cfb506a555dba7b29 morse-linux. Pseudo lock isn't supported on arm64. Add empty definitions of the functions arm64 doesn't implement. Because the Kconfig option is not selected, none of these will be called. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- include/linux/arm_mpam.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index abadaba0085f..8897309c163d 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -80,4 +80,11 @@ struct rdt_resource; void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid); void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx); +/* Pseudo lock is not supported by MPAM */ +static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; } +static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; } + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 566fd4e714c7e6aa25da4eb147a08b3ce4a872cc Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 14 Jul 2021 15:34:16 +0100 Subject: [PATCH 645/953] arm_mpam: resctrl: Add empty definitions for fine-grained enables ANBZ: #8686 commit 0d96e8cc81e11d9ae1ce9392c5ff59e9db82339e morse-linux. resctrl has individual hooks to separately enable and disable the closid/partid and rmid/pmg context switching code. For MPAM this is all the same thing, as the value in struct task_struct is used to cache the value that should be written to hardware. arm64's context switching code is enabled once MPAM is usable, but doesn't touch the hardware unless the value has changed. Resctrl doesn't need to ask. Add empty definitions for these hooks. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- include/linux/arm_mpam.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 8897309c163d..f6b92060b811 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -87,4 +87,13 @@ static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; } static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; } static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; } +/* + * The CPU configuration for MPAM is cheap to write, and is only written if it + * has changed. No need for fine-grained enables.
+ */ +static inline void resctrl_arch_enable_mon(void) { } +static inline void resctrl_arch_disable_mon(void) { } +static inline void resctrl_arch_enable_alloc(void) { } +static inline void resctrl_arch_disable_alloc(void) { } + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From d93d00ea0ef4b14df62c3ab61fdf49ae1e093791 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 2 Sep 2022 11:08:41 +0100 Subject: [PATCH 646/953] arm_mpam: resctrl: Add dummy definition for free running counters ANBZ: #8686 commit 8876ec5dfa36043b8e8c80de9d35f07495dbf77c morse-linux. resctrl expects RDT-like counters that are free running. MPAM's counters don't behave like this as they need a monitor to be allocated first. Provide the helper that says whether free running counters are supported. Subsequent patches will make this more intelligent. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- include/linux/arm_mpam.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index f6b92060b811..239d27af9e32 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -55,6 +55,12 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val; } +/* MPAM counters require a monitor to be allocated */ +static inline bool resctrl_arch_event_is_free_running(enum resctrl_event_id evt) +{ + return false; +} + bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); bool resctrl_arch_is_llc_occupancy_enabled(void); -- Gitee From f4a8cad0effce1b908adbe3637076692a8294839 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 14 Jul 2021 15:40:17 +0100 Subject: [PATCH 647/953] arm64: mpam: Select ARCH_HAS_CPU_RESCTRL ANBZ: #8686 commit 01473b460f8b425cf15bc838864435ce79c366b4 morse-linux. Enough MPAM support is present to enable ARCH_HAS_CPU_RESCTRL. Let it rip^Wlink! Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ add missing SPDX-License-Identifier tag ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- arch/arm64/Kconfig | 3 ++- arch/arm64/include/asm/resctrl.h | 2 ++ drivers/platform/mpam/Kconfig | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 arch/arm64/include/asm/resctrl.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7c09c2580869..8d631f7ba66d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1977,7 +1977,8 @@ config ARM64_TLB_RANGE config ARM64_MPAM bool "Enable support for MPAM" select ACPI_MPAM if ACPI - select ARM_CPU_RESCTRL + select ARCH_HAS_CPU_RESCTRL + select RESCTRL_FS help Memory Partitioning and Monitoring is an optional extension that allows the CPUs to mark load and store transactions with diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h new file mode 100644 index 000000000000..b506e95cf6e3 --- /dev/null +++ b/arch/arm64/include/asm/resctrl.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include diff --git a/drivers/platform/mpam/Kconfig b/drivers/platform/mpam/Kconfig index 13bd86fc5e58..75f5b2454fbe 100644 --- a/drivers/platform/mpam/Kconfig +++ b/drivers/platform/mpam/Kconfig @@ -2,5 +2,7 @@ # CPU resources, not containers or cgroups etc.
config ARM_CPU_RESCTRL bool - depends on ARM64 + default y + depends on ARM64 && ARCH_HAS_CPU_RESCTRL + depends on MISC_FILESYSTEMS select RESCTRL_RMID_DEPENDS_ON_CLOSID -- Gitee From f1211451a52100713453b9495284188eb3f806b4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 5 Dec 2023 16:18:37 +0000 Subject: [PATCH 648/953] perf/arm-cmn: Stop claiming all the resources ANBZ: #8686 commit af8184da77bf79023a968bab12ad78de7f7311f1 morse-linux. Carl reports that when both the MPAM driver and CMN driver are built into the kernel, they fight over who can claim the resources associated with their registers. This prevents the second of these two drivers from probing. Currently the CMN PMU driver claims all the CMN registers. The MPAM registers are grouped together in a small number of pages, whereas the PMU registers that the CMN PMU driver uses appear throughout the CMN register space. Having the CMN driver claim all the resources is the wrong thing to do, and claiming individual registers here and there is not worthwhile. Instead, stop the CMN driver from claiming any resources as its registers are not grouped together. Reported-by: Carl Worth Tested-by: Carl Worth Signed-off-by: James Morse CC: Ilkka Koskinen [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ remove the redundant PTR_ERR macro ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/perf/arm-cmn.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 2c684e49a6fc..ec12b02d7134 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -2439,6 +2439,7 @@ static int arm_cmn_probe(struct platform_device *pdev) struct arm_cmn *cmn; const char *name; static atomic_t id; + struct resource *cfg; int err, rootnode, this_id; cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); @@ -2453,7 +2454,16 @@ static int arm_cmn_probe(struct platform_device *pdev) rootnode = arm_cmn600_acpi_probe(pdev, cmn); } else { rootnode = 0; - cmn->base = devm_platform_ioremap_resource(pdev, 0); + + /* + * Avoid registering resources as the PMU's registers are + * scattered through CMN, and may appear either side of + * registers for other 'devices' (e.g. the MPAM MSC controls). + */ + cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!cfg) + return -EINVAL; + cmn->base = devm_ioremap(&pdev->dev, cfg->start, resource_size(cfg)); if (IS_ERR(cmn->base)) return PTR_ERR(cmn->base); if (cmn->part == PART_CMN600) -- Gitee From 9ec1728ce927b4f294afd81c8aaed61f430421f4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 19 Aug 2021 15:06:55 +0100 Subject: [PATCH 649/953] arm_mpam: resctrl: Tell resctrl about cpu/domain online/offline ANBZ: #8686 commit f6d31defad669e42342bd5ae044e85eb9be239da morse-linux. Now that mpam links against resctrl, make the cpu and domain online/offline calls at the appropriate points.
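The ordering resctrl expects can be modelled in user-space. This is only a sketch with invented names: a bitmask stands in for struct cpumask, one 'domain' covers each pair of CPUs, and the printf() calls mark where resctrl_online_domain()/resctrl_online_cpu() and their offline counterparts are invoked by the real driver:

  #include <stdio.h>
  #include <stdlib.h>

  #define NR_CPUS 8

  struct domain {
      unsigned int cpu_mask;  /* stand-in for struct cpumask */
      int id;
  };

  /* Illustrative: one domain per pair of CPUs, like one MSC component. */
  static struct domain *domains[NR_CPUS / 2];

  static int domain_id(int cpu) { return cpu / 2; }

  static void cpu_online(int cpu)
  {
      int id = domain_id(cpu);

      if (!domains[id]) {
          domains[id] = calloc(1, sizeof(*domains[id]));
          domains[id]->id = id;
          printf("domain %d online\n", id);  /* resctrl_online_domain() */
      }
      domains[id]->cpu_mask |= 1u << cpu;
      printf("cpu %d online\n", cpu);        /* resctrl_online_cpu() */
  }

  static void cpu_offline(int cpu)
  {
      int id = domain_id(cpu);

      printf("cpu %d offline\n", cpu);       /* resctrl_offline_cpu() */
      domains[id]->cpu_mask &= ~(1u << cpu);
      if (!domains[id]->cpu_mask) {
          printf("domain %d offline\n", id); /* resctrl_offline_domain() */
          free(domains[id]);
          domains[id] = NULL;
      }
  }

  int main(void)
  {
      cpu_online(0);
      cpu_online(1);   /* same domain: no new domain event */
      cpu_offline(0);  /* domain stays, CPU 1 is still online */
      cpu_offline(1);  /* last CPU gone, domain goes offline */
      return 0;
  }

The first CPU of a component brings the domain online before the CPU is reported; the last CPU to leave is reported offline before its domain is torn down.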
Signed-off-by: James Morse Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 328837867d60..976641a2af12 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -825,7 +825,7 @@ int mpam_resctrl_setup(void) pr_warn("Number of PMG is not a power of 2! resctrl may misbehave"); } - /* TODO: call resctrl_init() */ + err = resctrl_init(); } return err; } @@ -1080,7 +1080,7 @@ struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) int mpam_resctrl_online_cpu(unsigned int cpu) { - int i; + int i, err; struct mpam_resctrl_dom *dom; struct mpam_resctrl_res *res; @@ -1099,8 +1099,12 @@ int mpam_resctrl_online_cpu(unsigned int cpu) dom = mpam_resctrl_alloc_domain(cpu, res); if (IS_ERR(dom)) return PTR_ERR(dom); + err = resctrl_online_domain(&res->resctrl_res, &dom->resctrl_dom); + if (err) + return err; } + resctrl_online_cpu(cpu); return 0; } @@ -1111,6 +1115,8 @@ int mpam_resctrl_offline_cpu(unsigned int cpu) struct mpam_resctrl_res *res; struct mpam_resctrl_dom *dom; + resctrl_offline_cpu(cpu); + for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -1129,6 +1135,7 @@ int mpam_resctrl_offline_cpu(unsigned int cpu) if (!cpumask_empty(&d->cpu_mask)) continue; + resctrl_offline_domain(&res->resctrl_res, &dom->resctrl_dom); list_del(&d->list); kfree(dom); } -- Gitee From e0daf7d34e6e85e8edf0c2c091b3ad7e63835555 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 2 Nov 2021 12:45:26 +0000 Subject: [PATCH 650/953] arm_mpam: resctrl: Call resctrl_exit() in the event of errors ANBZ: #8686 commit 44ac89e937ea85aae5ec363f003f50cb2791d3f9 morse-linux. All of MPAM's errors indicate a software bug, e.g. an out-of-bounds partid has been generated. When this happens, the mpam driver is disabled. If resctrl_init() succeeded, also call resctrl_exit() to remove resctrl. If the filesystem was mounted in its traditional place, it is no longer possible for processes to find it as the mount point has been removed. If the filesystem was mounted elsewhere, it will appear that all CPUs and domains are offline. User-space will not be able to update the hardware.
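The 'only tear down what was set up' rule is a small state machine. A user-space sketch of the guard (the names here are invented; the driver itself uses a resctrl_enabled bool accessed with READ_ONCE()/WRITE_ONCE()):

  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative stand-ins for resctrl_init()/resctrl_exit(). */
  static bool registered;

  static int fs_init(void) { printf("init\n"); return 0; }
  static void fs_exit(void) { printf("exit\n"); }

  static int setup(void)
  {
      int err = fs_init();

      if (!err)
          registered = true;
      return err;
  }

  /* Safe on any error path: only tears down what was set up. */
  static void teardown(void)
  {
      if (!registered)
          return;
      registered = false;
      fs_exit();
  }

  int main(void)
  {
      setup();
      teardown();
      teardown();  /* a second call is a no-op */
      return 0;
  }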
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 2 ++ drivers/platform/mpam/mpam_internal.h | 1 + drivers/platform/mpam/mpam_resctrl.c | 17 +++++++++++++++++ 3 files changed, 20 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 04e1e98e17cb..32532a918115 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2240,6 +2240,8 @@ static irqreturn_t mpam_disable_thread(int irq, void *dev_id) } mutex_unlock(&mpam_cpuhp_state_lock); + mpam_resctrl_exit(); + static_branch_disable(&mpam_enabled); mpam_unregister_irqs(); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 50d738d83047..48554ff93e09 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -313,6 +313,7 @@ int mpam_resctrl_online_cpu(unsigned int cpu); int mpam_resctrl_offline_cpu(unsigned int cpu); int mpam_resctrl_setup(void); +void mpam_resctrl_exit(void); /* * MPAM MSCs have the following register layout. See: diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 976641a2af12..62bfa7d772d9 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -41,6 +41,12 @@ static struct mpam_class *mbm_total_class; */ static bool cdp_enabled; +/* + * If resctrl_init() succeeded, resctrl_exit() can be used to remove support + * for the filesystem in the event of an error. + */ +static bool resctrl_enabled; + /* A dummy mon context to use when the monitors were allocated up front */ u32 __mon_is_rmid_idx = USE_RMID_IDX; void *mon_is_rmid_idx = &__mon_is_rmid_idx; @@ -826,11 +832,22 @@ int mpam_resctrl_setup(void) } err = resctrl_init(); + if (!err) + WRITE_ONCE(resctrl_enabled, true); } return err; } +void mpam_resctrl_exit(void) +{ + if (!READ_ONCE(resctrl_enabled)) + return; + + WRITE_ONCE(resctrl_enabled, false); + resctrl_exit(); +} + u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { -- Gitee From 621bf59cc1805d559345f2242a081715a3be273d Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 19:01:23 +0100 Subject: [PATCH 651/953] arm_mpam: resctrl: Update the rmid reallocation limit ANBZ: #8686 commit 0066c4921f837d64ba963d0b47a4271388aacd1e morse-linux. resctrl's limbo code needs to be told when the data left in a cache is small enough for the partid+pmg value to be re-allocated. x86 uses the cache size divided by the number of rmid users the cache may have. Do the same, but for the smallest cache, and with the number of partid-and-pmg users. Querying the cache size can't happen until after cacheinfo_sysfs_init() has run, so mpam_resctrl_setup() must wait until then.
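The arithmetic can be checked in isolation. The numbers below are invented for illustration (a 1MiB L2, a 32MiB L3, mpam_partid_max of 63 and mpam_pmg_max of 3); in the driver the sizes come from cacheinfo and the limits from MPAM discovery:

  #include <stdio.h>

  /* Minimal stand-in for the kernel's fls(): highest set bit position. */
  static unsigned int fls32(unsigned int x)
  {
      unsigned int r = 0;

      while (x) {
          r++;
          x >>= 1;
      }
      return r;
  }

  int main(void)
  {
      /* Made-up hardware values, for illustration only. */
      unsigned int partid_max = 63, pmg_max = 3;
      unsigned int l2_size = 1u << 20, l3_size = 32u << 20;
      unsigned int num_idx = (partid_max + 1) << fls32(pmg_max);
      unsigned int smallest = l2_size < l3_size ? l2_size : l3_size;

      printf("num_rmid_idx:      %u\n", num_idx);
      printf("realloc limit:     %u bytes\n", smallest);
      printf("realloc threshold: %u bytes\n", smallest / num_idx);

      return 0;
  }

Here 256 unique partid+pmg values share the smallest (1MiB) cache, giving a reallocation threshold of 4096 bytes.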
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 51 ++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 62bfa7d772d9..4062c0f93f85 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -47,6 +48,13 @@ static bool cdp_enabled; */ static bool resctrl_enabled; +/* + * mpam_resctrl_pick_caches() needs to know the size of the caches. cacheinfo + * populates this from a device_initcall(). mpam_resctrl_setup() must wait. + */ +static bool cacheinfo_ready; +static DECLARE_WAIT_QUEUE_HEAD(wait_cacheinfo_ready); + /* A dummy mon context to use when the monitors were allocated up front */ u32 __mon_is_rmid_idx = USE_RMID_IDX; void *mon_is_rmid_idx = &__mon_is_rmid_idx; @@ -404,6 +412,24 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, } } +/* + * The rmid realloc threshold should be for the smallest cache exposed to + * resctrl. + */ +static void update_rmid_limits(unsigned int size) +{ + u32 num_unique_pmg = resctrl_arch_system_num_rmid_idx(); + + if (WARN_ON_ONCE(!size)) + return; + + if (resctrl_rmid_realloc_limit && size > resctrl_rmid_realloc_limit) + return; + + resctrl_rmid_realloc_limit = size; + resctrl_rmid_realloc_threshold = size / num_unique_pmg; +} + static bool cache_has_usable_cpor(struct mpam_class *class) { struct mpam_props *cprops = &class->props; @@ -560,11 +586,15 @@ static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) static void mpam_resctrl_pick_caches(void) { int idx; + unsigned int cache_size; struct mpam_class *class; struct mpam_resctrl_res *res; + lockdep_assert_cpus_held(); + idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + struct mpam_props *cprops = &class->props; bool has_cpor = cache_has_usable_cpor(class); if (class->type != MPAM_CLASS_CACHE) { @@ -590,6 +620,16 @@ static void mpam_resctrl_pick_caches(void) continue; } + /* Assume cache levels are the same size for all CPUs... */ + cache_size = get_cpu_cacheinfo_size(smp_processor_id(), class->level); + if (!cache_size) { + pr_debug("pick_caches: Could not read cache size\n"); + continue; + } + + if (mpam_has_feature(mpam_feat_msmon_csu, cprops)) + update_rmid_limits(cache_size); + if (class->level == 2) { res = &mpam_resctrl_exports[RDT_RESOURCE_L2]; res->resctrl_res.name = "L2"; @@ -795,6 +835,8 @@ int mpam_resctrl_setup(void) struct mpam_resctrl_res *res; enum resctrl_res_level i; + wait_event(wait_cacheinfo_ready, cacheinfo_ready); + cpus_read_lock(); for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -1159,3 +1201,12 @@ int mpam_resctrl_offline_cpu(unsigned int cpu) return 0; } + +static int __init __cacheinfo_ready(void) +{ + cacheinfo_ready = true; + wake_up(&wait_cacheinfo_ready); + + return 0; +} +device_initcall_sync(__cacheinfo_ready); -- Gitee From 01a9ff22b6e790c696b57f43c599e1a721b48b27 Mon Sep 17 00:00:00 2001 From: Amit Singh Tomar Date: Mon, 9 Jan 2023 17:03:59 +0530 Subject: [PATCH 652/953] fs/resctrl: Remove the limit on the number of CLOSID ANBZ: #8686 commit b530deed244d9b45f3bce3cccde91f6ed0ebf7ea morse-linux. 
At the moment, the number of resource control groups a user can create is limited to 32. Remove the limit. ffs() returns '1' for bit 0, hence the existing code subtracts 1 from the index to get the CLOSID value. find_first_bit() returns the bit number, which does not need adjusting. Signed-off-by: Amit Singh Tomar [ morse: fixed the off-by-one in the allocator and the wrong not-found value. Removed the limit. ] Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 2 +- fs/resctrl/rdtgroup.c | 25 +++++++++++++------------ include/linux/resctrl.h | 5 ----- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 4062c0f93f85..c7ebb4d37b47 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -132,7 +132,7 @@ static bool mpam_resctrl_hide_cdp(enum resctrl_res_level rid) */ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) { - return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); + return mpam_partid_max + 1; } u32 resctrl_arch_system_num_rmid_idx(void) diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index 936fc6e47386..ea969ddb1a9d 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -123,8 +123,8 @@ static bool resctrl_is_mbm_event(int e) } /* - * Trivial allocator for CLOSIDs. Since h/w only supports a small number, - * we can keep a bitmap of free CLOSIDs in a single integer. + * Trivial allocator for CLOSIDs. Use BITMAP APIs to manipulate a bitmap + * of free CLOSIDs. * * Using a global CLOSID across all resources has some advantages and * some drawbacks: @@ -137,7 +137,7 @@ static bool resctrl_is_mbm_event(int e) * - Our choices on how to configure each resource become progressively more * limited as the number of resources grows.
*/ -static unsigned long closid_free_map; +static unsigned long *closid_free_map; static int closid_free_map_len; int closids_supported(void) @@ -148,16 +148,17 @@ int closids_supported(void) static void closid_init(void) { struct resctrl_schema *s; - u32 rdt_min_closid = 32; + u32 rdt_min_closid = ~0; /* Compute rdt_min_closid across all resources */ list_for_each_entry(s, &resctrl_schema_all, list) rdt_min_closid = min(rdt_min_closid, s->num_closid); - closid_free_map = BIT_MASK(rdt_min_closid) - 1; + closid_free_map = bitmap_alloc(rdt_min_closid, GFP_KERNEL); + bitmap_fill(closid_free_map, rdt_min_closid); /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ - __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map); + __clear_bit(RESCTRL_RESERVED_CLOSID, closid_free_map); closid_free_map_len = rdt_min_closid; } @@ -175,12 +176,12 @@ static int closid_alloc(void) return cleanest_closid; closid = cleanest_closid; } else { - closid = ffs(closid_free_map); - if (closid == 0) + closid = find_first_bit(closid_free_map, closid_free_map_len); + if (closid == closid_free_map_len) return -ENOSPC; - closid--; } - __clear_bit(closid, &closid_free_map); + + __clear_bit(closid, closid_free_map); return closid; } @@ -189,7 +190,7 @@ void closid_free(int closid) { lockdep_assert_held(&rdtgroup_mutex); - __set_bit(closid, &closid_free_map); + __set_bit(closid, closid_free_map); } /** @@ -203,7 +204,7 @@ bool closid_allocated(unsigned int closid) { lockdep_assert_held(&rdtgroup_mutex); - return !test_bit(closid, &closid_free_map); + return !test_bit(closid, closid_free_map); } /** diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 00cc0457af50..dc80ddab26cf 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -30,11 +30,6 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX -/* - * Resctrl uses a u32 as a closid bitmap. The maximum closid is 32. - */ -#define RESCTRL_MAX_CLOSID 32 - /* * Resctrl uses u32 to hold the user-space config. The maximum bitmap size is * 32. -- Gitee From 1d125be6903139a2086ecc82b4565cfd32b328b8 Mon Sep 17 00:00:00 2001 From: Tyler Fanelli Date: Tue, 19 Sep 2023 22:40:01 -0400 Subject: [PATCH 653/953] docs/fuse-io: Document the usage of DIRECT_IO_ALLOW_MMAP ANBZ: #8700 commit 11ca77cdcca17cec909d2b97404ddacfec0acafd upstream. By default, shared mmap is disabled in FUSE DIRECT_IO mode. However, when the DIRECT_IO_ALLOW_MMAP flag is enabled in the FUSE_INIT reply, shared mmap is allowed. Signed-off-by: Tyler Fanelli Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- Documentation/filesystems/fuse-io.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/fuse-io.rst b/Documentation/filesystems/fuse-io.rst index 255a368fe534..6464de4266ad 100644 --- a/Documentation/filesystems/fuse-io.rst +++ b/Documentation/filesystems/fuse-io.rst @@ -15,7 +15,8 @@ The direct-io mode can be selected with the FOPEN_DIRECT_IO flag in the FUSE_OPEN reply. In direct-io mode the page cache is completely bypassed for reads and writes. -No read-ahead takes place. Shared mmap is disabled. +No read-ahead takes place. Shared mmap is disabled by default. To allow shared +mmap, the FUSE_DIRECT_IO_ALLOW_MMAP flag may be enabled in the FUSE_INIT reply. 
In cached mode reads may be satisfied from the page cache, and data may be read-ahead by the kernel to fill the cache. The cache is always kept consistent -- Gitee From 3c6192f0a1363a284d3da6985c88716a41c068ef Mon Sep 17 00:00:00 2001 From: Bernd Schubert Date: Tue, 22 Aug 2023 21:48:18 +0200 Subject: [PATCH 654/953] fuse: create helper function if DIO write needs exclusive lock ANBZ: #8700 commit 699cf8246ee4c2c524f18c2e395909d16e7fda1b upstream. This makes the code a bit easier to read and makes it easier to add more conditions for when an exclusive lock is needed. Signed-off-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 63 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 18 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index cc9651a01351..8273b3d6ec72 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1298,6 +1298,47 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) return res; } +static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); +} + +/* + * @return true if an exclusive lock for direct IO writes is needed + */ +static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct fuse_file *ff = file->private_data; + struct inode *inode = file_inode(iocb->ki_filp); + + /* Server side has to advise that it supports parallel dio writes. */ + if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) + return true; + + /* + * Append will need to know the eventual EOF - always needs an + * exclusive lock. + */ + if (iocb->ki_flags & IOCB_APPEND) + return true; + + /* + * Combination of page access and direct-io is difficult, shared locks + * actually introduce a conflict. + */ + if (get_fuse_conn(inode)->direct_io_allow_mmap) + return true; + + /* Parallel dio beyond EOF is not supported, at least for now. */ + if (fuse_io_past_eof(iocb, from)) + return true; + + return false; +} + static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; @@ -1557,26 +1598,12 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) return res; } -static bool fuse_direct_write_extending_i_size(struct kiocb *iocb, - struct iov_iter *iter) -{ - struct inode *inode = file_inode(iocb->ki_filp); - - return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); -} - static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); - struct file *file = iocb->ki_filp; - struct fuse_file *ff = file->private_data; struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); ssize_t res; - bool exclusive_lock = - !(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) || - get_fuse_conn(inode)->direct_io_allow_mmap || - iocb->ki_flags & IOCB_APPEND || - fuse_direct_write_extending_i_size(iocb, from); + bool exclusive_lock = fuse_dio_wr_exclusive_lock(iocb, from); /* * Take exclusive lock if @@ -1590,10 +1617,10 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) else { inode_lock_shared(inode); - /* A race with truncate might have come up as the decision for - * the lock type was done without holding the lock, check again.
+ /* + * Previous check was without any lock and might have raced. */ - if (fuse_io_past_eof(iocb, from)) { inode_unlock_shared(inode); inode_lock(inode); exclusive_lock = true; -- Gitee From b21916e4dc197e53c8e25ad2fa3e75581517faf6 Mon Sep 17 00:00:00 2001 From: Bernd Schubert Date: Sun, 24 Dec 2023 00:05:53 +0100 Subject: [PATCH 655/953] fuse: add fuse_dio_lock/unlock helper functions ANBZ: #8700 commit 9bbb6717dfd286a2861ca33273f4d7c3e65423b0 upstream. So far this is just a helper to move complex locking logic out of fuse_direct_write_iter. It is especially needed by the next patch in the series, which adds the fuse inode cache IO mode and adds in even more locking complexity. Signed-off-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 61 ++++++++++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 8273b3d6ec72..a495ef704e5b 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1339,6 +1339,37 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from return false; } +static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, + bool *exclusive) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); + if (*exclusive) { + inode_lock(inode); + } else { + inode_lock_shared(inode); + /* + * Previous check was without inode lock and might have raced, + * check again. + */ + if (fuse_io_past_eof(iocb, from)) { + inode_unlock_shared(inode); + inode_lock(inode); + *exclusive = true; + } + } +} + +static void fuse_dio_unlock(struct inode *inode, bool exclusive) +{ + if (exclusive) { + inode_unlock(inode); + } else { + inode_unlock_shared(inode); + } +} + static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; @@ -1603,30 +1634,9 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(iocb->ki_filp); struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); ssize_t res; - bool exclusive_lock = fuse_dio_wr_exclusive_lock(iocb, from); - - /* - * Take exclusive lock if - * - Parallel direct writes are disabled - a user space decision - * - Parallel direct writes are enabled and i_size is being extended. - * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP). - * This might not be needed at all, but needs further investigation. - */ - if (exclusive_lock) - inode_lock(inode); - else { - inode_lock_shared(inode); - - /* - * Previous check was without any lock and might have raced.
- */ - if (fuse_io_past_eof(iocb, from)) { - inode_unlock_shared(inode); - inode_lock(inode); - exclusive_lock = true; - } - } + bool exclusive; + fuse_dio_lock(iocb, from, &exclusive); res = generic_write_checks(iocb, from); if (res > 0) { if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { @@ -1637,10 +1647,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) fuse_write_update_attr(inode, iocb->ki_pos, res); } } - if (exclusive_lock) - inode_unlock(inode); - else - inode_unlock_shared(inode); + fuse_dio_unlock(inode, exclusive); return res; } -- Gitee From 0d28d0d6a2caf6ae1fec1785011e43fc6987f930 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 1 Feb 2024 13:48:59 +0200 Subject: [PATCH 656/953] fuse: factor out helper fuse_truncate_update_attr() ANBZ: #8700 commit 0c9d708953d02f74cea05a01cf3e2c8f5a9fbaf4 upstream. fuse_finish_open() is called from fuse_open_common() and from fuse_create_open(). In the latter case, the O_TRUNC flag is always cleared in finish_open() before calling into fuse_finish_open(). Move the bits that update attribute cache post O_TRUNC open into a helper and call this helper from fuse_open_common() directly. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a495ef704e5b..50f4831e7003 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -204,30 +204,31 @@ void fuse_finish_open(struct inode *inode, struct file *file) else if (ff->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { - struct fuse_inode *fi = get_fuse_inode(inode); - - spin_lock(&fi->lock); - fi->attr_version = atomic64_inc_return(&fc->attr_version); - i_size_write(inode, 0); - spin_unlock(&fi->lock); - file_update_time(file); - fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); - } if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) fuse_link_write_file(file); } +static void fuse_truncate_update_attr(struct inode *inode, struct file *file) +{ + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + fi->attr_version = atomic64_inc_return(&fc->attr_version); + i_size_write(inode, 0); + spin_unlock(&fi->lock); + file_update_time(file); + fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); +} + int fuse_open_common(struct inode *inode, struct file *file, bool isdir) { struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_conn *fc = fm->fc; int err; - bool is_wb_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && - fc->writeback_cache; - bool dax_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && FUSE_IS_DAX(inode); + bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; + bool is_wb_truncate = is_truncate && fc->writeback_cache; + bool dax_truncate = is_truncate && FUSE_IS_DAX(inode); if (fuse_is_bad(inode)) return -EIO; @@ -250,15 +251,18 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) fuse_set_nowrite(inode); err = fuse_do_open(fm, get_node_id(inode), file, isdir); - if (!err) + if (!err) { fuse_finish_open(inode, file); + if (is_truncate) + fuse_truncate_update_attr(inode, file); + } if (is_wb_truncate || dax_truncate) fuse_release_nowrite(inode); if (!err) { struct
fuse_file *ff = file->private_data; - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) + if (is_truncate) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); -- Gitee From ac76d07a28e0e1a6a37355bb49ea8556abd99248 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 1 Feb 2024 15:30:05 +0200 Subject: [PATCH 657/953] fuse: allocate ff->release_args only if release is needed ANBZ: #8700 commit e26ee4efbc79610b20e7abe9d96c87f33dacc1ff upstream. This removed the need to pass isdir argument to fuse_put_file(). Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/dir.c | 2 +- fs/fuse/file.c | 69 +++++++++++++++++++++++++++--------------------- fs/fuse/fuse_i.h | 2 +- 3 files changed, 41 insertions(+), 32 deletions(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index a8a7fc0e1754..053b891cbdf8 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -634,7 +634,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, goto out_err; err = -ENOMEM; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, true); if (!ff) goto out_put_forget_req; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 50f4831e7003..eba33b286089 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -55,7 +55,7 @@ struct fuse_release_args { struct inode *inode; }; -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) { struct fuse_file *ff; @@ -64,11 +64,13 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) return NULL; ff->fm = fm; - ff->release_args = kzalloc(sizeof(*ff->release_args), - GFP_KERNEL_ACCOUNT); - if (!ff->release_args) { - kfree(ff); - return NULL; + if (release) { + ff->release_args = kzalloc(sizeof(*ff->release_args), + GFP_KERNEL_ACCOUNT); + if (!ff->release_args) { + kfree(ff); + return NULL; + } } INIT_LIST_HEAD(&ff->write_entry); @@ -104,14 +106,14 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, kfree(ra); } -static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (refcount_dec_and_test(&ff->count)) { - struct fuse_args *args = &ff->release_args->args; + struct fuse_release_args *ra = ff->release_args; + struct fuse_args *args = (ra ? &ra->args : NULL); - if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) { - /* Do nothing when client does not implement 'open' */ - fuse_release_end(ff->fm, args, 0); + if (!args) { + /* Do nothing when server does not implement 'open' */ } else if (sync) { fuse_simple_request(ff->fm, args); fuse_release_end(ff->fm, args, 0); @@ -131,15 +133,16 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, struct fuse_conn *fc = fm->fc; struct fuse_file *ff; int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; + bool open = isdir ? !fc->no_opendir : !fc->no_open; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, open); if (!ff) return ERR_PTR(-ENOMEM); ff->fh = 0; /* Default for no-open */ ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); - if (isdir ? 
!fc->no_opendir : !fc->no_open) { + if (open) { struct fuse_open_out outarg; int err; @@ -147,11 +150,13 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, if (!err) { ff->fh = outarg.fh; ff->open_flags = outarg.open_flags; - } else if (err != -ENOSYS) { fuse_file_free(ff); return ERR_PTR(err); } else { + /* No release needed */ + kfree(ff->release_args); + ff->release_args = NULL; if (isdir) fc->no_opendir = 1; else @@ -277,7 +282,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) } static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, - unsigned int flags, int opcode) + unsigned int flags, int opcode, bool sync) { struct fuse_conn *fc = ff->fm->fc; struct fuse_release_args *ra = ff->release_args; @@ -295,6 +300,9 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, wake_up_interruptible_all(&ff->poll_wait); + if (!ra) + return; + ra->inarg.fh = ff->fh; ra->inarg.flags = flags; ra->args.in_numargs = 1; @@ -304,6 +312,13 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, ra->args.nodeid = ff->nodeid; ra->args.force = true; ra->args.nocreds = true; + + /* + * Hold inode until release is finished. + * From fuse_sync_release() the refcount is 1 and everything's + * synchronous, so we are fine with not doing igrab() here. + */ + ra->inode = sync ? NULL : igrab(&fi->inode); } void fuse_file_release(struct inode *inode, struct fuse_file *ff, @@ -313,14 +328,12 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, struct fuse_release_args *ra = ff->release_args; int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; - fuse_prepare_release(fi, ff, open_flags, opcode); + fuse_prepare_release(fi, ff, open_flags, opcode, false); - if (ff->flock) { + if (ra && ff->flock) { ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id); } - /* Hold inode until release is finished */ - ra->inode = igrab(inode); /* * Normally this will send the RELEASE request, however if @@ -331,7 +344,7 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, * synchronous RELEASE is allowed (and desirable) in this case * because the server can be trusted not to screw up. 
*/ - fuse_file_put(ff, ff->fm->fc->destroy, isdir); + fuse_file_put(ff, ff->fm->fc->destroy); } void fuse_release_common(struct file *file, bool isdir) @@ -366,12 +379,8 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags) { WARN_ON(refcount_read(&ff->count) > 1); - fuse_prepare_release(fi, ff, flags, FUSE_RELEASE); - /* - * iput(NULL) is a no-op and since the refcount is 1 and everything's - * synchronous, we are fine with not doing igrab() here" - */ - fuse_file_put(ff, true, false); + fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true); + fuse_file_put(ff, true); } EXPORT_SYMBOL_GPL(fuse_sync_release); @@ -928,7 +937,7 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, put_page(page); } if (ia->ff) - fuse_file_put(ia->ff, false, false); + fuse_file_put(ia->ff, false); fuse_io_free(ia); } @@ -1704,7 +1713,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) __free_page(ap->pages[i]); if (wpa->ia.ff) - fuse_file_put(wpa->ia.ff, false, false); + fuse_file_put(wpa->ia.ff, false); kfree(ap->pages); kfree(wpa); @@ -1946,7 +1955,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) ff = __fuse_write_file_get(fi); err = fuse_flush_times(inode, ff); if (ff) - fuse_file_put(ff, false, false); + fuse_file_put(ff, false); return err; } @@ -2344,7 +2353,7 @@ static int fuse_writepages(struct address_space *mapping, fuse_writepages_send(&data); } if (data.ff) - fuse_file_put(data.ff, false, false); + fuse_file_put(data.ff, false); kfree(data.orig_pages); out: diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index a1bdc9662512..a2d78d536059 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1036,7 +1036,7 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, */ int fuse_open_common(struct inode *inode, struct file *file, bool isdir); -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm); +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); void fuse_finish_open(struct inode *inode, struct file *file); -- Gitee From 5ae3ed454b17350eb44a548171d196aa296f6ffa Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 2 Feb 2024 13:30:30 +0200 Subject: [PATCH 658/953] fuse: break up fuse_open_common() ANBZ: #8700 commit 7de64d521bf92396b7da8ae0600188ea5d75a4c9 upstream. fuse_open_common() has a lot of code relevant only for regular files and O_TRUNC in particular. Copy the little bit of remaining code into fuse_dir_open() and stop using this common helper for directory open. Also split out fuse_dir_finish_open() from fuse_finish_open() before we add inode io modes to fuse_finish_open(). 
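To illustrate the flags kept for directories, here is a minimal sketch of
the userspace side, assuming a server that answers FUSE_OPENDIR over the
raw <linux/fuse.h> ABI (the helper name is hypothetical; only struct
fuse_open_out and the FOPEN_* flags come from the uapi header):

  #include <stdint.h>
  #include <linux/fuse.h>

  /* Hypothetical helper: fill the OPENDIR reply payload. */
  static void fill_opendir_reply(struct fuse_open_out *out, uint64_t fh)
  {
          out->fh = fh;
          /*
           * FOPEN_STREAM or FOPEN_NONSEEKABLE on a directory still makes
           * the kernel call nonseekable_open(), the behavior this patch
           * keeps for backward compatibility.
           */
          out->open_flags = FOPEN_NONSEEKABLE;
  }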
Suggested-by: Miklos Szeredi Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/dir.c | 25 ++++++++++++++++++++++++- fs/fuse/file.c | 9 ++------- fs/fuse/fuse_i.h | 5 ----- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 053b891cbdf8..4d5cff7a5daa 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1634,7 +1634,30 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, static int fuse_dir_open(struct inode *inode, struct file *file) { - return fuse_open_common(inode, file, true); + struct fuse_mount *fm = get_fuse_mount(inode); + int err; + + if (fuse_is_bad(inode)) + return -EIO; + + err = generic_file_open(inode, file); + if (err) + return err; + + err = fuse_do_open(fm, get_node_id(inode), file, true); + if (!err) { + struct fuse_file *ff = file->private_data; + + /* + * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for + * directories for backward compatibility, though it's unlikely + * to be useful. + */ + if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE)) + nonseekable_open(inode, file); + } + + return err; } static int fuse_dir_release(struct inode *inode, struct file *file) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index eba33b286089..214b0740dc3c 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -226,7 +226,7 @@ static void fuse_truncate_update_attr(struct inode *inode, struct file *file) fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); } -int fuse_open_common(struct inode *inode, struct file *file, bool isdir) +static int fuse_open(struct inode *inode, struct file *file) { struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_conn *fc = fm->fc; @@ -255,7 +255,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (is_wb_truncate || dax_truncate) fuse_set_nowrite(inode); - err = fuse_do_open(fm, get_node_id(inode), file, isdir); + err = fuse_do_open(fm, get_node_id(inode), file, false); if (!err) { fuse_finish_open(inode, file); if (is_truncate) @@ -353,11 +353,6 @@ void fuse_release_common(struct file *file, bool isdir) (fl_owner_t) file, isdir); } -static int fuse_open(struct inode *inode, struct file *file) -{ - return fuse_open_common(inode, file, false); -} - static int fuse_release(struct inode *inode, struct file *file) { struct fuse_conn *fc = get_fuse_conn(inode); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index a2d78d536059..c49629e6f49b 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1031,11 +1031,6 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, size_t count, int opcode); -/** - * Send OPEN or OPENDIR request - */ -int fuse_open_common(struct inode *inode, struct file *file, bool isdir); - struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); void fuse_finish_open(struct inode *inode, struct file *file); -- Gitee From 3236937838c5e332d4ad9bfa93fd31539704ca07 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 1 Feb 2024 15:38:06 +0200 Subject: [PATCH 659/953] fuse: prepare for failing open response ANBZ: #8700 commit d2c487f150ae00e3cb9faf57aceacc584e0a130c upstream. In preparation for inode io modes, a server open response could fail due to conflicting inode io modes. Allow returning an error from fuse_finish_open() and handle the error in the callers. 
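As a rough model (a sketch of the contract, not the actual fs/open.c
code), the VFS helper invokes the callback before marking the file
opened:

  /* Models the finish_open()/do_dentry_open() ordering. */
  static int finish_open_model(struct file *file, struct dentry *dentry,
                               int (*open)(struct inode *, struct file *))
  {
          int err = open(d_inode(dentry), file);

          if (err)
                  return err;             /* FMODE_OPENED never set */
          file->f_mode |= FMODE_OPENED;   /* only set on success */
          return 0;
  }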
fuse_finish_open() is used as the callback of finish_open(), so that FMODE_OPENED will not be set if fuse_finish_open() fails. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/dir.c | 8 +++++--- fs/fuse/file.c | 15 ++++++++++----- fs/fuse/fuse_i.h | 2 +- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 4d5cff7a5daa..3c11effe88c4 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -696,13 +696,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); fuse_dir_changed(dir); - err = finish_open(file, entry, generic_file_open); + err = generic_file_open(inode, file); + if (!err) { + file->private_data = ff; + err = finish_open(file, entry, fuse_finish_open); + } if (err) { fi = get_fuse_inode(inode); fuse_sync_release(fi, ff, flags); } else { - file->private_data = ff; - fuse_finish_open(inode, file); if (fm->fc->atomic_o_trunc && trunc) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 214b0740dc3c..84aa3cecca41 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -199,7 +199,7 @@ static void fuse_link_write_file(struct file *file) spin_unlock(&fi->lock); } -void fuse_finish_open(struct inode *inode, struct file *file) +int fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = get_fuse_conn(inode); @@ -211,6 +211,8 @@ void fuse_finish_open(struct inode *inode, struct file *file) if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) fuse_link_write_file(file); + + return 0; } static void fuse_truncate_update_attr(struct inode *inode, struct file *file) @@ -229,7 +231,9 @@ static void fuse_truncate_update_attr(struct inode *inode, struct file *file) static int fuse_open(struct inode *inode, struct file *file) { struct fuse_mount *fm = get_fuse_mount(inode); + struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = fm->fc; + struct fuse_file *ff; int err; bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; bool is_wb_truncate = is_truncate && fc->writeback_cache; @@ -257,16 +261,17 @@ static int fuse_open(struct inode *inode, struct file *file) err = fuse_do_open(fm, get_node_id(inode), file, false); if (!err) { - fuse_finish_open(inode, file); - if (is_truncate) + ff = file->private_data; + err = fuse_finish_open(inode, file); + if (err) + fuse_sync_release(fi, ff, file->f_flags); + else if (is_truncate) fuse_truncate_update_attr(inode, file); } if (is_wb_truncate || dax_truncate) fuse_release_nowrite(inode); if (!err) { - struct fuse_file *ff = file->private_data; - if (is_truncate) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index c49629e6f49b..06337972ef81 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1033,7 +1033,7 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); -void fuse_finish_open(struct inode *inode, struct file *file); +int fuse_finish_open(struct inode *inode, struct file *file); void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags); -- Gitee From 
ecf385497412e16640c2c9fb1f7c3c719814fdde Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 1 Feb 2024 16:26:15 +0200 Subject: [PATCH 660/953] fuse: introduce inode io modes ANBZ: #8700 commit cb098dd24bab8a315aa00bab1ccddb6be872156d upstream. The fuse inode io mode is determined by the mode of its open files/mmaps and parallel dio opens and expressed in the value of fi->iocachectr: > 0 - caching io: files open in caching mode or mmap on direct_io file < 0 - parallel dio: direct io mode with parallel dio writes enabled == 0 - direct io: no files open in caching mode and no files mmaped Note that iocachectr value of 0 might become positive or negative, while non-parallel dio is getting processed. direct_io mmap uses page cache, so first mmap will mark the file as ff->io_opened and increment fi->iocachectr to enter the caching io mode. If the server opens the file in caching mode while it is already open for parallel dio or vice versa the open fails. This allows executing parallel dio when inode is not in caching mode and no mmaps have been performed on the inode in question. Signed-off-by: Bernd Schubert Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/Makefile | 1 + fs/fuse/file.c | 15 +++++ fs/fuse/fuse_i.h | 17 ++++- fs/fuse/iomode.c | 158 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 189 insertions(+), 2 deletions(-) create mode 100644 fs/fuse/iomode.c diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 8dfb7f9c1f58..d2bad0c85d8c 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_VIRTIO_FS) += virtiofs.o obj-$(CONFIG_VIRT_FUSE) += virtfuse.o fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o +fuse-y += iomode.o fuse-$(CONFIG_FUSE_DAX) += dax.o virtiofs-y := virtio_fs.o diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 84aa3cecca41..37c41f409008 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -112,6 +112,9 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) struct fuse_release_args *ra = ff->release_args; struct fuse_args *args = (ra ? &ra->args : NULL); + if (ra && ra->inode) + fuse_file_io_release(ff, ra->inode); + if (!args) { /* Do nothing when server does not implement 'open' */ } else if (sync) { @@ -203,6 +206,11 @@ int fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = get_fuse_conn(inode); + int err; + + err = fuse_file_io_open(file, inode); + if (err) + return err; if (ff->open_flags & FOPEN_STREAM) stream_open(inode, file); @@ -2508,6 +2516,7 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fm->fc; + int rc; /* DAX mmap is superior to direct_io mmap */ if (FUSE_IS_DAX(file_inode(file))) @@ -2527,6 +2536,11 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) /* MAP_PRIVATE */ return generic_file_mmap(file, vma); } + + /* First mmap of direct_io file enters caching inode io mode. 
*/ + rc = fuse_file_cached_io_start(file_inode(file), ff); + if (rc) + return rc; } if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) @@ -3295,6 +3309,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); fi->writectr = 0; + fi->iocachectr = 0; init_waitqueue_head(&fi->page_waitq); fi->writepages = RB_ROOT; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 06337972ef81..a023c94c4565 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -112,7 +112,7 @@ struct fuse_inode { u64 attr_version; union { - /* Write related fields (regular file only) */ + /* read/write io cache (regular file only) */ struct { /* Files usable in writepage. Protected by fi->lock */ struct list_head write_files; @@ -124,6 +124,9 @@ struct fuse_inode { * (FUSE_NOWRITE) means more writes are blocked */ int writectr; + /** Number of files/maps using page cache */ + int iocachectr; + /* Waitq for writepage completion */ wait_queue_head_t page_waitq; @@ -188,6 +191,8 @@ enum { FUSE_I_BAD, /* Has btime */ FUSE_I_BTIME, + /* Wants or already has page cache IO */ + FUSE_I_CACHE_IO_MODE, }; struct fuse_conn; @@ -245,6 +250,9 @@ struct fuse_file { /** Wait queue head for poll */ wait_queue_head_t poll_wait; + /** Does file hold a fi->iocachectr refcount? */ + enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode; + /** Has flock been performed on this file? */ bool flock:1; }; @@ -1343,8 +1351,13 @@ int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa); int fuse_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa); -/* file.c */ +/* iomode.c */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); +int fuse_file_io_open(struct file *file, struct inode *inode); +void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); + +/* file.c */ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, unsigned int open_flags, bool isdir); void fuse_file_release(struct inode *inode, struct fuse_file *ff, diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c new file mode 100644 index 000000000000..a1a836b2aacc --- /dev/null +++ b/fs/fuse/iomode.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FUSE inode io modes. + * + * Copyright (c) 2024 CTERA Networks. + */ + +#include "fuse_i.h" + +#include +#include +#include +#include + +/* + * Start cached io mode, where parallel dio writes are not allowed. 
+ */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + int err = 0; + + /* There are no io modes if server does not implement open */ + if (!ff->release_args) + return 0; + + spin_lock(&fi->lock); + if (fi->iocachectr < 0) { + err = -ETXTBSY; + goto unlock; + } + WARN_ON(ff->iomode == IOM_UNCACHED); + if (ff->iomode == IOM_NONE) { + ff->iomode = IOM_CACHED; + if (fi->iocachectr == 0) + set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + fi->iocachectr++; + } +unlock: + spin_unlock(&fi->lock); + return err; +} + +static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + WARN_ON(fi->iocachectr <= 0); + WARN_ON(ff->iomode != IOM_CACHED); + ff->iomode = IOM_NONE; + fi->iocachectr--; + if (fi->iocachectr == 0) + clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + spin_unlock(&fi->lock); +} + +/* Start strictly uncached io mode where cache access is not allowed */ +static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + int err = 0; + + spin_lock(&fi->lock); + if (fi->iocachectr > 0) { + err = -ETXTBSY; + goto unlock; + } + WARN_ON(ff->iomode != IOM_NONE); + fi->iocachectr--; + ff->iomode = IOM_UNCACHED; +unlock: + spin_unlock(&fi->lock); + return err; +} + +static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + WARN_ON(fi->iocachectr >= 0); + WARN_ON(ff->iomode != IOM_UNCACHED); + ff->iomode = IOM_NONE; + fi->iocachectr++; + spin_unlock(&fi->lock); +} + +/* Request access to submit new io to inode via open file */ +int fuse_file_io_open(struct file *file, struct inode *inode) +{ + struct fuse_file *ff = file->private_data; + int err; + + /* + * io modes are not relevant with DAX and with server that does not + * implement open. + */ + if (FUSE_IS_DAX(inode) || !ff->release_args) + return 0; + + /* + * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO. + */ + if (!(ff->open_flags & FOPEN_DIRECT_IO)) + ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES; + + /* + * First parallel dio open denies caching inode io mode. + * First caching file open enters caching inode io mode. + * + * Note that if user opens a file open with O_DIRECT, but server did + * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT, + * so we put the inode in caching mode to prevent parallel dio. + */ + if (ff->open_flags & FOPEN_DIRECT_IO) { + if (ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) + err = fuse_file_uncached_io_start(inode, ff); + else + return 0; + } else { + err = fuse_file_cached_io_start(inode, ff); + } + if (err) + goto fail; + + return 0; + +fail: + pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n", + ff->open_flags, err); + /* + * The file open mode determines the inode io mode. + * Using incorrect open mode is a server mistake, which results in + * user visible failure of open() with EIO error. + */ + return -EIO; +} + +/* No more pending io and no new io possible to inode via open/mmapped file */ +void fuse_file_io_release(struct fuse_file *ff, struct inode *inode) +{ + /* + * Last parallel dio close allows caching inode io mode. + * Last caching file close exits caching inode io mode. 
+ */ + switch (ff->iomode) { + case IOM_NONE: + /* Nothing to do */ + break; + case IOM_UNCACHED: + fuse_file_uncached_io_end(inode, ff); + break; + case IOM_CACHED: + fuse_file_cached_io_end(inode, ff); + break; + } +} -- Gitee From 54d3a496188a51f01d80bb362d67d36a00637992 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 9 Feb 2024 16:54:37 +0200 Subject: [PATCH 661/953] fuse: allow parallel dio writes with FUSE_DIRECT_IO_ALLOW_MMAP ANBZ: #8700 commit 205c1d8026835746d8597e1aa70c370e014e83fa upstream. Instead of denying caching mode on parallel dio open, deny caching open only while parallel dio are in-progress and wait for in-progress parallel dio writes before entering inode caching io mode. This allows executing parallel dio when inode is not in caching mode even if shared mmap is allowed, but no mmaps have been performed on the inode in question. An mmap on direct_io file now waits for all in-progress parallel dio writes to complete, so parallel dio writes together with FUSE_DIRECT_IO_ALLOW_MMAP is enabled by this commit. Signed-off-by: Bernd Schubert Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 41 +++++++++++++++++++++++++++++------------ fs/fuse/fuse_i.h | 5 +++++ fs/fuse/iomode.c | 48 ++++++++++++++++++++++++++++++------------------ 3 files changed, 64 insertions(+), 30 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 37c41f409008..68acf361566e 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1334,6 +1334,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_inode *fi = get_fuse_inode(inode); /* Server side has to advise that it supports parallel dio writes. */ if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) @@ -1346,12 +1347,9 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from if (iocb->ki_flags & IOCB_APPEND) return true; - /* - * Combination of page access and direct-io is difficult, shared locks - * actually introduce a conflict. - */ - if (get_fuse_conn(inode)->direct_io_allow_mmap) - return true; + /* shared locks are not allowed with parallel page cache IO */ + if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state)) + return false; /* Parallel dio beyond EOF is not supported, at least for now. */ if (fuse_io_past_eof(iocb, from)) @@ -1364,6 +1362,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, bool *exclusive) { struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_file *ff = iocb->ki_filp->private_data; *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); if (*exclusive) { @@ -1371,10 +1370,14 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, } else { inode_lock_shared(inode); /* - * Previous check was without inode lock and might have raced, - * check again. + * New parallal dio allowed only if inode is not in caching + * mode and denies new opens in caching mode. This check + * should be performed only after taking shared inode lock. + * Previous past eof check was without inode lock and might + * have raced, so check it again. 
*/ - if (fuse_io_past_eof(iocb, from)) { + if (fuse_io_past_eof(iocb, from) || + fuse_file_uncached_io_start(inode, ff) != 0) { inode_unlock_shared(inode); inode_lock(inode); *exclusive = true; @@ -1382,11 +1385,16 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, } } -static void fuse_dio_unlock(struct inode *inode, bool exclusive) +static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive) { + struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_file *ff = iocb->ki_filp->private_data; + if (exclusive) { inode_unlock(inode); } else { + /* Allow opens in caching mode after last parallel dio end */ + fuse_file_uncached_io_end(inode, ff); inode_unlock_shared(inode); } } @@ -1668,7 +1676,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) fuse_write_update_attr(inode, iocb->ki_pos, res); } } - fuse_dio_unlock(inode, exclusive); + fuse_dio_unlock(iocb, exclusive); return res; } @@ -2522,6 +2530,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) if (FUSE_IS_DAX(file_inode(file))) return fuse_dax_mmap(file, vma); + /* + * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, + * as does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP. + */ if (ff->open_flags & FOPEN_DIRECT_IO) { /* * Can't provide the coherency needed for MAP_SHARED @@ -2537,7 +2549,11 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) return generic_file_mmap(file, vma); } - /* First mmap of direct_io file enters caching inode io mode. */ + /* + * First mmap of direct_io file enters caching inode io mode. + * Also waits for parallel dio writers to go into serial mode + * (exclusive instead of shared lock). + */ rc = fuse_file_cached_io_start(file_inode(file), ff); if (rc) return rc; @@ -3311,6 +3327,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) fi->writectr = 0; fi->iocachectr = 0; init_waitqueue_head(&fi->page_waitq); + init_waitqueue_head(&fi->direct_io_waitq); fi->writepages = RB_ROOT; if (IS_ENABLED(CONFIG_FUSE_DAX)) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index a023c94c4565..fe775e4f91ef 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -130,6 +130,9 @@ struct fuse_inode { /* Waitq for writepage completion */ wait_queue_head_t page_waitq; + /* waitq for direct-io completion */ + wait_queue_head_t direct_io_waitq; + /* List of writepage requestst (pending or sent) */ struct rb_root writepages; }; @@ -1353,6 +1356,8 @@ int fuse_fileattr_set(struct mnt_idmap *idmap, /* iomode.c */ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); +int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff); +void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff); int fuse_file_io_open(struct file *file, struct inode *inode); void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c index a1a836b2aacc..ea47c76b9df1 100644 --- a/fs/fuse/iomode.c +++ b/fs/fuse/iomode.c @@ -13,21 +13,37 @@ #include /* - * Start cached io mode, where parallel dio writes are not allowed. + * Return true if need to wait for new opens in caching mode. + */ +static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi) +{ + return READ_ONCE(fi->iocachectr) < 0; +} + +/* + * Start cached io mode. + * + * Blocks new parallel dio writes and waits for the in-progress parallel dio + * writes to complete. 
*/ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) { struct fuse_inode *fi = get_fuse_inode(inode); - int err = 0; /* There are no io modes if server does not implement open */ if (!ff->release_args) return 0; spin_lock(&fi->lock); - if (fi->iocachectr < 0) { - err = -ETXTBSY; - goto unlock; + /* + * Setting the bit advises new direct-io writes to use an exclusive + * lock - without it the wait below might be forever. + */ + while (fuse_is_io_cache_wait(fi)) { + set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + spin_unlock(&fi->lock); + wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi)); + spin_lock(&fi->lock); } WARN_ON(ff->iomode == IOM_UNCACHED); if (ff->iomode == IOM_NONE) { @@ -36,9 +52,8 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); fi->iocachectr++; } -unlock: spin_unlock(&fi->lock); - return err; + return 0; } static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) @@ -56,7 +71,7 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) } /* Start strictly uncached io mode where cache access is not allowed */ -static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) +int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) { struct fuse_inode *fi = get_fuse_inode(inode); int err = 0; @@ -74,7 +89,7 @@ static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff return err; } -static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) +void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) { struct fuse_inode *fi = get_fuse_inode(inode); @@ -83,6 +98,8 @@ static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) WARN_ON(ff->iomode != IOM_UNCACHED); ff->iomode = IOM_NONE; fi->iocachectr++; + if (!fi->iocachectr) + wake_up(&fi->direct_io_waitq); spin_unlock(&fi->lock); } @@ -106,21 +123,16 @@ int fuse_file_io_open(struct file *file, struct inode *inode) ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES; /* - * First parallel dio open denies caching inode io mode. * First caching file open enters caching inode io mode. * * Note that if user opens a file open with O_DIRECT, but server did * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT, * so we put the inode in caching mode to prevent parallel dio. */ - if (ff->open_flags & FOPEN_DIRECT_IO) { - if (ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) - err = fuse_file_uncached_io_start(inode, ff); - else - return 0; - } else { - err = fuse_file_cached_io_start(inode, ff); - } + if (ff->open_flags & FOPEN_DIRECT_IO) + return 0; + + err = fuse_file_cached_io_start(inode, ff); if (err) goto fail; -- Gitee From ece472dd1acb899fcefa951e0527c704cfa0f861 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Thu, 18 Jan 2024 19:46:37 +0800 Subject: [PATCH 662/953] anolis: irqchip/loongson-eiointc: Skip handling if there is no pending interrupt ANBZ: #8689 commit 3eece72ded7f ("irqchip/loongson-eiointc: Skip handling if there is no pending interrupt") It is one simple optimization in the interrupt dispatch function eiointc_irq_dispatch(). There are 256 IRQs supported for eiointc on Loongson-3A5000 and Loongson-2K2000 platform, 128 IRQs on Loongson-2K0500 platform, eiointc irq handler reads the bitmap and find pending irqs when irq happens. So there are several consecutive iocsr_read64 operations for the all bits to find all pending irqs. 
If the pending bitmap is zero, it means that there is no pending irq for
this irq bitmap range, so we can skip handling to avoid some useless
operations such as clearing the hw ISR.

Signed-off-by: Bibo Mao
Acked-by: Huacai Chen
Signed-off-by: Xianglai Li
Link: https://gitee.com/anolis/cloud-kernel/pulls/2998
Reviewed-by: Juxin Gao
---
drivers/irqchip/irq-loongson-eiointc.c | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 79715b9f80d6..804dbfa48016 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -212,6 +212,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+
+ /* Skip handling if pending bitmap is zero */
+ if (!pending)
+ continue;
+
+ /* Clear the IRQs */
iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
while (pending) {
int bit = __ffs(pending);
--
Gitee

From 95051fcd5d0f12331911ee9ef8cfbde4b76ceb02 Mon Sep 17 00:00:00 2001
From: Bibo Mao
Date: Thu, 18 Jan 2024 19:52:46 +0800
Subject: [PATCH 663/953] anolis: irqchip/loongson-eiointc: Remove explicit
 interrupt affinity restore on resume

ANBZ: #8689

commit 83c0708719f7 ("irqchip/loongson-eiointc: Remove explicit interrupt affinity restore on resume")

During suspend all CPUs except CPU0 are hot-unplugged and all active
interrupts are migrated to CPU0.

On resume eiointc_router_init() affines all interrupts to CPU0, so the
subsequent explicit interrupt affinity restore is redundant. Remove it.

[ tglx: Rewrote changelog ]

Signed-off-by: Bibo Mao
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20240130082722.2912576-4-maobibo@loongson.cn

--------------------------------

During suspend and resume, CPUs except CPU0 can be hot-unplugged and IRQs
will be migrated to CPU0. So it is not necessary to restore the irq
affinity for the eiointc irq controller when the system resumes. This
patch removes the irq affinity restore code from eiointc_resume().
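The ordering that makes the restore redundant can be sketched with stub
code (an illustration, not the driver source; the assumption is that the
driver re-runs its per-CPU router init from the CPU hotplug "starting"
callback, which is how irq-loongson-eiointc.c registers it):

  /* Stub standing in for the real per-CPU routing setup. */
  static int eiointc_router_init(unsigned int cpu) { return 0; }

  /* syscore resume: only CPU0 is online at this point. */
  static void eiointc_resume_sketch(void)
  {
          eiointc_router_init(0);         /* every vector targets CPU0 */
  }

  /* Each secondary CPU redoes its own routing when plugged back in. */
  static int eiointc_starting_cpu_sketch(unsigned int cpu)
  {
          return eiointc_router_init(cpu);
  }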
Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- drivers/irqchip/irq-loongson-eiointc.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 804dbfa48016..1daa87163d03 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -324,23 +324,7 @@ static int eiointc_suspend(void) static void eiointc_resume(void) { - int i, j; - struct irq_desc *desc; - struct irq_data *irq_data; - eiointc_router_init(0); - - for (i = 0; i < nr_pics; i++) { - for (j = 0; j < eiointc_priv[0]->vec_count; j++) { - desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j); - if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) { - raw_spin_lock(&desc->lock); - irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc)); - eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0); - raw_spin_unlock(&desc->lock); - } - } - } } static struct syscore_ops eiointc_syscore_ops = { -- Gitee From 7b12ed02d2db7001b0b1b4c2509661eabcb25e96 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 09:12:13 +0800 Subject: [PATCH 664/953] anolis: LoongArch: KVM: Start SW timer only when vcpu is blocking ANBZ: #8689 commit 8bc15d02d5fd ("LoongArch: KVM: Start SW timer only when vcpu is blocking") SW timer is enabled when vcpu thread is scheduled out, and it is to wake up vcpu from blocked queue. If vcpu thread is scheduled out but is not blocked, such as it is preempted by other threads, it is not necessary to enable SW timer. Since vcpu thread is still on running queue if it is preempted and SW timer is only to wake up vcpu on blocking queue, so SW timer is not useful in this situation. This patch enables SW timer only when vcpu is scheduled out and is blocking. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/timer.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 111328f60872..b0dafe0611ab 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -93,7 +93,8 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) /* * Freeze the soft-timer and sync the guest stable timer with it. */ - hrtimer_cancel(&vcpu->arch.swtimer); + if (kvm_vcpu_is_blocking(vcpu)) + hrtimer_cancel(&vcpu->arch.swtimer); /* * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 @@ -168,26 +169,20 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) * Here judge one-shot timer fired by checking whether TVAL is larger * than TCFG */ - if (ticks < cfg) { + if (ticks < cfg) delta = tick_to_ns(vcpu, ticks); - expire = ktime_add_ns(ktime_get(), delta); - vcpu->arch.expire = expire; + else + delta = 0; + + expire = ktime_add_ns(ktime_get(), delta); + vcpu->arch.expire = expire; + if (kvm_vcpu_is_blocking(vcpu)) { /* * HRTIMER_MODE_PINNED is suggested since vcpu may run in * the same physical cpu in next time */ hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); - } else if (vcpu->stat.generic.blocking) { - /* - * Inject timer interrupt so that halt polling can dectect and exit. - * VCPU is scheduled out already and sleeps in rcuwait queue and - * will not poll pending events again. 
kvm_queue_irq() is not enough, - * hrtimer swtimer should be used here. - */ - expire = ktime_add_ns(ktime_get(), 10); - vcpu->arch.expire = expire; - hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); } } -- Gitee From c698ddf9506b8da6ddbbd05aa6a1ecfef9ddd8ed Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 09:12:13 +0800 Subject: [PATCH 665/953] anolis: LoongArch: KVM: Do not restart SW timer when it is expired ANBZ: #8689 commit f66228053e42 ("LoongArch: KVM: Do not restart SW timer when it is expired") LoongArch VCPUs have their own separate HW timers. SW timer is to wake up blocked vcpu thread, rather than HW timer emulation. When blocking vcpu scheduled out, SW timer is used to wakeup blocked vcpu thread and injects timer interrupt. It does not care about whether guest timer is in period mode or oneshot mode, and SW timer needs not to be restarted since vcpu has been woken. This patch does not restart SW timer when it is expired. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/timer.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index b0dafe0611ab..bcc6b6d063d9 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick) return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz); } -/* - * Push timer forward on timeout. - * Handle an hrtimer event by push the hrtimer forward a period. - */ -static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu) -{ - unsigned long cfg, period; - - /* Add periodic tick to current expire time */ - cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG); - if (cfg & CSR_TCFG_PERIOD) { - period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL); - hrtimer_add_expires_ns(&vcpu->arch.swtimer, period); - return HRTIMER_RESTART; - } else - return HRTIMER_NORESTART; -} - /* Low level hrtimer wake routine */ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) { @@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) kvm_queue_irq(vcpu, INT_TI); rcuwait_wake_up(&vcpu->wait); - return kvm_count_timeout(vcpu); + return HRTIMER_NORESTART; } /* -- Gitee From ecb657e0ead957a0049fcd6afb452be7e2693cc2 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 09:12:13 +0800 Subject: [PATCH 666/953] anolis: LoongArch: KVM: Set reserved bits as zero in CPUCFG ANBZ: #8689 commit aebd3bd586c6 ("LoongArch: KVM: Set reserved bits as zero in CPUCFG") Supported CPUCFG information comes from function _kvm_get_cpucfg_mask(). A bit should be zero if it is reserved by HW or if it is not supported by KVM. Also LoongArch software page table walk feature defined in CPUCFG2_LSPW is supported by KVM, it should be enabled by default. 
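From inside a guest the effect is observable with the cpucfg instruction;
a minimal probe might look like this (a sketch: the wrapper is spelled out
for illustration and matches what read_cpucfg() in asm/loongarch.h does;
LOONGARCH_CPUCFG2 and CPUCFG2_LSPW also come from that header):

  /* 'cpucfg rd, rj' returns the 32-bit config word for index rj. */
  static inline unsigned int cpucfg_read(unsigned int idx)
  {
          unsigned int val;

          asm volatile("cpucfg %0, %1" : "=r"(val) : "r"(idx));
          return val;
  }

  /* Under KVM, reserved bits now read as zero and LSPW reads as set. */
  static inline unsigned int guest_has_lspw(void)
  {
          return cpucfg_read(LOONGARCH_CPUCFG2) & CPUCFG2_LSPW;
  }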
Signed-off-by: Bibo Mao
Signed-off-by: Huacai Chen
Signed-off-by: Xianglai Li
Link: https://gitee.com/anolis/cloud-kernel/pulls/2998
Reviewed-by: Juxin Gao
---
arch/loongarch/kvm/vcpu.c | 33 ++++++++++++++++++++++++++-------
1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 36106922b5d7..3a8779065f73 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -304,11 +304,18 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
return -EINVAL;
switch (id) {
- case 2:
+ case LOONGARCH_CPUCFG0:
+ *v = GENMASK(31, 0);
+ return 0;
+ case LOONGARCH_CPUCFG1:
+ /* CPUCFG1_MSGINT is not supported by KVM */
+ *v = GENMASK(25, 0);
+ return 0;
+ case LOONGARCH_CPUCFG2:
/* CPUCFG2 features unconditionally supported by KVM */
*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
- CPUCFG2_LAM;
+ CPUCFG2_LSPW | CPUCFG2_LAM;
/*
* For the ISA extensions listed below, if one is supported
* by the host, then it is also supported by KVM.
*/
if (cpu_has_lsx)
*v |= CPUCFG2_LSX;
if (cpu_has_lasx)
*v |= CPUCFG2_LASX;
+ return 0;
+ case LOONGARCH_CPUCFG3:
+ *v = GENMASK(16, 0);
+ return 0;
+ case LOONGARCH_CPUCFG4:
+ case LOONGARCH_CPUCFG5:
+ *v = GENMASK(31, 0);
+ return 0;
+ case LOONGARCH_CPUCFG16:
+ *v = GENMASK(16, 0);
+ return 0;
+ case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
+ *v = GENMASK(30, 0);
return 0;
default:
/*
- * No restrictions on other valid CPUCFG IDs' values, but
- * CPUCFG data is limited to 32 bits as the LoongArch ISA
- * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
+ * CPUCFG bits should be zero if reserved by HW or not
+ * supported by KVM.
*/
- *v = U32_MAX;
+ *v = 0;
return 0;
}
}
@@ -344,7 +363,7 @@ static int kvm_check_cpucfg(int id, u64 val)
return -EINVAL;
switch (id) {
- case 2:
+ case LOONGARCH_CPUCFG2:
if (!(val & CPUCFG2_LLFTP))
/* Guests must have a constant timer */
return -EINVAL;
--
Gitee

From 420c33af60cceec4210e23f6ed03c639b949e822 Mon Sep 17 00:00:00 2001
From: Bibo Mao
Date: Mon, 25 Mar 2024 14:37:44 +0800
Subject: [PATCH 667/953] anolis: LoongArch/smp: Refine some ipi functions on
 LoongArch platform

ANBZ: #8689

This is a code refinement of the ipi handling on the LoongArch platform,
with three modifications:

1. Add the generic function get_percpu_irq() and replace the percpu irq
helpers get_ipi_irq()/get_pmc_irq()/get_timer_irq() with it.

2. Change the encoding of the action parameter taken by
loongson_send_ipi_single() and loongson_send_ipi_mask(): the sender side
now passes a plain vector number (decimal encoding) rather than a binary
bitmap. The ipi hardware takes the vector number from the sender and
converts it into a bitmap in the receiver's ipi message buffer, so the
receiver still sees a bitmap encoding; see the sketch below.

3. Add a struct smp_ops on the LoongArch platform so that pv ipi can be
used later.
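The encoding change in point 2 can be sketched as follows (illustrative
only; the IOCSR register names and accessors are the ones the kernel
already uses, while the two function names here are made up):

  /* Sender: write the vector NUMBER, one blocking IOCSR write. */
  static void send_reschedule_ipi(int cpu)
  {
          u32 val = IOCSR_IPI_SEND_BLOCKING | ACTION_RESCHEDULE; /* 1 */

          val |= cpu << IOCSR_IPI_SEND_CPU_SHIFT;
          iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
  }

  /* Receiver: hw has turned the number back into a BITMAP bit. */
  static void handle_ipi_status(void)
  {
          u32 pending = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);

          if (pending & SMP_RESCHEDULE)   /* BIT(ACTION_RESCHEDULE) */
                  scheduler_ipi();
  }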
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/hardirq.h | 4 ++ arch/loongarch/include/asm/irq.h | 11 +++++- arch/loongarch/include/asm/smp.h | 31 +++++++-------- arch/loongarch/kernel/irq.c | 22 +---------- arch/loongarch/kernel/perf_event.c | 14 +------ arch/loongarch/kernel/smp.c | 58 +++++++++++++++++++--------- arch/loongarch/kernel/time.c | 12 +----- 7 files changed, 72 insertions(+), 80 deletions(-) diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index 0ef3b18f8980..9f0038e19c7f 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -12,6 +12,10 @@ extern void ack_bad_irq(unsigned int irq); #define ack_bad_irq ack_bad_irq +enum ipi_msg_type { + IPI_RESCHEDULE, + IPI_CALL_FUNCTION, +}; #define NR_IPI 2 typedef struct { diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index ed8e72db0dba..85a3315597b6 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -118,9 +118,18 @@ extern struct fwnode_handle *liointc_handle; extern struct fwnode_handle *pch_lpc_handle; extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; -extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); extern void fixup_irqs(void); +static inline int get_percpu_irq(int vector) +{ + struct irq_domain *d; + + d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + if (d) + return irq_create_mapping(d, vector); + + return -EINVAL; +} #include #endif /* _ASM_IRQ_H */ diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index f81e5f01d619..75d30529748c 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -12,6 +12,13 @@ #include #include +struct smp_ops { + void (*init_ipi)(void); + void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); + void (*send_ipi_single)(int cpu, unsigned int action); +}; + +extern struct smp_ops smp_ops; extern int smp_num_siblings; extern int num_processors; extern int disabled_cpus; @@ -24,8 +31,6 @@ void loongson_prepare_cpus(unsigned int max_cpus); void loongson_boot_secondary(int cpu, struct task_struct *idle); void loongson_init_secondary(void); void loongson_smp_finish(void); -void loongson_send_ipi_single(int cpu, unsigned int action); -void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action); #ifdef CONFIG_HOTPLUG_CPU int loongson_cpu_disable(void); void loongson_cpu_die(unsigned int cpu); @@ -59,9 +64,12 @@ extern int __cpu_logical_map[NR_CPUS]; #define cpu_physical_id(cpu) cpu_logical_map(cpu) -#define SMP_BOOT_CPU 0x1 -#define SMP_RESCHEDULE 0x2 -#define SMP_CALL_FUNCTION 0x4 +#define ACTION_BOOT_CPU 0 +#define ACTION_RESCHEDULE 1 +#define ACTION_CALL_FUNCTION 2 +#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU) +#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE) +#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION) struct secondary_data { unsigned long stack; @@ -71,7 +79,8 @@ extern struct secondary_data cpuboot_data; extern asmlinkage void smpboot_entry(void); extern asmlinkage void start_secondary(void); - +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void calculate_cpu_foreign_map(void); /* @@ -79,16 +88,6 @@ extern void calculate_cpu_foreign_map(void); */ extern void show_ipi_list(struct seq_file *p, int prec); -static inline 
void arch_send_call_function_single_ipi(int cpu) -{ - loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION); -} - -static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) -{ - loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION); -} - #ifdef CONFIG_HOTPLUG_CPU static inline int __cpu_disable(void) { diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index a1c1d9576393..73cd3b7703e2 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -86,16 +86,6 @@ static void __init init_vec_parent_group(void) acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); } -static int __init get_ipi_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_IPI); - - return -EINVAL; -} - #ifdef CONFIG_HOTPLUG_CPU static void handle_irq_affinity(void) { @@ -135,10 +125,6 @@ void fixup_irqs(void) void __init init_IRQ(void) { int i, ret; -#ifdef CONFIG_SMP - int r, ipi_irq; - static int ipi_dummy_dev; -#endif unsigned int order = get_order(IRQ_STACK_SIZE); struct page *page; @@ -154,13 +140,7 @@ void __init init_IRQ(void) irqchip_init(); } #ifdef CONFIG_SMP - ipi_irq = get_ipi_irq(); - if (ipi_irq < 0) - panic("IPI IRQ mapping failed\n"); - irq_set_percpu_devid(ipi_irq); - r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev); - if (r < 0) - panic("IPI IRQ request failed\n"); + smp_ops.init_ipi(); #endif for (i = 0; i < NR_IRQS; i++) diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index 0491bf453cd4..3265c8f33223 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -456,16 +456,6 @@ static void loongarch_pmu_disable(struct pmu *pmu) static DEFINE_MUTEX(pmu_reserve_mutex); static atomic_t active_events = ATOMIC_INIT(0); -static int get_pmc_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_PCOV); - - return -EINVAL; -} - static void reset_counters(void *arg); static int __hw_perf_event_init(struct perf_event *event); @@ -473,7 +463,7 @@ static void hw_perf_event_destroy(struct perf_event *event) { if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { on_each_cpu(reset_counters, NULL, 1); - free_irq(get_pmc_irq(), &loongarch_pmu); + free_irq(get_percpu_irq(INT_PCOV), &loongarch_pmu); mutex_unlock(&pmu_reserve_mutex); } } @@ -562,7 +552,7 @@ static int loongarch_pmu_event_init(struct perf_event *event) if (event->cpu >= 0 && !cpu_online(event->cpu)) return -ENODEV; - irq = get_pmc_irq(); + irq = get_percpu_irq(INT_PCOV); flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED; if (!atomic_inc_not_zero(&active_events)) { mutex_lock(&pmu_reserve_mutex); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 5fbdd172a248..9c3557760b5b 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -67,11 +67,6 @@ static cpumask_t cpu_core_setup_map; struct secondary_data cpuboot_data; static DEFINE_PER_CPU(int, cpu_state); -enum ipi_msg_type { - IPI_RESCHEDULE, - IPI_CALL_FUNCTION, -}; - static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNCTION] = "Function call interrupts", @@ -191,24 +186,19 @@ static u32 ipi_read_clear(int cpu) static void ipi_write_action(int cpu, u32 action) { - unsigned int irq = 0; - - while ((irq = ffs(action))) { - 
uint32_t val = IOCSR_IPI_SEND_BLOCKING; + uint32_t val; - val |= (irq - 1); - val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); - iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); - action &= ~BIT(irq - 1); - } + val = IOCSR_IPI_SEND_BLOCKING | action; + val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); + iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); } -void loongson_send_ipi_single(int cpu, unsigned int action) +static void loongson_send_ipi_single(int cpu, unsigned int action) { ipi_write_action(cpu_logical_map(cpu), (u32)action); } -void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) +static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; @@ -216,6 +206,16 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) ipi_write_action(cpu_logical_map(i), (u32)action); } +void arch_send_call_function_single_ipi(int cpu) +{ + smp_ops.send_ipi_single(cpu, ACTION_CALL_FUNCTION); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_ops.send_ipi_mask(mask, ACTION_CALL_FUNCTION); +} + /* * This function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -223,11 +223,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) */ void arch_smp_send_reschedule(int cpu) { - loongson_send_ipi_single(cpu, SMP_RESCHEDULE); + smp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE); } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); -irqreturn_t loongson_ipi_interrupt(int irq, void *dev) +static irqreturn_t loongson_ipi_interrupt(int irq, void *dev) { unsigned int action; unsigned int cpu = smp_processor_id(); @@ -247,6 +247,26 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev) return IRQ_HANDLED; } +static void loongson_init_ipi(void) +{ + int r, ipi_irq; + + ipi_irq = get_percpu_irq(INT_IPI); + if (ipi_irq < 0) + panic("IPI IRQ mapping failed\n"); + + irq_set_percpu_devid(ipi_irq); + r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat); + if (r < 0) + panic("IPI IRQ request failed\n"); +} + +struct smp_ops smp_ops = { + .init_ipi = loongson_init_ipi, + .send_ipi_single = loongson_send_ipi_single, + .send_ipi_mask = loongson_send_ipi_mask, +}; + static void __init fdt_smp_setup(void) { #ifdef CONFIG_OF @@ -322,7 +342,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle) csr_mail_send(entry, cpu_logical_map(cpu), 0); - loongson_send_ipi_single(cpu, SMP_BOOT_CPU); + loongson_send_ipi_single(cpu, ACTION_BOOT_CPU); } /* diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index e7015f7b70e3..fd5354f9be7c 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -123,16 +123,6 @@ void sync_counter(void) csr_write64(init_offset, LOONGARCH_CSR_CNTC); } -static int get_timer_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_TI); - - return -EINVAL; -} - int constant_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -142,7 +132,7 @@ int constant_clockevent_init(void) static int irq = 0, timer_irq_installed = 0; if (!timer_irq_installed) { - irq = get_timer_irq(); + irq = get_percpu_irq(INT_TI); if (irq < 0) pr_err("Failed to map irq %d (timer)\n", irq); } -- Gitee From 2024f000dfa8f80dc62e18789cf1d425f16c4396 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 19 Jan 2024 09:37:28 +0800 Subject: [PATCH 668/953] anolis: LoongArch: KVM: Add hypercall instruction 
emulation support ANBZ: #8689 On LoongArch systems there is a hypercall instruction dedicated to virtualization. When the host executes this instruction an illegal instruction exception is reported, while it traps into the hypervisor when executed in VM mode. When the hypercall is emulated, the A0 register is set to KVM_HCALL_INVALID_CODE rather than injecting an EXCCODE_INE invalid instruction exception, so the VM can continue executing the following code. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/Kbuild | 1 - arch/loongarch/include/asm/kvm_para.h | 26 ++++++++++++++++++++++++++ arch/loongarch/include/uapi/asm/Kbuild | 2 -- arch/loongarch/kvm/exit.c | 10 ++++++++++ 4 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 arch/loongarch/include/asm/kvm_para.h delete mode 100644 arch/loongarch/include/uapi/asm/Kbuild diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index dede0b422cfb..27f66930ab6a 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -24,4 +24,3 @@ generic-y += poll.h generic-y += param.h generic-y += posix_types.h generic-y += resource.h -generic-y += kvm_para.h diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h new file mode 100644 index 000000000000..d48f993ae206 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_para.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_KVM_PARA_H +#define _ASM_LOONGARCH_KVM_PARA_H + +/* + * LoongArch hypercall return code + */ +#define KVM_HCALL_STATUS_SUCCESS 0 +#define KVM_HCALL_INVALID_CODE -1UL +#define KVM_HCALL_INVALID_PARAMETER -2UL + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} #endif /* _ASM_LOONGARCH_KVM_PARA_H */ diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild deleted file mode 100644 index 4aa680ca2e5f..000000000000 --- a/arch/loongarch/include/uapi/asm/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -generic-y += kvm_para.h diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ed1d89d53e2e..923bbca9bd22 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -685,6 +685,15 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) +{ + update_pc(&vcpu->arch); + + /* Treat it as a noop instruction, only set return value */ + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + return RESUME_GUEST; +} + /* * LoongArch KVM callback handling for unimplemented guest exiting */ @@ -716,6 +725,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, + [EXCCODE_HVC] = kvm_handle_hypercall, }; int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) -- Gitee From 8f55d5e5437ef1230015aa99689db013b515967d Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 10:23:16 +0800 Subject: [PATCH 669/953] anolis: LoongArch: KVM: Add cpucfg area for kvm hypervisor ANBZ: #8689 Instruction cpucfg can be used to get processor features.
A trap exception is raised when it is executed in VM mode, and it can also be used to provide CPU features to the VM. On real hardware cpucfg area 0 - 20 is used. Here the dedicated area 0x40000000 -- 0x400000ff is used for the KVM hypervisor to provide PV features, and the area can be extended for other hypervisors in the future. This area will never be used by real hardware; it is only used by software. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/inst.h | 1 + arch/loongarch/include/asm/loongarch.h | 10 +++++ arch/loongarch/kvm/exit.c | 59 +++++++++++++++++++------- 3 files changed, 54 insertions(+), 16 deletions(-) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 008a88ead60d..7a3829008c59 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -65,6 +65,7 @@ enum reg2_op { revbd_op = 0x0f, revh2w_op = 0x10, revhd_op = 0x11, + cpucfg_op = 0x1b, iocsrrdb_op = 0x19200, iocsrrdh_op = 0x19201, iocsrrdw_op = 0x19202, diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 67be98049038..2563d9292517 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -158,6 +158,16 @@ #define CPUCFG48_VFPU_CG BIT(2) #define CPUCFG48_RAM_CG BIT(3) +/* + * cpucfg index area: 0x40000000 -- 0x400000ff + * SW emulation for KVM hypervisor + */ +#define CPUCFG_KVM_BASE 0x40000000UL +#define CPUCFG_KVM_SIZE 0x100 +#define CPUCFG_KVM_SIG CPUCFG_KVM_BASE +#define KVM_SIGNATURE "KVM\0" +#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) + #ifndef __ASSEMBLY__ /* CSR */ diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 923bbca9bd22..a8d3b652d3ea 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -206,10 +206,50 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) { int rd, rj; unsigned int index; + unsigned long plv; + + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + ++vcpu->stat.cpucfg_exits; + index = vcpu->arch.gprs[rj]; + + /* + * By LoongArch Reference Manual 2.2.10.5 + * Return value is 0 for undefined cpucfg index + * + * Disable preemption since hw gcsr is accessed + */ + preempt_disable(); + plv = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD) >> CSR_CRMD_PLV_SHIFT; + switch (index) { + case 0 ...
(KVM_MAX_CPUCFG_REGS - 1): + vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + break; + case CPUCFG_KVM_SIG: + /* + * Cpucfg emulation between 0x40000000 -- 0x400000ff + * Return value is 0 if executed in user mode + */ + if ((plv & CSR_CRMD_PLV) == PLV_KERN) + vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; + else + vcpu->arch.gprs[rd] = 0; + break; + default: + vcpu->arch.gprs[rd] = 0; + break; + } + + preempt_enable(); + return EMULATE_DONE; +} + +static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +{ unsigned long curr_pc; larch_inst inst; enum emulation_result er = EMULATE_DONE; @@ -224,21 +264,8 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) er = EMULATE_FAIL; switch (((inst.word >> 24) & 0xff)) { case 0x0: /* CPUCFG GSPR */ - if (inst.reg2_format.opcode == 0x1B) { - rd = inst.reg2_format.rd; - rj = inst.reg2_format.rj; - ++vcpu->stat.cpucfg_exits; - index = vcpu->arch.gprs[rj]; - er = EMULATE_DONE; - /* - * By LoongArch Reference Manual 2.2.10.5 - * return value is 0 for undefined cpucfg index - */ - if (index < KVM_MAX_CPUCFG_REGS) - vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; - else - vcpu->arch.gprs[rd] = 0; - } + if (inst.reg2_format.opcode == cpucfg_op) + er = kvm_emu_cpucfg(vcpu, inst); break; case 0x4: /* CSR{RD,WR,XCHG} GSPR */ er = kvm_handle_csr(vcpu, inst); -- Gitee From b6841542bc8712cee76b562985cb337f9de6b210 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 5 Jan 2024 16:20:34 +0800 Subject: [PATCH 670/953] anolis: LoongArch: KVM: Add vcpu search support from physical cpuid ANBZ: #8689 Physical cpuid is used for interrupt routing by irqchips such as the ipi/msi/extioi interrupt controllers. Physical cpuid is stored in CSR register LOONGARCH_CSR_CPUID; it cannot be changed once the vcpu is created, and two vcpus cannot share the same physical cpuid. Different irqchips declare different physical cpuid sizes: the max cpuid value for CSR LOONGARCH_CSR_CPUID on 3A5000 is 512, the max cpuid supported by the IPI hardware is 1024, 256 for the extioi irqchip, and 65536 for the MSI irqchip. The smallest value among all interrupt controllers is selected for now, so the max cpuid size is defined as 256 by KVM, which comes from the extioi irqchip. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 26 ++++++++ arch/loongarch/include/asm/kvm_vcpu.h | 1 + arch/loongarch/kvm/vcpu.c | 93 ++++++++++++++++++++++++++- arch/loongarch/kvm/vm.c | 11 ++++ 4 files changed, 130 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 5bdb34b2c5d6..e5ba021679f4 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -64,6 +64,30 @@ struct kvm_world_switch { #define MAX_PGTABLE_LEVELS 4 +/* + * Physical cpu id is used for interrupt routing, there are different + * definitions about physical cpuid on different hardwares. + * For LOONGARCH_CSR_CPUID register, max cpuid size is 512 + * For IPI HW, max dest CPUID size 1024 + * For extioi interrupt controller, max dest CPUID size is 256 + * For MSI interrupt controller, max supported CPUID size is 65536 + * + * Currently max CPUID is defined as 256 for KVM hypervisor, in future + * it will be expanded to 4096, including 16 packages at most.
And every + * package supports at most 256 vcpus + */ +#define KVM_MAX_PHYID 256 + +struct kvm_phyid_info { + struct kvm_vcpu *vcpu; + bool enabled; +}; + +struct kvm_phyid_map { + int max_phyid; + struct kvm_phyid_info phys_map[KVM_MAX_PHYID]; +}; + struct kvm_arch { /* Guest physical mm */ kvm_pte_t *pgd; @@ -71,6 +95,8 @@ struct kvm_arch { unsigned long invalid_ptes[MAX_PGTABLE_LEVELS]; unsigned int pte_shifts[MAX_PGTABLE_LEVELS]; unsigned int root_level; + spinlock_t phyid_map_lock; + struct kvm_phyid_map *phyid_map; s64 time_offset; struct kvm_context __percpu *vmcs; diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 0cb4fdb8a9b5..9f53950959da 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -81,6 +81,7 @@ void kvm_save_timer(struct kvm_vcpu *vcpu); void kvm_restore_timer(struct kvm_vcpu *vcpu); int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid); /* * Loongarch KVM guest interrupt handling diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 3a8779065f73..b633fd28b8db 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -274,6 +274,95 @@ static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) return 0; } +static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val) +{ + int cpuid; + struct loongarch_csrs *csr = vcpu->arch.csr; + struct kvm_phyid_map *map; + + if (val >= KVM_MAX_PHYID) + return -EINVAL; + + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + map = vcpu->kvm->arch.phyid_map; + spin_lock(&vcpu->kvm->arch.phyid_map_lock); + if (map->phys_map[cpuid].enabled) { + /* + * Cpuid is already set before + * Forbid changing different cpuid at runtime + */ + if (cpuid != val) { + /* + * Cpuid 0 is initial value for vcpu, maybe invalid + * unset value for vcpu + */ + if (cpuid) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + } else { + /* Discard duplicated cpuid set */ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + } + + if (map->phys_map[val].enabled) { + /* + * New cpuid is already set with other vcpu + * Forbid sharing the same cpuid between different vcpus + */ + if (map->phys_map[val].vcpu != vcpu) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + + /* Discard duplicated cpuid set operation*/ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val); + map->phys_map[val].enabled = true; + map->phys_map[val].vcpu = vcpu; + if (map->max_phyid < val) + map->max_phyid = val; + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; +} + +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid) +{ + struct kvm_phyid_map *map; + + if (cpuid >= KVM_MAX_PHYID) + return NULL; + + map = kvm->arch.phyid_map; + if (map->phys_map[cpuid].enabled) + return map->phys_map[cpuid].vcpu; + + return NULL; +} + +static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) +{ + int cpuid; + struct loongarch_csrs *csr = vcpu->arch.csr; + struct kvm_phyid_map *map; + + map = vcpu->kvm->arch.phyid_map; + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + if (cpuid >= KVM_MAX_PHYID) + return; + + if (map->phys_map[cpuid].enabled) { + map->phys_map[cpuid].vcpu = NULL; + map->phys_map[cpuid].enabled = false; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, 0); + } +} + static int _kvm_setcsr(struct kvm_vcpu *vcpu, 
unsigned int id, u64 val) { int ret = 0, gintc; @@ -291,7 +380,8 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); return ret; - } + } else if (id == LOONGARCH_CSR_CPUID) + return kvm_set_cpuid(vcpu, val); kvm_write_sw_gcsr(csr, id, val); @@ -943,6 +1033,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) hrtimer_cancel(&vcpu->arch.swtimer); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); kfree(vcpu->arch.csr); + kvm_drop_cpuid(vcpu); /* * If the vCPU is freed and reused as another vCPU, we don't want the diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 0a37f6fa8f2d..6006a28653ad 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -30,6 +30,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.pgd) return -ENOMEM; + kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), + GFP_KERNEL_ACCOUNT); + if (!kvm->arch.phyid_map) { + free_page((unsigned long)kvm->arch.pgd); + kvm->arch.pgd = NULL; + return -ENOMEM; + } + kvm_init_vmcs(kvm); kvm->arch.gpa_size = BIT(cpu_vabits - 1); kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; @@ -44,6 +52,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) for (i = 0; i <= kvm->arch.root_level; i++) kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3); + spin_lock_init(&kvm->arch.phyid_map_lock); return 0; } @@ -51,7 +60,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_destroy_vcpus(kvm); free_page((unsigned long)kvm->arch.pgd); + kvfree(kvm->arch.phyid_map); kvm->arch.pgd = NULL; + kvm->arch.phyid_map = NULL; } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) -- Gitee From 4907b849d1dbb2d2292558f643f47929967587e0 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 1 Mar 2024 18:17:29 +0800 Subject: [PATCH 671/953] anolis: LoongArch: KVM: Add pv ipi support on kvm side ANBZ: #8689 On LoongArch systems the IPI hardware uses iocsr registers: there is one iocsr register access on IPI sending, and two iocsr accesses on IPI receiving in the IPI interrupt handler. In VM mode every iocsr access causes the VM to trap into the hypervisor, so one hardware IPI notification costs three traps. PV IPI is added for the VM: the hypercall instruction is used on the IPI sender side, and the hypervisor injects an SWI to the destination vcpu. In the SWI interrupt handler only the estat CSR register is written to clear the irq, and estat CSR accesses do not trap into the hypervisor. So with PV IPI there is one trap on the sender side and no trap on the receiver side; one IPI notification costs only one trap. This patch also adds IPI multicast support, using a method similar to x86. With multicast support an IPI notification can be sent to at most 128 vcpus at one time, which greatly reduces the number of traps into the hypervisor.
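As a quick illustration of the hypercall ABI just described, here is a minimal guest-side sketch, not part of this patch, that targets a single destination vCPU; it assumes the kvm_hypercall3() wrapper and KVM_HCALL_FUNC_PV_IPI from this series, and the helper name pv_ipi_one is hypothetical:

/*
 * Hedged sketch: a1/a2 carry the low/high 64 bits of the destination
 * bitmap and a3 the base physical cpuid, so bit 0 of the low word
 * addresses "phys_id" itself.
 */
static void pv_ipi_one(int phys_id)
{
	kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, 1UL, 0UL, phys_id);
}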
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 1 + arch/loongarch/include/asm/kvm_para.h | 130 +++++++++++++++++++++++++ arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/kvm/exit.c | 76 ++++++++++++++- arch/loongarch/kvm/vcpu.c | 1 + 5 files changed, 207 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index e5ba021679f4..c31619ae1fb4 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -43,6 +43,7 @@ struct kvm_vcpu_stat { u64 idle_exits; u64 cpucfg_exits; u64 signal_exits; + u64 hypercall_exits; }; #define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index d48f993ae206..a82bffbbf8a1 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -2,6 +2,16 @@ #ifndef _ASM_LOONGARCH_KVM_PARA_H #define _ASM_LOONGARCH_KVM_PARA_H +/* + * Hypercall code field + */ +#define HYPERVISOR_KVM 1 +#define HYPERVISOR_VENDOR_SHIFT 8 +#define HYPERCALL_CODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) +#define KVM_HCALL_CODE_PV_SERVICE 0 +#define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) +#define KVM_HCALL_FUNC_PV_IPI 1 + /* * LoongArch hypercall return code */ @@ -9,6 +19,126 @@ #define KVM_HCALL_INVALID_CODE -1UL #define KVM_HCALL_INVALID_PARAMETER -2UL +/* + * Hypercall interface for KVM hypervisor + * + * a0: function identifier + * a1-a6: args + * Return value will be placed in v0. + * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6. + */ +static __always_inline long kvm_hypercall(u64 fid) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall2(u64 fid, + unsigned long arg0, unsigned long arg1) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall3(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2), "r" (a3) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall4(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2, + unsigned long arg3) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register 
unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall5(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + register unsigned long a5 asm("a5") = arg4; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5) + : "memory" + ); + + return ret; +} + + static inline unsigned int kvm_arch_para_features(void) { return 0; diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 2563d9292517..cd58cb042b6a 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -167,6 +167,7 @@ #define CPUCFG_KVM_SIG CPUCFG_KVM_BASE #define KVM_SIGNATURE "KVM\0" #define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) +#define KVM_FEATURE_PV_IPI BIT(1) #ifndef __ASSEMBLY__ diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index a8d3b652d3ea..933879ad0ddc 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -239,6 +239,12 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) else vcpu->arch.gprs[rd] = 0; break; + case CPUCFG_KVM_FEATURE: + if ((plv & CSR_CRMD_PLV) == PLV_KERN) + vcpu->arch.gprs[rd] = KVM_FEATURE_PV_IPI; + else + vcpu->arch.gprs[rd] = 0; + break; default: vcpu->arch.gprs[rd] = 0; break; @@ -712,12 +718,78 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu) +{ + unsigned long ipi_bitmap; + unsigned int min, cpu, i; + struct kvm_vcpu *dest; + + min = vcpu->arch.gprs[LOONGARCH_GPR_A3]; + for (i = 0; i < 2; i++, min += BITS_PER_LONG) { + ipi_bitmap = vcpu->arch.gprs[LOONGARCH_GPR_A1 + i]; + if (!ipi_bitmap) + continue; + + cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG); + while (cpu < BITS_PER_LONG) { + dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); + cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, + cpu + 1); + if (!dest) + continue; + + /* + * Send SWI0 to dest vcpu to emulate IPI interrupt + */ + kvm_queue_irq(dest, INT_SWI0); + kvm_vcpu_kick(dest); + } + } + + return 0; +} + +/* + * hypercall emulation always return to guest, Caller should check retval. 
+ */ +static void kvm_handle_pv_service(struct kvm_vcpu *vcpu) +{ + unsigned long func = vcpu->arch.gprs[LOONGARCH_GPR_A0]; + long ret; + + switch (func) { + case KVM_HCALL_FUNC_PV_IPI: + kvm_pv_send_ipi(vcpu); + ret = KVM_HCALL_STATUS_SUCCESS; + break; + default: + ret = KVM_HCALL_INVALID_CODE; + break; + }; + + vcpu->arch.gprs[LOONGARCH_GPR_A0] = ret; +} + static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) { + larch_inst inst; + unsigned int code; + + inst.word = vcpu->arch.badi; + code = inst.reg0i15_format.immediate; update_pc(&vcpu->arch); - /* Treat it as a noop instruction, only set return value */ - vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + switch (code) { + case KVM_HCALL_PV_SERVICE: + vcpu->stat.hypercall_exits++; + kvm_handle_pv_service(vcpu); + break; + default: + /* Treat it as a noop instruction, only set return value */ + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + break; + } + return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index b633fd28b8db..76f2086ab68b 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -19,6 +19,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { STATS_DESC_COUNTER(VCPU, idle_exits), STATS_DESC_COUNTER(VCPU, cpucfg_exits), STATS_DESC_COUNTER(VCPU, signal_exits), + STATS_DESC_COUNTER(VCPU, hypercall_exits) }; const struct kvm_stats_header kvm_vcpu_stats_header = { -- Gitee From bbc0118d21f5ed4ae7f056bf8361a67f8982b88f Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 1 Mar 2024 11:42:21 +0800 Subject: [PATCH 672/953] anolis: LoongArch: Add pv ipi support on guest kernel side ANBZ: #8689 The PARAVIRT option and PV IPI are added on the guest kernel side; function pv_ipi_init() installs the IPI sending and IPI receiving hooks. This function first checks whether the system runs in VM mode; if so, it calls kvm_para_available() to detect the current hypervisor type. Only KVM detection is supported now, and the paravirt functions work only if the hypervisor type is KVM, since KVM is the only hypervisor supported on LoongArch. PV IPI uses virtual IPI sender and virtual IPI receiver functions. With the virtual IPI sender, the IPI message is stored in DDR memory rather than in emulated hardware. IPI multicast is supported, and 128 vcpus can receive IPIs at the same time, like the x86 KVM method. The hypercall method is used for IPI sending. With the virtual IPI receiver, hardware SWI0 is used rather than the real IPI hardware. Since each vCPU has a separate hardware SWI0, like the hardware timer, there is no trap in the IPI interrupt acknowledge; and since the IPI message is stored in DDR, there is no trap in fetching the IPI message.
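For illustration, a condensed sketch, not part of this patch, of the hypervisor detection step described above, assuming read_cpucfg() and the CPUCFG_KVM_SIG/KVM_SIGNATURE definitions added earlier in this series:

/*
 * Hedged sketch: CPUCFG index 0x40000000 (CPUCFG_KVM_SIG) returns the
 * magic string "KVM\0" when running on a KVM hypervisor.
 */
static bool probe_kvm_hypervisor(void)
{
	int sig = read_cpucfg(CPUCFG_KVM_SIG);

	return !memcmp(&sig, KVM_SIGNATURE, 4);
}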
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/Kconfig | 9 ++ arch/loongarch/include/asm/hardirq.h | 1 + arch/loongarch/include/asm/paravirt.h | 27 ++++ .../include/asm/paravirt_api_clock.h | 10 ++ arch/loongarch/kernel/Makefile | 1 + arch/loongarch/kernel/irq.c | 2 +- arch/loongarch/kernel/paravirt.c | 151 ++++++++++++++++++ arch/loongarch/kernel/smp.c | 4 +- 8 files changed, 203 insertions(+), 2 deletions(-) create mode 100644 arch/loongarch/include/asm/paravirt.h create mode 100644 arch/loongarch/include/asm/paravirt_api_clock.h create mode 100644 arch/loongarch/kernel/paravirt.c diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index bad326ae58f2..7b82992af3c4 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -565,6 +565,15 @@ config CPU_HAS_PREFETCH bool default y +config PARAVIRT + bool "Enable paravirtualization code" + depends on AS_HAS_LVZ_EXTENSION + help + This changes the kernel so it can modify itself when it is run + under a hypervisor, potentially improving performance significantly + over full virtualization. However, when run without a hypervisor + the kernel is theoretically slower and slightly larger. + config ARCH_SUPPORTS_KEXEC def_bool y diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index 9f0038e19c7f..b26d596a73aa 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -21,6 +21,7 @@ enum ipi_msg_type { typedef struct { unsigned int ipi_irqs[NR_IPI]; unsigned int __softirq_pending; + atomic_t message ____cacheline_aligned_in_smp; } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h new file mode 100644 index 000000000000..58f7b7b89f2c --- /dev/null +++ b/arch/loongarch/include/asm/paravirt.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_PARAVIRT_H +#define _ASM_LOONGARCH_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT +#include +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 dummy_steal_clock(int cpu); +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return static_call(pv_steal_clock)(cpu); +} + +int pv_ipi_init(void); +#else +static inline int pv_ipi_init(void) +{ + return 0; +} + +#endif // CONFIG_PARAVIRT +#endif diff --git a/arch/loongarch/include/asm/paravirt_api_clock.h b/arch/loongarch/include/asm/paravirt_api_clock.h new file mode 100644 index 000000000000..8a418f0b4fd5 --- /dev/null +++ b/arch/loongarch/include/asm/paravirt_api_clock.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ +#ifndef _ASM_API_CLOCK_H +#define _ASM_API_CLOCK_H + +#include + +#endif diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 10ee5fc7ac3e..6c148ccea674 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_MODULES) += module.o module-sections.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_PROC_FS) += proc.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 73cd3b7703e2..8b21449a7092 100644 --- 
a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -154,5 +154,5 @@ void __init init_IRQ(void) per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE); } - set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); + set_csr_ecfg(ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); } diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c new file mode 100644 index 000000000000..9044ed62045c --- /dev/null +++ b/arch/loongarch/kernel/paravirt.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); + +#ifdef CONFIG_SMP +static void pv_send_ipi_single(int cpu, unsigned int action) +{ + unsigned int min, old; + irq_cpustat_t *info = &per_cpu(irq_stat, cpu); + + old = atomic_fetch_or(BIT(action), &info->message); + if (old) + return; + + min = cpu_logical_map(cpu); + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, 1, 0, min); +} + +#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) +static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned int cpu, i, min = 0, max = 0, old; + __uint128_t bitmap = 0; + irq_cpustat_t *info; + + if (cpumask_empty(mask)) + return; + + action = BIT(action); + for_each_cpu(i, mask) { + info = &per_cpu(irq_stat, i); + old = atomic_fetch_or(action, &info->message); + if (old) + continue; + + cpu = cpu_logical_map(i); + if (!bitmap) { + min = max = cpu; + } else if (cpu > min && cpu < min + KVM_IPI_CLUSTER_SIZE) { + max = cpu > max ? cpu : max; + } else if (cpu < min && (max - cpu) < KVM_IPI_CLUSTER_SIZE) { + bitmap <<= min - cpu; + min = cpu; + } else { + /* + * Physical cpuid is sorted in ascending order ascend + * for the next mask calculation, send IPI here + * directly and skip the remainding cpus + */ + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, + (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); + min = max = cpu; + bitmap = 0; + } + __set_bit(cpu - min, (unsigned long *)&bitmap); + } + + if (bitmap) + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); +} + +static irqreturn_t loongson_do_swi(int irq, void *dev) +{ + irq_cpustat_t *info; + long action; + + /* Clear swi interrupt */ + clear_csr_estat(1 << INT_SWI0); + info = this_cpu_ptr(&irq_stat); + action = atomic_xchg(&info->message, 0); + if (action & SMP_CALL_FUNCTION) { + generic_smp_call_function_interrupt(); + info->ipi_irqs[IPI_CALL_FUNCTION]++; + } + + if (action & SMP_RESCHEDULE) { + scheduler_ipi(); + info->ipi_irqs[IPI_RESCHEDULE]++; + } + + return IRQ_HANDLED; +} + +static void pv_init_ipi(void) +{ + int r, swi0; + + swi0 = get_percpu_irq(INT_SWI0); + if (swi0 < 0) + panic("SWI0 IRQ mapping failed\n"); + irq_set_percpu_devid(swi0); + r = request_percpu_irq(swi0, loongson_do_swi, "SWI0", &irq_stat); + if (r < 0) + panic("SWI0 IRQ request failed\n"); +} +#endif + +static bool kvm_para_available(void) +{ + static int hypervisor_type; + int config; + + if (!hypervisor_type) { + config = read_cpucfg(CPUCFG_KVM_SIG); + if (!memcmp(&config, KVM_SIGNATURE, 4)) + hypervisor_type = HYPERVISOR_KVM; + } + + return hypervisor_type == HYPERVISOR_KVM; +} + +int __init pv_ipi_init(void) +{ + int feature; + + if (!cpu_has_hypervisor) + return 0; + if 
(!kvm_para_available()) + return 0; + + /* + * check whether KVM hypervisor supports pv_ipi or not + */ + feature = read_cpucfg(CPUCFG_KVM_FEATURE); +#ifdef CONFIG_SMP + if (feature & KVM_FEATURE_PV_IPI) { + smp_ops.init_ipi = pv_init_ipi; + smp_ops.send_ipi_single = pv_send_ipi_single; + smp_ops.send_ipi_mask = pv_send_ipi_mask; + } +#endif + + return 1; +} diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 9c3557760b5b..897127c26388 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -307,6 +308,7 @@ void __init loongson_smp_setup(void) cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; + pv_ipi_init(); iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus); } @@ -351,7 +353,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle) void loongson_init_secondary(void) { unsigned int cpu = smp_processor_id(); - unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | + unsigned int imask = ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER; change_csr_ecfg(ECFG0_IM, imask); -- Gitee From e5c4a74d6436f377f552a5e72dc6a09c2203cc6b Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Sat, 2 Mar 2024 15:20:50 +0800 Subject: [PATCH 673/953] anolis: Documentation: KVM: Add hypercall for LoongArch ANBZ: #8689 Add documentation topic for using pv_virt when running as a guest on KVM hypervisor. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- Documentation/virt/kvm/index.rst | 1 + .../virt/kvm/loongarch/hypercalls.rst | 79 +++++++++++++++++++ Documentation/virt/kvm/loongarch/index.rst | 10 +++ 3 files changed, 90 insertions(+) create mode 100644 Documentation/virt/kvm/loongarch/hypercalls.rst create mode 100644 Documentation/virt/kvm/loongarch/index.rst diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst index ad13ec55ddfe..9ca5a45c2140 100644 --- a/Documentation/virt/kvm/index.rst +++ b/Documentation/virt/kvm/index.rst @@ -14,6 +14,7 @@ KVM s390/index ppc-pv x86/index + loongarch/index locking vcpu-requests diff --git a/Documentation/virt/kvm/loongarch/hypercalls.rst b/Documentation/virt/kvm/loongarch/hypercalls.rst new file mode 100644 index 000000000000..1679e48d67d2 --- /dev/null +++ b/Documentation/virt/kvm/loongarch/hypercalls.rst @@ -0,0 +1,79 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================== +The LoongArch paravirtual interface +=================================== + +KVM hypercalls use the HVCL instruction with code 0x100, and the hypercall +number is put in a0 and up to five arguments may be placed in a1-a5, the +return value is placed in v0 (alias with a0). + +The code for that interface can be found in arch/loongarch/kvm/* + +Querying for existence +====================== + +To find out if we're running on KVM or not, cpucfg can be used with index +CPUCFG_KVM_BASE (0x40000000), cpucfg range between 0x40000000 - 0x400000FF +is marked as a specially reserved range. All existing and future processors +will not implement any features in this range. 
+ +When Linux is running on KVM, cpucfg with index CPUCFG_KVM_BASE (0x40000000) +returns the magic string "KVM\0". + +Once you have determined you're running under a PV-capable KVM, you can now use +hypercalls as described below. + +KVM hypercall ABI +================= + +The hypercall ABI on KVM is simple: only one scratch register a0 (v0) is used, and at most +five generic registers are used as input parameters. FP and vector registers +are not used for input and should not be modified during a hypercall. +The hypercall functions can be inlined since there is only one scratch register. + +The parameters are as follows: + + ======== ================ ================ + Register IN OUT + ======== ================ ================ + a0 function number Return code + a1 1st parameter - + a2 2nd parameter - + a3 3rd parameter - + a4 4th parameter - + a5 5th parameter - + ======== ================ ================ + +Return codes can be as follows: + + ==== ========================= + Code Meaning + ==== ========================= + 0 Success + -1 Hypercall not implemented + -2 Hypercall parameter error + ==== ========================= + +KVM Hypercalls Documentation +============================ + +The template for each hypercall is: +1. Hypercall name +2. Purpose + +1. KVM_HCALL_FUNC_PV_IPI +------------------------ + +:Purpose: Send IPIs to multiple vCPUs. + +- a0: KVM_HCALL_FUNC_PV_IPI +- a1: lower part of the bitmap of destination physical CPUIDs +- a2: higher part of the bitmap of destination physical CPUIDs +- a3: the lowest physical CPUID in bitmap + +The hypercall lets a guest send multicast IPIs, with at most 128 +destinations per hypercall. The destinations are represented by a bitmap +contained in the first two arguments (a1 and a2). Bit 0 of a1 corresponds +to the physical CPUID in the third argument (a3), bit 1 corresponds to the +physical ID a3+1, and so on. diff --git a/Documentation/virt/kvm/loongarch/index.rst b/Documentation/virt/kvm/loongarch/index.rst new file mode 100644 index 000000000000..83387b4c5345 --- /dev/null +++ b/Documentation/virt/kvm/loongarch/index.rst @@ -0,0 +1,10 @@ +.. SPDX-License-Identifier: GPL-2.0 + +========================= +KVM for LoongArch systems +========================= + +.. toctree:: + :maxdepth: 2 + + hypercalls.rst -- Gitee From b6b6b95f98c4b17a682ead3a67a06c9b7a6728f6 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 11:03:13 +0800 Subject: [PATCH 674/953] anolis: LoongArch: KVM: Add software breakpoint support ANBZ: #8689 When a VM runs in KVM mode, the system does not exit to host mode when a general software breakpoint instruction is executed; the trap exception happens in guest mode rather than host mode. In order to debug the guest kernel on the host side, a mechanism is needed to let the VM exit to host mode. Here a special hypercall code is used for software breakpoints: the VM exits to host mode, and the KVM hypervisor identifies the special hypercall code, sets exit_reason to KVM_EXIT_DEBUG, and then lets qemu handle it.
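To make the userspace side concrete, a hedged sketch of how a VMM could fetch the breakpoint instruction word through the pseudo register added below; only KVM_GET_ONE_REG and struct kvm_one_reg are standard KVM ABI, and the helper name is hypothetical:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hedged sketch: returns INSN_HVCL + KVM_HCALL_SWDBG, the word a
 * debugger plants at guest breakpoint addresses.
 */
static uint64_t get_swbp_insn(int vcpu_fd)
{
	uint64_t insn = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_LOONGARCH_DEBUG_INST,
		.addr = (uint64_t)(uintptr_t)&insn,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return insn;
}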
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/inst.h | 1 + arch/loongarch/include/asm/kvm_host.h | 2 ++ arch/loongarch/include/asm/kvm_para.h | 2 ++ arch/loongarch/include/uapi/asm/kvm.h | 4 ++++ arch/loongarch/kvm/exit.c | 16 ++++++++++++++-- arch/loongarch/kvm/vcpu.c | 13 ++++++++++++- arch/loongarch/kvm/vm.c | 1 + 7 files changed, 36 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 7a3829008c59..e4c545fecaea 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -12,6 +12,7 @@ #define INSN_NOP 0x03400000 #define INSN_BREAK 0x002a0000 +#define INSN_HVCL 0x002b8000 #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index c31619ae1fb4..3a92b6b024d5 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -31,6 +31,8 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 +#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ + KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) struct kvm_vm_stat { struct kvm_vm_stat_generic generic; u64 pages; diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index a82bffbbf8a1..db4579923542 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -9,8 +9,10 @@ #define HYPERVISOR_VENDOR_SHIFT 8 #define HYPERCALL_CODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) #define KVM_HCALL_CODE_PV_SERVICE 0 +#define KVM_HCALL_CODE_SWDBG 1 #define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) #define KVM_HCALL_FUNC_PV_IPI 1 +#define KVM_HCALL_SWDBG HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) /* * LoongArch hypercall return code diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 923d0bd38294..4cec8c16013c 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -15,10 +15,12 @@ */ #define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_GUEST_DEBUG #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 +#define KVM_GUESTDBG_USE_SW_BP 0x00010000 /* * for KVM_GET_REGS and KVM_SET_REGS */ @@ -74,6 +76,8 @@ struct kvm_fpu { #define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) #define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) +/* Debugging: Special instruction for software breakpoint */ +#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) #define LOONGARCH_REG_SHIFT 3 #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 933879ad0ddc..19822813755d 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -774,23 +774,35 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) { larch_inst inst; unsigned int code; + int ret; inst.word = vcpu->arch.badi; code = inst.reg0i15_format.immediate; - update_pc(&vcpu->arch); + ret = RESUME_GUEST; switch (code) { case KVM_HCALL_PV_SERVICE: vcpu->stat.hypercall_exits++; kvm_handle_pv_service(vcpu); break; + case KVM_HCALL_SWDBG: + /* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */ + if
(vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + ret = RESUME_HOST; + } else + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + break; default: /* Treat it as a noop instruction, only set return value */ vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; break; } - return RESUME_GUEST; + if (ret == RESUME_GUEST) + update_pc(&vcpu->arch); + + return ret; } /* diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 76f2086ab68b..f22d10228cd2 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -248,7 +248,15 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - return -EINVAL; + if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) + return -EINVAL; + + if (dbg->control & KVM_GUESTDBG_ENABLE) + vcpu->guest_debug = dbg->control; + else + vcpu->guest_debug = 0; + + return 0; } static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) @@ -500,6 +508,9 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu, case KVM_REG_LOONGARCH_COUNTER: *v = drdtime() + vcpu->kvm->arch.time_offset; break; + case KVM_REG_LOONGARCH_DEBUG_INST: + *v = INSN_HVCL + KVM_HCALL_SWDBG; + break; default: ret = -EINVAL; break; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 6006a28653ad..06fd746b03b6 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -77,6 +77,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_IOEVENTFD: case KVM_CAP_MP_STATE: + case KVM_CAP_SET_GUEST_DEBUG: r = 1; break; case KVM_CAP_NR_VCPUS: -- Gitee From e68d9ac6b0b19b0d6aee01b2846bfd6c193f49ca Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 13 Mar 2024 17:48:26 +0800 Subject: [PATCH 675/953] anolis: irqchip/loongson-eiointc: Add virt extension support ANBZ: #8689 With virt eiointc, interrupts can be routed to up to 256 vcpus. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- drivers/irqchip/irq-loongson-eiointc.c | 43 +++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 1daa87163d03..c5c26b8e8d0c 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -23,6 +23,16 @@ #define EIOINTC_REG_ISR 0x1800 #define EIOINTC_REG_ROUTE 0x1c00 +#define EXTIOI_VIRT_FEATURES 0x40000000 +#define EXTIOI_HAS_VIRT_EXTENSION 0 +#define EXTIOI_HAS_ENABLE_OPTION 1 +#define EXTIOI_HAS_INT_ENCODE 2 +#define EXTIOI_HAS_CPU_ENCODE 3 +#define EXTIOI_VIRT_CONFIG 0x40000004 +#define EXTIOI_ENABLE 1 +#define EXTIOI_ENABLE_INT_ENCODE 2 +#define EXTIOI_ENABLE_CPU_ENCODE 3 + #define VEC_REG_COUNT 4 #define VEC_COUNT_PER_REG 64 #define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) @@ -41,6 +51,7 @@ struct eiointc_priv { cpumask_t cpuspan_map; struct fwnode_handle *domain_handle; struct irq_domain *eiointc_domain; + bool cpu_encoded; }; static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; @@ -91,7 +102,16 @@ static DEFINE_RAW_SPINLOCK(affinity_lock); static void virt_extioi_set_irq_route(int irq, unsigned int cpu) { - iocsr_write8(cpu_logical_map(cpu), EIOINTC_REG_ROUTE + irq); + int data; + + /* + * get irq route info for 4 continuous vectors + * and set affinity for the specified vector + */ + data = iocsr_read32(EIOINTC_REG_ROUTE + (irq & ~3)); +
data &= ~(0xff << ((irq & 3) * 8)); + data |= cpu_logical_map(cpu) << ((irq & 3) * 8); + iocsr_write32(data, EIOINTC_REG_ROUTE + (irq & ~3)); } static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) @@ -116,7 +136,7 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - if (cpu_has_hypervisor) { + if (priv->cpu_encoded) { iocsr_write32(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F), regaddr); virt_extioi_set_irq_route(vector, cpu); iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); @@ -181,8 +201,10 @@ static int eiointc_router_init(unsigned int cpu) for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { /* Route to Node-0 Core-0 */ - if (index == 0) - bit = (cpu_has_hypervisor ? cpu_logical_map(0) : BIT(cpu_logical_map(0))); + if (eiointc_priv[index]->cpu_encoded) + bit = cpu_logical_map(0); + else if (index == 0) + bit = BIT(cpu_logical_map(0)); else bit = (eiointc_priv[index]->node << 4) | 1; @@ -383,7 +405,7 @@ static int __init acpi_cascade_irqdomain_init(void) static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, u64 node_map) { - int i; + int i, val; node_map = node_map ? node_map : -1ULL; for_each_possible_cpu(i) { @@ -403,6 +425,17 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, return -ENOMEM; } + if (cpu_has_hypervisor) { + val = iocsr_read32(EXTIOI_VIRT_FEATURES); + if (val & BIT(EXTIOI_HAS_CPU_ENCODE)) { + val = iocsr_read32(EXTIOI_VIRT_CONFIG); + val |= BIT(EXTIOI_ENABLE_CPU_ENCODE); + iocsr_write32(val, EXTIOI_VIRT_CONFIG); + priv->cpu_encoded = true; + pr_info("loongson-extioi: enable cpu encoding\n"); + } + } + eiointc_priv[nr_pics++] = priv; eiointc_router_init(0); irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); -- Gitee From 6a3f9b992b634eab3e6d3a18adc7db9924378ffc Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 22 Mar 2024 16:24:10 +0800 Subject: [PATCH 676/953] anolis: LoongArch: KVM: Add steal time support in kvm side ANBZ: #8689 The steal time feature is added here on the KVM side: the VM can query the supported features provided by the KVM hypervisor, and feature bit KVM_FEATURE_STEAL_TIME is added. Like on x86, the steal time structure is saved in guest memory, and a hypercall function KVM_HCALL_FUNC_NOTIFY is added to notify KVM to enable the feature. A vcpu attr ioctl command KVM_LOONGARCH_VCPU_PVTIME_CTRL is added to save and restore the base address of the steal time structure when the VM is migrated.
It needs hypercall instruction emulation handling, and it depends on this patchset: https://lore.kernel.org/all/20240201031950.3225626-1-maobibo@loongson.cn/ Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 7 ++ arch/loongarch/include/asm/kvm_para.h | 10 ++ arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/include/uapi/asm/kvm.h | 4 + arch/loongarch/kvm/exit.c | 35 ++++++- arch/loongarch/kvm/vcpu.c | 122 +++++++++++++++++++++++++ 6 files changed, 174 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 3a92b6b024d5..778874703483 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -30,6 +30,7 @@ #define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_HALT_POLL_NS_DEFAULT 500000 +#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1) #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) @@ -197,6 +198,12 @@ struct kvm_vcpu_arch { struct kvm_mp_state mp_state; /* cpucfg */ u32 cpucfg[KVM_MAX_CPUCFG_REGS]; + /* paravirt steal time */ + struct { + u64 guest_addr; + u64 last_steal; + struct gfn_to_hva_cache cache; + } st; }; static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index db4579923542..032101b941d9 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -12,6 +12,7 @@ #define KVM_HCALL_CODE_SWDBG 1 #define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) #define KVM_HCALL_FUNC_PV_IPI 1 +#define KVM_HCALL_FUNC_NOTIFY 2 #define KVM_HCALL_SWDBG HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) /* @@ -21,6 +22,15 @@ #define KVM_HCALL_INVALID_CODE -1UL #define KVM_HCALL_INVALID_PARAMETER -2UL +#define KVM_STEAL_PHYS_VALID BIT_ULL(0) +#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6) +struct kvm_steal_time { + __u64 steal; + __u32 version; + __u32 flags; + __u32 pad[12]; +}; + /* * Hypercall interface for KVM hypervisor * diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index cd58cb042b6a..bbfc3579bfcd 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -168,6 +168,7 @@ #define KVM_SIGNATURE "KVM\0" #define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) #define KVM_FEATURE_PV_IPI BIT(1) +#define KVM_FEATURE_STEAL_TIME BIT(2) #ifndef __ASSEMBLY__ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 4cec8c16013c..9891ed93816a 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -83,7 +83,11 @@ struct kvm_fpu { #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +/* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 +#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 +#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0 struct kvm_debug_exit_arch { }; diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 19822813755d..13a9e58f463b 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -209,7 +209,7 @@ int kvm_emu_idle(struct
kvm_vcpu *vcpu) static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) { int rd, rj; - unsigned int index; + unsigned int index, ret; unsigned long plv; rd = inst.reg2_format.rd; @@ -240,10 +240,13 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) vcpu->arch.gprs[rd] = 0; break; case CPUCFG_KVM_FEATURE: - if ((plv & CSR_CRMD_PLV) == PLV_KERN) - vcpu->arch.gprs[rd] = KVM_FEATURE_PV_IPI; - else - vcpu->arch.gprs[rd] = 0; + ret = 0; + if ((plv & CSR_CRMD_PLV) == PLV_KERN) { + ret = KVM_FEATURE_PV_IPI; + if (sched_info_on()) + ret |= KVM_FEATURE_STEAL_TIME; + } + vcpu->arch.gprs[rd] = ret; break; default: vcpu->arch.gprs[rd] = 0; @@ -749,6 +752,25 @@ static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu) return 0; } +static int kvm_save_notify(struct kvm_vcpu *vcpu) +{ + unsigned long id, data; + + id = vcpu->arch.gprs[LOONGARCH_GPR_A1]; + data = vcpu->arch.gprs[LOONGARCH_GPR_A2]; + switch (id) { + case KVM_FEATURE_STEAL_TIME: + vcpu->arch.st.guest_addr = data; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); + break; + default: + break; + }; + + return 0; +}; + /* * hypercall emulation always return to guest, Caller should check retval. */ @@ -762,6 +784,9 @@ static void kvm_handle_pv_service(struct kvm_vcpu *vcpu) kvm_pv_send_ipi(vcpu); ret = KVM_HCALL_STATUS_SUCCESS; break; + case KVM_HCALL_FUNC_NOTIFY: + ret = kvm_save_notify(vcpu); + break; default: ret = KVM_HCALL_INVALID_CODE; break; diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index f22d10228cd2..49c1172f0005 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -31,6 +31,115 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; +static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) +{ + struct kvm_steal_time __user *st; + struct gfn_to_hva_cache *ghc; + struct kvm_memslots *slots; + gpa_t gpa; + u64 steal; + u32 version; + + ghc = &vcpu->arch.st.cache; + gpa = vcpu->arch.st.guest_addr; + if (!(gpa & KVM_STEAL_PHYS_VALID)) + return; + + gpa &= KVM_STEAL_PHYS_MASK; + slots = kvm_memslots(vcpu->kvm); + if (slots->generation != ghc->generation || gpa != ghc->gpa) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, + sizeof(*st))) { + ghc->gpa = INVALID_GPA; + return; + } + } + + st = (struct kvm_steal_time __user *)ghc->hva; + unsafe_get_user(version, &st->version, out); + if (version & 1) + version += 1; + version += 1; + unsafe_put_user(version, &st->version, out); + /* Make sure st->version is written first */ + smp_wmb(); + + unsafe_get_user(steal, &st->steal, out); + steal += current->sched_info.run_delay - + vcpu->arch.st.last_steal; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + unsafe_put_user(steal, &st->steal, out); + + /* Make sure st->steal is written first */ + smp_wmb(); + version += 1; + unsafe_put_user(version, &st->version, out); +out: + mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); +} + +static bool kvm_pvtime_supported(void) +{ + return !!sched_info_on(); +} + +static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *user = (u64 __user *)attr->addr; + struct kvm *kvm = vcpu->kvm; + u64 gpa; + int ret = 0; + int idx; + + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + if (get_user(gpa, user)) + return -EFAULT; + + /* Check the address is in a valid memslot */ + idx = srcu_read_lock(&kvm->srcu); + if 
(kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT))) + ret = -EINVAL; + srcu_read_unlock(&kvm->srcu, idx); + + if (!ret) + vcpu->arch.st.guest_addr = gpa; + + return ret; +} + +static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *user = (u64 __user *)attr->addr; + u64 gpa; + + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + gpa = vcpu->arch.st.guest_addr; + if (put_user(gpa, user)) + return -EFAULT; + + return 0; +} + +static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_LOONGARCH_VCPU_PVTIME_GPA: + if (kvm_pvtime_supported()) + return 0; + } + + return -ENXIO; +} + /* * kvm_check_requests - check and handle pending vCPU requests * @@ -48,6 +157,9 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu) if (kvm_dirty_ring_check_request(vcpu)) return RESUME_HOST; + if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu)) + kvm_update_stolen_time(vcpu); + return RESUME_GUEST; } @@ -672,6 +784,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, case KVM_LOONGARCH_VCPU_CPUCFG: ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); break; + case KVM_LOONGARCH_VCPU_PVTIME_CTRL: + ret = kvm_loongarch_pvtime_has_attr(vcpu, attr); + break; default: break; } @@ -704,6 +819,9 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, case KVM_LOONGARCH_VCPU_CPUCFG: ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr); break; + case KVM_LOONGARCH_VCPU_PVTIME_CTRL: + ret = kvm_loongarch_pvtime_get_attr(vcpu, attr); + break; default: break; } @@ -726,6 +844,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, case KVM_LOONGARCH_VCPU_CPUCFG: ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); break; + case KVM_LOONGARCH_VCPU_PVTIME_CTRL: + ret = kvm_loongarch_pvtime_set_attr(vcpu, attr); + break; default: break; } @@ -1084,6 +1205,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Control guest page CCA attribute */ change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); /* Don't bother restoring registers multiple times unless necessary */ if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) -- Gitee From 3dbfffd92e6c2c9b0bb4828301744eed48b70f12 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 22 Mar 2024 16:42:48 +0800 Subject: [PATCH 677/953] anolis: LoongArch: Add steal time support in guest side ANBZ: #8689 A percpu struct kvm_steal_time is added here; its size is 64 bytes and it is also aligned to 64 bytes, so that the whole structure sits in one physical page. When a vcpu is onlined, function pv_register_steal_time() is called. This function passes the physical address of struct kvm_steal_time and tells the hypervisor to enable steal time. When a vcpu goes offline, the physical address is set to 0, telling the hypervisor to disable steal time.
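For illustration, a minimal sketch, not part of this patch, of the enable/disable notification described above; it assumes the kvm_hypercall2() wrapper plus the KVM_STEAL_PHYS_VALID and KVM_FEATURE_STEAL_TIME definitions from the previous patch, and the helper name steal_time_notify is hypothetical:

/*
 * Hedged sketch: bit 0 (KVM_STEAL_PHYS_VALID) marks the address valid,
 * the upper bits carry the physical address of this CPU's
 * kvm_steal_time area; passing 0 disables the feature.
 */
static void steal_time_notify(unsigned long pa, bool enable)
{
	unsigned long arg = enable ? (pa | KVM_STEAL_PHYS_VALID) : 0;

	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, arg);
}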
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/paravirt.h | 5 + arch/loongarch/kernel/paravirt.c | 130 ++++++++++++++++++++++++++ arch/loongarch/kernel/time.c | 2 + 3 files changed, 137 insertions(+) diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h index 58f7b7b89f2c..fe27fb5e82b8 100644 --- a/arch/loongarch/include/asm/paravirt.h +++ b/arch/loongarch/include/asm/paravirt.h @@ -17,11 +17,16 @@ static inline u64 paravirt_steal_clock(int cpu) } int pv_ipi_init(void); +int __init pv_time_init(void); #else static inline int pv_ipi_init(void) { return 0; } +static inline int pv_time_init(void) +{ + return 0; +} #endif // CONFIG_PARAVIRT #endif diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 9044ed62045c..56182c64ab38 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -5,10 +5,13 @@ #include #include #include +#include #include struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; +static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +static int has_steal_clock; static u64 native_steal_clock(int cpu) { @@ -17,6 +20,57 @@ static u64 native_steal_clock(int cpu) DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} +early_param("no-steal-acc", parse_no_stealacc); + +static u64 para_steal_clock(int cpu) +{ + u64 steal; + struct kvm_steal_time *src; + int version; + + src = &per_cpu(steal_time, cpu); + do { + + version = src->version; + /* Make sure that the version is read before the steal */ + virt_rmb(); + steal = src->steal; + /* Make sure that the steal is read before the next version */ + virt_rmb(); + + } while ((version & 1) || (version != src->version)); + return steal; +} + +static int pv_register_steal_time(void) +{ + int cpu = smp_processor_id(); + struct kvm_steal_time *st; + unsigned long addr; + + if (!has_steal_clock) + return -EPERM; + + st = &per_cpu(steal_time, cpu); + addr = per_cpu_ptr_to_phys(st); + + /* The whole structure kvm_steal_time should be one page */ + if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { + pr_warn("Illegal PV steal time addr %lx\n", addr); + return -EFAULT; + } + + addr |= KVM_STEAL_PHYS_VALID; + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + return 0; +} + #ifdef CONFIG_SMP static void pv_send_ipi_single(int cpu, unsigned int action) { @@ -110,6 +164,32 @@ static void pv_init_ipi(void) if (r < 0) panic("SWI0 IRQ request failed\n"); } + +static void pv_disable_steal_time(void) +{ + if (has_steal_clock) + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); +} + +static int pv_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_register_steal_time(); + local_irq_restore(flags); + return 0; +} + +static int pv_cpu_down_prepare(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_disable_steal_time(); + local_irq_restore(flags); + return 0; +} #endif static bool kvm_para_available(void) @@ -149,3 +229,53 @@ int __init pv_ipi_init(void) return 1; } + +static void pv_cpu_reboot(void *unused) +{ + pv_disable_steal_time(); +} + +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + on_each_cpu(pv_cpu_reboot, NULL, 1); + return 
NOTIFY_DONE; +} + +static struct notifier_block pv_reboot_nb = { + .notifier_call = pv_reboot_notify, +}; + +int __init pv_time_init(void) +{ + int feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + if (!(feature & KVM_FEATURE_STEAL_TIME)) + return 0; + + has_steal_clock = 1; + if (pv_register_steal_time()) { + has_steal_clock = 0; + return 0; + } + + register_reboot_notifier(&pv_reboot_nb); + static_call_update(pv_steal_clock, para_steal_clock); + static_key_slow_inc(¶virt_steal_enabled); + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); + +#ifdef CONFIG_SMP + if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "loongarch/pv:online", + pv_cpu_online, pv_cpu_down_prepare) < 0) + pr_err("Failed to install cpu hotplug callbacks\n"); +#endif + pr_info("Using stolen time PV\n"); + return 0; +} diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index fd5354f9be7c..46d7d40c87e3 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -15,6 +15,7 @@ #include #include +#include #include u64 cpu_clock_freq; @@ -214,4 +215,5 @@ void __init time_init(void) constant_clockevent_init(); constant_clocksource_init(); + pv_time_init(); } -- Gitee From 809ba9e1e7598e83e83c75024312bcfdc0cbfbaa Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 28 Mar 2024 17:10:08 +0800 Subject: [PATCH 678/953] anolis: LoongArch: KVM: Add PMU support ANBZ: #8689 Add PMU device emulation Signed-off-by: Song Gao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_csr.h | 5 ++ arch/loongarch/include/asm/kvm_host.h | 14 ++++ arch/loongarch/include/asm/kvm_vcpu.h | 2 + arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/kvm/exit.c | 7 ++ arch/loongarch/kvm/vcpu.c | 97 +++++++++++++++++++++++++- 6 files changed, 125 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h index 724ca8b7b401..476c9f620dd5 100644 --- a/arch/loongarch/include/asm/kvm_csr.h +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -208,4 +208,9 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, csr->csrs[gid] |= val & _mask; } +#define KVM_PMU_PLV_ENABLE (CSR_PERFCTRL_PLV0 | \ + CSR_PERFCTRL_PLV1 | \ + CSR_PERFCTRL_PLV2 | \ + CSR_PERFCTRL_PLV3) + #endif /* __ASM_LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 778874703483..c146d2ebdb90 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -129,6 +129,7 @@ enum emulation_result { #define KVM_LARCH_LASX (0x1 << 2) #define KVM_LARCH_SWCSR_LATEST (0x1 << 3) #define KVM_LARCH_HWCSR_USABLE (0x1 << 4) +#define KVM_LARCH_PERF (0x1 << 5) struct kvm_vcpu_arch { /* @@ -204,6 +205,9 @@ struct kvm_vcpu_arch { u64 last_steal; struct gfn_to_hva_cache cache; } st; + /* Save host pmu csr */ + u64 perf_ctrl[4]; + u64 perf_cntr[4]; }; static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) @@ -231,6 +235,16 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) return arch->cpucfg[2] & CPUCFG2_LASX; } +static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[6] & CPUCFG6_PMP; +} + +static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) +{ + return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; +} + /* 
Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 9f53950959da..1da24994b838 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -75,6 +75,8 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } #endif +int kvm_own_pmu(struct kvm_vcpu *vcpu); + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index bbfc3579bfcd..e852c0f62eb7 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -119,6 +119,7 @@ #define CPUCFG6_PMP BIT(0) #define CPUCFG6_PAMVER GENMASK(3, 1) #define CPUCFG6_PMNUM GENMASK(7, 4) +#define CPUCFG6_PMNUM_SHIFT 4 #define CPUCFG6_PMBITS GENMASK(13, 8) #define CPUCFG6_UPM BIT(14) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 13a9e58f463b..8affc6d4a66e 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -83,6 +83,13 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) rj = inst.reg2csr_format.rj; csrid = inst.reg2csr_format.csr; + if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= LOONGARCH_CSR_PERFCNTR3) { + if (!kvm_own_pmu(vcpu)) { + vcpu->arch.pc -= 4; + return EMULATE_DONE; + } + } + /* Process CSR ops */ switch (rj) { case 0: /* process csrrd */ diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 49c1172f0005..685f2826d022 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -544,6 +544,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) case LOONGARCH_CPUCFG5: *v = GENMASK(31, 0); return 0; + case LOONGARCH_CPUCFG6: + if (cpu_has_pmp) + *v = GENMASK(14, 0); + else + *v = 0; + return 0; case LOONGARCH_CPUCFG16: *v = GENMASK(16, 0); return 0; @@ -562,7 +568,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) static int kvm_check_cpucfg(int id, u64 val) { - int ret; + int ret, host; u64 mask = 0; ret = _kvm_get_cpucfg_mask(id, &mask); @@ -588,6 +594,18 @@ static int kvm_check_cpucfg(int id, u64 val) /* LASX architecturally implies LSX and FP but val does not satisfy that */ return -EINVAL; return 0; + case LOONGARCH_CPUCFG6: + if (val & CPUCFG6_PMP) { + host = read_cpucfg(6); + if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) + /* Guest pmbits must be the same with host */ + return -EINVAL; + if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM)) + return -EINVAL; + if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM)) + return -EINVAL; + } + return 0; default: /* * Values for the other CPUCFG IDs are not being further validated @@ -767,6 +785,7 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, { switch (attr->attr) { case 2: + case 6: return 0; default: return -ENXIO; @@ -1067,6 +1086,77 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) preempt_enable(); } +int kvm_own_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + + if (!kvm_guest_has_pmu(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + preempt_enable(); + return 0; +} + +static void kvm_lose_pmu(struct kvm_vcpu *vcpu) +{ + struct 
loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) + return; + + /* save guest pmu csr */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL0, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL1, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL2, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL3, 0); + /* Disable pmu access from guest */ + write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); + + if (((kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3)) + & KVM_PMU_PLV_ENABLE) == 0) + vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF; +} + +static void kvm_restore_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) + return; + + /* Set PM0-PM(num) to Guest */ + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); +} + + int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { int intr = (int)irq->irq; @@ -1205,6 +1295,10 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Control guest page CCA attribute */ change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + + /* Restore hardware perf csr */ + kvm_restore_pmu(vcpu); + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); /* Don't bother restoring registers multiple times unless necessary */ @@ -1290,6 +1384,7 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) struct loongarch_csrs *csr = vcpu->arch.csr; kvm_lose_fpu(vcpu); + kvm_lose_pmu(vcpu); /* * Update CSR state from hardware if software CSR state is stale, -- Gitee From 7ad3b471c3ea97efb3562d97a6426dd3feebf9c0 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Fri, 29 Mar 2024 10:30:47 +0800 Subject: [PATCH 679/953] anolis: config: open mpi3mr driver ANBZ: #8665 Open mpi3mr driver by default. 
Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2970 --- arch/arm64/configs/anolis-debug_defconfig | 2 +- arch/arm64/configs/anolis_defconfig | 2 +- arch/x86/configs/anolis-debug_defconfig | 2 +- arch/x86/configs/anolis_defconfig | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 8b932435df46..97230e6e79b5 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -2406,7 +2406,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index ba7a0a1e15f4..6c0af4f2c954 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -2403,7 +2403,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 05b04486b420..edbe050c1962 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -2504,7 +2504,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index ca8ff01300ac..2845d0515d6d 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -2499,7 +2499,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set -- Gitee From 73ccf00952a35e0ca908ba2e52289d2b508caecb Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 8 Sep 2023 20:09:00 -0400 Subject: [PATCH 680/953] anolis: KVM: SVM: Add support for different CSV guests to reuse the same ASID ANBZ: #8676 If a user wants to reuse one ASID for many CSV guests, they should provide a label (i.e. a userid) and the length of the label when launching a CSV guest. The reference count of the ASID is increased when a CSV guest is launched with the label corresponding to that ASID. When a CSV guest that was launched with a label is destroyed, the reference count of the ASID corresponding to the label is decreased, and the ASID is freed only when the reference count reaches zero. The code for ASID reuse is not compatible with CONFIG_CGROUP_MISC, so we introduce CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID, which depends on !CGROUP_MISC; the code takes effect only when CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y. Make CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y the default configuration.
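To illustrate the intended userspace flow, a VMM might pass the label through the KVM_SEV_INIT command roughly as follows. This is an illustrative sketch only: struct kvm_csv_init_example mirrors the uapi structure added below, and VM/SEV fd setup plus error handling are assumed to exist elsewhere in the VMM.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Mirrors the struct kvm_csv_init added to the uapi by this patch. */
	struct kvm_csv_init_example {
		__u64 userid_addr;
		__u32 len;
	};

	static int csv_init_with_label(int vm_fd, int sev_fd, const char *label)
	{
		struct kvm_csv_init_example init = {
			.userid_addr = (__u64)(uintptr_t)label,
			/* must stay below ASID_USERID_LENGTH (20 bytes) */
			.len = (__u32)strlen(label),
		};
		struct kvm_sev_cmd cmd = {
			.id = KVM_SEV_INIT,
			.data = (__u64)(uintptr_t)&init,
			.sev_fd = (__u32)sev_fd,
		};

		/* Two guests initialized with the same label share one ASID;
		 * the kernel-side refcount keeps the ASID alive until the
		 * last such guest is destroyed. */
		return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	}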
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2976 --- arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + arch/x86/kvm/Kconfig | 10 ++ arch/x86/kvm/svm/sev.c | 146 +++++++++++++++++++++++- include/uapi/linux/kvm.h | 5 + 5 files changed, 162 insertions(+), 1 deletion(-) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index edbe050c1962..92c4549d3a57 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -743,6 +743,7 @@ CONFIG_KVM_AMD_SEV=y CONFIG_KVM_SMM=y # CONFIG_KVM_XEN is not set CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y CONFIG_AS_AVX512=y CONFIG_AS_SHA1_NI=y CONFIG_AS_SHA256_NI=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 2845d0515d6d..392caf0e3e0a 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -738,6 +738,7 @@ CONFIG_KVM_AMD_SEV=y CONFIG_KVM_SMM=y # CONFIG_KVM_XEN is not set CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y CONFIG_AS_AVX512=y CONFIG_AS_SHA1_NI=y CONFIG_AS_SHA256_NI=y diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index ed90f148140d..463732963a15 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -154,4 +154,14 @@ config KVM_PROVE_MMU config KVM_EXTERNAL_WRITE_TRACKING bool +config KVM_SUPPORTS_CSV_REUSE_ASID + def_bool y + bool "Reuse the same ASID for different HYGON CSV guests" + depends on KVM_AMD_SEV && CPU_SUP_HYGON + depends on !CGROUP_MISC + help + Provide support for reuse the same ASID for difference HYGON + CSV guests, this allow the user to create more CSV guests on + HYGON CPUs with limited ASIDs. + endif # VIRTUALIZATION diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index cfbca578aec5..2fbd9b03c859 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -90,6 +90,17 @@ struct enc_region { unsigned long size; }; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID +#define ASID_USERID_LENGTH 20 +struct csv_asid_userid { + int refcnt; // reference count of the ASID + u32 userid_len; + char userid[ASID_USERID_LENGTH]; +}; + +static struct csv_asid_userid *csv_asid_userid_array; +#endif + /* Called with the sev_bitmap_lock held, or on shutdown */ static int sev_flush_asids(int min_asid, int max_asid) { @@ -148,7 +159,11 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) misc_cg_uncharge(type, sev->misc_cg, 1); } +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID +static int sev_asid_new(struct kvm_sev_info *sev, const char *userid, u32 userid_len) +#else static int sev_asid_new(struct kvm_sev_info *sev) +#endif { int asid, min_asid, max_asid, ret; bool retry = true; @@ -164,6 +179,34 @@ static int sev_asid_new(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, check whether the userid exists */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + userid && userid_len) { + int i = !min_sev_asid ? 
1 : min_sev_asid; + + for (; i <= max_sev_asid; i++) { + /* skip ASIDs without a corresponding userid */ + if (!csv_asid_userid_array[i].userid_len) + continue; + + /* skip if length of userid is different */ + if (csv_asid_userid_array[i].userid_len != userid_len) + continue; + + if (!memcmp(csv_asid_userid_array[i].userid, + userid, userid_len)) { + pr_debug("Found reusable asid %d\n", i); + /* Increase reference count if userid exists */ + csv_asid_userid_array[i].refcnt++; + + mutex_unlock(&sev_bitmap_lock); + return i; + } + } + } +#endif + /* * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. @@ -191,6 +234,16 @@ static int sev_asid_new(struct kvm_sev_info *sev) __set_bit(asid, sev_asid_bitmap); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, initialize the new userid */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + userid && userid_len) { + memcpy(csv_asid_userid_array[asid].userid, userid, userid_len); + csv_asid_userid_array[asid].userid_len = userid_len; + csv_asid_userid_array[asid].refcnt = 1; + } +#endif + mutex_unlock(&sev_bitmap_lock); return asid; @@ -215,7 +268,25 @@ static void sev_asid_free(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, decrease the reference count if the userid exists */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + csv_asid_userid_array[sev->asid].userid_len) { + /* If we reach here, the reference count should be larger than 0. */ + WARN_ON(csv_asid_userid_array[sev->asid].refcnt <= 0); + + if (--csv_asid_userid_array[sev->asid].refcnt == 0) { + __set_bit(sev->asid, sev_reclaim_asid_bitmap); + + memset(&csv_asid_userid_array[sev->asid], 0, + sizeof(struct csv_asid_userid)); + } + } else { + __set_bit(sev->asid, sev_reclaim_asid_bitmap); + } +#else __set_bit(sev->asid, sev_reclaim_asid_bitmap); +#endif for_each_possible_cpu(cpu) { sd = per_cpu_ptr(&svm_data, cpu); @@ -262,6 +333,11 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; int asid, ret; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + struct kvm_csv_init params; + void *csv_blob = NULL; +#endif + if (kvm->created_vcpus) return -EINVAL; @@ -271,7 +347,43 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev->active = true; sev->es_active = argp->id == KVM_SEV_ES_INIT; + +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + memset(&params, 0, sizeof(params)); + + if (argp->data && + copy_from_user(&params, + (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + if (params.userid_addr) { + if (params.len >= ASID_USERID_LENGTH) { + pr_err("Invalid length of userid %d >= %d\n", + params.len, ASID_USERID_LENGTH); + return -EINVAL; + } + + csv_blob = psp_copy_user_blob(params.userid_addr, params.len); + if (IS_ERR(csv_blob)) { + pr_err("Copy userid failed, %llx (%u)\n", + params.userid_addr, params.len); + return PTR_ERR(csv_blob); + } + } + + asid = sev_asid_new(sev, (const char *)csv_blob, params.len); + + /* Free the @csv_blob to prevent a memory leak */ + kfree(csv_blob); + csv_blob = NULL; + } else { + asid = sev_asid_new(sev, NULL, 0); + } +#else asid = sev_asid_new(sev); +#endif + if (asid < 0) goto e_no_asid; sev->asid = asid; @@ -291,6 +403,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev_asid_free(sev); sev->asid = 0; e_no_asid: +#ifdef
CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_blob); +#endif sev->es_active = false; sev->active = false; return ret; @@ -2489,7 +2604,25 @@ void __init sev_hardware_setup(void) } if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* Initialize CSV ASID reuse array */ + csv_asid_userid_array = kcalloc(nr_asids, + sizeof(struct csv_asid_userid), GFP_KERNEL); + if (!csv_asid_userid_array) { + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + bitmap_free(sev_reclaim_asid_bitmap); + sev_reclaim_asid_bitmap = NULL; + goto out; + } +#endif + + /* Initialize buffer to accelerate migration of CSV/CSV2 guest */ if (alloc_trans_mempool()) { +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_asid_userid_array); + csv_asid_userid_array = NULL; +#endif bitmap_free(sev_asid_bitmap); sev_asid_bitmap = NULL; bitmap_free(sev_reclaim_asid_bitmap); @@ -2566,8 +2699,12 @@ void sev_hardware_unsetup(void) /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ sev_flush_asids(1, max_sev_asid); - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { free_trans_mempool(); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_asid_userid_array); +#endif + } bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -2922,6 +3059,13 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) /* Assign the asid allocated with this SEV guest */ svm->asid = asid; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* If ASID is shared with other guests, then flush TLB before VMRUN */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + csv_asid_userid_array[asid].userid_len) + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; +#endif + /* * Flush guest TLB: * diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5c0859e7597f..61c5c6990801 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2064,6 +2064,11 @@ struct kvm_csv_command_batch { __u64 csv_batch_list_uaddr; }; +struct kvm_csv_init { + __u64 userid_addr; + __u32 len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From b7e276f2057928d62fd91f06c8feb19979c014ae Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 10:36:36 +0800 Subject: [PATCH 681/953] anolis: crypto: ccp: Define CSV3 key management command id ANBZ: #8681 Define Hygon CSV3 key management command id and structure. CSV3 is the technology for Hygon secure virtualization to improve security of guest with secure isolated memory technology in hardware. The command definition is available in CSV3 spec. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2995 --- .../arch/x86/hygon-secure-virtualization.rst | 101 +++++++++ drivers/crypto/ccp/sev-dev.c | 17 ++ include/linux/psp-csv.h | 191 ++++++++++++++++++ 3 files changed, 309 insertions(+) create mode 100644 Documentation/arch/x86/hygon-secure-virtualization.rst create mode 100644 include/linux/psp-csv.h diff --git a/Documentation/arch/x86/hygon-secure-virtualization.rst b/Documentation/arch/x86/hygon-secure-virtualization.rst new file mode 100644 index 000000000000..ab94107c91f5 --- /dev/null +++ b/Documentation/arch/x86/hygon-secure-virtualization.rst @@ -0,0 +1,101 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +=========================== +HYGON Secure Virtualization +=========================== + +China Secure Virtualization (CSV) is a key virtualization feature on Hygon +processors. + +The 1st generation of CSV (CSV for short) is a secure virtualization technology +that provides memory encryption for the virtual machine (VM); each VM's memory is +encrypted with a unique encryption key which is managed by the secure processor. + +The 2nd generation of CSV (CSV2 for short) provides a security enhancement to CSV +by encrypting not only the VM's memory but also the VM's vCPU registers. + +The 3rd generation of CSV (CSV3 for short) is a more advanced secure +virtualization technology; it integrates the secure processor, memory encryption and +memory isolation to provide the ability to protect the guest's private data. The CSV3 +guest's context, such as CPU registers, control block and nested page table, is accessed +only by the guest itself and the secure processor. Neither other guests nor the +host can tamper with the guest's context. + +The secure processor is a separate processor inside Hygon hardware. The firmware +running inside the secure processor performs activities in a secure way, such as +OVMF encryption, VM launch, secure memory management and nested page table +management. For more information, please see the CSV and CSV3 specifications from Hygon. + +A CSV guest runs in memory that is encrypted with a dedicated encryption +key set by the secure processor, and each CSV guest's memory encryption key is +unique. A low latency crypto engine resides on Hygon hardware +to minimize the negative effect on memory bandwidth. In a CSV guest, a private +page is automatically decrypted when read from memory and encrypted when +written to memory. + +CSV3 provides an enhancement technology named memory isolation to improve +security. Dedicated memory isolation hardware is built into Hygon hardware, and only +the secure processor has the privilege to configure it. At the +BIOS stage, the host reserves several memory regions as secure; these are protected +by the isolation hardware. The secure processor allocates the reserved secure +memory for a CSV3 guest and marks the memory as dedicated to the current CSV3 +guest. Any memory access (read or write) to a CSV3 guest's private memory from outside +the guest is blocked by the isolation hardware. + +A CSV3 guest may declare some memory regions as shared to share data with the +host. When a page is set as shared, reads and writes on the page bypass the +isolation hardware and the guest's shared memory can be accessed by the host. A +mechanism named the CSV3 secure call command is designed for this: the CSV3 guest sends a secure +call command to the secure processor to change private memory to shared memory. +In this method, 2 dedicated pages are reserved at an early stage of the guest. Any +read or write on the dedicated pages triggers a nested page fault. When the NPF +happens, the host helps to issue an external command to the secure processor but +cannot tamper with the data in the guest's private memory. The secure +processor then checks the fault address and handles the command only if the address matches +the dedicated pages. + +Support for CSV can be determined through the CPUID instruction. The CPUID +function 0x8000001f reports information related to CSV:: + + 0x8000001f[eax]: + Bit[1] indicates support for CSV + Bit[3] indicates support for CSV2 + Bit[30] indicates support for CSV3 + +If CSV is supported, MSR 0xc0010131 can be used to determine if CSV is active:: + + 0xc0010131: + Bit[0] 0 = CSV is not active + 1 = CSV is active + Bit[1] 0 = CSV2 is not active + 1 = CSV2 is active + Bit[30] 0 = CSV3 is not active + 1 = CSV3 is active + +All CSV/CSV2 configurations must be enabled for CSV3. Linux can activate CSV3 by +default (CONFIG_HYGON_CSV=y, CONFIG_CMA=y). A CSV3 guest's memory is managed by +CMA (Contiguous Memory Allocation). The user must specify the total CSV3 secure memory on +the Linux kernel command line with csv_mem_size or csv_mem_percentage:: + + csv_mem_size=nn[MG] + [KNL,CSV] + Reserve the specified CSV3 memory size in CMA. CSV3 guests' memory will be + allocated from these CMAs. + For instance, with csv_mem_size=40G, 40G of memory is reserved for CSV3. + + csv_mem_percentage=nn + [KNL,CSV] + Reserve a memory size which is prorated according to the + whole system memory size. CSV3 guests' memory will be allocated + from these CMAs. + For instance, csv_mem_percentage=60 means 60% of system memory is + reserved for CSV3. + The maximum percentage is 80 and the default percentage is 0. + +Limitations +----------- +The reserved CSV3 memory within CMA cannot be used by the kernel or by any application that +may pin memory using long-term GUP during the application's lifetime. +For instance, if the whole system memory is 64G and 32G is reserved for CSV3 with +the kernel command line csv_mem_percentage=50, only 32G of memory is available for CSV/CSV2. +As a result, a user will fail to run a CSV/CSV2 guest whose memory size exceeds +32G. diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 304e9b6f8a92..a8bb844c45a6 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -186,6 +187,22 @@ static int sev_cmd_buffer_len(int cmd) return sizeof(struct csv_data_hgsc_cert_import); case CSV_CMD_RING_BUFFER: return sizeof(struct csv_data_ring_buffer); + case CSV3_CMD_LAUNCH_ENCRYPT_DATA: + return sizeof(struct csv3_data_launch_encrypt_data); + case CSV3_CMD_LAUNCH_ENCRYPT_VMCB: + return sizeof(struct csv3_data_launch_encrypt_vmcb); + case CSV3_CMD_UPDATE_NPT: + return sizeof(struct csv3_data_update_npt); + case CSV3_CMD_SET_SMR: + return sizeof(struct csv3_data_set_smr); + case CSV3_CMD_SET_SMCR: + return sizeof(struct csv3_data_set_smcr); + case CSV3_CMD_SET_GUEST_PRIVATE_MEMORY: + return sizeof(struct csv3_data_set_guest_private_memory); + case CSV3_CMD_DBG_READ_VMSA: + return sizeof(struct csv3_data_dbg_read_vmsa); + case CSV3_CMD_DBG_READ_MEM: + return sizeof(struct csv3_data_dbg_read_mem); default: break; } diff --git a/include/linux/psp-csv.h b/include/linux/psp-csv.h new file mode 100644 index 000000000000..960459375cd6 --- /dev/null +++ b/include/linux/psp-csv.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon Secure Virtualization feature CSV driver interface + * + * Copyright (C) Hygon Info Technologies Ltd.
+ */ + +#ifndef __PSP_CSV_H__ +#define __PSP_CSV_H__ + +#include + +/** + * Guest/platform management commands for CSV3 + */ +enum csv3_cmd { + /* Guest launch commands */ + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY = 0x200, + CSV3_CMD_LAUNCH_ENCRYPT_DATA = 0x201, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB = 0x202, + /* Guest NPT(Nested Page Table) management commands */ + CSV3_CMD_UPDATE_NPT = 0x203, + + /* Guest migration commands */ + CSV3_CMD_SEND_ENCRYPT_DATA = 0x210, + CSV3_CMD_SEND_ENCRYPT_CONTEXT = 0x211, + CSV3_CMD_RECEIVE_ENCRYPT_DATA = 0x212, + CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT = 0x213, + + /* Guest debug commands */ + CSV3_CMD_DBG_READ_VMSA = 0x220, + CSV3_CMD_DBG_READ_MEM = 0x221, + + /* Platform secure memory management commands */ + CSV3_CMD_SET_SMR = 0x230, + CSV3_CMD_SET_SMCR = 0x231, + + CSV3_CMD_MAX, +}; + +/** + * struct csv3_data_launch_encrypt_data - CSV3_CMD_LAUNCH_ENCRYPT_DATA command + * + * @handle: handle of the VM to update + * @gpa: guest address where data is copied + * @length: len of memory to be encrypted + * @data_blocks: memory regions to hold data page address + */ +struct csv3_data_launch_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u32 length; /* In */ + u32 reserved1; /* In */ + u64 data_blocks[8]; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_vmcb - CSV3_CMD_LAUNCH_ENCRYPT_VMCB command + * + * @handle: handle of the VM + * @vcpu_id: id of vcpu per vmsa/vmcb + * @vmsa_addr: memory address of initial vmsa data + * @vmsa_len: len of initial vmsa data + * @shadow_vmcb_addr: memory address of shadow vmcb data + * @shadow_vmcb_len: len of shadow vmcb data + * @secure_vmcb_addr: memory address of secure vmcb data + * @secure_vmcb_len: len of secure vmcb data + */ +struct csv3_data_launch_encrypt_vmcb { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 vcpu_id; /* In */ + u32 reserved1; /* In */ + u64 vmsa_addr; /* In */ + u32 vmsa_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_addr; /* In */ + u32 shadow_vmcb_len; /* In */ + u32 reserved3; /* In */ + u64 secure_vmcb_addr; /* Out */ + u32 secure_vmcb_len; /* Out */ +} __packed; + +/** + * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command + * + * @handle: handle assigned to the VM + * @error_code: nested page fault error code + * @gpa: guest page address where npf happens + * @spa: physical address which maps to gpa in host page table + * @level: page level which can be mapped in nested page table + * @page_attr: page attribute for gpa + * @page_attr_mask: which page attribute bit should be set + * @npages: number of pages from gpa is handled. 
+ */ +struct csv3_data_update_npt { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 error_code; /* In */ + u32 reserved1; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u64 level; /* In */ + u64 page_attr; /* In */ + u64 page_attr_mask; /* In */ + u32 npages; /* In/Out */ +} __packed; + +/** + * struct csv3_data_mem_region - define a memory region + * + * @base_address: base address of a memory region + * @size: size of memory region + */ +struct csv3_data_memory_region { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_set_guest_private_memory - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY + * command parameters + * + * @handle: handle assigned to the VM + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_guest_private_memory { + u32 handle; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters + * + * @smr_entry_size: size of SMR entry + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_smr { + u32 smr_entry_size; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct csv3_data_set_smcr - CSV3_CMD_SET_SMCR command parameters + * + * @base_address: start address of SMCR memory + * @size: size of SMCR memory + */ +struct csv3_data_set_smcr { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_vmsa - CSV3_CMD_DBG_READ_VMSA command parameters + * + * @handle: handle assigned to the VM + * @spa: system physical address of memory to get vmsa of the specific vcpu + * @size: size of the host memory + * @vcpu_id: the specific vcpu + */ +struct csv3_data_dbg_read_vmsa { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 spa; /* In */ + u32 size; /* In */ + u32 vcpu_id; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_mem - CSV3_CMD_DBG_READ_MEM command parameters + * + * @handle: handle assigned to the VM + * @gpa: guest physical address of the memory to access + * @spa: system physical address of memory to get data from gpa + * @size: size of guest memory to access + */ +struct csv3_data_dbg_read_mem { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u32 size; /* In */ +} __packed; + +#endif -- Gitee From 8685180d32295a3826cffc4f8d4b509b6fb3242b Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 10:52:56 +0800 Subject: [PATCH 682/953] anolis: x86/csv: Manage CSV3 guest's private memory by CMA ANBZ: #8681 The private memory of a CSV3 guest is isolated from VMM and has to be physically contiguous. CMA (Contiguous Memory Allocator) is a memory allocator within the kernel for contiguous physical memory. Use the CMA for the CSV3 private memory management. 
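As an illustration of how the allocator introduced here is meant to be consumed inside the kernel, a minimal sketch follows. It assumes CONFIG_HYGON_CSV=y; the 256 MB (1 << CSV_MR_ALIGN_BITS) size and alignment mirror the SMCR allocation that the ccp driver performs in a later patch of this series, and the helper names are hypothetical.

	#include <linux/mm.h>
	#include <linux/nodemask.h>
	#include <asm/csv.h>

	/* Carve one naturally aligned 256 MB secure chunk out of the reserved
	 * CSV3 CMA pools, preferring the current NUMA node. */
	static phys_addr_t csv3_example_reserve(void)
	{
		size_t size = 1UL << CSV_MR_ALIGN_BITS;	/* 1 << 28 = 256 MB */

		return csv_alloc_from_contiguous(size, &node_online_map,
						 get_order(size));
	}

	/* Return the chunk to its CMA pool once the secure processor is done. */
	static void csv3_example_release(phys_addr_t pa)
	{
		if (pa)
			csv_release_to_contiguous(pa, 1UL << CSV_MR_ALIGN_BITS);
	}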
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2995 --- arch/x86/Kconfig | 23 ++ arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + arch/x86/include/asm/csv.h | 53 ++++ arch/x86/kernel/setup.c | 5 + arch/x86/mm/Makefile | 2 + arch/x86/mm/csv.c | 382 ++++++++++++++++++++++++ include/linux/cma.h | 1 + mm/cma.c | 28 +- mm/cma.h | 2 +- 10 files changed, 494 insertions(+), 4 deletions(-) create mode 100644 arch/x86/include/asm/csv.h create mode 100644 arch/x86/mm/csv.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ce947bac65dc..4bdfe83c377e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2044,6 +2044,29 @@ config EFI_RUNTIME_MAP See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. +config HYGON_CSV + bool "Hygon secure virtualization CSV support" + default y + depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT && CMA + help + Hygon CSV integrates secure processor, memory encryption and + memory isolation to provide the ability to protect guest's private + data. It has evolved from CSV, CSV2 to CSV3. + + For CSV, the guest's memory is encrypted. + + For CSV2, not only the guest's memory, but also the guest's vCPU + registers are encrypted, neither other guests nor the host can tamper + with the vCPU registers. + + For CSV3, the guest's context like vCPU registers, control block and + nested page table is accessed only by the guest itself and the secure + processor. Neither other guests nor the host can tamper with the + guest's context. + + Say Y here to enable support for the whole capbilities of Hygon secure + virtualization on hygon processor. + source "kernel/Kconfig.hz" config ARCH_SUPPORTS_KEXEC diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 92c4549d3a57..e9f3e126b433 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -489,6 +489,7 @@ CONFIG_EFI_HANDOVER_PROTOCOL=y CONFIG_EFI_MIXED=y # CONFIG_EFI_FAKE_MEMMAP is not set CONFIG_EFI_RUNTIME_MAP=y +CONFIG_HYGON_CSV=y # CONFIG_HZ_100 is not set # CONFIG_HZ_250 is not set # CONFIG_HZ_300 is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 392caf0e3e0a..caace118a265 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -486,6 +486,7 @@ CONFIG_EFI_HANDOVER_PROTOCOL=y CONFIG_EFI_MIXED=y # CONFIG_EFI_FAKE_MEMMAP is not set CONFIG_EFI_RUNTIME_MAP=y +CONFIG_HYGON_CSV=y # CONFIG_HZ_100 is not set # CONFIG_HZ_250 is not set # CONFIG_HZ_300 is not set diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h new file mode 100644 index 000000000000..68f55e1b857b --- /dev/null +++ b/arch/x86/include/asm/csv.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ * + * Author: Jiang Xin + */ + +#ifndef __ASM_X86_CSV_H__ +#define __ASM_X86_CSV_H__ + +#ifndef __ASSEMBLY__ + +struct csv_mem { + uint64_t start; + uint64_t size; +}; + +#ifdef CONFIG_HYGON_CSV + +#define CSV_MR_ALIGN_BITS (28) + +extern struct csv_mem *csv_smr; +extern unsigned int csv_smr_num; + +void __init early_csv_reserve_mem(void); + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align); +void csv_release_to_contiguous(phys_addr_t pa, size_t size); + +uint32_t csv_get_smr_entry_shift(void); + +#else /* !CONFIG_HYGON_CSV */ + +#define csv_smr NULL +#define csv_smr_num 0U + +static inline void __init early_csv_reserve_mem(void) { } + +static inline phys_addr_t +csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) { return 0; } +static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } + +static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_X86_CSV_H__ */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 01cf311c02ac..02dea98a5da1 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -54,6 +54,7 @@ #include #include #include +#include /* * max_low_pfn_mapped: highest directly mapped pfn < 4 GB @@ -1243,6 +1244,10 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); initmem_init(); + + /* Try to reserve contiguous memory to support CSV3 */ + early_csv_reserve_mem(); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); if (boot_cpu_has(X86_FEATURE_GBPAGES)) diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index c80febc44cd2..166a0934d3e4 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -67,3 +67,5 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c new file mode 100644 index 000000000000..fe5ca7ed4493 --- /dev/null +++ b/arch/x86/mm/csv.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. + * + * Author: Jiang Xin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "CSV-CMA: " fmt + +#define NUM_SMR_ENTRIES (8 * 1024) +#define CSV_CMA_SHIFT PUD_SHIFT +#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) +#define MIN_SMR_ENTRY_SHIFT 23 +#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) + +/* 0 percent of total memory by default*/ +static unsigned char csv_mem_percentage; +static unsigned long csv_mem_size; + +static int __init cmdline_parse_csv_mem_size(char *str) +{ + unsigned long size; + char *endp; + + if (str) { + size = memparse(str, &endp); + csv_mem_size = size; + if (!csv_mem_size) + csv_mem_percentage = 0; + } + + return 0; +} +early_param("csv_mem_size", cmdline_parse_csv_mem_size); + +static int __init cmdline_parse_csv_mem_percentage(char *str) +{ + unsigned char percentage; + int ret; + + if (!str) + return 0; + + ret = kstrtou8(str, 10, &percentage); + if (!ret) { + csv_mem_percentage = min_t(unsigned char, percentage, 80); + if (csv_mem_percentage != percentage) + pr_warn("csv_mem_percentage is limited to 80.\n"); + } else { + /* Disable CSV CMA. 
*/ + csv_mem_percentage = 0; + pr_err("csv_mem_percentage is invalid. (0 - 80) is expected.\n"); + } + + return ret; +} +early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); + +struct csv_mem *csv_smr; +EXPORT_SYMBOL_GPL(csv_smr); + +unsigned int csv_smr_num; +EXPORT_SYMBOL_GPL(csv_smr_num); + +struct csv_cma { + int fast; + struct cma *cma; +}; + +struct cma_array { + unsigned long count; + struct csv_cma csv_cma[]; +}; + +static unsigned int smr_entry_shift; +static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; + +static void csv_set_smr_entry_shift(unsigned int shift) +{ + smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); + pr_info("SMR entry size is 0x%x\n", 1 << smr_entry_shift); +} + +unsigned int csv_get_smr_entry_shift(void) +{ + return smr_entry_shift; +} +EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); + +static unsigned long __init present_pages_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long nr_present = 0; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) + nr_present += range_end_pfn - range_start_pfn; + + return nr_present; +} + +static phys_addr_t __init csv_early_percent_memory_on_node(int nid) +{ + return (present_pages_in_node(nid) * csv_mem_percentage / 100) << PAGE_SHIFT; +} + +static void __init csv_cma_reserve_mem(void) +{ + int node, i; + unsigned long size; + int idx = 0; + int count; + int cma_array_size; + unsigned long max_spanned_size = 0; + + csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smr) { + pr_err("Fail to allocate csv_smr\n"); + return; + } + + for_each_node_state(node, N_ONLINE) { + int ret; + char name[CMA_MAX_NAME]; + struct cma_array *array; + unsigned long spanned_size; + unsigned long start = 0, end = 0; + struct csv_cma *csv_cma; + + size = csv_early_percent_memory_on_node(node); + count = DIV_ROUND_UP(size, 1 << CSV_CMA_SHIFT); + if (!count) + continue; + + cma_array_size = count * sizeof(*csv_cma) + sizeof(*array); + array = memblock_alloc_node(cma_array_size, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!array) { + pr_err("Fail to allocate cma_array\n"); + continue; + } + + array->count = 0; + csv_contiguous_pernuma_area[node] = array; + + for (i = 0; i < count; i++) { + csv_cma = &array->csv_cma[i]; + csv_cma->fast = 1; + snprintf(name, sizeof(name), "csv-n%dc%d", node, i); + ret = cma_declare_contiguous_nid(0, CSV_CMA_SIZE, 0, + 1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT, + false, name, &(csv_cma->cma), node); + if (ret) { + pr_warn("Fail to reserve memory size 0x%x node %d\n", + 1 << CSV_CMA_SHIFT, node); + break; + } + + if (start > cma_get_base(csv_cma->cma) || !start) + start = cma_get_base(csv_cma->cma); + + if (end < cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma)) + end = cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma); + } + + if (!i) + continue; + + array->count = i; + spanned_size = end - start; + if (spanned_size > max_spanned_size) + max_spanned_size = spanned_size; + + csv_smr[idx].start = start; + csv_smr[idx].size = end - start; + idx++; + + pr_info("Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n", + node, (unsigned long)i * CSV_CMA_SIZE, size); + } + + csv_smr_num = idx; + WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); + if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) + csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); +} + +/* + * Check whether host supports CSV3 in hygon platform. 
+ * Called in the guest, it always returns false. + */ +static bool __init csv3_check_cpu_support(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + u64 msr; + bool csv3_enabled; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (sev_status) + return false; + + /* Check for the SME/CSV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return false; + +#define HYGON_SME_BIT BIT(0) +#define HYGON_CSV3_BIT BIT(30) + /* + * Check for the CSV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - SME support + * - Bit 1 - CSV support + * - Bit 3 - CSV2 support + * - Bit 30 - CSV3 support + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (!(eax & HYGON_SME_BIT)) + return false; + + csv3_enabled = !!(eax & HYGON_CSV3_BIT); + + me_mask = 1UL << (ebx & 0x3f); + + /* No SME if Hypervisor bit is set */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (ecx & BIT(31)) + return false; + + /* For SME, check the SYSCFG MSR */ + msr = __rdmsr(MSR_AMD64_SYSCFG); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + return false; + + return !!me_mask && csv3_enabled; +} + +#define CSV_CMA_AREAS 2458 + +void __init early_csv_reserve_mem(void) +{ + unsigned long total_pages; + + /* Only reserve memory on a host that has the CSV3 feature enabled */ + if (!csv3_check_cpu_support()) + return; + + if (cma_alloc_areas(CSV_CMA_AREAS)) + return; + + total_pages = PHYS_PFN(memblock_phys_mem_size()); + if (csv_mem_size) { + if (csv_mem_size < (total_pages << PAGE_SHIFT)) { + csv_mem_percentage = csv_mem_size * 100 / (total_pages << PAGE_SHIFT); + if (csv_mem_percentage > 80) + csv_mem_percentage = 80; /* Maximum percentage */ + } else + csv_mem_percentage = 80; /* Maximum percentage */ + } + + if (!csv_mem_percentage) { + pr_warn("Don't reserve any memory\n"); + return; + } + + csv_cma_reserve_mem(); +} + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) +{ + int nid; + int nr_nodes; + struct page *page = NULL; + phys_addr_t phys_addr; + int count; + struct csv_cma *csv_cma; + int fast = 1; + + if (!nodes_allowed || size > CSV_CMA_SIZE) { + pr_err("Invalid params, size = 0x%lx, nodes_allowed = %p\n", + size, nodes_allowed); + return 0; + } + + align = min_t(unsigned int, align, get_order(CSV_CMA_SIZE)); +retry: + nr_nodes = nodes_weight(*nodes_allowed); + + /* Traverse from current node */ + nid = numa_node_id(); + if (!node_isset(nid, *nodes_allowed)) + nid = next_node_in(nid, *nodes_allowed); + + for (; nr_nodes > 0; nid = next_node_in(nid, *nodes_allowed), nr_nodes--) { + struct cma_array *array = csv_contiguous_pernuma_area[nid]; + + if (!array) + continue; + + count = array->count; + while (count) { + csv_cma = &array->csv_cma[count - 1]; + + /* + * The value check of csv_cma->fast is lockless, but + * that's ok as this doesn't affect functional correctness + * whatever the value of csv_cma->fast.
+ */ + if (fast && !csv_cma->fast) { + count--; + continue; + } + page = cma_alloc(csv_cma->cma, PAGE_ALIGN(size) >> PAGE_SHIFT, + align, true); + if (page) { + page->private = (unsigned long)csv_cma; + if (!csv_cma->fast) + csv_cma->fast = 1; + goto success; + } else + csv_cma->fast = 0; + + count--; + } + } + + if (fast) { + fast = 0; + goto retry; + } else { + pr_err("Fail to alloc secure memory(size = 0x%lx)\n", size); + return 0; + } + +success: + phys_addr = page_to_phys(page); + clflush_cache_range(__va(phys_addr), size); + + return phys_addr; +} +EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous); + +void csv_release_to_contiguous(phys_addr_t pa, size_t size) +{ + struct csv_cma *csv_cma; + struct page *page = pfn_to_page(pa >> PAGE_SHIFT); + + WARN_ON(!page); + if (likely(page)) { + csv_cma = (struct csv_cma *)page->private; + WARN_ON(!csv_cma); + if (likely(csv_cma)) { + page->private = 0; + csv_cma->fast = 1; + cma_release(csv_cma->cma, page, PAGE_ALIGN(size) >> PAGE_SHIFT); + } + } +} +EXPORT_SYMBOL_GPL(csv_release_to_contiguous); diff --git a/include/linux/cma.h b/include/linux/cma.h index 63873b93deaa..4dadf9a05752 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -56,4 +56,5 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); extern void cma_reserve_pages_on_error(struct cma *cma); +extern int __init cma_alloc_areas(unsigned int max_cma_size); #endif diff --git a/mm/cma.c b/mm/cma.c index 2b2494fd6b59..721316622bca 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -36,7 +36,10 @@ #include "internal.h" #include "cma.h" -struct cma cma_areas[MAX_CMA_AREAS]; +static struct cma cma_areas_data[MAX_CMA_AREAS]; +static unsigned int cma_areas_size = MAX_CMA_AREAS; +struct cma *cma_areas = cma_areas_data; + unsigned cma_area_count; static DEFINE_MUTEX(cma_mutex); @@ -159,6 +162,25 @@ void __init cma_reserve_pages_on_error(struct cma *cma) cma->reserve_pages_on_error = true; } +int __init cma_alloc_areas(unsigned int max_cma_size) +{ + struct cma *data; + + if (max_cma_size <= MAX_CMA_AREAS) + return 0; + + if (cma_area_count || cma_areas != cma_areas_data) + return -EPERM; + + data = memblock_alloc(max_cma_size * sizeof(*cma_areas), SMP_CACHE_BYTES); + if (!data) + return -ENOMEM; + + cma_areas = data; + cma_areas_size = max_cma_size; + return 0; +} + /** * cma_init_reserved_mem() - create custom contiguous area from reserved memory * @base: Base address of the reserved area @@ -179,7 +201,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma *cma; /* Sanity checks */ - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -256,7 +278,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, &alignment); - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } diff --git a/mm/cma.h b/mm/cma.h index 88a0595670b7..12aba820969c 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -33,7 +33,7 @@ struct cma { bool reserve_pages_on_error; }; -extern struct cma cma_areas[MAX_CMA_AREAS]; +extern struct cma *cma_areas; extern unsigned cma_area_count; static inline unsigned long cma_bitmap_maxno(struct cma *cma) -- Gitee From 
4376859e5149b98b1f4316c8142e89435f6fc31a Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 11:03:41 +0800 Subject: [PATCH 683/953] anolis: crypto: ccp: Add SET_SMR/SET_SMCR commands for CSV3 ANBZ: #8681 Set guest memory regions in hygon hardware with SET_SMR command. Secure memory control region(SMCR) is a special memory region which is dedicated for CSV3 guest's meta data. SET_SMCR command is used to set SMCR memory in hygon hardware. Both SET_SMR and SET_SMCR should be issued early during platform initialization. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2995 --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/csv-dev.c | 101 +++++++++++++++++++++++++++++++++++ drivers/crypto/ccp/csv-dev.h | 31 +++++++++++ drivers/crypto/ccp/sev-dev.c | 15 ++++++ 4 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/csv-dev.c create mode 100644 drivers/crypto/ccp/csv-dev.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 94c673805325..0da504999951 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -13,7 +13,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ tee-dev.o \ platform-access.o \ dbc.o \ - psp-ringbuf.o + psp-ringbuf.o \ + csv-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/csv-dev.c b/drivers/crypto/ccp/csv-dev.c new file mode 100644 index 000000000000..b9a9ca4fa3c7 --- /dev/null +++ b/drivers/crypto/ccp/csv-dev.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include "sev-dev.h" +#include "csv-dev.h" + +/* Function pointers for hooks */ +struct csv_hooks_table csv_hooks; + +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + int ret = 0; + unsigned int i = 0; + struct csv3_data_set_smr *cmd_set_smr; + struct csv3_data_set_smcr *cmd_set_smcr; + struct csv3_data_memory_region *smr_regions; + + if (!csv_smr || !csv_smr_num) + return -EINVAL; + + cmd_set_smr = kzalloc(sizeof(*cmd_set_smr), GFP_KERNEL); + if (!cmd_set_smr) + return -ENOMEM; + + smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + if (!smr_regions) { + ret = -ENOMEM; + goto e_free_cmd_set_smr; + } + + for (i = 0; i < csv_smr_num; i++) { + smr_regions[i].base_address = csv_smr[i].start; + smr_regions[i].size = csv_smr[i].size; + } + cmd_set_smr->smr_entry_size = 1 << csv_get_smr_entry_shift(); + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smr_num; + ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) { + pr_err("Fail to set SMR, ret %#x, error %#x\n", ret, *error); + goto e_free_smr_area; + } + + cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); + if (!cmd_set_smcr) { + ret = -ENOMEM; + goto e_free_smr_area; + } + + cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, + &node_online_map, + get_order(1 << CSV_MR_ALIGN_BITS)); + if (!cmd_set_smcr->base_address) { + pr_err("Fail to alloc SMCR memory\n"); + ret = -ENOMEM; + goto e_free_cmd_set_smcr; + } + + cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; + ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); + if (ret) { + if (*error == SEV_RET_INVALID_COMMAND) + ret = 0; + else + pr_err("set smcr ret %#x, error %#x\n", ret, *error); + + csv_release_to_contiguous(cmd_set_smcr->base_address, + 1UL << CSV_MR_ALIGN_BITS); + } + +e_free_cmd_set_smcr: + kfree((void *)cmd_set_smcr); +e_free_smr_area: + kfree((void *)smr_regions); +e_free_cmd_set_smr: + kfree((void *)cmd_set_smr); + + if (ret) + dev_warn(sev->dev, + "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); + + return ret; +} + +#endif /* CONFIG_HYGON_CSV */ diff --git a/drivers/crypto/ccp/csv-dev.h b/drivers/crypto/ccp/csv-dev.h new file mode 100644 index 000000000000..8865b945728b --- /dev/null +++ b/drivers/crypto/ccp/csv-dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ *
+ * Author: Liyang Han
+ */

+#ifndef __CSV_DEV_H__
+#define __CSV_DEV_H__
+
+#include
+
+/* Hooks table: a table of function pointers filled in when the PSP initializes */
+extern struct csv_hooks_table {
+	int (*sev_do_cmd)(int cmd, void *data, int *psp_ret);
+} csv_hooks;
+
+#ifdef CONFIG_HYGON_CSV
+
+int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error);
+
+#else	/* !CONFIG_HYGON_CSV */
+
+static inline int
+csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) { return 0; }
+
+#endif	/* CONFIG_HYGON_CSV */
+
+#endif	/* __CSV_DEV_H__ */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index a8bb844c45a6..80a733d208ce 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -33,6 +33,7 @@

 #include "psp-dev.h"
 #include "sev-dev.h"
+#include "csv-dev.h"

 #define DEVICE_NAME	"sev"
 #define SEV_FW_FILE	"amd/sev.fw"
@@ -2375,6 +2376,13 @@ static void sev_exit(struct kref *ref)
 	misc_dev = NULL;
 }

+/* Code to set all of the function pointers for CSV. */
+static inline void csv_install_hooks(void)
+{
+	/* Install the hook functions for CSV. */
+	csv_hooks.sev_do_cmd = sev_do_cmd;
+}
+
 static int sev_misc_init(struct sev_device *sev)
 {
 	struct device *dev = sev->dev;
@@ -2404,6 +2412,9 @@ static int sev_misc_init(struct sev_device *sev)
 			return ret;

 		kref_init(&misc_dev->refcount);
+
+		/* Install the hook functions for CSV */
+		csv_install_hooks();
 	} else {
 		kref_get(&misc_dev->refcount);
 	}
@@ -2566,6 +2577,10 @@ void sev_pci_init(void)
 	if (!psp_init_on_probe)
 		return;

+	/* Set SMR for HYGON CSV3 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		csv_platform_cmd_set_secure_memory_region(sev, &error);
+
 	/* Initialize the platform */
 	rc = sev_platform_init(&error);
 	if (rc)
-- 
Gitee

From 4e8242651e40b7d43b432198dbad8459fff45247 Mon Sep 17 00:00:00 2001
From: Xin Jiang
Date: Sat, 16 Mar 2024 13:40:54 +0800
Subject: [PATCH 684/953] anolis: KVM: SEV: Pin SEV guest memory out of CMA area

ANBZ: #8681

When pin_user_pages_fast() pins SEV guest memory without the
FOLL_LONGTERM flag, the pinned pages may sit in a CMA area; because
pinned pages cannot be migrated, other applications may then be unable
to use that CMA area.

Add the FOLL_LONGTERM flag to pin_user_pages_fast(), which makes sure
we don't keep non-movable pages (due to their page reference count) in
a CMA area, so the CMA area stays allocatable for other applications.

Signed-off-by: Xin Jiang
Signed-off-by: hanliyang
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2995
---
 arch/x86/kvm/svm/sev.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
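The effect of the flag, as a minimal kernel-style sketch (the helper
name and signature here are illustrative, not part of the patch):

	#include <linux/mm.h>

	/*
	 * Long-lived pin of a user/guest buffer: FOLL_LONGTERM tells GUP
	 * to migrate the pages out of CMA/ZONE_MOVABLE before taking the
	 * pin, so the pin cannot block future CMA allocations.
	 */
	static int pin_guest_buffer(unsigned long uaddr, int npages,
				    bool write, struct page **pages)
	{
		unsigned int flags = write ? FOLL_WRITE : 0;

		return pin_user_pages_fast(uaddr, npages,
					   flags | FOLL_LONGTERM, pages);
	}

Without FOLL_LONGTERM the pin succeeds in place, and a page that
happens to live in a CMA region stays there, unmigratable, for as long
as the pin is held.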
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 2fbd9b03c859..7f2ea4d9e878 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -551,6 +551,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 	unsigned long locked, lock_limit;
 	struct page **pages;
 	unsigned long first, last;
+	unsigned int flags = 0;
 	int ret;

 	lockdep_assert_held(&kvm->lock);
@@ -583,8 +584,10 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 	if (!pages)
 		return ERR_PTR(-ENOMEM);

+	flags = write ? FOLL_WRITE : 0;
+
 	/* Pin the user virtual address. */
-	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+	npinned = pin_user_pages_fast(uaddr, npages, flags | FOLL_LONGTERM, pages);
 	if (npinned != npages) {
 		pr_err("SEV: Failure locking %lu pages.\n", npages);
 		ret = -ENOMEM;
-- 
Gitee

From 0322c881fa5e7b5319850ad8642d10f8458162cd Mon Sep 17 00:00:00 2001
From: Cruz Zhao
Date: Tue, 28 Feb 2023 09:42:36 +0000
Subject: [PATCH 685/953] anolis: sched/core: introduce ht-aware-quota

ANBZ: #8648

With acpu accounting, we are able to assess how long a task runs with
its SMT sibling idle and how long with the sibling busy. To keep the
computing power of tasks stable, we need tasks to execute a similar
number of instructions in each scheduling cycle. To achieve this goal,
we introduce ht-aware-quota.

When a task runs with its sibling idle, we consider it to have executed
more instructions, by a certain ratio, so the sibling-idle time * ratio
is accounted against its cfs_rq runtime, not just the sibling-idle time
itself. For example, with ht_ratio = 150, 10ms of sibling-idle runtime
consumes 15ms of the group's quota.

The ratio can be configured from /sys/fs/cgroup//cpu.ht_ratio,
unit: percentage, range: [100, 200], default: 100.

For now, ht-aware-quota is only valid for cookie'd tasks, since only
when the sibling is busy do we know which task is running there. The
sched_feat SCHED_CORE_HT_AWARE_QUOTA must also be enabled.

Signed-off-by: Cruz Zhao
Reviewed-by: Yi Tao
Reviewed-by: Tianchen Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2956
---
 include/linux/sched.h     |  3 +++
 kernel/sched/core.c       | 49 +++++++++++++++++++++++++++++++++++++++
 kernel/sched/core_sched.c |  3 ++-
 kernel/sched/fair.c       | 29 +++++++++++++++++++++++
 kernel/sched/features.h   |  4 ++++
 kernel/sched/sched.h      |  8 +++++++
 6 files changed, 95 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 44f5db175d2e..06117e8d7420 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -590,6 +590,9 @@ struct sched_entity {
 	 */
 	struct sched_avg		avg;
 #endif
+#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH)
+	unsigned int			ht_ratio;
+#endif
 };

 struct sched_rt_entity {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d59ef701ccfd..286dcd3de048 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10576,6 +10576,9 @@ struct task_group *sched_create_group(struct task_group *parent)

 	alloc_uclamp_sched_group(tg, parent);

+#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH)
+	tg->ht_ratio = 100;
+#endif
 	return tg;

 err:
@@ -11463,6 +11466,38 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
 }
 #endif

+#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH)
+static int cpu_ht_ratio_write(struct cgroup_subsys_state *css,
+			      struct cftype *cftype, u64 ht_ratio)
+{
+	struct task_group *tg = css_tg(css);
+	int cpu;
+
+	if (ht_ratio < 100 || ht_ratio > 200)
+		return -1;
+
+	if (tg == &root_task_group)
+		return -1;
+
+	tg->ht_ratio = ht_ratio;
+	for_each_online_cpu(cpu) {
+		struct sched_entity *se = tg->se[cpu];
+
+		se->ht_ratio = ht_ratio;
+	}
+
+	return 0;
+}
+
+static u64 cpu_ht_ratio_read(struct cgroup_subsys_state *css,
+			     struct cftype *cft)
+{
+	struct task_group *tg = css_tg(css);
+
+	return tg->ht_ratio;
+}
+#endif
+
 static struct cftype cpu_legacy_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	{
@@ -11531,6 +11566,13 @@ static struct cftype cpu_legacy_files[] = {
 		.seq_show = cpu_uclamp_max_show,
 		.write = cpu_uclamp_max_write,
 	},
+#endif
+#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH)
+	{
+		.name = "ht_ratio",
+		.read_u64 = cpu_ht_ratio_read,
+		.write_u64 = cpu_ht_ratio_write,
+	},
 #endif
 	{ }	/* Terminate */
 };
@@
-11758,6 +11800,13 @@ static struct cftype cpu_files[] = { .seq_show = cpu_uclamp_max_show, .write = cpu_uclamp_max_write, }, +#endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + { + .name = "ht_ratio", + .read_u64 = cpu_ht_ratio_read, + .write_u64 = cpu_ht_ratio_write, + }, #endif { } /* terminate */ }; diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index f931992fc08e..924859051b5f 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -292,9 +292,10 @@ void __sched_core_account_sibidle(struct rq *rq) */ __account_sibidle_time(p, delta, delta_task, !!rq->core->core_forceidle_count); + account_ht_aware_quota(p, delta_task); } -out: +out:; #ifdef CONFIG_SCHED_ACPU for_each_cpu(i, smt_mask) { rq_i = cpu_rq(i); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a1f48b1cc568..44d9f645c817 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -12554,6 +12554,32 @@ static int task_is_throttled_fair(struct task_struct *p, int cpu) #endif return throttled_hierarchy(cfs_rq); } + +#ifdef CONFIG_CFS_BANDWIDTH +void account_ht_aware_quota(struct task_struct *p, u64 delta) +{ + struct sched_entity *se; + unsigned int ht_ratio; + struct cfs_rq *cfs_rq; + + /* We only account ht_aware_quota for cookied task. */ + if (sched_feat(SCHED_CORE_HT_AWARE_QUOTA) && p->core_cookie) { + se = &p->se; + cfs_rq = task_cfs_rq(p); + + if (se->parent) { + ht_ratio = se->parent->ht_ratio; + if (ht_ratio > 100 && ht_ratio <= 200) { + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + account_cfs_rq_runtime(cfs_rq, + delta * (ht_ratio - 100) / 100); + } + } + } + } +} +#endif #else static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} #endif @@ -12839,6 +12865,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) init_cfs_rq(cfs_rq); init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); init_entity_runnable_average(se); +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + se->ht_ratio = 100; +#endif } return 1; diff --git a/kernel/sched/features.h b/kernel/sched/features.h index f770168230ae..ee7fb7220ed8 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -89,3 +89,7 @@ SCHED_FEAT(UTIL_EST_FASTUP, true) SCHED_FEAT(LATENCY_WARN, false) SCHED_FEAT(HZ_BW, true) + +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) +SCHED_FEAT(SCHED_CORE_HT_AWARE_QUOTA, false) +#endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index fc96d29e2c70..fe396568d71c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -420,6 +420,9 @@ struct task_group { /* Effective clamp values used for a task group */ struct uclamp_se uclamp[UCLAMP_CNT]; #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + unsigned int ht_ratio; +#endif }; @@ -1336,6 +1339,11 @@ extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); extern void sched_core_get(void); extern void sched_core_put(void); +#ifdef CONFIG_CFS_BANDWIDTH +extern void account_ht_aware_quota(struct task_struct *p, u64 delta); +#else +void account_ht_aware_quota(struct task_struct *p, u64 delta) {} +#endif #else /* !CONFIG_SCHED_CORE */ static inline bool sched_core_enabled(struct rq *rq) -- Gitee From d1177215ef1e5838bf3d39073b37a24ec9dde060 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Mon, 20 Jan 2020 17:24:11 +0800 Subject: [PATCH 686/953] anolis: sched/isolation: dynamical CPU isolation support ANBZ: #8684 We have so many wild tasks under root cgroup, 
they come from anywhere, and there is no good way to manage them
properly. However, we don't want them to disturb our critical tasks,
for example the IO processing tasks in the MOC environment.

Currently we're using the 'isolcpus' cmdline parameter to isolate CPUs,
but this is a static config, so a reboot is required later when we want
to release more CPU resources.

This patch introduces a way to dynamically isolate CPUs from those wild
tasks, giving the admin the capability of dynamical CPU resource
arrangement. By 'echo CPULIST > /proc/dyn_isolcpus', the CPUs will be:

* Isolated from the unbound userspace wild tasks
* Removed from the scheduling domains
* Isolated from the unbound workqueue workers

Reviewed-by: Shanpei Chen
Signed-off-by: Michael Wang
Acked-by: Michael Wang
Reviewed-by: Yihao Wu
Signed-off-by: Cruz Zhao
---
 include/linux/sched/isolation.h |   7 ++
 kernel/cgroup/cpuset.c          |  23 +++-
 kernel/sched/isolation.c        | 188 ++++++++++++++++++++++++++++++++
 3 files changed, 215 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index fe1a46f30d24..bf538a280c82 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -55,6 +55,13 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
 static inline void housekeeping_init(void) { }
 #endif /* CONFIG_CPU_ISOLATION */

+#if defined(CONFIG_CPU_ISOLATION) && defined(CONFIG_CGROUP_SCHED)
+DECLARE_STATIC_KEY_FALSE(dyn_isolcpus_enabled);
+extern void wilds_cpus_allowed(struct cpumask *pmask);
+#else
+static inline void wilds_cpus_allowed(struct cpumask *pmask) {}
+#endif
+
 static inline bool housekeeping_cpu(int cpu, enum hk_type type)
 {
 #ifdef CONFIG_CPU_ISOLATION
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 1c2543edefee..f5626ccdc786 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2645,9 +2645,10 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
 {
 	lockdep_assert_held(&cpuset_mutex);

-	if (cs != &top_cpuset)
+	if (cs != &top_cpuset) {
 		guarantee_online_cpus(task, cpus_attach);
-	else
+		wilds_cpus_allowed(cpus_attach);
+	} else
 		cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
 			       cs->subparts_cpus);
 	/*
@@ -3476,8 +3477,24 @@ static void cpuset_fork(struct task_struct *task)
 	rcu_read_unlock();

 	if (same_cs) {
-		if (cs == &top_cpuset)
+		if (cs == &top_cpuset) {
+			/*
+			 * This is necessary since update_wilds_cpumask()
+			 * could have missed the 'task' if its parent is
+			 * the last one on the iteration list, like:
+			 *
+			 * 1. 'task' dups old dyn_allowed from its parent
+			 * 2. update_wilds_cpumask() begins
+			 * 3. new dyn_allowed applied to the parent
+			 * 4. update_wilds_cpumask() ends
+			 * 5. 'task' added into the iteration list
+			 *
+			 * Fix this by re-copying current's allowed mask
+			 * here if it changed.
+			 */
+			if (!cpumask_equal(task->cpus_ptr, current->cpus_ptr))
+				set_cpus_allowed_ptr(task, current->cpus_ptr);
 			return;
+		}

 		set_cpus_allowed_ptr(task, current->cpus_ptr);
 		task->mems_allowed = current->mems_allowed;
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 373d42c707bc..cb975a8468f6 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -53,8 +53,32 @@ int housekeeping_any_cpu(enum hk_type type)
 }
 EXPORT_SYMBOL_GPL(housekeeping_any_cpu);

+#ifdef CONFIG_CGROUP_SCHED
+/*
+ * dyn_allowed -- allowed CPUs for wild tasks.
+ *
+ * dyn_isolated -- isolated CPUs for wild tasks.
+ *
+ * dyn_possible -- possible CPUs for dynamical isolation.
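+ *
+ * At any time dyn_allowed is dyn_possible minus dyn_isolated; the
+ * writer of /proc/dyn_isolcpus maintains this invariant.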
+ */ +static cpumask_var_t dyn_allowed; +static cpumask_var_t dyn_isolated; +static cpumask_var_t dyn_possible; + +static bool dyn_isolcpus_ready; + +DEFINE_STATIC_KEY_FALSE(dyn_isolcpus_enabled); +EXPORT_SYMBOL_GPL(dyn_isolcpus_enabled); +#endif + const struct cpumask *housekeeping_cpumask(enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return dyn_allowed; +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return housekeeping.cpumasks[type]; @@ -72,6 +96,12 @@ EXPORT_SYMBOL_GPL(housekeeping_affine); bool housekeeping_test_cpu(int cpu, enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return cpumask_test_cpu(cpu, dyn_allowed); +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return cpumask_test_cpu(cpu, housekeeping.cpumasks[type]); @@ -79,10 +109,30 @@ bool housekeeping_test_cpu(int cpu, enum hk_type type) } EXPORT_SYMBOL_GPL(housekeeping_test_cpu); +#ifdef CONFIG_CGROUP_SCHED +static inline void free_dyn_masks(void) +{ + free_cpumask_var(dyn_allowed); + free_cpumask_var(dyn_isolated); + free_cpumask_var(dyn_possible); +} +#endif + void __init housekeeping_init(void) { enum hk_type type; +#ifdef CONFIG_CGROUP_SCHED + if (zalloc_cpumask_var(&dyn_allowed, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_isolated, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_possible, GFP_KERNEL)) { + cpumask_copy(dyn_allowed, cpu_possible_mask); + cpumask_copy(dyn_possible, cpu_possible_mask); + dyn_isolcpus_ready = true; + } else + free_dyn_masks(); +#endif + if (!housekeeping.flags) return; @@ -95,6 +145,12 @@ void __init housekeeping_init(void) /* We need at least one CPU to handle housekeeping work */ WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type])); } +#ifdef CONFIG_CGROUP_SCHED + if ((housekeeping.flags & HK_FLAG_DOMAIN) && type < HK_TYPE_MAX) { + cpumask_copy(dyn_allowed, housekeeping.cpumasks[type]); + cpumask_copy(dyn_possible, housekeeping.cpumasks[type]); + } +#endif } static void __init housekeeping_setup_type(enum hk_type type, @@ -239,3 +295,135 @@ static int __init housekeeping_isolcpus_setup(char *str) return housekeeping_setup(str, flags); } __setup("isolcpus=", housekeeping_isolcpus_setup); + +#ifdef CONFIG_CGROUP_SCHED +static int dyn_isolcpus_show(struct seq_file *s, void *p) +{ + seq_printf(s, "%*pbl\n", cpumask_pr_args(dyn_isolated)); + + return 0; +} + +static int dyn_isolcpus_open(struct inode *inode, struct file *file) +{ + return single_open(file, dyn_isolcpus_show, NULL); +} + +void wilds_cpus_allowed(struct cpumask *pmask) +{ + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + cpumask_and(pmask, pmask, dyn_allowed); +} + +void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed) +{ + struct css_task_iter it; + struct task_struct *task; + struct task_group *tg = &root_task_group; + + css_task_iter_start(&tg->css, 0, &it); + while ((task = css_task_iter_next(&it))) { + if (task->flags & PF_KTHREAD) + continue; + + if (!cpumask_equal(task->cpus_ptr, old_allowed)) + continue; + + set_cpus_allowed_ptr(task, new_allowed); + } + css_task_iter_end(&it); +} + +static DEFINE_MUTEX(dyn_isolcpus_mutex); + +static ssize_t write_dyn_isolcpus(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = count; + cpumask_var_t isolated; + cpumask_var_t new_allowed; + 
cpumask_var_t old_allowed;
+
+	mutex_lock(&dyn_isolcpus_mutex);
+
+	if (!zalloc_cpumask_var(&isolated, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (!zalloc_cpumask_var(&new_allowed, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto free_isolated;
+	}
+
+	if (!zalloc_cpumask_var(&old_allowed, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto free_new_allowed;
+	}
+
+	if (cpumask_parselist_user(buf, count, isolated)) {
+		ret = -EINVAL;
+		goto free_all;
+	}
+
+	if (!cpumask_subset(isolated, dyn_possible)) {
+		ret = -EINVAL;
+		goto free_all;
+	}
+
+	/* At least reserve one for wild tasks to run */
+	cpumask_andnot(new_allowed, dyn_possible, isolated);
+	if (!cpumask_intersects(new_allowed, cpu_online_mask)) {
+		ret = -EINVAL;
+		goto free_all;
+	}
+
+	cpumask_copy(old_allowed, dyn_allowed);
+	cpumask_copy(dyn_allowed, new_allowed);
+	cpumask_copy(dyn_isolated, isolated);
+
+	if (cpumask_empty(dyn_isolated))
+		static_branch_disable(&dyn_isolcpus_enabled);
+	else
+		static_branch_enable(&dyn_isolcpus_enabled);
+
+	update_wilds_cpumask(new_allowed, old_allowed);
+
+	rebuild_sched_domains();
+	workqueue_set_unbound_cpumask(new_allowed);
+
+free_all:
+	free_cpumask_var(old_allowed);
+free_new_allowed:
+	free_cpumask_var(new_allowed);
+free_isolated:
+	free_cpumask_var(isolated);
+out:
+	mutex_unlock(&dyn_isolcpus_mutex);
+
+	return ret;
+}
+
+static const struct proc_ops proc_dyn_isolcpus_operations = {
+	.proc_open	= dyn_isolcpus_open,
+	.proc_read	= seq_read,
+	.proc_write	= write_dyn_isolcpus,
+	.proc_lseek	= noop_llseek,
+};
+
+static int __init dyn_isolcpus_init(void)
+{
+	if (dyn_isolcpus_ready &&
+	    !proc_create("dyn_isolcpus", 0200, NULL,
+			 &proc_dyn_isolcpus_operations)) {
+		dyn_isolcpus_ready = false;
+		free_dyn_masks();
+	}
+
+	if (!dyn_isolcpus_ready)
+		pr_err("Initialize Dynamical Isolation Failed\n");
+
+	return 0;
+}
+early_initcall(dyn_isolcpus_init);
+#endif
-- 
Gitee

From 8747d1ba294868f2f54f41f68bd873512d70e17c Mon Sep 17 00:00:00 2001
From: Yihao Wu
Date: Thu, 2 Feb 2023 16:54:45 +0800
Subject: [PATCH 687/953] anolis: sched/isolation: dynamically isolate all tasks

ANBZ: #8684

Previously, only tasks in the root task group were isolated. But
production proves universal isolation more useful, especially when the
ops engineer has limited control over the system.

To prevent any task from disturbing critical tasks, we choose to
isolate all tasks instead of only the wild ones. dyn_isolcpus also
behaves more like the isolcpus kernel cmdline now.
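A minimal userspace sketch of driving this interface (the CPU list
values are illustrative):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* carve CPUs 2-3 out of the mask used by unbound tasks */
		int fd = open("/proc/dyn_isolcpus", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return 1;
		n = write(fd, "2-3", 3);	/* echo 2-3 > /proc/dyn_isolcpus */
		close(fd);
		return n == 3 ? 0 : 1;
	}

Per write_dyn_isolcpus() above, writing an empty CPU list should clear
the isolation again and disable the static branch.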
Signed-off-by: Yihao Wu
---
 kernel/sched/isolation.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index cb975a8468f6..94802c50c44a 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -317,12 +317,10 @@ void wilds_cpus_allowed(struct cpumask *pmask)

 void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed)
 {
-	struct css_task_iter it;
-	struct task_struct *task;
-	struct task_group *tg = &root_task_group;
+	struct task_struct *g, *task;

-	css_task_iter_start(&tg->css, 0, &it);
-	while ((task = css_task_iter_next(&it))) {
+	rcu_read_lock();
+	for_each_process_thread(g, task) {
 		if (task->flags & PF_KTHREAD)
 			continue;

@@ -331,7 +329,7 @@ void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed)

 		set_cpus_allowed_ptr(task, new_allowed);
 	}
-	css_task_iter_end(&it);
+	rcu_read_unlock();
 }

 static DEFINE_MUTEX(dyn_isolcpus_mutex);
-- 
Gitee

From 4d9f21cd57dcc0bc168b8b1a49c53c71177d260f Mon Sep 17 00:00:00 2001
From: suhua
Date: Fri, 16 Jun 2023 10:19:02 +0800
Subject: [PATCH 688/953] anolis: sched: Copy mask when dyn_isolcpus_ready is true

ANBZ: #8684

When the housekeeping_init() function initializes dyn_allowed and
dyn_possible, the check that the target cpumask memory was successfully
allocated is missing. Only copy into the masks when dyn_isolcpus_ready
says the allocations succeeded.

Signed-off-by: suhua
Reviewed-by: Tianchen Ding
Reviewed-by: Cruz Zhao
Reviewed-by: Zelin Deng
---
 kernel/sched/isolation.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 94802c50c44a..11f11f0cb239 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -146,7 +146,8 @@ void __init housekeeping_init(void)
 		WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type]));
 	}
 #ifdef CONFIG_CGROUP_SCHED
-	if ((housekeeping.flags & HK_FLAG_DOMAIN) && type < HK_TYPE_MAX) {
+	if (dyn_isolcpus_ready && (housekeeping.flags & HK_FLAG_DOMAIN) &&
+	    type < HK_TYPE_MAX) {
 		cpumask_copy(dyn_allowed, housekeeping.cpumasks[type]);
 		cpumask_copy(dyn_possible, housekeeping.cpumasks[type]);
 	}
-- 
Gitee

From 8a7a237c819a9c116190466444a0049fee1d3a14 Mon Sep 17 00:00:00 2001
From: lishuo
Date: Wed, 3 Apr 2024 09:28:52 +0000
Subject: [PATCH 689/953] anolis: DRM: Add Phytium Display Engine support.

ANBZ: #8701

phytium inclusion
category: feature
CVE: NA

---------------------------------------------------------

This is the Phytium Display Engine support (DC/DP driver) patch.
Signed-off-by: Yang Xun Signed-off-by: Chen Baozi Signed-off-by: lishuo Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3006 --- .../devicetree/bindings/gpu/phytium,dc.yaml | 49 + .../devicetree/bindings/vendor-prefixes.yaml | 2 + drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/phytium/Kconfig | 12 + drivers/gpu/drm/phytium/Makefile | 20 + drivers/gpu/drm/phytium/pe220x_dc.c | 255 ++ drivers/gpu/drm/phytium/pe220x_dc.h | 31 + drivers/gpu/drm/phytium/pe220x_dp.c | 514 ++++ drivers/gpu/drm/phytium/pe220x_dp.h | 14 + drivers/gpu/drm/phytium/pe220x_reg.h | 209 ++ drivers/gpu/drm/phytium/phytium_crtc.c | 484 +++ drivers/gpu/drm/phytium/phytium_crtc.h | 39 + drivers/gpu/drm/phytium/phytium_debugfs.c | 456 +++ drivers/gpu/drm/phytium/phytium_debugfs.h | 13 + drivers/gpu/drm/phytium/phytium_display_drv.c | 434 +++ drivers/gpu/drm/phytium/phytium_display_drv.h | 174 ++ drivers/gpu/drm/phytium/phytium_dp.c | 2639 +++++++++++++++++ drivers/gpu/drm/phytium/phytium_dp.h | 156 + drivers/gpu/drm/phytium/phytium_fb.c | 131 + drivers/gpu/drm/phytium/phytium_fb.h | 26 + drivers/gpu/drm/phytium/phytium_fbdev.c | 151 + drivers/gpu/drm/phytium/phytium_fbdev.h | 13 + drivers/gpu/drm/phytium/phytium_gem.c | 509 ++++ drivers/gpu/drm/phytium/phytium_gem.h | 42 + drivers/gpu/drm/phytium/phytium_panel.c | 420 +++ drivers/gpu/drm/phytium/phytium_panel.h | 46 + drivers/gpu/drm/phytium/phytium_pci.c | 387 +++ drivers/gpu/drm/phytium/phytium_pci.h | 26 + drivers/gpu/drm/phytium/phytium_plane.c | 632 ++++ drivers/gpu/drm/phytium/phytium_plane.h | 46 + drivers/gpu/drm/phytium/phytium_platform.c | 307 ++ drivers/gpu/drm/phytium/phytium_platform.h | 18 + drivers/gpu/drm/phytium/phytium_reg.h | 365 +++ drivers/gpu/drm/phytium/px210_dc.c | 326 ++ drivers/gpu/drm/phytium/px210_dc.h | 30 + drivers/gpu/drm/phytium/px210_dp.c | 920 ++++++ drivers/gpu/drm/phytium/px210_dp.h | 13 + drivers/gpu/drm/phytium/px210_reg.h | 349 +++ include/linux/pci_ids.h | 2 + 40 files changed, 10263 insertions(+) create mode 100644 Documentation/devicetree/bindings/gpu/phytium,dc.yaml create mode 100644 drivers/gpu/drm/phytium/Kconfig create mode 100644 drivers/gpu/drm/phytium/Makefile create mode 100644 drivers/gpu/drm/phytium/pe220x_dc.c create mode 100644 drivers/gpu/drm/phytium/pe220x_dc.h create mode 100644 drivers/gpu/drm/phytium/pe220x_dp.c create mode 100644 drivers/gpu/drm/phytium/pe220x_dp.h create mode 100644 drivers/gpu/drm/phytium/pe220x_reg.h create mode 100644 drivers/gpu/drm/phytium/phytium_crtc.c create mode 100644 drivers/gpu/drm/phytium/phytium_crtc.h create mode 100644 drivers/gpu/drm/phytium/phytium_debugfs.c create mode 100644 drivers/gpu/drm/phytium/phytium_debugfs.h create mode 100644 drivers/gpu/drm/phytium/phytium_display_drv.c create mode 100644 drivers/gpu/drm/phytium/phytium_display_drv.h create mode 100644 drivers/gpu/drm/phytium/phytium_dp.c create mode 100644 drivers/gpu/drm/phytium/phytium_dp.h create mode 100644 drivers/gpu/drm/phytium/phytium_fb.c create mode 100644 drivers/gpu/drm/phytium/phytium_fb.h create mode 100644 drivers/gpu/drm/phytium/phytium_fbdev.c create mode 100644 drivers/gpu/drm/phytium/phytium_fbdev.h create mode 100644 drivers/gpu/drm/phytium/phytium_gem.c create mode 100644 drivers/gpu/drm/phytium/phytium_gem.h create mode 100644 drivers/gpu/drm/phytium/phytium_panel.c create mode 100644 drivers/gpu/drm/phytium/phytium_panel.h create mode 100644 drivers/gpu/drm/phytium/phytium_pci.c create mode 100644 drivers/gpu/drm/phytium/phytium_pci.h 
create mode 100644 drivers/gpu/drm/phytium/phytium_plane.c
 create mode 100644 drivers/gpu/drm/phytium/phytium_plane.h
 create mode 100644 drivers/gpu/drm/phytium/phytium_platform.c
 create mode 100644 drivers/gpu/drm/phytium/phytium_platform.h
 create mode 100644 drivers/gpu/drm/phytium/phytium_reg.h
 create mode 100644 drivers/gpu/drm/phytium/px210_dc.c
 create mode 100644 drivers/gpu/drm/phytium/px210_dc.h
 create mode 100644 drivers/gpu/drm/phytium/px210_dp.c
 create mode 100644 drivers/gpu/drm/phytium/px210_dp.h
 create mode 100644 drivers/gpu/drm/phytium/px210_reg.h

diff --git a/Documentation/devicetree/bindings/gpu/phytium,dc.yaml b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml
new file mode 100644
index 000000000000..5be348f6e23f
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/phytium,dc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Display Controller
+
+maintainers:
+  - Chen Baozi
+
+allOf:
+  - $ref: /schemas/dc/display-controller.yaml#
+
+properties:
+  compatible:
+    const: phytium,dc
+
+  reg:
+    minItems: 1
+    items:
+      - description: Offset and length of the memory mapped registers
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    items:
+      - description: Display controller reference clock source
+
+unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+Example:
+  /memreserve/ 0xf4000000 0x4000000; // (optional)
+
+  dc0@32000000 {
+    compatible = "phytium,dc";
+    reg = <0x0 0x32000000 0x0 0x8000>,
+          <0x0 0xf4000000 0x0 0x4000000>; // (optional)
+    interrupts = ;
+    pipe_mask = 0x3;
+    edp_mask = 0x0;
+  };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 573578db9509..133cfb2bb05c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1045,6 +1045,8 @@ patternProperties:
   "^phicomm,.*":
     description: PHICOMM Co., Ltd.
   "^phytec,.*":
     description: PHYTEC Messtechnik GmbH
+  "^phytium,.*":
+    description: Phytium Technology Co., Ltd.
"^picochip,.*": description: Picochip Ltd "^pine64,.*": diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 353ffa210f0e..2a89adbbf9fa 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -388,6 +388,8 @@ source "drivers/gpu/drm/solomon/Kconfig" source "drivers/gpu/drm/sprd/Kconfig" +source "drivers/gpu/drm/phytium/Kconfig" + config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 12ad840d9e3a..017ff5a6ebe2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -199,3 +199,4 @@ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ +obj-$(CONFIG_DRM_PHYTIUM) += phytium/ diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig new file mode 100644 index 000000000000..5f540962129a --- /dev/null +++ b/drivers/gpu/drm/phytium/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config DRM_PHYTIUM + tristate "DRM Support for Phytium Graphics Card" + depends on DRM && ARCH_PHYTIUM + select DRM_KMS_HELPER + select DRM_DISPLAY_HELPER + select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_HDCP_HELPER + help + Choose this option if you have a phytium graphics card. + This driver provides kernel mode setting and buffer management to userspace. diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile new file mode 100644 index 000000000000..1f68cdcd80da --- /dev/null +++ b/drivers/gpu/drm/phytium/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +phytium-dc-drm-y := phytium_display_drv.o \ + phytium_plane.o \ + phytium_crtc.o \ + phytium_dp.o \ + phytium_fb.o \ + phytium_gem.o \ + phytium_fbdev.o \ + phytium_debugfs.o \ + px210_dp.o \ + phytium_panel.o \ + px210_dc.o \ + phytium_pci.o \ + pe220x_dp.o \ + pe220x_dc.o \ + phytium_platform.o + +obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o +CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only diff --git a/drivers/gpu/drm/phytium/pe220x_dc.c b/drivers/gpu/drm/phytium/pe220x_dc.c new file mode 100644 index 000000000000..8f74199f9a47 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +void pe220x_dc_hw_disable(struct drm_crtc *crtc); + +static const unsigned int pe220x_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, +}; + +static uint64_t pe220x_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t pe220x_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int pe220x_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + 0, PE220X_DC_CMD_REGISTER(phys_pipe)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(phys_pipe), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void pe220x_dc_hw_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* disable pixel clock for bmc mode */ + if (phys_pipe == 0) + pe220x_dc_hw_disable(crtc); + + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); + + if (phys_pipe == 0) { + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + 
udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } else { + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } +} + +void pe220x_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* clear framebuffer */ + phytium_writel_reg(priv, CLEAR_VALUE_BLACK, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + /* disable cursor */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + config = ((config & (~CURSOR_FORMAT_MASK)) | CURSOR_FORMAT_DISABLED); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + mdelay(20); + + /* reset pix clock */ + pe220x_dc_hw_config_pix_clock(crtc, 0); + + if (phys_pipe == 0) { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC0_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC0_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } else { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC1_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC1_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } + udelay(20); +} + +int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + if (mode_cmd->modifier[count] != DRM_FORMAT_MOD_LINEAR) { + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]); + ret = -EINVAL; + } + + return ret; +} + +void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_primary_formats_modifiers; + *formats = pe220x_primary_formats; + *format_count = ARRAY_SIZE(pe220x_primary_formats); +} + +void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_cursor_formats_modifiers; + *formats = pe220x_cursor_formats; + *format_count = ARRAY_SIZE(pe220x_cursor_formats); +} + +void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = 
to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[1] >> U_PREFIX_SHIFT) & U_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[2] >> V_PREFIX_SHIFT) & V_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS); +} + +void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + + config = ((iova >> CURSOR_PREFIX_SHIFT) & CURSOR_PREFIX_MASK); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PE220X_DC_CURSOR_HI_ADDRESS); +} diff --git a/drivers/gpu/drm/phytium/pe220x_dc.h b/drivers/gpu/drm/phytium/pe220x_dc.h new file mode 100644 index 000000000000..f88a054cf0d0 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_DC_H__ +#define __PE220X_DC_H__ + +#define PE220X_DC_PIX_CLOCK_MAX (594000) +#define PE220X_DC_HDISPLAY_MAX 3840 +#define PE220X_DC_VDISPLAY_MAX 2160 +#define PE220X_DC_ADDRESS_MASK 0x7f + +extern void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void pe220x_dc_hw_disable(struct drm_crtc *crtc); +extern int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +extern void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova); +void pe220x_dc_hw_reset(struct drm_crtc *crtc); +#endif /* __PE220X_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_dp.c b/drivers/gpu/drm/phytium/pe220x_dp.c new file mode 100644 index 000000000000..54a6e8ac454b --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_dp.h" +#include "pe220x_dp.h" + +static uint8_t pe220x_dp_source_lane_count[2] = {1, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, /* CP_PADJ */ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, /* CP_IADJ */ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, /* FILT_PADJ */ + {0x0061, 0x006C, 0x006C, 0x0051}, /* INTDIV */ + {0x3333, 0x0000, 0x0000, 0x0000}, /* FRACDIVL */ + {0x0000, 0x0000, 0x0000, 0x0000}, /* FRACDIVH */ + {0x0042, 0x0048, 0x0048, 0x0036}, /* HIGH_THR */ + {0x0002, 0x0002, 0x0002, 0x0002}, /* PDIAG_CTRL */ + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, /* VCOCAL_PLLCNT_START */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PEFCNT */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PLLCNT_START */ + {0x0005, 0x0005, 0x0005, 0x0005}, /* LOCK_PLLCNT_THR */ +}; + +/* [link_rate][swing][emphasis] */ +static int mgnfs_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x001f, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +/* [link_rate][swing][emphasis] */ +static int cpost_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x0005, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int pe220x_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, uint32_t link_rate) +{ + int port = phytium_dp->port%2; + int i = 0, data, tmp, tmp1, index = 0, mask = 0; + int timeout = 500, ret = 0; + + /* set pma powerdown */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A3_POWERDOWN3 << (i * A3_POWERDOWN3_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* lane pll disable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + mask |= (((1<source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* set pma power active */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if 
(timeout == 0) { + DRM_ERROR("dp(%d) phy pll lock failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_set_phy_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set) +{ + int port = phytium_dp->port % 3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + default: + voltage_swing = 0; + break; + } + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + default: + pre_emphasis = 0; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), LOCK); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_TXCC_CTRL(port), TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DRV(port), TX_DRV); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_MGNFS(port), + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_CPOST(port), + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), UNLOCK); +} + +static int pe220x_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + phytium_phy_writel(phytium_dp, PE220X_PHY_APB_RESET(port), APB_RESET); + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET); + + /* config lane to dp mode */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (LANE_BIT << (i * LANE_BIT_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_MODE(port), data); + + /* pll clock enable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* config input 20 bit */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (BIT_20 << (i * BIT_20_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA_WIDTH(port), data); + + /* config lane active power state */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* link reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_LINK_RESET(port), LINK_RESET); + + phytium_phy_writel(phytium_dp, PE220X_PHY_SGMII_DPSEL_INIT(port), DP_SEL); + + /* config single link */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_CFG(port), SINGLE_LINK); + + /* pipe reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET_DEASSERT); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if 
(timeout == 0) { + DRM_ERROR("reset dp(%d) phy failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_poweron_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void pe220x_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void pe220x_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void pe220x_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t pe220x_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int pe220x_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0; + int ret = 0; + + if (level > PE220X_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); +out: + return ret; +} + +bool pe220x_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + return false; +} + +int pe220x_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private 
*priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, DP_RESET, group_offset, PE220X_DP_CONTROLLER_RESET); + udelay(500); + phytium_writel_reg(priv, AUX_CLK_DIVIDER_100, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); + phytium_writel_reg(priv, SUPPORT_EDP_1_4, group_offset, PHYTIUM_EDP_CRC_ENABLE); + + return 0; +} + +uint8_t pe220x_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return pe220x_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func pe220x_dp_funcs = { + .dp_hw_get_source_lane_count = pe220x_dp_hw_get_source_lane_count, + .dp_hw_reset = pe220x_dp_hw_reset, + .dp_hw_spread_is_enable = pe220x_dp_hw_spread_is_enable, + .dp_hw_set_backlight = pe220x_dp_hw_set_backlight, + .dp_hw_get_backlight = pe220x_dp_hw_get_backlight, + .dp_hw_disable_backlight = pe220x_dp_hw_disable_backlight, + .dp_hw_enable_backlight = pe220x_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = pe220x_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = pe220x_dp_hw_poweron_panel, + .dp_hw_init_phy = pe220x_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = pe220x_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = pe220x_dp_hw_set_phy_lane_and_rate, +}; + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &pe220x_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/pe220x_dp.h b/drivers/gpu/drm/phytium/pe220x_dp.h new file mode 100644 index 000000000000..6b763d996631 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_DP_H__ +#define __PE220X_DP_H__ + +#define PE220X_DP_BACKLIGHT_MAX 100 + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PE220X_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_reg.h b/drivers/gpu/drm/phytium/pe220x_reg.h new file mode 100644 index 000000000000..88fc9c7383a5 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_reg.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display engine register + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PE220X_REG_H__ +#define __PE220X_REG_H__ + +#include "phytium_reg.h" + +/* dc register */ +#define PE220X_DC_CLOCK_CONTROL 0x0000 +#define DC1_CORE_RESET (1<<18) +#define DC0_CORE_RESET (1<<17) +#define AXI_RESET (1<<16) +#define AHB_RESET (1<<12) + +#define PE220X_DC_CMD_REGISTER(pipe) (PE220X_DC_BASE(0) + 0x00F0 + 0x4*(pipe)) +#define FLAG_REPLY (1<<31) +#define FLAG_REQUEST (1<<30) +#define CMD_PIXEL_CLOCK (0x0 << 28) +#define CMD_BACKLIGHT (0x1 << 28) +#define CMD_DC_DP_RESET (0x3 << 28) +#define BACKLIGHT_SHIFT 21 +#define BACKLIGHT_MASK 0x7f +#define BACKLIGHT_MAX 100 +#define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) +#define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) +#define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) +#define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) +#define PIXEL_CLOCK_MASK (0x1fffff) + +#define PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS 0x1404 +#define PREFIX_MASK 0xff +#define PREFIX_SHIFT 32 + +#define PE220X_DC_CURSOR_HI_ADDRESS 0x1490 +#define CURSOR_PREFIX_MASK 0xff +#define CURSOR_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS 0x1534 +#define U_PREFIX_MASK 0xff +#define U_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS 0x153c +#define V_PREFIX_MASK 0xff +#define V_PREFIX_SHIFT 32 + +/* dp register */ +#define PE220X_DP_CONTROLLER_RESET 0x0850 +#define DP_RESET 0x1 + +/* address transform register */ +#define PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define SRC_ADDR_OFFSET 22 +#define SRC_ADDR_MASK 0xffffffffff + +#define PE220X_DC_ADDRESS_TRANSFORM_SIZE 0x4 +#define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) +#define SIZE_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR 0x8 +#define DST_ADDR_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 +#define DC_DP_RESET_STATUS(pipe) (1 << pipe) +#define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) + +#define PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c +#define BACKLIGHT_VALUE_MASK (0x7f) +#define BACKLIGHT_VALUE_SHIFT 16 + +/* phy register start */ +#define PE220X_PHY_BASE(pipe) (0x100000*pipe) + +#define PE220X_PHY_PIPE_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40254) +#define RESET 0x0 +#define RESET_DEASSERT 0x1 + +#define PE220X_PHY_MODE(pipe) (PE220X_PHY_BASE(pipe) + 0x40034) +#define LANE_BIT (0x3) +#define LANE_BIT_SHIFT 0x2 + +#define PE220X_PHY_LINK_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x40044) +#define LANE_MASTER 0x1 +#define LANE_MASTER_SHIFT 1 + +#define PE220X_PHY_PLL_EN(pipe) (PE220X_PHY_BASE(pipe) + 0x40214) +#define PLL_EN 0x1 +#define PLL_EN_SHIFT 1 + +#define PE220X_PHY_PMA_WIDTH(pipe) (PE220X_PHY_BASE(pipe) + 0x4021c) +#define BIT_20 0x5 +#define BIT_20_SHIFT 4 + +#define PE220X_PHY_PLL_SOURCE_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x4004C) + +#define PE220X_PHY_PMA0_POWER(pipe) (PE220X_PHY_BASE(pipe) + 0x402bc) +#define A0_ACTIVE 0x1 +#define A0_ACTIVE_SHIFT 8 +#define A3_POWERDOWN3 0x8 +#define A3_POWERDOWN3_SHIFT 8 + +#define PE220X_PHY_LINK_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40258) +#define LINK_RESET 0x1 +#define LINK_RESET_MASK 0x1 +#define LINTK_RESET_SHIFT 0x1 + +#define PE220X_PHY_SGMII_DPSEL_INIT(pipe) (PE220X_PHY_BASE(pipe) + 0x40260) +#define DP_SEL 0x1 + +#define PE220X_PHY_APB_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40250) +#define APB_RESET 0x1 + +/* phy origin register */ +#define PE220X_PHY_PLL_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x30038) +#define SINGLE_LINK 0x0 + +#define PE220X_PHY_PMA_CONTROL(pipe) (PE220X_PHY_BASE(pipe) + 0x3800c) +#define CONTROL_ENABLE 0x1 +#define CONTROL_ENABLE_MASK 0x1 +#define 
CONTROL_ENABLE_SHIFT 0x1 + +#define PE220X_PHY_PMA_CONTROL2(pipe) (PE220X_PHY_BASE(pipe) + 0x38004) +#define PLL0_LOCK_DONE (0x1 << 6) + +#define PE220X_PHY_PLL0_CLK_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0X684) +#define PLL_LINK_RATE_162000 0xf01 +#define PLL_LINK_RATE_270000 0x701 +#define PLL_LINK_RATE_540000 0x301 +#define PLL_LINK_RATE_810000 0x200 + +#define PE220X_PHY_HSCLK0_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x18398) +#define HSCLK_LINK_0 0x0 +#define HSCLK_LINK_1 0x1 + +#define PE220X_PHY_HSCLK0_DIV(pipe) (PE220X_PHY_BASE(pipe) + 0x1839c) +#define HSCLK_LINK_RATE_162000 0x2 +#define HSCLK_LINK_RATE_270000 0x1 +#define HSCLK_LINK_RATE_540000 0x0 +#define HSCLK_LINK_RATE_810000 0x0 + +#define PE220X_PHY_PLLDRC0_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18394) +#define PLLDRC_LINK0 0x1 +#define PLLDRC_LINK1 0x9 + +#define PE220X_PHY_PLL0_DSM_M0(pipe) (PE220X_PHY_BASE(pipe) + 0x250) +#define PLL0_DSM_M0 0x4 +#define PE220X_PHY_PLL0_VCOCAL_START(pipe) (PE220X_PHY_BASE(pipe) + 0x218) +#define PLL0_VCOCAL_START 0xc5e +#define PE220X_PHY_PLL0_VCOCAL_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x208) +#define PLL0_VCOCAL_CTRL 0x3 + +#define PE220X_PHY_PLL0_CP_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x690) +#define PE220X_PHY_PLL0_CP_IADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x694) +#define PE220X_PHY_PLL0_CP_FILT_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x698) +#define PE220X_PHY_PLL0_INTDIV(pipe) (PE220X_PHY_BASE(pipe) + 0x240) +#define PE220X_PHY_PLL0_FRACDIVL(pipe) (PE220X_PHY_BASE(pipe) + 0x244) +#define PE220X_PHY_PLL0_FRACDIVH(pipe) (PE220X_PHY_BASE(pipe) + 0x248) +#define PE220X_PHY_PLL0_HIGH_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x24c) +#define PE220X_PHY_PLL0_PDIAG_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x680) +#define PE220X_PHY_PLL0_VCOCAL_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x220) +#define PE220X_PHY_PLL0_LOCK_PEFCNT(pipe) (PE220X_PHY_BASE(pipe) + 0x270) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x278) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x27c) + +#define PE220X_PHY_PLL0_TX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x18400) +#define PLL0_TX_PSC_A0 0xfb +#define PE220X_PHY_PLL0_TX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x18408) +#define PLL0_TX_PSC_A2 0x4aa +#define PE220X_PHY_PLL0_TX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x1840c) +#define PLL0_TX_PSC_A3 0x4aa +#define PE220X_PHY_PLL0_RX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x28000) +#define PLL0_RX_PSC_A0 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x28008) +#define PLL0_RX_PSC_A2 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x2800C) +#define PLL0_RX_PSC_A3 0x0 +#define PE220X_PHY_PLL0_RX_PSC_CAL(pipe) (PE220X_PHY_BASE(pipe) + 0x28018) +#define PLL0_RX_PSC_CAL 0x0 + +#define PE220X_PHY_PLL0_XCVR_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x183a8) +#define PLL0_XCVR_CTRL 0xf + +#define PE220X_PHY_PLL0_RX_GCSM1_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28420) +#define PLL0_RX_GCSM1_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_GCSM2_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28440) +#define PLL0_RX_GCSM2_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_PERGCSM_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28460) +#define PLL0_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PE220X_PHY_PLL0_TX_DIAG_ACYA(pipe) (PE220X_PHY_BASE(pipe) + 0x1879c) +#define LOCK 1 +#define UNLOCK 0 + +#define PE220X_PHY_PLL0_TX_TXCC_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18100) +#define TX_TXCC_CTRL 0x8a4 + +#define PE220X_PHY_PLL0_TX_DRV(pipe) (PE220X_PHY_BASE(pipe) + 0x18318) +#define TX_DRV 0x3 + 
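+/*
+ * Illustrative sketch only (not a verbatim sequence from this patch): a
+ * swing/pre-emphasis update is expected to unlock TX_DIAG_ACYA, program the
+ * MGNFS/CPOST registers below, then re-lock. Values and ordering are
+ * assumed here; the actual sequence is implemented in pe220x_dp.c:
+ *
+ *   phytium_phy_writel(dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), UNLOCK);
+ *   phytium_phy_writel(dp, PE220X_PHY_PLL0_TX_MGNFS(port), mgnfs);
+ *   phytium_phy_writel(dp, PE220X_PHY_PLL0_TX_CPOST(port), cpost);
+ *   phytium_phy_writel(dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), LOCK);
+ */
+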
+#define PE220X_PHY_PLL0_TX_MGNFS(pipe) (PE220X_PHY_BASE(pipe) + 0x18140)
+
+#define PE220X_PHY_PLL0_TX_CPOST(pipe) (PE220X_PHY_BASE(pipe) + 0x18130)
+
+#endif /* __PE220X_REG_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c
new file mode 100644
index 000000000000..628357837da6
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_crtc.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include "phytium_display_drv.h"
+#include "phytium_crtc.h"
+#include "phytium_plane.h"
+#include "phytium_dp.h"
+#include "px210_dc.h"
+#include "pe220x_dc.h"
+#include "phytium_reg.h"
+
+#define MAXKERNELSIZE 9
+#define SUBPIXELINDEXBITS 5
+#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS)
+#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1)
+#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2)
+#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t))
+#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1))
+#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8))
+#define PHYPI 3.14159265358979323846f
+
+#define MATH_Add(X, Y) ((float)((X) + (Y)))
+#define MATH_Multiply(X, Y) ((float)((X) * (Y)))
+#define MATH_Divide(X, Y) ((float)((X) / (Y)))
+#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y))
+#define MATH_I2Float(X) ((float)(X))
+
+struct filter_blit_array {
+ uint8_t kernelSize;
+ uint32_t scaleFactor;
+ uint32_t *kernelStates;
+};
+
+static void phytium_crtc_gamma_set(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+ int phys_pipe = phytium_crtc->phys_pipe;
+ uint32_t group_offset = priv->dc_reg_base[phys_pipe];
+ uint32_t config = 0;
+ struct drm_crtc_state *state = crtc->state;
+ struct drm_color_lut *lut;
+ int i;
+
+ if (state->gamma_lut) {
+ if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX),
+ "gamma size does not match\n"))
+ return;
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ for (i = 0; i < GAMMA_INDEX_MAX; i++) {
+ phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX);
+ config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT;
+ config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT);
+ config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT);
+ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA);
+ }
+ }
+}
+
+static void phytium_crtc_gamma_init(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+ int phys_pipe = phytium_crtc->phys_pipe;
+ uint32_t group_offset = priv->dc_reg_base[phys_pipe];
+ uint32_t config = 0;
+ uint16_t *red, *green, *blue;
+ int i;
+
+ if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size does not match\n"))
+ return;
+
+ red = crtc->gamma_store;
+ green = red + crtc->gamma_size;
+ blue = green + crtc->gamma_size;
+
+ for (i = 0; i < GAMMA_INDEX_MAX; i++) {
+ phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX);
+ config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT;
+ config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT);
+ config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT);
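+ /* commit the packed R/G/B value for the LUT slot selected by the INDEX write */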
+ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA);
+ }
+}
+
+static void phytium_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(phytium_crtc);
+}
+
+struct drm_crtc_state *
+phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct phytium_crtc_state *phytium_crtc_state = NULL;
+
+ phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state),
+ GFP_KERNEL);
+ if (!phytium_crtc_state)
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc,
+ &phytium_crtc_state->base);
+
+ return &phytium_crtc_state->base;
+}
+
+void
+phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct phytium_crtc_state *phytium_crtc_state =
+ to_phytium_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(phytium_crtc_state);
+}
+
+static int phytium_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+ int phys_pipe = phytium_crtc->phys_pipe;
+
+ phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE);
+
+ return 0;
+}
+
+static void phytium_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+ int phys_pipe = phytium_crtc->phys_pipe;
+
+ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe],
+ PHYTIUM_DC_INT_ENABLE);
+}
+
+static const struct drm_crtc_funcs phytium_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = phytium_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = phytium_crtc_atomic_destroy_state,
+ .enable_vblank = phytium_enable_vblank,
+ .disable_vblank = phytium_disable_vblank,
+};
+
+static void
+phytium_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *conn;
+ struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+ int phys_pipe = phytium_crtc->phys_pipe;
+ uint32_t group_offset = priv->dc_reg_base[phys_pipe];
+ int config = 0, i = 0;
+
+ for_each_new_connector_in_state(state, conn, new_conn_state, i) {
+ if (new_conn_state->crtc != crtc)
+ continue;
+
+ switch (conn->display_info.bpc) {
+ case 10:
+ phytium_crtc->bpc = DP_RGB101010;
+ break;
+ case 6:
+ phytium_crtc->bpc = DP_RGB666;
+ break;
+ default:
+ phytium_crtc->bpc = DP_RGB888;
+ break;
+ }
+ }
+
+ /* config pix clock */
+ phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock);
+
+ config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT)
+ | ((mode->crtc_htotal & HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT);
+ phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY);
+ config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT)
+ | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT)
+ | HSYNC_PULSE_ENABLED;
+ config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 
0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + if (crtc->state->gamma_lut) + phytium_crtc_gamma_set(crtc); + else + phytium_crtc_gamma_init(crtc); + + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); +} + +static void +phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_vblank_off(crtc); + phytium_crtc->dc_hw_disable(crtc); +} + +static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode) +{ + if (native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal) { + drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; + drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; + drm_mode->crtc_clock = native_mode->crtc_clock; + drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; + drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; + drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; + drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; + drm_mode->crtc_htotal = native_mode->crtc_htotal; + drm_mode->crtc_hskew = native_mode->crtc_hskew; + drm_mode->crtc_vblank_start = native_mode->crtc_vblank_start; + drm_mode->crtc_vblank_end = native_mode->crtc_vblank_end; + drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; + drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; + drm_mode->crtc_vtotal = native_mode->crtc_vtotal; + } +} + +static int +phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_plane_state *new_plane_state = NULL; + int ret = 0; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + uint32_t i; + struct phytium_dp_device *phytium_dp = NULL; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (new_con_state->crtc == crtc) { + phytium_dp = connector_to_dp_device(connector); + break; + } + } + if (phytium_dp) + phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, + &phytium_dp->native_mode); + + new_plane_state = drm_atomic_get_new_plane_state(crtc_state->state, + 
crtc->primary); + if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { + ret = -EINVAL; + goto fail; + } + + return 0; +fail: + return ret; +} + +static void +phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } +} + +static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct phytium_crtc_state *phytium_crtc_state = NULL; + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); + + if (crtc->state->color_mgmt_changed) + phytium_crtc_gamma_set(crtc); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); + spin_lock_irq(&dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&dev->event_lock); + } +} + +static enum drm_mode_status +phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (mode->crtc_clock > priv->info.crtc_clock_max) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + +static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .mode_valid = phytium_crtc_mode_valid, + .atomic_check = phytium_crtc_atomic_check, + .atomic_begin = phytium_crtc_atomic_begin, + .atomic_flush = phytium_crtc_atomic_flush, + .atomic_enable = phytium_crtc_atomic_enable, + .atomic_disable = phytium_crtc_atomic_disable, +}; + +void phytium_crtc_resume(struct drm_device *drm_dev) +{ + struct drm_crtc *crtc; + struct phytium_crtc *phytium_crtc = NULL; + + drm_for_each_crtc(crtc, drm_dev) { + phytium_crtc = to_phytium_crtc(crtc); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(crtc); + phytium_crtc_gamma_init(crtc); + } +} + +int phytium_crtc_init(struct drm_device *dev, int phys_pipe) +{ + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_display_private *priv = dev->dev_private; + int ret; + + phytium_crtc = 
kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); + if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); + if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } + + phytium_crtc_state->base.crtc = &phytium_crtc->base; + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_crtc->dc_hw_config_pix_clock = px210_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = px210_dc_hw_disable; + phytium_crtc->dc_hw_reset = NULL; + priv->dc_reg_base[phys_pipe] = PX210_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = PX210_DCREQ_BASE(phys_pipe); + priv->address_transform_base = PX210_ADDRESS_TRANSFORM_BASE; + } else if (IS_PE220X(priv)) { + phytium_crtc->dc_hw_config_pix_clock = pe220x_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = pe220x_dc_hw_disable; + phytium_crtc->dc_hw_reset = pe220x_dc_hw_reset; + priv->dc_reg_base[phys_pipe] = PE220X_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = 0x0; + priv->address_transform_base = PE220X_ADDRESS_TRANSFORM_BASE; + } + + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); + DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_primary; + } + + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + + ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, + &phytium_primary_plane->base, + &phytium_cursor_plane->base, + &phytium_crtc_funcs, + "phys_pipe %d", phys_pipe); + + if (ret) { + DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_crtc_init; + } + drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); + drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); + drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(&phytium_crtc->base); + phytium_crtc_gamma_init(&phytium_crtc->base); + + return 0; + +failed_crtc_init: +failed_create_cursor: + /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ +failed_create_primary: + kfree(phytium_crtc_state); +failed_malloc_crtc_state: + kfree(phytium_crtc); +failed_malloc_crtc: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h new file mode 100644 index 000000000000..78a841c1c684 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
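+ *
+ * The dc_hw_* callbacks in struct phytium_crtc below are hooked up per SoC
+ * (PX210 or PE220x) by phytium_crtc_init().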
+ */
+
+#ifndef __PHYTIUM_CRTC_H__
+#define __PHYTIUM_CRTC_H__
+
+struct phytium_crtc {
+ struct drm_crtc base;
+ int phys_pipe;
+ unsigned int bpc;
+
+ /* scale */
+ uint32_t src_width;
+ uint32_t src_height;
+ uint32_t dst_width;
+ uint32_t dst_height;
+ uint32_t dst_x;
+ uint32_t dst_y;
+ bool scale_enable;
+ bool reserve[3];
+
+ void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock);
+ void (*dc_hw_disable)(struct drm_crtc *crtc);
+ void (*dc_hw_reset)(struct drm_crtc *crtc);
+};
+
+struct phytium_crtc_state {
+ struct drm_crtc_state base;
+};
+
+#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base)
+#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base)
+
+void phytium_crtc_resume(struct drm_device *drm_dev);
+int phytium_crtc_init(struct drm_device *dev, int phys_pipe);
+#endif /* __PHYTIUM_CRTC_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c
new file mode 100644
index 000000000000..eedad22c1536
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_debugfs.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include "phytium_display_drv.h"
+#include "phytium_dp.h"
+#include "phytium_reg.h"
+
+const char *const mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT] = {
+ "Memory_Vram_Total",
+ "Memory_Vram_Alloc",
+ "Memory_System_Carveout_Total",
+ "Memory_System_Carveout_Alloc",
+ "Memory_System_Alloc",
+};
+
+static ssize_t
+phytium_dp_register_write(struct file *filp,
+ const char __user *ubuf,
+ size_t len,
+ loff_t *ppos)
+{
+ char tmp[16];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ memset(tmp, 0, sizeof(tmp));
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+ tmp[len] = '\0';
+
+ /* the written value is accepted but currently ignored */
+ return len;
+}
+
+static int phytium_dp_register_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+ struct drm_device *dev = phytium_dp->dev;
+ struct phytium_display_private *priv = dev->dev_private;
+ int port = phytium_dp->port;
+ uint32_t group_offset = priv->dp_reg_base[port];
+
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_M_VID,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_N_VID,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_DATA_COUNT,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL,
+ phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL));
+ seq_printf(m, "addr:h'0x%08x h'0x%08x\n", 
PHYTIUM_DP_MAIN_LINK_VRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; +} + +static int phytium_dp_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_register_show, inode->i_private); +} + +static const struct file_operations phytium_dp_register_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_register_open, + .read = seq_read, + .llseek = seq_lseek, + .release = 
single_release, + .write = phytium_dp_register_write, +}; + +static ssize_t +phytium_dp_trigger_train_fail_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) + return -EINVAL; + + return len; +} + +static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); + seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); + + return 0; +} + +static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); +} + +static const struct file_operations phytium_dp_trigger_train_fail_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_trigger_train_fail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_trigger_train_fail_write, +}; + +static int phytium_edp_backlight_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_backlight_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_backlight_show, inode->i_private); +} + +static const struct file_operations phytium_edp_backlight_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_backlight_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int phytium_edp_power_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_power_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_power_show, inode->i_private); +} + +static const struct file_operations phytium_edp_power_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_power_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct dpcd_block { + /* DPCD dump start address. */ + unsigned int offset; + /* DPCD dump end address, inclusive. If unset, .size will be used. */ + unsigned int end; + /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ + size_t size; + /* Only valid for eDP. 
*/ + bool edp; +}; + +static const struct dpcd_block phytium_dpcd_debug[] = { + { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, + { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, + { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, + { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, + { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, + { .offset = DP_SET_POWER }, + { .offset = DP_EDP_DPCD_REV }, + { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, + { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, + { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, + { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, + { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, +}; + +static int phytium_dpcd_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + uint8_t buf[16], i; + ssize_t err; + + if (connector->status != connector_status_connected) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { + const struct dpcd_block *b = &phytium_dpcd_debug[i]; + size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); + + if (WARN_ON(size > sizeof(buf))) + continue; + + err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); + if (err <= 0) { + DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", + size, b->offset, err); + continue; + } + + seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + } + + return 0; +} + +static int phytium_dpcd_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dpcd_show, inode->i_private); +} + +static const struct file_operations phytium_dpcd_fops = { + .owner = THIS_MODULE, + .open = phytium_dpcd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +phytium_dp_state_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "port number: %d\n", phytium_dp->port); + seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); + seq_printf(m, "max_source_rates: %d\n", + phytium_dp->source_rates[phytium_dp->num_source_rates-1]); + if (connector->status == connector_status_connected) { + seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); + seq_printf(m, "max_sink_rates: %d\n", + phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); + seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); + seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); + seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); + seq_printf(m, "has_audio: %s\n", phytium_dp->has_audio?"yes":"no"); + } + + return 0; +} + +static int phytium_dp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_state_show, inode->i_private); +} + +static const struct file_operations phytium_dp_state_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + 
.write = phytium_dp_state_write, +}; + +static const struct phytium_debugfs_files { + const char *name; + const struct file_operations *fops; +} phytium_debugfs_connector_files[] = { + {"dp_state", &phytium_dp_state_fops}, + {"dpcd", &phytium_dpcd_fops}, + {"dp_register", &phytium_dp_register_fops}, + {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops}, +}; + +static const struct phytium_debugfs_files phytium_edp_debugfs_connector_files[] = { + {"edp_power", &phytium_edp_power_fops}, + {"edp_backlight", &phytium_edp_backlight_fops}, +}; + +int phytium_debugfs_connector_add(struct drm_connector *connector) +{ + struct dentry *root = connector->debugfs_entry; + struct dentry *ent; + int i; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!root) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + if (phytium_dp->is_edp) + for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_edp_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + return 0; +} + +static int phytium_mem_state_show(struct seq_file *m, void *data) +{ + struct phytium_display_private *priv = m->private; + uint8_t i; + + for (i = 0; i < ARRAY_SIZE(mem_state); i++) + seq_printf(m, "%-34s %10lld\n", mem_state[i], priv->mem_state[i]); + + return 0; +} + +static int phytium_mem_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_mem_state_show, inode->i_private); +} + +static const struct file_operations phytium_mem_state_fops = { + .owner = THIS_MODULE, + .open = phytium_mem_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct phytium_debugfs_files phytium_debugfs_display_files[] = { + {"mem_state", &phytium_mem_state_fops}, +}; + +int phytium_debugfs_display_register(struct phytium_display_private *priv) +{ + struct drm_minor *minor = priv->dev->primary; + struct dentry *root = minor->debugfs_root; + struct dentry *ent; + + if (!root) + return -ENODEV; + + ent = debugfs_create_file(phytium_debugfs_display_files[0].name, + 0644, + root, + priv, + phytium_debugfs_display_files[0].fops); + if (!ent) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h new file mode 100644 index 000000000000..dc784bc557a7 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DEBUGFS_H__ +#define __PHYTIUM_DEBUGFS_H__ + +int phytium_debugfs_connector_add(struct drm_connector *connector); +int phytium_debugfs_display_register(struct phytium_display_private *priv); + +#endif /* __PHYTIUM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c new file mode 100644 index 000000000000..60c7a20e7ca2 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
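+ *
+ * Mailbox handshake sketch (illustrative only; the request write shown here
+ * is an assumed caller pattern, while the polling helper
+ * phytium_wait_cmd_done() is defined in this file):
+ *
+ *   phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | val, 0, reg);
+ *   ret = phytium_wait_cmd_done(priv, reg, FLAG_REQUEST, FLAG_REPLY);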
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "phytium_display_drv.h"
+#include "phytium_plane.h"
+#include "phytium_crtc.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "phytium_fb.h"
+#include "phytium_fbdev.h"
+#include "phytium_reg.h"
+#include "phytium_pci.h"
+#include "phytium_platform.h"
+#include "phytium_debugfs.h"
+
+int dc_fake_mode_enable;
+module_param(dc_fake_mode_enable, int, 0644);
+MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)");
+
+int dc_fast_training_check = 1;
+module_param(dc_fast_training_check, int, 0644);
+MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)");
+
+int num_source_rates = 4;
+module_param(num_source_rates, int, 0644);
+MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)");
+
+int source_max_lane_count = 4;
+module_param(source_max_lane_count, int, 0644);
+MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)");
+
+int link_dynamic_adjust;
+module_param(link_dynamic_adjust, int, 0644);
+MODULE_PARM_DESC(link_dynamic_adjust, "dynamically select the training parameters according to the display mode (0-disabled; 1-enabled; default-0)");
+
+int phytium_wait_cmd_done(struct phytium_display_private *priv,
+ uint32_t register_offset,
+ uint32_t request_bit,
+ uint32_t reply_bit)
+{
+ int timeout = 500, config = 0, ret = 0;
+
+ do {
+ mdelay(1);
+ timeout--;
+ config = phytium_readl_reg(priv, 0, register_offset);
+ } while ((!(config & reply_bit)) && timeout);
+
+ phytium_writel_reg(priv, config & (~request_bit), 0, register_offset);
+
+ if (timeout == 0) {
+ DRM_ERROR("wait cmd reply timeout\n");
+ ret = -EBUSY;
+ } else {
+ timeout = 500;
+ do {
+ mdelay(1);
+ timeout--;
+ config = phytium_readl_reg(priv, 0, register_offset);
+ } while ((config & reply_bit) && timeout);
+ if (timeout == 0) {
+ DRM_ERROR("clear cmd timeout\n");
+ ret = -EBUSY;
+ }
+ }
+ mdelay(5);
+
+ return ret;
+}
+
+static void phytium_irq_preinstall(struct drm_device *dev)
+{
+ struct phytium_display_private *priv = dev->dev_private;
+ int i;
+
+ for_each_pipe_masked(priv, i) {
+ /* read (and discard) the interrupt status */
+ phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE);
+ }
+}
+
+static void phytium_irq_uninstall(struct drm_device *dev)
+{
+ struct phytium_display_private *priv = dev->dev_private;
+ int i;
+
+ for_each_pipe_masked(priv, i) {
+ /* read (and discard) the interrupt status */
+ phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+ phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE);
+ }
+}
+
+static irqreturn_t phytium_display_irq_handler(int irq, void *data)
+{
+ struct drm_device *dev = data;
+ struct phytium_display_private *priv = dev->dev_private;
+ uint32_t enabled = 0;
+ int i = 0, virt_pipe = 0;
+ irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE;
+
+ for_each_pipe_masked(priv, i) {
+ enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+ if (enabled & INT_STATUS) {
+ virt_pipe = phytium_get_virt_pipe(priv, i);
+ if (virt_pipe < 0)
+ return IRQ_NONE;
+ drm_handle_vblank(dev, virt_pipe);
+ ret = IRQ_HANDLED;
+ if (priv->dc_hw_clear_msi_irq)
+ priv->dc_hw_clear_msi_irq(priv, i);
+ }
+ }
+
+ ret1 = phytium_dp_hpd_irq_handler(priv);
+ if (ret == IRQ_HANDLED || 
ret1 == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static const struct drm_mode_config_funcs phytium_mode_funcs = { + .fb_create = phytium_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void phytium_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = { + .atomic_commit_tail = phytium_atomic_commit_tail, +}; + +static int phytium_modeset_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i = 0, ret; + + drm_mode_config_init(dev); + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; + dev->mode_config.cursor_width = 32; + dev->mode_config.cursor_height = 32; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + dev->mode_config.fb_modifiers_not_supported = false; + + dev->mode_config.funcs = &phytium_mode_funcs; + dev->mode_config.helper_private = &phytium_mode_config_helpers; + + for_each_pipe_masked(priv, i) { + ret = phytium_crtc_init(dev, i); + if (ret) { + DRM_ERROR("phytium_crtc_init(pipe %d) return failed\n", i); + goto failed_crtc_init; + } + } + + for_each_pipe_masked(priv, i) { + ret = phytium_dp_init(dev, i); + if (ret) { + DRM_ERROR("phytium_dp_init(pipe %d) return failed\n", i); + goto failed_dp_init; + } + } + + drm_mode_config_reset(dev); + + return 0; +failed_dp_init: +failed_crtc_init: + drm_mode_config_cleanup(dev); + return ret; +} + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) +{ + int i = 0; + int virt_pipe = 0; + + for_each_pipe_masked(priv, i) { + if (i != phys_pipe) + virt_pipe++; + else + return virt_pipe; + } + + DRM_ERROR("%s %d failed\n", __func__, phys_pipe); + return -EINVAL; +} + +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) +{ + int i = 0; + int tmp = 0; + + for_each_pipe_masked(priv, i) { + if (tmp != virt_pipe) + tmp++; + else + return i; + } + + DRM_ERROR("%s %d failed\n", __func__, virt_pipe); + return -EINVAL; +} + +static int phytium_display_load(struct drm_device *dev, unsigned long flags) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + ret = drm_vblank_init(dev, priv->info.num_pipes); + if (ret) { + DRM_ERROR("vblank init failed\n"); + goto failed_vblank_init; + } + + ret = phytium_modeset_init(dev); + if (ret) { + DRM_ERROR("phytium_modeset_init failed\n"); + goto failed_modeset_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + phytium_irq_preinstall(dev); + ret = request_irq(priv->irq, phytium_display_irq_handler, + IRQF_SHARED, dev->driver->name, dev); + if (ret) { + DRM_ERROR("install irq failed\n"); + goto failed_irq_install; + } + + ret = phytium_drm_fbdev_init(dev); + if (ret) + DRM_ERROR("failed to init dev\n"); + + phytium_debugfs_display_register(priv); + + return ret; + +failed_irq_install: + drm_mode_config_cleanup(dev); +failed_modeset_init: +failed_vblank_init: + 
return ret; +} + +static void phytium_display_unload(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + + phytium_drm_fbdev_fini(dev); + phytium_irq_uninstall(dev); + free_irq(priv->irq, dev); + drm_mode_config_cleanup(dev); +} + +static const struct drm_ioctl_desc phytium_ioctls[] = { + /* for test, none so far */ +}; + +static const struct file_operations phytium_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = phytium_gem_mmap, +}; + +struct drm_driver phytium_display_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM, + .load = phytium_display_load, + .unload = phytium_display_unload, + .lastclose = drm_fb_helper_lastclose, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, + .dumb_create = phytium_gem_dumb_create, + .ioctls = phytium_ioctls, + .num_ioctls = ARRAY_SIZE(phytium_ioctls), + .fops = &phytium_drm_driver_fops, + .name = DRV_NAME, + .desc = DRV_DESC, + .date = DRV_DATE, + .major = DRV_MAJOR, + .minor = DRV_MINOR, +}; + +static void phytium_display_shutdown(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} + +static int phytium_display_pm_suspend(struct drm_device *dev) +{ + struct drm_atomic_state *state; + struct phytium_display_private *priv = dev->dev_private; + int ret, ret1; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + state = drm_atomic_helper_suspend(dev); + if (IS_ERR(state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); + ret = PTR_ERR(state); + goto suspend_failed; + } + dev->mode_config.suspend_state = state; + ret = phytium_gem_suspend(dev); + if (ret) { + DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); + goto gem_suspend_failed; + } + + return 0; + +gem_suspend_failed: + ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret1) + DRM_ERROR("Failed to resume (%d)\n", ret1); + dev->mode_config.suspend_state = NULL; +suspend_failed: + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return ret; +} + +static int phytium_display_pm_resume(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + if (WARN_ON(!dev->mode_config.suspend_state)) + return -EINVAL; + + ret = phytium_dp_resume(dev); + if (ret) + return -EIO; + + phytium_crtc_resume(dev); + phytium_gem_resume(dev); + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + + dev->mode_config.suspend_state = NULL; + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return 0; +} + +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) +{ + INIT_LIST_HEAD(&priv->gem_list_head); + spin_lock_init(&priv->hotplug_irq_lock); + INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); + memset(priv->mem_state, 0, sizeof(priv->mem_state)); + priv->dev = dev; + priv->display_shutdown = phytium_display_shutdown; + priv->display_pm_suspend = phytium_display_pm_suspend; + 
priv->display_pm_resume = phytium_display_pm_resume;
+}
+
+static int __init phytium_display_init(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&phytium_platform_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&phytium_pci_driver);
+ if (ret)
+ platform_driver_unregister(&phytium_platform_driver);
+
+ return ret;
+}
+
+static void __exit phytium_display_exit(void)
+{
+ pci_unregister_driver(&phytium_pci_driver);
+
+ platform_driver_unregister(&phytium_platform_driver);
+}
+
+module_init(phytium_display_init);
+module_exit(phytium_display_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yang Xun ");
+MODULE_DESCRIPTION("Phytium Display Controller");
diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h
new file mode 100644
index 000000000000..70080dad8621
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_display_drv.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_DISPLAY_DRV_H__
+#define __PHYTIUM_DISPLAY_DRV_H__
+
+#include <linux/io.h>
+#include <drm/drm_fb_helper.h>
+
+#define DEBUG_LOG 0
+
+#define PHYTIUM_FORMAT_MAX_PLANE 3
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+
+#define DRV_NAME "dc"
+#define DRV_DESC "phytium dc"
+#define DRV_DATE "20201220"
+#define DRV_MAJOR 1
+#define DRV_MINOR 1
+
+/* come from GPU */
+#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92
+
+/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */
+#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21)
+/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */
+#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22)
+
+#define PIPE_MASK_SHIFT 0x0
+#define PIPE_MASK_MASK 0x7
+#define EDP_MASK_SHIFT 0x3
+#define EDP_MASK_MASK 0x7
+
+enum phytium_platform {
+ PHYTIUM_PLATFORM_UNINITIALIZED = 0,
+ PHYTIUM_PLATFORM_PX210,
+ PHYTIUM_PLATFORM_PE220X,
+};
+
+enum phytium_mem_state_type {
+ PHYTIUM_MEM_VRAM_TOTAL = 0,
+ PHYTIUM_MEM_VRAM_ALLOC,
+ PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL,
+ PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC,
+ PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC,
+ PHYTIUM_MEM_STATE_TYPE_COUNT,
+};
+
+#define MEMORY_TYPE_VRAM 0x1
+#define MEMORY_TYPE_SYSTEM_CARVEOUT 0x2
+#define MEMORY_TYPE_SYSTEM_UNIFIED 0x4
+
+#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p))
+
+#define IS_PX210(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PX210)
+#define IS_PE220X(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PE220X)
+
+struct phytium_device_info {
+ unsigned char platform_mask;
+ unsigned char pipe_mask;
+ unsigned char num_pipes;
+ unsigned char total_pipes;
+ unsigned char edp_mask;
+ unsigned int crtc_clock_max;
+ unsigned int hdisplay_max;
+ unsigned int vdisplay_max;
+ unsigned int backlight_max;
+ unsigned long address_mask;
+};
+
+struct phytium_display_private {
+ /* hw */
+ void __iomem *regs;
+ void __iomem *vram_addr;
+ struct phytium_device_info info;
+ char support_memory_type;
+ char reserve[3];
+ uint32_t dc_reg_base[3];
+ uint32_t dcreq_reg_base[3];
+ uint32_t dp_reg_base[3];
+ uint32_t address_transform_base;
+ uint32_t phy_access_base[3];
+
+ /* drm */
+ struct drm_device *dev;
+ int irq;
+
+ /* fb_dev */
+ struct drm_fb_helper fbdev_helper;
+ struct phytium_gem_object *fbdev_phytium_gem;
+
+ int save_reg[3];
+ struct list_head gem_list_head;
+
+ struct work_struct hotplug_work;
+ spinlock_t hotplug_irq_lock;
+
+ void (*vram_hw_init)(struct phytium_display_private *priv);
+ void (*display_shutdown)(struct drm_device *dev);
+ int (*display_pm_suspend)(struct drm_device *dev);
+ int 
(*display_pm_resume)(struct drm_device *dev); + void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); + + struct gen_pool *memory_pool; + resource_size_t pool_phys_addr; + resource_size_t pool_size; + void *pool_virt_addr; + uint64_t mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT]; + + /* DMA info */ + int dma_inited; + struct dma_chan *dma_chan; +}; + +static inline unsigned int +phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +{ + unsigned int data; + + data = readl(priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif + return data; +} + +static inline void +phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + + writel(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif +} + +static inline void +phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + writeb(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); +#endif +} + +#define for_each_pipe(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) + +#define for_each_pipe_masked(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit); +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); + +extern struct drm_driver phytium_display_drm_driver; +extern int dc_fake_mode_enable; +extern int dc_fast_training_check; +extern int num_source_rates; +extern int source_max_lane_count; +extern int link_dynamic_adjust; + +#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c new file mode 100644 index 000000000000..98a06ccbc48d --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -0,0 +1,2639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
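+ *
+ * PHY registers are reached indirectly through an address/data access
+ * window (see phytium_phy_writel()/phytium_phy_readl() below). An
+ * illustrative access, with the port/pipe argument assumed:
+ *
+ *   phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_CFG(port), SINGLE_LINK);
+ *   val = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port));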
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_debugfs.h" +#include "px210_dp.h" +#include "pe220x_dp.h" +#include "phytium_panel.h" +#include "phytium_reg.h" + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); +static void phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp); + +static int phytium_rate[] = {162000, 270000, 540000, 810000}; +static int codec_id = PHYTIUM_DP_AUDIO_ID; + +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + +#if DEBUG_LOG + pr_info("phy address write: 0x%x data:0x%x\n", address, data); +#endif + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); + phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); +} + +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + uint32_t data; + + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); + data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); +#if DEBUG_LOG + pr_info("phy address read: 0x%x data:0x%x\n", address, data); +#endif + + return data; +} + +static int +phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0, j = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + /* clear PX210_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + for (j = 0; j < msg->size; j++) + phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO); + + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= (msg->size-1) & BYTE_COUNT_MASK; + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait 
+				break;
+			}
+			count_timeout++;
+		} while (count_timeout < 6);
+
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		if (interrupt_status & REPLY_TIMEOUT) {
+			DRM_DEBUG_KMS("aux write reply timeout\n");
+			continue;
+		} else if (aux_status & REPLY_ERROR) {
+			DRM_DEBUG_KMS("aux write reply error\n");
+			continue;
+		} else if (aux_status & REPLY_RECEIVED) {
+			DRM_DEBUG_KMS("aux write reply received successfully\n");
+			break;
+		}
+	}
+
+	if (interrupt_status & REPLY_TIMEOUT) {
+		DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if (aux_status & REPLY_ERROR) {
+		DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) {
+		DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	}
+
+	msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE);
+	ret = msg->size;
+out:
+	return ret;
+}
+
+static int
+phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	unsigned int i = 0;
+	unsigned int cmd = 0;
+	unsigned int aux_status = 0, interrupt_status = 0;
+	unsigned char *data = msg->buffer;
+	int count_timeout = 0;
+	long ret = 0;
+
+	for (i = 0; i < 3; i++) {
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS);
+		cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT);
+		if (msg->size == 0)
+			cmd |= ADDRESS_ONLY;
+		else
+			cmd |= ((msg->size-1) & BYTE_COUNT_MASK);
+		phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND);
+
+		count_timeout = 0;
+		do {
+			mdelay(5);
+			interrupt_status = phytium_readl_reg(priv, group_offset,
+							     PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+			aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS);
+			if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR)
+					|| (interrupt_status & REPLY_TIMEOUT)) {
+				DRM_DEBUG_KMS("aux wait exit\n");
+				break;
+			}
+			count_timeout++;
+		} while (count_timeout < 6);
+
+		phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+		if (interrupt_status & REPLY_TIMEOUT) {
+			DRM_DEBUG_KMS("aux read reply timeout\n");
+			continue;
+		} else if (aux_status & REPLY_ERROR) {
+			DRM_DEBUG_KMS("aux read reply error\n");
+			continue;
+		} else if (aux_status & REPLY_RECEIVED) {
+			DRM_DEBUG_KMS("aux read reply received successfully\n");
+			break;
+		}
+	}
+
+	if (interrupt_status & REPLY_TIMEOUT) {
+		DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if (aux_status & REPLY_ERROR) {
+		DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	} else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) {
+		DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port);
+		ret = -EIO;
+		goto out;
+	}
+
+	msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE);
+	ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT);
+
+	if (ret > msg->size) {
+		ret = msg->size;
+	} else if (ret != msg->size) {
+		DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%zx)\n", ret, msg->size);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	for (i = 0; i < ret; i++)
+		data[i] = phytium_readl_reg(priv, group_offset,
PHYTIUM_DP_AUX_REPLY_DATA); + +out: + return ret; +} + +static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_mode *t, *mode; + struct drm_connector *connector = &phytium_dp->connector; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + if (mode->hdisplay != native_mode->hdisplay || + mode->vdisplay != native_mode->vdisplay) { + memcpy(native_mode, mode, sizeof(*mode)); + drm_mode_set_crtcinfo(native_mode, 0); + } + break; + } + } + + if (&mode->head == &connector->probed_modes) + native_mode->clock = 0; +} + +static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp) +{ + int i = 0, ret = 0; + struct drm_device *dev = phytium_dp->dev; + struct drm_display_mode *mode = NULL, *current_mode = NULL; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + bool mode_existed = false; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + } common_mode[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + if (native_mode->clock == 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(common_mode); i++) { + mode_existed = false; + + if (common_mode[i].w > native_mode->hdisplay || + common_mode[i].h > native_mode->vdisplay || + (common_mode[i].w == native_mode->hdisplay && + common_mode[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) { + if (common_mode[i].w == current_mode->hdisplay && + common_mode[i].h == current_mode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = drm_mode_duplicate(dev, native_mode); + if (mode == NULL) + continue; + + mode->hdisplay = common_mode[i].w; + mode->vdisplay = common_mode[i].h; + mode->type &= ~DRM_MODE_TYPE_PREFERRED; + strscpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN); + drm_mode_probed_add(&phytium_dp->connector, mode); + ret++; + } + + return ret; +} + +static int phytium_connector_get_modes(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct edid *edid; + int ret = 0; + + if (phytium_dp->is_edp) + edid = phytium_dp->edp_edid; + else + edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + + if (edid && drm_edid_is_valid(edid)) { + drm_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + phytium_dp->has_audio = drm_detect_monitor_audio(edid); + phytium_get_native_mode(phytium_dp); + if (dc_fake_mode_enable) + ret += phytium_connector_add_common_modes(phytium_dp); + } else { + drm_connector_update_edid_property(connector, NULL); + phytium_dp->has_audio = false; + } + + if (!phytium_dp->is_edp) + kfree(edid); + + return ret; +} + +static struct drm_encoder *phytium_dp_best_encoder(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + return &phytium_dp->encoder; +} + +static const +struct drm_connector_helper_funcs phytium_connector_helper_funcs = { + .get_modes = phytium_connector_get_modes, + .best_encoder = phytium_dp_best_encoder, +}; + +static void 
phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp)
+{
+	static const int dp_rates[] = {162000, 270000, 540000, 810000};
+	int i, max_rate;
+
+	max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]);
+	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+		if (dp_rates[i] > max_rate)
+			break;
+		phytium_dp->sink_rates[i] = dp_rates[i];
+	}
+	phytium_dp->num_sink_rates = i;
+}
+
+static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates,
+			    int sink_len, int *common_rates)
+{
+	int i = 0, j = 0, k = 0;
+
+	while (i < source_len && j < sink_len) {
+		if (source_rates[i] == sink_rates[j]) {
+			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+				return k;
+			common_rates[k] = source_rates[i];
+			++k;
+			++i;
+			++j;
+		} else if (source_rates[i] < sink_rates[j]) {
+			++i;
+		} else {
+			++j;
+		}
+	}
+	return k;
+}
+
+static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp)
+{
+	WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates);
+
+	phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates,
+							phytium_dp->num_source_rates,
+							phytium_dp->sink_rates,
+							phytium_dp->num_sink_rates,
+							phytium_dp->common_rates);
+
+	if (WARN_ON(phytium_dp->num_common_rates == 0)) {
+		phytium_dp->common_rates[0] = 162000;
+		phytium_dp->num_common_rates = 1;
+	}
+}
+
+static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp)
+{
+	int ret;
+	unsigned char sink_count = 0;
+
+	/* get DPCD capability, but don't check for data errors; so check the revision */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd,
+			       sizeof(phytium_dp->dpcd));
+	if (ret < 0) {
+		DRM_ERROR("port %d get DPCD capability fail\n", phytium_dp->port);
+		return false;
+	}
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] == 0) {
+		DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]);
+		return false;
+	}
+
+	/* parse sink supported link */
+	phytium_dp_set_sink_rates(phytium_dp);
+	phytium_dp_set_common_rates(phytium_dp);
+	phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd);
+	phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count,
+						phytium_dp->sink_max_lane_count);
+
+	/* get DPCD sink count */
+	if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) {
+		DRM_ERROR("get DPCD sink_count fail\n");
+		return false;
+	}
+
+	phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+	if (!phytium_dp->sink_count) {
+		DRM_ERROR("DPCD sink_count should not be zero\n");
+		return false;
+	}
+
+	if (!drm_dp_is_branch(phytium_dp->dpcd))
+		return true;
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10)
+		return true;
+
+	/* get downstream port for branch device */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0,
+			       phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS);
+	if (ret < 0) {
+		DRM_ERROR("get DPCD DFP fail\n");
+		return false;
+	}
+
+	return true;
+}
+
+static enum drm_connector_status
+phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp)
+{
+	if (!phytium_dp_get_dpcd(phytium_dp))
+		return connector_status_disconnected;
+
+	if (!drm_dp_is_branch(phytium_dp->dpcd))
+		return connector_status_connected;
+
+	if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
+		return phytium_dp->sink_count ?
connector_status_connected + : connector_status_disconnected; + } + return connector_status_connected; +} + +static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count) +{ + unsigned char v = 0; + unsigned char p = 0; + int lane; + unsigned char voltage_max; + unsigned char preemph_max; + + /* find max value */ + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3; + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + phytium_dp->train_set[lane] = v | p; +} + +bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, + uint8_t train_set) +{ + phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); +} + +static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint32_t link_rate) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0, retry = 3; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, lane_count, + group_offset, PHYTIUM_DP_LANE_COUNT_SET); + phytium_writel_reg(priv, + drm_dp_link_rate_to_bw_code(link_rate), + group_offset, PHYTIUM_DP_LINK_BW_SET); + + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + else + phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + +try_again: + ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); + if ((ret < 0) && retry) { + retry--; + goto try_again; + } +} + +static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, val = 0, tmp = 0, i; + uint32_t group_offset = priv->dp_reg_base[port]; + + if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + && custom_pattern && (custom_pattern_size > 0)) { + val = *(int *)custom_pattern; + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); + 
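+		/*
+		 * The 80-bit custom pattern spans two 32-bit pattern registers
+		 * plus a final 16-bit chunk, hence the int/short loads below.
+		 */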
val = *(int *)(custom_pattern + 4); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); + val = *(short int *)(custom_pattern + 8); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); + } + + if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 + || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + else + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + + tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; + val = 0; + for (i = 0; i < lane_count; i++) + val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); +} + +static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, tmp = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 + || train_pattern == DP_TRAINING_PATTERN_DISABLE) { + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, + PHYTIUM_DP_FORCE_SCRAMBLER_RESET); + } else { + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + } + switch (train_pattern) { + case DP_TRAINING_PATTERN_DISABLE: + tmp = TRAINING_OFF; + break; + case DP_TRAINING_PATTERN_1: + tmp = TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_2: + tmp = TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_3: + tmp = TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_4: + tmp = TRAINING_PATTERN_4; + break; + default: + tmp = TRAINING_OFF; + break; + } + + phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); +} + +void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0, config1, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t group_offset = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + + n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + + config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, 
PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+	phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+	phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+}
+
+static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+}
+
+static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable)
+{
+	struct phytium_display_private *priv = phytium_dp->dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	if (enable)
+		phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE,
+				   group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+	else
+		phytium_writel_reg(priv, SEC_AUDIO_ENABLE,
+				   group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+}
+
+static int
+phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info)
+{
+	struct phytium_display_private *priv = phytium_dp->dev->dev_private;
+	int port = phytium_dp->port;
+	int ret = 0, data_window = 0;
+	const struct dp_audio_n_m *n_m = NULL;
+	uint32_t fs, ws, fs_accurac;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n",
+		      __func__, phytium_dp->port, audio_info.sample_rate,
+		      audio_info.channels, audio_info.sample_width);
+
+	phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT);
+	phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate,
+			   group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV);
+	phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK,
+			   group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT);
+	phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP);
+	data_window = 90*(phytium_dp->link_rate)/100
+		      *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay)
+		      /phytium_dp->mode.clock/4;
+	phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW);
+	phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE);
+
+	phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE);
+	phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT,
+			   group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT);
+
+	switch (audio_info.sample_rate) {
+	case 32000:
+		fs = ORIG_FREQ_32000;
+		fs_accurac = SAMPLING_FREQ_32000;
+		break;
+	case 44100:
+		fs = ORIG_FREQ_44100;
+		fs_accurac = SAMPLING_FREQ_44100;
+		break;
+	case 48000:
+		fs = ORIG_FREQ_48000;
+		fs_accurac = SAMPLING_FREQ_48000;
+		break;
+	case 96000:
+		fs = ORIG_FREQ_96000;
+		fs_accurac = SAMPLING_FREQ_96000;
+		break;
+	case 176400:
+		fs = ORIG_FREQ_176400;
+		fs_accurac = SAMPLING_FREQ_176400;
+		break;
+	case 192000:
+		fs = ORIG_FREQ_192000;
+		fs_accurac = SAMPLING_FREQ_192000;
+		break;
+	default:
+		DRM_ERROR("dp does not support sample_rate %d\n", audio_info.sample_rate);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (audio_info.sample_width) {
+	case 16:
+		ws = WORD_LENGTH_16;
+		break;
+	case 18:
+		ws = WORD_LENGTH_18;
+		break;
+	case 20:
+		ws = WORD_LENGTH_20;
+		break;
+	case 24:
+		ws = WORD_LENGTH_24;
+		break;
+	default:
+		DRM_ERROR("dp does not support sample_width %d\n", audio_info.sample_width);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<<ORIG_FREQ_SHIFT)
+			   | ((ws&WORD_LENGTH_MASK)<<WORD_LENGTH_SHIFT),
+			   group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ);
+	phytium_writel_reg(priv, (fs_accurac&SAMPLING_FREQ_MASK)<<SAMPLING_FREQ_SHIFT,
+			   group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY);
+
+	n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, audio_info.sample_rate);
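+	/*
+	 * Maud/Naud express the audio-sample-clock to link-symbol-clock ratio
+	 * that the sink uses for audio clock regeneration; when no matching
+	 * table entry exists, both registers are programmed to zero below.
+	 */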
+	if (n_m == NULL) {
+		DRM_NOTE("cannot get n_m for link_rate(%d) and sample_rate(%d)\n",
+			 phytium_dp->link_rate, audio_info.sample_rate);
+		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD);
+		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD);
+	} else {
+		phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD);
+		phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD);
+	}
+	phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+	phytium_dp->audio_info = audio_info;
+
+	return 0;
+
+out:
+	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+
+	return ret;
+}
+
+void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE,
+			   group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+}
+
+bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port, config;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+	return config ? true : false;
+}
+
+void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE,
+			   group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+	phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET);
+}
+
+void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	unsigned long link_bw, data_rate = 0;
+	struct drm_display_info *display_info = &phytium_dp->connector.display_info;
+	unsigned char tu_size = 64;
+	unsigned long data_per_tu = 0;
+	int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value;
+
+	/* calculate M/N and tu_size */
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID);
+	phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID);
+	link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count;
+	data_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8;
+
+	/* multiply by 10 for the register setting */
+	data_per_tu = 10*tu_size * data_rate/link_bw;
+	symbols_per_tu = (data_per_tu/10)&0xff;
+	frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf;
+	phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size,
+			   group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE);
+
+	symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8;
+	udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count;
+	phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT);
+
+	/* config main stream attributes */
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal,
+			   group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL);
+	phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay,
+			   group_offset,
PHYTIUM_DP_MAIN_LINK_HRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, + group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, + group_offset, PHYTIUM_DP_MAIN_LINK_VRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); + + value = 0; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value & (~HSYNC_POLARITY_LOW); + else + value = value | HSYNC_POLARITY_LOW; + + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value & (~VSYNC_POLARITY_LOW); + else + value = value | VSYNC_POLARITY_LOW; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); + + switch (display_info->bpc) { + case 10: + value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); + break; + case 6: + value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); + break; + default: + value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); + break; + } + value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) + | MISC0_SYNCHRONOUS_CLOCK; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); + + value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value | USER_HSYNC_POLARITY_HIGH; + else + value = value & (~USER_HSYNC_POLARITY_HIGH); + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value | USER_VSYNC_POLARITY_HIGH; + else + value = value & (~USER_VSYNC_POLARITY_HIGH); + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); +} + +void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); +} + +void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, + group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); +} + +void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct 
drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+
+	phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK,
+			   priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE);
+}
+
+bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	int config = 0;
+
+	config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE);
+	return config ? true : false;
+}
+
+static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t val = 0, raw_state = 0;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+
+	/* an hpd event may have been missed, so read PHYTIUM_DP_INTERRUPT_STATUS
+	 * to clear PHYTIUM_DP_INTERRUPT_RAW_STATUS
+	 */
+	phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+	raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE);
+	if (val & HPD_EVENT)
+		phytium_dp->dp_hpd_state.hpd_event_state = true;
+
+	if (val & HPD_IRQ)
+		phytium_dp->dp_hpd_state.hpd_irq_state = true;
+
+	if (raw_state & HPD_CONNECT)
+		phytium_dp->dp_hpd_state.hpd_raw_state = true;
+	else
+		phytium_dp->dp_hpd_state.hpd_raw_state = false;
+}
+
+void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_dp->dp_hpd_state.hpd_irq_enable = enable;
+	if (enable)
+		phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+	else
+		phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK,
+				   group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+}
+
+int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp)
+{
+	int ret = 0;
+	uint8_t count = 0;
+
+	phytium_dp->source_rates = phytium_rate;
+	phytium_dp->num_source_rates = num_source_rates;
+	count = phytium_dp->funcs->dp_hw_get_source_lane_count(phytium_dp);
+	phytium_dp->source_max_lane_count = count;
+
+	ret = phytium_dp->funcs->dp_hw_reset(phytium_dp);
+	if (ret)
+		goto out;
+	ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp);
+	if (ret)
+		goto out;
+
+	phytium_dp->fast_train_support = false;
+	phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp);
+
+out:
+	return ret;
+}
+
+static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp,
+				       uint8_t *test_lane_count,
+				       uint32_t *test_link_rate)
+{
+	uint8_t test_link_bw;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT,
+				test_lane_count);
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("test pattern lane count read failed(%d)\n", ret);
+		goto failed;
+	}
+
+	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE,
+				&test_link_bw);
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret);
+		goto failed;
+	}
+	*test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp,
+				    uint8_t lane_count, uint32_t link_rate)
+{
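+	/*
+	 * Program the sink's link configuration over AUX: DP_LINK_BW_SET and
+	 * DP_LANE_COUNT_SET (DPCD 0x100/0x101) in one two-byte write, then
+	 * downspread control and 8b/10b coding select (DPCD 0x107/0x108).
+	 */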
+	uint8_t link_config[2];
+	int ret = 0;
+
+	link_config[0] = drm_dp_link_rate_to_bw_code(link_rate);
+	link_config[1] = lane_count;
+	if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2);
+	if (ret < 0) {
+		DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	if (phytium_dp->hw_spread_enable)
+		link_config[0] = DP_SPREAD_AMP_0_5;
+	else
+		link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+	if (ret < 0) {
+		DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp,
+					    uint8_t test_pattern)
+{
+	unsigned char value;
+	int ret;
+
+	if (phytium_dp_coding_8b10b_need_enable(test_pattern))
+		value = DP_SET_ANSI_8B10B;
+	else
+		value = 0;
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+	if (ret < 0) {
+		DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	if (phytium_dp_scrambled_need_enable(test_pattern))
+		value = DP_TRAINING_PATTERN_DISABLE;
+	else
+		value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE);
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+	if (ret < 0) {
+		DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern);
+	if (ret < 0) {
+		DRM_ERROR("write DP_LINK_QUAL_LANE0_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp,
+					     uint8_t train_pattern)
+{
+	uint8_t value;
+	int ret;
+
+	/* Scrambling is disabled for TPS1/2/3 and enabled for TPS4 */
+	if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE)
+		value = train_pattern;
+	else
+		value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE);
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+	if (ret < 0) {
+		DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int
+phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set)
+{
+	int ret = 0;
+
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET,
+				phytium_dp->train_set, 4);
+	if (ret < 0) {
+		DRM_ERROR("write DP_TRAINING_LANE0_SET fail: ret:%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count)
+{
+	int ret = 0;
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+			       link_status, DP_LINK_STATUS_SIZE);
+	if (ret < 0) {
+		DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+		goto failed;
+	}
+	phytium_get_adjust_train(phytium_dp, link_status, lane_count);
+
+	return 0;
+failed:
+	return ret;
+}
+
+void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode)
+{
+	int ret, i;
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+	} else {
+		for (i = 0; i < 3; i++) {
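+			/*
+			 * A sink can take a while to wake from D3, so retry
+			 * the DP_SET_POWER_D0 write a few times, 20 ms apart,
+			 * before giving up.
+			 */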
+			ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			msleep(20);
+		}
+	}
+
+	if (ret != 1)
+		DRM_DEBUG_KMS("failed to %s sink power state\n",
+			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+}
+
+static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp)
+{
+	int ret;
+	unsigned char voltage, max_vswing_tries;
+	int voltage_tries;
+
+	/* clear the test pattern */
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count,
+				       PHYTIUM_PHY_TP_NONE, NULL, 0);
+
+	/* config source and sink's link rate and lane count */
+	phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate);
+	ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count,
+				       phytium_dp->link_rate);
+	if (ret < 0) {
+		DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret);
+		return false;
+	}
+
+	/* config source's voltage swing and pre-emphasis(103-106) */
+	memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set));
+	phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+				       phytium_dp->train_set[0]);
+
+	/* config train pattern */
+	phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+	ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret);
+		return false;
+	}
+
+	/* config sink's voltage swing and pre-emphasis(103-106) */
+	ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+		return false;
+	}
+
+	voltage_tries = 1;
+	max_vswing_tries = 0;
+	for (;;) {
+		unsigned char link_status[DP_LINK_STATUS_SIZE];
+
+		drm_dp_link_train_clock_recovery_delay(&phytium_dp->aux, phytium_dp->dpcd);
+		/* get link status 0x202-0x207 */
+		ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+				       link_status, DP_LINK_STATUS_SIZE);
+		if (ret < 0) {
+			DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+			return false;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+			DRM_DEBUG_KMS("clock recovery ok\n");
+			return true;
+		}
+
+		if (voltage_tries == 5) {
+			DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+			return false;
+		}
+
+		if (max_vswing_tries == 1) {
+			DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+			return false;
+		}
+
+		voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* config source and sink's voltage swing and pre-emphasis(103-106) */
+		phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count);
+		phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+					       phytium_dp->train_set[0]);
+		ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+		if (ret < 0) {
+			DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+			return false;
+		}
+
+		if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage)
+			++voltage_tries;
+		else
+			voltage_tries = 1;
+
+		if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED)
+			++max_vswing_tries;
+
+		DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n",
+			      phytium_dp->train_set[0], voltage_tries, max_vswing_tries);
+	}
+}
+
+static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp)
+{
+	bool sink_tps3, sink_tps4;
+
+	sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd);
+	if (sink_tps4)
+		return DP_TRAINING_PATTERN_4;
+	else if (phytium_dp->link_rate
== 810000) + DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + + sink_tps3 = drm_dp_tps3_supported(phytium_dp->dpcd); + if (sink_tps3) + return DP_TRAINING_PATTERN_3; + else if (phytium_dp->link_rate >= 540000) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + + return DP_TRAINING_PATTERN_2; +} + +static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) +{ + unsigned int training_pattern; + int tries, ret; + unsigned char link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + /* config source and sink's voltage swing and pre-emphasis(103-106), from clock recovery */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return channel_eq; + } + + /* config source and sink's train_pattern x */ + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return channel_eq; + } + + for (tries = 0; tries < 5; tries++) { + drm_dp_link_train_channel_eq_delay(&phytium_dp->aux, phytium_dp->dpcd); + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + break; + } + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + + return channel_eq; +} + +static void phytium_dp_train_retry_work_fn(struct work_struct *work) +{ + struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); + struct drm_connector *connector; + + connector = &phytium_dp->connector; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + mutex_lock(&connector->dev->mode_config.mutex); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + drm_kms_helper_hotplug_event(connector->dev); +} + +/* return index of rate in rates array, or -1 if not found */ +static int phytium_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) +{ + int index, ret = 0; + + if (phytium_dp->is_edp) { + phytium_dp->train_retry_count++; + DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", + phytium_dp->port); + goto out; + } else { + index = phytium_dp_rate_index(phytium_dp->common_rates, + phytium_dp->num_common_rates, + phytium_dp->link_rate); + if (index > 0) { + phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; + } else if (phytium_dp->link_lane_count > 1) { + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; + } else { + phytium_dp->train_retry_count++; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", + phytium_dp->port); + ret = -1; + } + } + +out: + return ret; +} + +static int +phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + + phytium_dp_hw_disable_output(phytium_dp); + phytium_dp_hw_disable_input_source(phytium_dp); + phytium_dp_hw_disable_video(phytium_dp); + phytium_dp_hw_enable_input_source(phytium_dp); + phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + + if (!phytium_dp_link_training_clock_recovery(phytium_dp)) + goto failure_handling; + + if (!phytium_dp_link_training_channel_equalization(phytium_dp)) + goto failure_handling; + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = 
%d\n", ret);
+		goto out;
+	}
+
+	if (phytium_dp->trigger_train_fail) {
+		phytium_dp->trigger_train_fail--;
+		goto failure_handling;
+	}
+	phytium_dp->train_retry_count = 0;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n",
+		      phytium_dp->connector.base.id,
+		      phytium_dp->connector.name, phytium_dp->link_rate,
+		      phytium_dp->link_lane_count);
+
+	return 0;
+
+failure_handling:
+	DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d\n",
+		 phytium_dp->connector.base.id,
+		 phytium_dp->connector.name,
+		 phytium_dp->link_rate, phytium_dp->link_lane_count);
+
+	ret = phytium_dp_stop_link_train(phytium_dp);
+	if (ret < 0) {
+		DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret);
+		goto out;
+	}
+
+	phytium_dp_get_link_train_fallback_values(phytium_dp);
+	if (phytium_dp->train_retry_count < 5)
+		schedule_work(&phytium_dp->train_retry_work);
+	else
+		DRM_ERROR("DP(%d) Link Training Unsuccessful, and stop Training\n",
+			  phytium_dp->port);
+
+out:
+	return -1;
+}
+
+static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp)
+{
+	unsigned char link_status[DP_LINK_STATUS_SIZE];
+	int ret = 0;
+
+	/* get link status 0x202-0x207 */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+			       link_status, DP_LINK_STATUS_SIZE);
+	if (ret < 0) {
+		DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+		return true;
+	}
+
+	if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) {
+		DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n",
+			      phytium_dp->link_rate, phytium_dp->link_lane_count);
+		return true;
+	}
+
+	/* Make sure clock is still ok */
+	if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+		DRM_DEBUG_KMS("Clock recovery check failed\n");
+		return true;
+	}
+
+	if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) {
+		DRM_DEBUG_KMS("Channel EQ check failed\n");
+		return true;
+	}
+
+	if (!phytium_dp_hw_output_is_enable(phytium_dp)) {
+		DRM_DEBUG_KMS("check DP output enable failed\n");
+		return true;
+	}
+	return false;
+}
+
+static bool
+phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector)
+{
+	return drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+				 sink_irq_vector) == 1;
+}
+
+static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp)
+{
+	union phytium_phy_tp phytium_phy_tp;
+	int ret;
+	unsigned char test_80_bit_pattern[
+			(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+			 DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+	unsigned char test_pattern;
+	unsigned int offset;
+
+	offset = DP_PHY_TEST_PATTERN;
+
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, offset,
+			       &phytium_phy_tp.raw,
+			       sizeof(phytium_phy_tp));
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("Could not read DP_PHY_TEST_PATTERN\n");
+		goto failed;
+	}
+
+	test_pattern = phytium_phy_tp.bits.PATTERN;
+
+	if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) {
+		ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+				       test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+		if (ret <= 0) {
+			DRM_DEBUG_KMS("Could not read DP_TEST_80BIT_CUSTOM_PATTERN\n");
+			goto failed;
+		}
+	}
+
+	/* config source and sink's link rate and link count */
+	ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count,
+					  &phytium_dp->compliance.test_link_rate);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret);
+		goto failed;
+	}
+
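+	/*
+	 * PHY compliance testing (DP CTS): mirror the requested link rate,
+	 * lane count and drive settings into the source, then emit the
+	 * requested test pattern.
+	 */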
+	phytium_dp_hw_set_link(phytium_dp, phytium_dp->compliance.test_lane_count,
+			       phytium_dp->compliance.test_link_rate);
+	ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       phytium_dp->compliance.test_link_rate);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret);
+		goto failed_dpcd_set_link;
+	}
+
+	/* config source and sink's lane setting: voltage swing and pre-emphasis */
+	ret = phytium_dp_dpcd_get_adjust_request(phytium_dp,
+						 phytium_dp->compliance.test_lane_count);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret);
+		goto failed_dpcd_get_adjust_request;
+	}
+	phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->compliance.test_link_rate,
+				       phytium_dp->train_set[0]);
+	ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+		goto failed_dpcd_set_lane_setting;
+	}
+
+	/* config test pattern */
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       test_pattern, test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+	ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret);
+		goto failed_dpcd_set_tp;
+	}
+
+	return DP_TEST_ACK;
+
+failed_dpcd_set_tp:
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       PHYTIUM_PHY_TP_NONE, test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+failed_dpcd_set_link:
+failed_dpcd_set_lane_setting:
+failed_dpcd_get_adjust_request:
+failed:
+	return DP_TEST_NAK;
+}
+
+static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp)
+{
+	uint8_t response = DP_TEST_NAK;
+	uint8_t request = 0;
+	int status;
+
+	status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request);
+	if (status <= 0) {
+		DRM_DEBUG_KMS("Could not read test request from sink\n");
+		goto update_status;
+	}
+
+	switch (request) {
+	case DP_TEST_LINK_TRAINING:
+	case DP_TEST_LINK_VIDEO_PATTERN:
+	case DP_TEST_LINK_EDID_READ:
+		DRM_DEBUG_KMS("Unsupported test request '%02x'\n", request);
+		response = DP_TEST_NAK;
+		break;
+	case DP_TEST_LINK_PHY_TEST_PATTERN:
+		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+		response = phytium_dp_autotest_phy_pattern(phytium_dp);
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+		break;
+	}
+
+update_status:
+	status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response);
+	if (status <= 0)
+		DRM_DEBUG_KMS("Could not write test response to sink\n");
+}
+
+static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+	enum drm_connector_status status = connector->status;
+	bool video_enable = false;
+	uint32_t index = 0;
+
+	if (phytium_dp->is_edp)
+		status = connector_status_connected;
+	else if (hpd_raw_state) {
+		if (!phytium_dp_needs_link_retrain(phytium_dp)) {
+			status = connector_status_connected;
+			goto out;
+		}
+	} else {
+		status = connector_status_disconnected;
+		goto out;
+	}
+
+	if (!phytium_dp->is_edp) {
+		status = phytium_dp_detect_dpcd(phytium_dp);
+		if (status == connector_status_disconnected)
+			goto out;
+
+		index = phytium_dp->num_common_rates-1;
+		phytium_dp->max_link_rate = phytium_dp->common_rates[index];
+		phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count;
+		phytium_dp->link_rate = phytium_dp->max_link_rate;
phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + } + +out: + return status; +} + +static int phytium_dp_short_pulse(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + u8 sink_irq_vector = 0; + bool video_enable = false; + + /* handle the test pattern */ + if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && + sink_irq_vector != 0) { + drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + phytium_dp_handle_test_request(phytium_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + +out: + return status; +} + +void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); +} + +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + struct phytium_display_private *priv = dev->dev_private; + bool handler = false; + bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { + spin_lock_irq(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { + hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state + || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } + } + spin_unlock_irq(&priv->hotplug_irq_lock); + if (handler) + phytium_dp_hpd_poll_handler(priv); + } + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + 
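+			/* now program the per-port HPD interrupt mask to match 'enable' */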
phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } +} + +void phytium_dp_hpd_work_func(struct work_struct *work) +{ + struct phytium_display_private *priv = + container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug work functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); + + phytium_dp_hpd_irq_setup(dev, true); +} + +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) +{ + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; + struct drm_device *dev = priv->dev; + bool handler = false; + + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->dp_hpd_state.hpd_irq_enable) { + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state) { + handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + + if (handler) { + phytium_dp_hpd_irq_setup(dev, false); + schedule_work(&priv->hotplug_work); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + + +static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] + & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); + DRM_DEBUG_KMS("fast link training %s\n", + phytium_dp->fast_train_support ? 
"supported" : "unsupported"); +} + +bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + unsigned int training_pattern; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + usleep_range(500, 600); + + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + usleep_range(500, 600); + + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + if (dc_fast_training_check) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check clock recovery failed\n"); + return false; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check channel equalization failed\n"); + return false; + } + } + + return true; +} + +static enum drm_connector_status +phytium_connector_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status = connector->status; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + bool plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); + hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; + hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; + hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp->dp_hpd_state.hpd_event_state = false; + phytium_dp->dp_hpd_state.hpd_irq_state = false; + spin_unlock_irq(&priv->hotplug_irq_lock); + + if (hpd_event_state) + status = phytium_dp_long_pulse(connector, hpd_raw_state); + + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + + if (status == connector_status_unknown) + status = connector_status_disconnected; + + if ((!phytium_dp->is_edp) && (!hpd_raw_state)) + status = connector_status_disconnected; + + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + } + + return status; +} + +static void +phytium_connector_destroy(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + drm_connector_cleanup(connector); + kfree(phytium_dp); +} + +static int +phytium_dp_connector_register(struct drm_connector *connector) +{ + int ret; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_aux_init(phytium_dp); + if (phytium_dp->is_edp) { + phytium_edp_init_connector(phytium_dp); + ret = phytium_edp_backlight_device_register(phytium_dp); + if (ret) + DRM_ERROR("failed to register port(%d) backlight 
device(ret=%d)\n",
+				  phytium_dp->port, ret);
+	}
+
+	ret = phytium_debugfs_connector_add(connector);
+	if (ret)
+		DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret);
+
+	return 0;
+}
+
+static void
+phytium_dp_connector_unregister(struct drm_connector *connector)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+
+	if (phytium_dp->is_edp) {
+		phytium_edp_backlight_device_unregister(phytium_dp);
+		phytium_edp_fini_connector(phytium_dp);
+	}
+	drm_dp_aux_unregister(&phytium_dp->aux);
+}
+
+static const struct drm_connector_funcs phytium_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = phytium_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = phytium_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.late_register = phytium_dp_connector_register,
+	.early_unregister = phytium_dp_connector_unregister,
+};
+
+static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted)
+{
+	struct phytium_dp_device *dp = encoder_to_dp_device(encoder);
+
+	drm_mode_copy(&dp->mode, adjusted);
+}
+
+static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp)
+{
+	phytium_panel_poweron(&phytium_dp->panel);
+}
+
+static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp)
+{
+	phytium_panel_poweroff(&phytium_dp->panel);
+}
+
+static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp)
+{
+	phytium_panel_enable_backlight(&phytium_dp->panel);
+}
+
+static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp)
+{
+	phytium_panel_disable_backlight(&phytium_dp->panel);
+}
+
+static void phytium_encoder_disable(struct drm_encoder *encoder)
+{
+	struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder);
+
+	if (phytium_dp->is_edp)
+		phytium_edp_backlight_off(phytium_dp);
+
+	phytium_dp_hw_disable_video(phytium_dp);
+
+	mdelay(50);
+
+	if (phytium_dp->is_edp)
+		phytium_edp_panel_poweroff(phytium_dp);
+}
+
+void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_display_info *display_info = &phytium_dp->connector.display_info;
+	unsigned long link_bw, data_rate = 0, bs_limit, bs_request;
+	int rate = 0;
+
+	bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000);
+	data_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8;
+
+	/*
+	 * rate is ten times the link utilisation, so rate < 10 means the
+	 * mode's data rate fits within link_bw. Keep falling back to lower
+	 * rate/lane settings until the mode fits (and, when
+	 * link_dynamic_adjust is set, bs_request stays under bs_limit).
+	 */
+	for (;;) {
+		bs_limit = 8192 / (phytium_dp->link_rate/1000);
+		link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count;
+		rate = 10 * data_rate / link_bw;
+		DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n",
+			      phytium_dp->link_rate, phytium_dp->link_lane_count);
+		DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n",
+			      phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate);
+		if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) ||
+		    ((!link_dynamic_adjust) && (rate < 10)))
+			break;
+		phytium_dp_get_link_train_fallback_values(phytium_dp);
+	}
+
+	DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n",
+		      phytium_dp->link_rate, phytium_dp->link_lane_count);
+}
+
+static void phytium_encoder_enable(struct drm_encoder *encoder)
+{
+	struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder);
+	int ret = 0;
+
+	phytium_dp_hw_disable_video(phytium_dp);
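+	/*
+	 * Note: the video stream stays off while the link is (re)trained
+	 * below and is re-enabled only after training and video
+	 * configuration complete.
+	 */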
+
+	if (phytium_dp->is_edp) {
+		phytium_edp_panel_poweron(phytium_dp);
+		if (phytium_dp->fast_train_support)
+			phytium_dp_fast_link_train(phytium_dp);
+		else
+			ret = phytium_dp_start_link_train(phytium_dp);
+		mdelay(2);
+		phytium_dp_fast_link_train_detect(phytium_dp);
+	} else {
+		phytium_dp_adjust_link_train_parameter(phytium_dp);
+		ret = phytium_dp_start_link_train(phytium_dp);
+		mdelay(2);
+	}
+
+	phytium_dp_hw_config_video(phytium_dp);
+	if (ret == 0) {
+		phytium_dp_hw_enable_video(phytium_dp);
+		if (phytium_dp->has_audio)
+			phytium_dp_hw_enable_audio(phytium_dp);
+	}
+
+	if (phytium_dp->is_edp)
+		phytium_edp_backlight_on(phytium_dp);
+}
+
+enum drm_mode_status
+phytium_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode)
+{
+	struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder);
+	struct drm_display_info *display_info = &phytium_dp->connector.display_info;
+	unsigned int requested, actual;
+
+	switch (display_info->bpc) {
+	case 10:
+	case 6:
+	case 8:
+		break;
+	default:
+		DRM_INFO("unsupported bpc(%d), falling back to 8\n", display_info->bpc);
+		display_info->bpc = 8;
+		break;
+	}
+
+	if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) {
+		DRM_INFO("unsupported color_format(%d), falling back to RGB444\n",
+			 display_info->color_formats);
+		display_info->color_formats = DRM_COLOR_FORMAT_RGB444;
+	}
+
+	requested = mode->clock * display_info->bpc * 3 / 1000;
+	actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100;
+	actual = actual * 8 / 10;
+	if (requested >= actual) {
+		DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual,
+			      mode->clock);
+		return MODE_CLOCK_HIGH;
+	}
+
+	if (dc_fake_mode_enable &&
+	    (phytium_dp->native_mode.clock == mode->clock) &&
+	    (phytium_dp->native_mode.htotal == mode->htotal) &&
+	    (phytium_dp->native_mode.vtotal == mode->vtotal))
+		return MODE_OK;
+
+	if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
+		return MODE_BAD_HVALUE;
+
+	if ((mode->hdisplay == 1024) && (mode->clock > 78000))
+		return MODE_BAD_HVALUE;
+
+	if ((mode->hdisplay < 640) || (mode->vdisplay < 480))
+		return MODE_BAD_HVALUE;
+
+	return MODE_OK;
+}
+
+static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = {
+	.mode_set = phytium_dp_encoder_mode_set,
+	.disable = phytium_encoder_disable,
+	.enable = phytium_encoder_enable,
+	.mode_valid = phytium_encoder_mode_valid,
+};
+
+void phytium_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder);
+
+	phytium_dp_audio_codec_fini(phytium_dp);
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs phytium_encoder_funcs = {
+	.destroy = phytium_dp_encoder_destroy,
+};
+
+/*
+ * DP audio timestamp parameters: per the DisplayPort spec,
+ * Maud / Naud = 512 * fs / f_LS_Clk. For example, 32 kHz audio on a
+ * 1.62 Gbps link (162 MHz link symbol clock) gives
+ * 512 * 32000 / 162000000 = 1024 / 10125.
+ */
+static const struct dp_audio_n_m phytium_dp_audio_n_m[] = {
+	{ 32000, 162000, 1024, 10125 },
+	{ 44100, 162000, 784, 5625 },
+	{ 48000, 162000, 512, 3375 },
+	{ 64000, 162000, 2048, 10125 },
+	{ 88200, 162000, 1568, 5625 },
+	{ 96000, 162000, 1024, 3375 },
+	{ 128000, 162000, 4096, 10125 },
+	{ 176400, 162000, 3136, 5625 },
+	{ 192000, 162000, 2048, 3375 },
+	{ 32000, 270000, 1024, 16875 },
+	{ 44100, 270000, 784, 9375 },
+	{ 48000, 270000, 512, 5625 },
+	{ 64000, 270000, 2048, 16875 },
+	{ 88200, 270000, 1568, 9375 },
+	{ 96000, 270000, 1024, 5625 },
+	{ 128000, 270000, 4096, 16875 },
+	{ 176400, 270000, 3136, 9375 },
+	{ 192000, 270000, 2048, 5625 },
+	{ 32000, 540000, 1024, 33750 },
+	{ 44100, 540000, 784, 18750 },
+	{ 48000, 540000, 512, 11250 },
+	{ 64000, 540000, 2048, 33750 },
+	{ 88200, 540000, 1568, 18750 },
+ { 96000, 540000, 1024, 11250 }, + { 128000, 540000, 4096, 33750 }, + { 176400, 540000, 3136, 18750 }, + { 192000, 540000, 2048, 11250 }, + { 32000, 810000, 1024, 50625 }, + { 44100, 810000, 784, 28125 }, + { 48000, 810000, 512, 16875 }, + { 64000, 810000, 2048, 50625 }, + { 88200, 810000, 1568, 28125 }, + { 96000, 810000, 1024, 16875 }, + { 128000, 810000, 4096, 50625 }, + { 176400, 810000, 3136, 28125 }, + { 192000, 810000, 2048, 16875 }, +}; + +static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) +{ + struct phytium_dp_device *phytium_dp = data; + + memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len)); + + return 0; +} + +static int phytium_dp_audio_mute_stream(struct device *dev, void *data, bool enable, int direction) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_digital_mute(phytium_dp, enable); + + return 0; +} + +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phytium_dp_audio_n_m); i++) { + if (sample_rate == phytium_dp_audio_n_m[i].sample_rate + && link_rate == phytium_dp_audio_n_m[i].link_rate) + return &phytium_dp_audio_n_m[i]; + } + + return NULL; +} + +static int phytium_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct phytium_dp_device *phytium_dp = data; + int ret = 0; + struct audio_info audio_info = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + + if (daifmt->fmt != HDMI_I2S) { + DRM_ERROR("invalid audio format %d\n", daifmt->fmt); + ret = -EINVAL; + goto failed; + } + + ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + +failed: + return ret; +} + +static void phytium_dp_audio_shutdown(struct device *dev, void *data) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_shutdown(phytium_dp); +} + +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +{ + if (phytium_dp->plugged_cb && phytium_dp->codec_dev) + phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); +} + +static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct phytium_dp_device *phytium_dp = data; + bool plugged; + + phytium_dp->plugged_cb = fn; + phytium_dp->codec_dev = codec_dev; + + if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + return 0; +} + + +static const struct hdmi_codec_ops phytium_audio_codec_ops = { + .hw_params = phytium_dp_audio_hw_params, + .audio_shutdown = phytium_dp_audio_shutdown, + .mute_stream = phytium_dp_audio_mute_stream, + .get_eld = phytium_dp_audio_get_eld, + .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, +}; + +static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) +{ + struct device *dev = phytium_dp->dev->dev; + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &phytium_audio_codec_ops, + .max_i2s_channels = 2, + .data = phytium_dp, + }; + + phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + codec_id, + &codec_data, sizeof(codec_data)); + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + codec_id += 1; + + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); +} + +static void 
phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp)
+{
+	/* codec_id is only bumped on successful registration, so only unwind it then */
+	if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) {
+		platform_device_unregister(phytium_dp->audio_pdev);
+		codec_id -= 1;
+	}
+	phytium_dp->audio_pdev = NULL;
+}
+
+static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+	struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux);
+	long ret = 0;
+
+	DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size);
+
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+
+	switch (msg->request & ~DP_AUX_I2C_MOT) {
+	case DP_AUX_NATIVE_WRITE:
+	case DP_AUX_I2C_WRITE:
+	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+		ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg);
+		DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret);
+		break;
+	case DP_AUX_NATIVE_READ:
+	case DP_AUX_I2C_READ:
+		ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg);
+		DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp)
+{
+	drm_dp_aux_init(&phytium_dp->aux);
+	phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port);
+	phytium_dp->aux.transfer = phytium_dp_aux_transfer;
+}
+
+int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int i, mask = 0;
+
+	for_each_pipe_masked(priv, i) {
+		if (i != port)
+			mask++;
+		else
+			break;
+	}
+
+	return BIT(mask);
+}
+
+static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+
+	if (priv->info.edp_mask & BIT(port))
+		return true;
+	else
+		return false;
+}
+
+static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp)
+{
+	enum drm_connector_status status;
+	struct drm_connector *connector = &phytium_dp->connector;
+
+	phytium_edp_panel_poweron(phytium_dp);
+
+	status = phytium_dp_detect_dpcd(phytium_dp);
+	if (status == connector_status_disconnected) {
+		DRM_ERROR("detect edp dpcd failed\n");
+		return false;
+	}
+
+	phytium_dp->edp_edid = drm_get_edid(connector, &phytium_dp->aux.ddc);
+	if (!phytium_dp->edp_edid) {
+		DRM_ERROR("get edp edid failed\n");
+		return false;
+	}
+
+	connector->status = status;
+	phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1];
+	phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count;
+	phytium_dp->link_rate = phytium_dp->max_link_rate;
+	phytium_dp->link_lane_count = phytium_dp->max_link_lane_count;
+	DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n",
+		      phytium_dp->max_link_lane_count, phytium_dp->max_link_rate);
+
+	return true;
+}
+
+static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp)
+{
+	kfree(phytium_dp->edp_edid);
+
+	phytium_dp->edp_edid = NULL;
+	phytium_edp_panel_poweroff(phytium_dp);
+}
+
+int phytium_dp_resume(struct drm_device *drm_dev)
+{
+	struct phytium_dp_device *phytium_dp;
+	struct drm_encoder *encoder;
+	int ret = 0;
+
+	drm_for_each_encoder(encoder, drm_dev) {
+		phytium_dp = encoder_to_dp_device(encoder);
+		if (phytium_dp->is_edp) {
+			phytium_edp_backlight_off(phytium_dp);
+			phytium_edp_panel_poweroff(phytium_dp);
+		}
+		ret = phytium_dp_hw_init(phytium_dp);
+		if (ret) {
+			DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port);
+			return -EIO;
+		}
+	}
+
+	return 
0; +} + +int phytium_dp_init(struct drm_device *dev, int port) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); + if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; + + if (IS_PX210(priv)) { + px210_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PX210_DP_BASE(port); + priv->phy_access_base[port] = PX210_PHY_ACCESS_BASE(port); + } else if (IS_PE220X(priv)) { + pe220x_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PE220X_DP_BASE(port); + priv->phy_access_base[port] = PE220X_PHY_ACCESS_BASE(port); + } + + if (phytium_dp_is_edp(phytium_dp, port)) { + phytium_dp->is_edp = true; + type = DRM_MODE_CONNECTOR_eDP; + phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } else { + phytium_dp->is_edp = false; + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + goto failed_init_dp; + } + + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + goto failed_encoder_init; + } + drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); + phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); + + phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto failed_connector_init; + } + drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); + drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); + + ret = phytium_dp_audio_codec_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize audio codec\n"); + goto failed_connector_init; + } + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); + drm_connector_register(&phytium_dp->connector); + + return 0; +failed_connector_init: +failed_encoder_init: +failed_init_dp: + kfree(phytium_dp); +failed_malloc_dp: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h new file mode 100644 index 000000000000..ada3f42a6868 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DP_H__ +#define __PHYTIUM_DP_H__ + +#include +#include +#include +#include + +struct phytium_dp_device; + +#include "phytium_panel.h" + +struct audio_info { + int sample_rate; + int channels; + int sample_width; +}; + +struct dp_audio_n_m { + int sample_rate; + int link_rate; + u16 m; + u16 n; +}; + +struct phytium_dp_compliance { + unsigned long test_type; + uint32_t test_link_rate; + u8 test_lane_count; + bool test_active; + u8 reserve[2]; +}; + +struct phytium_dp_func { + uint8_t (*dp_hw_get_source_lane_count)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate); +}; + +struct phytium_dp_hpd_state { + bool hpd_event_state; + bool hpd_irq_state; + bool hpd_raw_state; + bool hpd_irq_enable; +}; + +struct phytium_dp_device { + struct drm_device *dev; + struct drm_encoder encoder; + struct drm_connector connector; + int port; + struct drm_display_mode mode; + bool link_trained; + bool detect_done; + bool is_edp; + bool reserve0; + struct drm_dp_aux aux; + unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + unsigned char downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + unsigned char sink_count; + + int *source_rates; + int num_source_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + int num_sink_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + int num_common_rates; + + int source_max_lane_count; + int sink_max_lane_count; + int common_max_lane_count; + + int max_link_rate; + int max_link_lane_count; + int link_rate; + int link_lane_count; + struct work_struct train_retry_work; + int train_retry_count; + uint32_t trigger_train_fail; + + unsigned char train_set[4]; + struct edid *edp_edid; + bool has_audio; + bool fast_train_support; + bool hw_spread_enable; + bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct phytium_dp_compliance compliance; + struct phytium_dp_func *funcs; + struct phytium_dp_hpd_state dp_hpd_state; + + struct phytium_panel panel; + struct drm_display_mode native_mode; +}; + +union phytium_phy_tp { + struct { + /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 + * and 3 bits for DP1.2. 
+ */ + uint8_t PATTERN :3; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +/* PHY test patterns + * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) + */ +enum phytium_dpcd_phy_tp { + PHYTIUM_PHY_TP_NONE = 0, + PHYTIUM_PHY_TP_D10_2, + PHYTIUM_PHY_TP_SYMBOL_ERROR, + PHYTIUM_PHY_TP_PRBS7, + PHYTIUM_PHY_TP_80BIT_CUSTOM, + PHYTIUM_PHY_TP_CP2520_1, + PHYTIUM_PHY_TP_CP2520_2, + PHYTIUM_PHY_TP_CP2520_3, +}; +#define PHYTIUM_DP_AUDIO_ID (('P' << 24) + ('H' << 16) + ('Y' << 8)) +#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) +#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) +#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) +#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); + +int phytium_dp_init(struct drm_device *dev, int pipe); +int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); +void phytium_dp_hpd_work_func(struct work_struct *work); +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); +#endif /* __PHYTIUM_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c new file mode 100644 index 000000000000..879065964729 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static int +phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, + unsigned int *handle) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + + return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); +} + +static void phytium_fb_destroy(struct drm_framebuffer *fb) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + int i, num_planes; + struct drm_gem_object *obj = NULL; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? 
info->num_planes : 1;
+
+	for (i = 0; i < num_planes; i++) {
+		if (!phytium_fb->phytium_gem_obj[i])
+			continue;
+		obj = &phytium_fb->phytium_gem_obj[i]->base;
+		drm_gem_object_put(obj);
+	}
+
+	drm_framebuffer_cleanup(fb);
+	kfree(phytium_fb);
+}
+
+static const struct drm_framebuffer_funcs viv_fb_funcs = {
+	.create_handle = phytium_fb_create_handle,
+	.destroy = phytium_fb_destroy,
+};
+
+struct phytium_framebuffer *
+phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
+		 struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes)
+{
+	struct phytium_framebuffer *phytium_fb;
+	int ret = 0, i;
+
+	phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL);
+	if (!phytium_fb)
+		return ERR_PTR(-ENOMEM);
+
+	drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd);
+
+	ret = drm_framebuffer_init(dev, &phytium_fb->base, &viv_fb_funcs);
+	if (ret) {
+		DRM_ERROR("Failed to initialize framebuffer: %d\n", ret);
+		kfree(phytium_fb);
+		return ERR_PTR(ret);
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i];
+		phytium_fb->base.obj[i] = &phytium_gem_obj[i]->base;
+	}
+	return phytium_fb;
+}
+
+struct drm_framebuffer *
+phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		  const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	int ret = 0, i, num_planes;
+	struct drm_gem_object *obj;
+	unsigned int hsub, vsub, size;
+	struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0};
+	struct phytium_framebuffer *phytium_fb;
+	struct phytium_display_private *priv = dev->dev_private;
+	const struct drm_format_info *info;
+
+	info = drm_format_info(mode_cmd->pixel_format);
+	hsub = info ? info->hsub : 1;
+	vsub = info ? info->vsub : 1;
+	num_planes = info ? info->num_planes : 1;
+	num_planes = min(num_planes, PHYTIUM_FORMAT_MAX_PLANE);
+
+	for (i = 0; i < num_planes; i++) {
+		unsigned int height = mode_cmd->height / (i ? vsub : 1);
+
+		/*
+		 * Validate the plane format before taking a GEM reference,
+		 * so the error path below only has to drop references for
+		 * planes that were fully set up.
+		 */
+		ret = priv->dc_hw_fb_format_check(mode_cmd, i);
+		if (ret < 0)
+			goto error;
+
+		size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i];
+		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
+		if (!obj) {
+			DRM_ERROR("Failed to lookup GEM object\n");
+			ret = -ENXIO;
+			goto error;
+		}
+
+		if (obj->size < size) {
+			drm_gem_object_put(obj);
+			ret = -EINVAL;
+			goto error;
+		}
+
+		phytium_gem_obj[i] = to_phytium_gem_obj(obj);
+	}
+
+	phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i);
+	if (IS_ERR(phytium_fb)) {
+		DRM_DEBUG_KMS("phytium_fb_alloc failed\n");
+		ret = PTR_ERR(phytium_fb);
+		goto error;
+	}
+
+	return &phytium_fb->base;
+error:
+	for (i--; i >= 0; i--)
+		drm_gem_object_put(&phytium_gem_obj[i]->base);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h
new file mode 100644
index 000000000000..e096aa30ccb5
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_fb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_FB_H__
+#define __PHYTIUM_FB_H__
+
+#include
+
+struct phytium_framebuffer {
+	struct drm_framebuffer base;
+	struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE];
+};
+
+#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base)
+
+struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev,
+					     const struct drm_mode_fb_cmd2 *mode_cmd,
+					     struct phytium_gem_object **phytium_gem_obj,
+					     unsigned int num_planes);
+
+struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+					  const struct drm_mode_fb_cmd2 *mode_cmd);
+#endif /* __PHYTIUM_FB_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c
new file mode 100644
index 000000000000..e929ad281724
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_fbdev.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_gem.h"
+#include "phytium_fb.h"
+
+
+#define PHYTIUM_MAX_CONNECTOR	1
+#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper)
+
+static void phytium_fbdev_destroy(struct fb_info *info)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+
+	phytium_gem_free_object(&priv->fbdev_phytium_gem->base);
+}
+
+static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+
+	return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma);
+}
+
+static const struct fb_ops phytium_fbdev_ops = {
+	.owner = THIS_MODULE,
+	DRM_FB_HELPER_DEFAULT_OPS,
+	.fb_mmap = phytium_fbdev_mmap,
+	FB_DEFAULT_IOMEM_OPS,
+	.fb_destroy = phytium_fbdev_destroy,
+};
+
+static int
+phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
+{
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+	struct drm_device *dev = helper->dev;
+	unsigned int bytes_per_pixel;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	struct phytium_framebuffer *phytium_fb = NULL;
+	struct fb_info *fbi = NULL;
+	struct drm_framebuffer *fb = NULL;
+	size_t size = 0;
+	int ret = 0;
+	unsigned long offset;
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+	size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret < 0) {
+		DRM_ERROR("failed to get mutex lock\n");
+		return ret;
+	}
+
+	priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size);
+	if (IS_ERR(priv->fbdev_phytium_gem)) {
+		DRM_ERROR("failed to create gem object\n");
+		/*
+		 * phytium_gem_create_object() returns an ERR_PTR rather than
+		 * NULL, and the mutex must be dropped on this path as well.
+		 */
+		ret = PTR_ERR(priv->fbdev_phytium_gem);
+		priv->fbdev_phytium_gem = NULL;
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	fbi = drm_fb_helper_alloc_info(helper);
+	if (IS_ERR(fbi)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.");
+		ret = PTR_ERR(fbi);
+		goto out;
+	}
+
+	phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1);
+	if (IS_ERR(phytium_fb)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to alloc DRM framebuffer.\n");
+		ret = 
PTR_ERR(phytium_fb);
+		goto out;
+	}
+
+	helper->fb = &(phytium_fb->base);
+	fbi->par = helper;
+	fbi->fbops = &phytium_fbdev_ops;
+
+	fb = helper->fb;
+	drm_fb_helper_fill_info(fbi, helper, sizes);
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+	fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset;
+	fbi->screen_size = priv->fbdev_phytium_gem->base.size;
+	fbi->fix.smem_len = priv->fbdev_phytium_gem->base.size;
+	DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height,
+		      fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size);
+	fbi->skip_vt_switch = true;
+
+	return 0;
+out:
+	phytium_gem_free_object(&priv->fbdev_phytium_gem->base);
+	return ret;
+}
+
+static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = {
+	.fb_probe = phytium_drm_fbdev_create,
+};
+
+int phytium_drm_fbdev_init(struct drm_device *dev)
+{
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_fb_helper *helper;
+	int ret;
+
+	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+		return -EINVAL;
+
+	helper = &priv->fbdev_helper;
+	drm_fb_helper_prepare(dev, helper, 32, &phytium_drm_fb_helper_funcs);
+
+	ret = drm_fb_helper_init(dev, helper);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev->dev, "Failed to initialize drm fb helper -ret %d\n", ret);
+		return ret;
+	}
+
+	/* propagate any initial-config failure instead of silently returning 0 */
+	return drm_fb_helper_initial_config(helper);
+}
+
+void phytium_drm_fbdev_fini(struct drm_device *dev)
+{
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_fb_helper *helper;
+
+	helper = &priv->fbdev_helper;
+	drm_fb_helper_unregister_info(helper);
+
+	if (helper->fb)
+		drm_framebuffer_put(helper->fb);
+
+	drm_fb_helper_fini(helper);
+}
diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h
new file mode 100644
index 000000000000..fe352557a4f9
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_fbdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef _PHYTIUM_FBDEV_H
+#define _PHYTIUM_FBDEV_H
+
+int phytium_drm_fbdev_init(struct drm_device *dev);
+void phytium_drm_fbdev_fini(struct drm_device *dev);
+
+#endif /* _PHYTIUM_FBDEV_H */
diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c
new file mode 100644
index 000000000000..f470f769dce6
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_gem.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" + +#define VRAM_POOL_ALLOC_ORDER 12 + +int phytium_memory_pool_alloc(struct phytium_display_private *priv, void **pvaddr, + phys_addr_t *phys_addr, uint64_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(priv->memory_pool, size); + if (!vaddr) + return -ENOMEM; + + *phys_addr = gen_pool_virt_to_phys(priv->memory_pool, vaddr); + + *pvaddr = (void *)vaddr; + return 0; +} + +void phytium_memory_pool_free(struct phytium_display_private *priv, void *vaddr, uint64_t size) +{ + gen_pool_free(priv->memory_pool, (unsigned long)vaddr, size); +} + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->memory_pool = gen_pool_create(VRAM_POOL_ALLOC_ORDER, -1); + if (priv->memory_pool == NULL) { + DRM_ERROR("fail to create memory pool\n"); + ret = -1; + goto failed_create_pool; + } + + ret = gen_pool_add_virt(priv->memory_pool, (unsigned long)priv->pool_virt_addr, + priv->pool_phys_addr, priv->pool_size, -1); + if (ret) { + DRM_ERROR("fail to add vram pool\n"); + ret = -1; + goto failed_add_pool_virt; + } + + return 0; + +failed_add_pool_virt: + gen_pool_destroy(priv->memory_pool); + +failed_create_pool: + return ret; +} + +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv) +{ + gen_pool_destroy(priv->memory_pool); +} + +struct sg_table * +phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct sg_table *sgt; + struct drm_device *dev = obj->dev; + int ret; + struct page *page = NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } + + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT)) { + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg\n"); + goto sgt_free; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + sg_set_page(sgt->sgl, page, PAGE_ALIGN(phytium_gem_obj->size), 0); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, phytium_gem_obj->size, + DMA_ATTR_WRITE_COMBINE); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + goto sgt_free; + } + } + + return sgt; +sgt_free: + kfree(sgt); + return ERR_PTR(ret); +} + +struct drm_gem_object * +phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct scatterlist *s; + dma_addr_t expected; + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + expected = sg_dma_address(sgt->sgl); + for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) { + DRM_ERROR("sg_table is not contiguous"); + ret = -EINVAL; + goto failed_check_continue; + } + expected = sg_dma_address(s) + sg_dma_len(s); + } + + 
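/* every entry was verified contiguous above, so the first DMA address covers the whole buffer */
+	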
phytium_gem_obj->iova = sg_dma_address(sgt->sgl);
+	phytium_gem_obj->sgt = sgt;
+
+	return &phytium_gem_obj->base;
+failed_check_continue:
+	drm_gem_object_release(&phytium_gem_obj->base);
+failed_object_init:
+	kfree(phytium_gem_obj);
+failed_malloc:
+	return ERR_PTR(ret);
+}
+
+int phytium_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+	struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj);
+
+	iosys_map_set_vaddr(map, phytium_obj->vaddr);
+
+	return 0;
+}
+
+void phytium_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+}
+
+static void phytium_dma_callback(void *callback_param)
+{
+	struct completion *comp = callback_param;
+
+	complete(comp);
+}
+
+int phytium_dma_transfer(struct drm_device *drm_dev, int dev_to_mem, void *addr,
+			 dma_addr_t iova, uint64_t size)
+{
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	struct dma_chan *dma_chan = priv->dma_chan;
+	struct sg_table st;
+	struct scatterlist *sgl;
+	int ret = 0, timeout;
+	uint32_t nents, i;
+	struct dma_slave_config cfg = {0};
+	struct dma_async_tx_descriptor *desc;
+	struct completion comp;
+	enum dma_data_direction dir;
+	size_t min = 0;
+
+	nents = DIV_ROUND_UP(size, PAGE_SIZE);
+	ret = sg_alloc_table(&st, nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to allocate sg_table\n");
+		ret = -ENOMEM;
+		goto failed_sg_alloc_table;
+	}
+
+	for_each_sg(st.sgl, sgl, st.nents, i) {
+		min = min_t(size_t, size, PAGE_SIZE - offset_in_page(addr));
+		sg_set_page(sgl, vmalloc_to_page(addr), min, offset_in_page(addr));
+		addr += min;
+		size -= min;
+	}
+
+	memset(&cfg, 0, sizeof(cfg));
+	if (dev_to_mem) {
+		cfg.direction = DMA_DEV_TO_MEM;
+		cfg.src_addr = iova;
+		cfg.dst_addr = 0;
+		dir = DMA_FROM_DEVICE;
+	} else {
+		cfg.direction = DMA_MEM_TO_DEV;
+		cfg.src_addr = 0;
+		cfg.dst_addr = iova;
+		dir = DMA_TO_DEVICE;
+	}
+
+	dmaengine_slave_config(dma_chan, &cfg);
+
+	nents = dma_map_sg(dma_chan->device->dev, st.sgl, st.nents, dir);
+	if (!nents) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to dma_map_sg for dmaengine\n");
+		ret = -EINVAL;
+		goto failed_dma_map_sg;
+	}
+	st.nents = nents;
+	dma_sync_sg_for_device(dma_chan->device->dev, st.sgl, st.nents, dir);
+
+	sgl = st.sgl;
+	desc = dmaengine_prep_slave_sg(dma_chan,
+				       st.sgl,
+				       st.nents,
+				       cfg.direction,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to dmaengine_prep_slave_sg\n");
+		ret = -EINVAL;
+		goto failed_prep_slave_sg;
+	}
+	init_completion(&comp);
+	desc->callback = phytium_dma_callback;
+	desc->callback_param = &comp;
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(dma_chan);
+
+	timeout = wait_for_completion_timeout(&comp, 2 * HZ);
+	if (timeout == 0) {
+		DRM_DEV_ERROR(drm_dev->dev, "wait for dma callback timeout\n");
+		ret = -EIO;
+	}
+	dma_sync_sg_for_cpu(dma_chan->device->dev, st.sgl, st.nents, dir);
+
+failed_prep_slave_sg:
+	dma_unmap_sg(dma_chan->device->dev, st.sgl, st.nents, dir);
+failed_dma_map_sg:
+	sg_free_table(&st);
+failed_sg_alloc_table:
+	return ret;
+}
+
+int phytium_gem_suspend(struct drm_device *drm_dev)
+{
+	struct phytium_display_private *priv = drm_dev->dev_private;
+	struct phytium_gem_object *phytium_gem_obj = NULL;
+	int ret = 0;
+
+	list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) {
+		if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM)
+			continue;
+
+		phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size);
+		if (!phytium_gem_obj->vaddr_save)
+			goto malloc_failed;
+
+		if (priv->dma_inited)
+			ret = 
phytium_dma_transfer(drm_dev, 1, phytium_gem_obj->vaddr_save, + phytium_gem_obj->iova, phytium_gem_obj->size); + + if ((!priv->dma_inited) || ret) + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, + phytium_gem_obj->size); + } + + return 0; +malloc_failed: + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + if (phytium_gem_obj->vaddr_save) { + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } + } + return -ENOMEM; +} + +void phytium_gem_resume(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } +} + +void phytium_gem_free_object(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct drm_device *dev = obj->dev; + struct phytium_display_private *priv = dev->dev_private; + uint64_t size = phytium_gem_obj->size; + + DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + if (phytium_gem_obj->vaddr) { + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + dma_unmap_page(dev->dev, phytium_gem_obj->iova, size, DMA_TO_DEVICE); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + dma_free_attrs(dev->dev, size, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, 0); + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] -= size; + } + list_del(&phytium_gem_obj->list); + } else if (obj->import_attach) + drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); + drm_gem_object_release(obj); + kfree(phytium_gem_obj); +} + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + unsigned long pfn = PHYS_PFN(phytium_gem_obj->phys_addr); + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
+ */ + vm_flags_clear(vma, VM_PFNMAP); + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); + } + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap(filp, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(vma->vm_private_data, vma); +} + +static const struct vm_operations_struct phytium_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs phytium_drm_gem_object_funcs = { + .free = phytium_gem_free_object, + .get_sg_table = phytium_gem_prime_get_sg_table, + .vmap = phytium_gem_prime_vmap, + .vunmap = phytium_gem_prime_vunmap, + .vm_ops = &phytium_vm_ops, +}; + +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct phytium_display_private *priv = dev->dev_private; + struct page *page = NULL; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate vram buffer with size %lx\n", size); + goto failed_dma_alloc; + } + phytium_gem_obj->iova = phytium_gem_obj->phys_addr; + phytium_gem_obj->memory_type = MEMORY_TYPE_VRAM; + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate carveout memory with size %lx\n", size); + goto failed_dma_alloc; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + phytium_gem_obj->iova = dma_map_page(dev->dev, page, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, phytium_gem_obj->iova)) { + DRM_ERROR("fail to dma map carveout memory with size %lx\n", size); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_UNIFIED) { + phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); + if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate unified buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = 
MEMORY_TYPE_SYSTEM_UNIFIED; + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] += size; + } else { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + + phytium_gem_obj->base.funcs = &phytium_drm_gem_object_funcs; + + phytium_gem_obj->size = size; + list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); + DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + return phytium_gem_obj; + +failed_dma_alloc: + drm_gem_object_put(&phytium_gem_obj->base); + + return ERR_PTR(ret); +failed_object_init: + kfree(phytium_gem_obj); +error: + return ERR_PTR(ret); +} + +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int size = 0; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); + args->size = args->pitch * args->height; + size = PAGE_ALIGN(args->size); + phytium_gem_obj = phytium_gem_create_object(dev, size); + if (IS_ERR(phytium_gem_obj)) + return PTR_ERR(phytium_gem_obj); + ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); + if (ret) { + DRM_ERROR("failed to drm_gem_handle_create\n"); + goto failed_gem_handle; + } + drm_gem_object_put(&phytium_gem_obj->base); + + return 0; +failed_gem_handle: + phytium_gem_free_object(&phytium_gem_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h new file mode 100644 index 000000000000..17c438e6e63c --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */
+
+#ifndef __PHYTIUM_GEM_H__
+#define __PHYTIUM_GEM_H__
+
+#include
+
+struct phytium_gem_object {
+	struct drm_gem_object base;
+	phys_addr_t phys_addr;
+	dma_addr_t iova;
+	void *vaddr;
+	unsigned long size;
+	struct sg_table *sgt;
+	char memory_type;
+	char reserve[3];
+	struct list_head list;
+	void *vaddr_save;
+};
+
+#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base)
+
+int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv);
+void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv);
+int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma);
+int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+void phytium_gem_free_object(struct drm_gem_object *obj);
+struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev,
+		struct dma_buf_attachment *attach, struct sg_table *sgt);
+int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle);
+struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size);
+int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int phytium_gem_suspend(struct drm_device *drm_dev);
+void phytium_gem_resume(struct drm_device *drm_dev);
+#endif /* __PHYTIUM_GEM_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c
new file mode 100644
index 000000000000..1cd266e868b3
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_panel.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_dp.h"
+#include "phytium_panel.h"
+
+static int
+phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level)
+{
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+	unsigned char vals[2] = { 0x0 };
+
+	vals[0] = level;
+	if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
+		vals[0] = (level & 0xFF00) >> 8;
+		vals[1] = (level & 0xFF);
+	}
+
+	if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+			      vals, sizeof(vals)) < 0) {
+		DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel)
+{
+	unsigned char read_val[2] = { 0x0 };
+	unsigned int level = 0;	/* wide enough for a two-byte brightness value */
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+
+	if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+			     read_val, sizeof(read_val)) < 0) {
+		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+			      DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+		return 0;
+	}
+
+	level = read_val[0];
+	if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+		level = (read_val[0] << 8 | read_val[1]);
+
+	return level;
+}
+
+static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable)
+{
+	u8 reg_val = 0;
+	struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel);
+
+	if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
+		return;
+
+	if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+			      &reg_val) < 0) {
+		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+			      DP_EDP_DISPLAY_CONTROL_REGISTER);
+		return;
+	}
+
+	if (enable)
+		reg_val |= DP_EDP_BACKLIGHT_ENABLE;
+	else
+		reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
+
+	if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+			       reg_val) != 1) {
+		DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+			      enable ? 
"enable" : "disable"); + } +} + +static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) +{ + unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_readb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(panel, true); + phytium_dp_aux_set_backlight(panel, panel->level); +} + +static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) +{ + set_aux_backlight_enable(panel, false); +} + +static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + phytium_dp->panel.max = 0xFFFF; + else + phytium_dp->panel.max = 0xFF; + + phytium_dp->panel.min = 0; + phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); + phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); +} + +static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); +} + +static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); +} + +static int +phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) +{ + int ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); + + return ret; +} + +static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) +{ + uint32_t ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); + + return ret; +} + +static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); + phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); +} + +static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); +} + +static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) +{ + struct drm_device *dev = panel->dev; + struct phytium_display_private *priv = dev->dev_private; + + panel->max = priv->info.backlight_max; + panel->min = 0; + panel->level = phytium_dp_hw_get_backlight(panel); +} + +void 
phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp)
+{
+	if ((phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP) &&
+	    (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
+	    !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
+		DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+		phytium_dp->panel.setup_backlight = phytium_dp_aux_setup_backlight;
+		phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight;
+		phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight;
+		phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight;
+		phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight;
+	} else {
+		DRM_DEBUG_KMS("SE Backlight Control Supported!\n");
+		phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight;
+		phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight;
+		phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight;
+		phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight;
+		phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight;
+	}
+	phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel;
+	phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel;
+	mutex_init(&phytium_dp->panel.panel_lock);
+	phytium_dp->panel.dev = phytium_dp->dev;
+
+	/* Upper limits from eDP 1.3 spec */
+	phytium_dp->panel.panel_power_up_delay = 210;	/* t1_t3 */
+	phytium_dp->panel.backlight_on_delay = 50;	/* t7 */
+	phytium_dp->panel.backlight_off_delay = 50;
+	phytium_dp->panel.panel_power_down_delay = 0;	/* t10 */
+	phytium_dp->panel.panel_power_cycle_delay = 510;	/* t11 + t12 */
+}
+
+void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp)
+{
+	phytium_dp->panel.setup_backlight = NULL;
+	phytium_dp->panel.enable_backlight = NULL;
+	phytium_dp->panel.disable_backlight = NULL;
+	phytium_dp->panel.set_backlight = NULL;
+	phytium_dp->panel.get_backlight = NULL;
+	phytium_dp->panel.poweron = NULL;
+	phytium_dp->panel.poweroff = NULL;
+}
+
+void phytium_panel_enable_backlight(struct phytium_panel *panel)
+{
+	if (panel->enable_backlight) {
+		mutex_lock(&panel->panel_lock);
+		msleep(panel->backlight_on_delay);
+		panel->enable_backlight(panel);
+		panel->backlight_enabled = true;
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
+void phytium_panel_disable_backlight(struct phytium_panel *panel)
+{
+	if (panel->disable_backlight) {
+		mutex_lock(&panel->panel_lock);
+		panel->disable_backlight(panel);
+		panel->backlight_enabled = false;
+		msleep(panel->backlight_off_delay);
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
+void phytium_panel_poweron(struct phytium_panel *panel)
+{
+	if (panel->poweron) {
+		mutex_lock(&panel->panel_lock);
+		panel->poweron(panel);
+		panel->power_enabled = true;
+		msleep(panel->panel_power_up_delay);
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
+void phytium_panel_poweroff(struct phytium_panel *panel)
+{
+	if (panel->poweroff) {
+		mutex_lock(&panel->panel_lock);
+		msleep(panel->panel_power_down_delay);
+		panel->poweroff(panel);
+		panel->power_enabled = false;
+		mutex_unlock(&panel->panel_lock);
+	}
+}
+
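+/*
+ * Linearly remaps source_val from [source_min, source_max] into
+ * [target_min, target_max]; used below to translate between hardware
+ * and sysfs backlight levels.  For example, a hardware level of 128 in
+ * [0, 255] maps to DIV_ROUND_CLOSEST(128 * 100, 255) = 50 in a user
+ * range of [0, 100].
+ */
+static uint32_t phytium_scale(uint32_t source_val,
+			      uint32_t source_min, uint32_t source_max,
+			      uint32_t target_min, uint32_t target_max)
+{
+	uint64_t target_val;
+
+	WARN_ON(source_min > source_max);
+	WARN_ON(target_min > target_max);
+
+	/* defensive */
+	source_val = clamp(source_val, source_min, source_max);
+
+	/* avoid overflows */
+	target_val = mul_u32_u32(source_val - source_min, target_max - 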
target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +static inline uint32_t +phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) +{ + return phytium_scale(hw_level, panel->min, panel->max, + 0, user_max); +} + +static inline uint32_t +phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) +{ + return phytium_scale(user_level, 0, user_max, + panel->min, panel->max); +} + +static int phytium_backlight_device_update_status(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret = 0; + + DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", + bd->props.brightness, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); + + if ((panel->set_backlight) && (panel->backlight_enabled)) { + mutex_lock(&panel->panel_lock); + ret = panel->set_backlight(panel, hw_level); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return ret; +} + +static int phytium_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); + DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", + ret, bd->props.max_brightness); + + return ret; +} + +static const struct backlight_ops phytium_backlight_device_ops = { + .update_status = phytium_backlight_device_update_status, + .get_brightness = phytium_backlight_device_get_brightness, +}; + +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) +{ + struct backlight_properties props; + char bl_name[16]; + + if (phytium_dp->panel.setup_backlight) { + mutex_lock(&phytium_dp->panel.panel_lock); + phytium_dp->panel.setup_backlight(&phytium_dp->panel); + mutex_unlock(&phytium_dp->panel.panel_lock); + } else { + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, + props.max_brightness); + snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = + backlight_device_register(bl_name, + phytium_dp->connector.kdev, + &phytium_dp->panel, + &phytium_backlight_device_ops, + &props); + + if (IS_ERR(phytium_dp->panel.bl_device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(phytium_dp->panel.bl_device)); + phytium_dp->panel.bl_device = NULL; + return -ENODEV; + } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + + return 0; +} + +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->panel.bl_device) { + 
backlight_device_unregister(phytium_dp->panel.bl_device);
+		phytium_dp->panel.bl_device = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h
new file mode 100644
index 000000000000..f9e2c7e65896
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_panel.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_PANEL_H__
+#define __PHYTIUM_PANEL_H__
+#include "phytium_dp.h"
+
+#define PHYTIUM_MAX_BL_LEVEL 0xFF
+
+struct phytium_panel {
+	struct drm_device *dev;
+	bool backlight_enabled;
+	bool power_enabled;
+	bool reserve1[2];
+	unsigned int min;
+	unsigned int level;
+	unsigned int max;
+	struct backlight_device *bl_device;
+	void (*setup_backlight)(struct phytium_panel *panel);
+	uint32_t (*get_backlight)(struct phytium_panel *panel);
+	int (*set_backlight)(struct phytium_panel *panel, uint32_t level);
+	void (*disable_backlight)(struct phytium_panel *panel);
+	void (*enable_backlight)(struct phytium_panel *panel);
+	void (*poweron)(struct phytium_panel *panel);
+	void (*poweroff)(struct phytium_panel *panel);
+	struct mutex panel_lock;
+	uint32_t panel_power_up_delay;
+	uint32_t backlight_on_delay;
+	uint32_t backlight_off_delay;
+	uint32_t panel_power_down_delay;
+	uint32_t panel_power_cycle_delay;
+};
+
+void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp);
+void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp);
+int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp);
+void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp);
+void phytium_panel_enable_backlight(struct phytium_panel *panel);
+void phytium_panel_disable_backlight(struct phytium_panel *panel);
+void phytium_panel_poweron(struct phytium_panel *panel);
+void phytium_panel_poweroff(struct phytium_panel *panel);
+
+#endif /* __PHYTIUM_PANEL_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c
new file mode 100644
index 000000000000..f93ab85395c5
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_pci.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+/*
+ * NOTE: the bracketed include targets were lost when this patch was
+ * extracted; the headers below are inferred from the APIs used in this
+ * file.
+ */
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include "phytium_display_drv.h"
+#include "phytium_pci.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "px210_dc.h"
+#include "px210_dp.h"
+#include "pe220x_dc.h"
+#include "pe220x_dp.h"
+
+int dc_msi_enable;
+module_param(dc_msi_enable, int, 0644);
+MODULE_PARM_DESC(dc_msi_enable, "Enable DC MSI interrupt (0-disabled; 1-enabled; default-0)");
+
+void phytium_pci_vram_hw_init(struct phytium_display_private *priv)
+{
+	struct phytium_pci_private *pci_priv = to_pci_priv(priv);
+
+	pci_priv->dc_hw_vram_init(priv, priv->pool_phys_addr, priv->pool_size);
+}
+
+int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv)
+{
+	int ret = 0;
+
+	priv->pool_phys_addr = pci_resource_start(pdev, 2);
+	priv->pool_size = pci_resource_len(pdev, 2);
+	if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) {
+		priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr,
+						       priv->pool_size);
+		if (priv->pool_virt_addr == NULL) {
+			DRM_ERROR("pci vram ioremap fail, addr:0x%llx, size:0x%llx\n",
+				  priv->pool_phys_addr, priv->pool_size);
+			ret = -EINVAL;
+			goto failed_ioremap;
+		}
+		ret = phytium_memory_pool_init(&pdev->dev, priv);
+		if (ret)
+			goto failed_init_memory_pool;
+
+		priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = priv->pool_size;
+		priv->support_memory_type = MEMORY_TYPE_VRAM;
+		priv->vram_hw_init = phytium_pci_vram_hw_init;
+	} else {
+		DRM_DEBUG_KMS("vram not supported\n");
+		priv->pool_virt_addr = NULL;
+		priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = 0;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED;
+		priv->vram_hw_init = NULL;
+	}
+
+	return 0;
+
+failed_init_memory_pool:
+	devm_iounmap(&pdev->dev, priv->pool_virt_addr);
+failed_ioremap:
+	return ret;
+}
+
+void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv)
+{
+	if (priv->support_memory_type == MEMORY_TYPE_VRAM) {
+		phytium_memory_pool_fini(&pdev->dev, priv);
+		devm_iounmap(&pdev->dev, priv->pool_virt_addr);
+	}
+}
+
+static bool phytium_pci_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+	struct phytium_dma_slave *s = param;
+
+	if (s->dma_dev != chan->device->dev)
+		return false;
+
+	return s->chan_id == chan->chan_id;
+}
+
+int phytium_pci_dma_init(struct phytium_display_private *priv)
+{
+	struct pci_dev *dma_dev, *gpu_dev;
+	struct drm_device *drm_dev = priv->dev;
+	dma_cap_mask_t mask;
+	struct phytium_dma_slave s;
+	int ret = 0;
+	u16 cmd;
+
+	/* check px210 gpu enable */
+	gpu_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc20, NULL);
+	if (!gpu_dev) {
+		DRM_INFO("failed to get gpu_dev\n");
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	pci_read_config_word(gpu_dev, PCI_COMMAND, &cmd);
+	/* drop the reference taken by pci_get_device() */
+	pci_dev_put(gpu_dev);
+	if (!(cmd & PCI_COMMAND_MASTER)) {
+		DRM_INFO("gpu_dev master is disabled\n");
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	dma_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc3c, NULL);
+	if (!dma_dev) {
+		DRM_INFO("failed to get dma_dev\n");
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	s.dma_dev = &dma_dev->dev;
+	s.chan_id = 2;
+	priv->dma_chan = dma_request_channel(mask, phytium_pci_dma_chan_filter, &s);
+	/* the filter only needs dma_dev during the request; drop it now */
+	pci_dev_put(dma_dev);
+	if (!priv->dma_chan) {
+		DRM_DEV_ERROR(drm_dev->dev, "failed to request dma chan\n");
+		ret = -EBUSY;
+		goto failed;
+	}
+	priv->dma_inited = 1;
+
+failed:
+	return ret;
+}
+
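+/*
+ * On PX210 the channel is requested in phytium_pci_pm_suspend() and
+ * released here from phytium_pci_pm_resume(); dma_inited gates the
+ * release so a failed request is handled safely.
+ */
+void phytium_pci_dma_fini(struct phytium_display_private *priv)
+{
+	if (priv->dma_inited)
+		dma_release_channel(priv->dma_chan);
+	priv->dma_inited = 0;
+	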
priv->dma_chan = NULL; +} + +static struct phytium_display_private* +phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = NULL; + struct phytium_pci_private *pci_priv = NULL; + struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; + int i = 0; + resource_size_t io_addr, io_size; + + pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL); + if (!pci_priv) { + DRM_ERROR("no memory to allocate for drm_display_private\n"); + goto failed_malloc_priv; + } + + memset(pci_priv, 0, sizeof(*pci_priv)); + priv = &pci_priv->base; + phytium_display_private_init(priv, dev); + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); + priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); + priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed_init_numpipe; + } + + io_addr = pci_resource_start(pdev, 0); + io_size = pci_resource_len(pdev, 0); + priv->regs = ioremap(io_addr, io_size); + if (priv->regs == NULL) { + DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); + goto failed_ioremap; + } + + priv->irq = pdev->irq; + if (IS_PX210(priv)) { + pci_priv->dc_hw_vram_init = px210_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = px210_dc_hw_clear_msi_irq; + priv->dc_hw_fb_format_check = px210_dc_hw_fb_format_check; + } else if (IS_PE220X(priv)) { + pci_priv->dc_hw_vram_init = pe220x_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = NULL; + priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check; + } + + return priv; + +failed_ioremap: +failed_init_numpipe: + devm_kfree(&pdev->dev, pci_priv); +failed_malloc_priv: + return NULL; +} + +static void +phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + if (priv->regs) + iounmap(priv->regs); + + devm_kfree(&pdev->dev, pci_priv); +} + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct phytium_display_private *priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + pci_set_drvdata(pdev, dev); + pci_set_master(pdev); + ret = pci_enable_device(pdev); + if (ret) { + DRM_ERROR("pci enable device fail\n"); + goto failed_enable_device; + } + + if (dc_msi_enable) { + ret = pci_enable_msi(pdev); + if (ret) + DRM_ERROR("pci enable msi fail\n"); + } + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + + priv = phytium_pci_private_init(pdev, ent); + if (priv) + dev->dev_private = priv; + else + goto failed_pci_private_init; + + ret = phytium_pci_vram_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init pci vram\n"); + goto failed_pci_vram_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_pci_vram_fini(pdev, priv); +failed_pci_vram_init: + phytium_pci_private_fini(pdev, 
priv); +failed_pci_private_init: + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); +failed_enable_device: + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); + + return -1; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_pci_vram_fini(pdev, priv); + phytium_pci_private_fini(pdev, priv); + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); +} + +static void phytium_pci_shutdown(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + priv->display_shutdown(dev); +} + +static int phytium_pci_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + if (IS_PX210(priv)) + phytium_pci_dma_init(priv); + + ret = priv->display_pm_suspend(drm_dev); + if (ret < 0) + goto out; + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + udelay(200); + +out: + return ret; +} + +static int phytium_pci_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = priv->display_pm_resume(drm_dev); + if (IS_PX210(priv)) + phytium_pci_dma_fini(priv); + + return ret; +} + +static const struct dev_pm_ops phytium_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume) +}; + +static const struct phytium_device_info px210_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PX210), + .total_pipes = 3, + .crtc_clock_max = PX210_DC_PIX_CLOCK_MAX, + .hdisplay_max = PX210_DC_HDISPLAY_MAX, + .vdisplay_max = PX210_DC_VDISPLAY_MAX, + .address_mask = PX210_DC_ADDRESS_MASK, + .backlight_max = PX210_DP_BACKLIGHT_MAX, +}; + +static const struct phytium_device_info pe220x_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PE220X), + .total_pipes = 2, + .crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX, + .hdisplay_max = PE220X_DC_HDISPLAY_MAX, + .vdisplay_max = PE220X_DC_VDISPLAY_MAX, + .address_mask = PE220X_DC_ADDRESS_MASK, + .backlight_max = PE220X_DP_BACKLIGHT_MAX, +}; + +static const struct pci_device_id phytium_display_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&px210_info }, + { PCI_VDEVICE(PHYTIUM, 0xdc3e), (kernel_ulong_t)&pe220x_info }, + { /* End: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); + +struct pci_driver phytium_pci_driver = { + .name = "phytium_display_pci", + .id_table = phytium_display_pci_ids, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .shutdown = phytium_pci_shutdown, + .driver.pm = &phytium_pci_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h new file mode 100644 index 000000000000..92b08fcb0452 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright 
(C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PCI_H__ +#define __PHYTIUM_PCI_H__ + +#include "phytium_display_drv.h" + +struct phytium_pci_private { + struct phytium_display_private base; + void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size); +}; + +struct phytium_dma_slave { + struct device *dma_dev; + u32 chan_id; +}; + +#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base) + +extern struct pci_driver phytium_pci_driver; +#endif /* __PHYTIUM_PCI_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c new file mode 100644 index 000000000000..9f35d57cd726 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" +#include "phytium_crtc.h" +#include "px210_dc.h" +#include "pe220x_dc.h" +#include "phytium_reg.h" + +#define PHYTIUM_CURS_W_SIZE 32 +#define PHYTIUM_CURS_H_SIZE 32 + +void phytium_plane_destroy(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + + drm_plane_cleanup(plane); + kfree(phytium_plane); +} + +/** + * phytium_plane_atomic_get_property - fetch plane property value + * @plane: plane to fetch property for + * @state: state containing the property value + * @property: property to look up + * @val: pointer to write property value into + * + * The DRM core does not store shadow copies of properties for + * atomic-capable drivers. This entrypoint is used to fetch + * the current value of a driver-specific plane property. + */ +static int +phytium_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +/** + * phytium_plane_atomic_set_property - set plane property value + * @plane: plane to set property for + * @state: state to update property value in + * @property: property to set + * @val: value to set property to + * + * Writes the specified property value for a plane into the provided atomic + * state object. 
+ *
+ * Returns 0 on success, -EINVAL on unrecognized properties
+ */
+int
+phytium_plane_atomic_set_property(struct drm_plane *plane,
+				  struct drm_plane_state *state,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name);
+	return -EINVAL;
+}
+
+struct drm_plane_state *
+phytium_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+	struct drm_plane_state *state = NULL;
+	struct phytium_plane_state *phytium_state = NULL;
+
+	phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL);
+
+	if (!phytium_state)
+		return NULL;
+
+	state = &phytium_state->base;
+	if (state->fb)
+		drm_framebuffer_get(state->fb);
+
+	state->fence = NULL;
+	state->commit = NULL;
+
+	return state;
+}
+
+void
+phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state)
+{
+	struct phytium_plane_state *phytium_state = to_phytium_plane_state(state);
+
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(phytium_state);
+}
+
+const struct drm_plane_funcs phytium_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = phytium_plane_destroy,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_get_property = phytium_plane_atomic_get_property,
+	.atomic_set_property = phytium_plane_atomic_set_property,
+	.atomic_duplicate_state = phytium_plane_atomic_duplicate_state,
+	.atomic_destroy_state = phytium_plane_atomic_destroy_state,
+};
+
+static int
+phytium_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *atomic_state)
+{
+	struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state,
+								       plane);
+	struct drm_device *dev = plane->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc_state *crtc_state;
+	int src_x, src_y, src_w, src_h;
+	unsigned long base_offset;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+
+	if ((!fb) || (!crtc))
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+		src_w = state->src_w >> 16;
+		src_h = state->src_h >> 16;
+		if (phytium_crtc->scale_enable)
+			return -EINVAL;
+		if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_H_SIZE)) {
+			DRM_INFO("invalid cursor size (%d, %d)\n", src_w, src_h);
+			return -EINVAL;
+		}
+	} else if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+		src_x = state->src_x >> 16;
+		src_y = state->src_y >> 16;
+		src_w = state->src_w >> 16;
+		src_h = state->src_h >> 16;
+
+		base_offset = src_x * fb->format->cpp[0] + src_y * fb->pitches[0];
+		if (base_offset & (priv->info.address_mask)) {
+			DRM_ERROR("fb base address is not aligned to 0x%lx bytes\n",
+				  priv->info.address_mask);
+			return -EINVAL;
+		}
+
+		if (src_w != state->crtc_w || src_h != state->crtc_h) {
+			DRM_ERROR("scaling not supported: crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n",
+				  state->crtc_w, state->crtc_h, src_w, src_h);
+			return -EINVAL;
+		}
+
+		if ((state->crtc_x < 0) || (state->crtc_y < 0)) {
+			DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n",
+				  state->crtc_x, state->crtc_y);
+			return -EINVAL;
+		}
+
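+		/*
+		 * Reject placements that extend past the active display
+		 * area: the plane must lie entirely within the adjusted
+		 * mode.
+		 */
+		if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+			|| (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) {
+			DRM_ERROR("plane out of crtc region\n");
+			return -EINVAL;
+	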
} + } + + return 0; +} + +static void phytium_dc_get_plane_parameter(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + struct phytium_gem_object *phytium_gem_obj = NULL; + int i, num_planes = 0; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; + + if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; + else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; + else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + else + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; + break; + + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; + break; + + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; + break; + + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; + break; + + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; + break; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; + break; + + case DRM_FORMAT_YUYV: + phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; + break; + + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; + case DRM_FORMAT_NV16: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; + break; + case DRM_FORMAT_NV12: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + case DRM_FORMAT_NV21: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case 
DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGR565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV12: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + } + } +} + +static void phytium_dc_primary_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; + unsigned long base_offset; + int config; + + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + crtc_w = plane->state->crtc_w; + crtc_h = plane->state->crtc_h; + + if (phytium_plane->dc_hw_update_dcreq) + phytium_plane->dc_hw_update_dcreq(plane); + phytium_plane->dc_hw_update_primary_hi_addr(plane); + + /* config dc */ + /* Y */ + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); + + /* U */ + phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); + + /* V */ + phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); + + /* size */ + phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); + /* config */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << 
FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); +} + +static void phytium_dc_cursor_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; + unsigned long iova; + + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + + config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_LOCATION); + iova = phytium_plane->iova[0]; + phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_ADDRESS); + if (phytium_plane->dc_hw_update_cursor_hi_addr) + phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); +} + +static void phytium_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) + drm_framebuffer_get(fb); + if (old_fb) + drm_framebuffer_put(old_fb); + + phytium_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + phytium_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + phytium_dc_cursor_plane_update(plane); +} + +static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_put(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } +} + +const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + 
.prepare_fb = drm_gem_plane_helper_prepare_fb, + .atomic_check = phytium_plane_atomic_check, + .atomic_update = phytium_plane_atomic_update, + .atomic_disable = phytium_plane_atomic_disable, +}; + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = px210_dc_hw_update_dcreq; + phytium_plane->dc_hw_update_primary_hi_addr = px210_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = pe220x_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, formats, + format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} + +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; 
+ } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = pe220x_dc_hw_update_cursor_hi_addr; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h new file mode 100644 index 000000000000..5527579b0348 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLANE_H__ +#define __PHYTIUM_PLANE_H__ + +struct phytium_plane { + struct drm_plane base; + int phys_pipe; + unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned long size[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int format; + unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int swizzle; + unsigned int uv_swizzle; + unsigned int rot_angle; + + /* only for cursor */ + bool enable; + bool reserve[3]; + unsigned int cursor_x; + unsigned int cursor_y; + unsigned int cursor_hot_x; + unsigned int cursor_hot_y; + + void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); + void (*dc_hw_update_dcreq)(struct drm_plane *plane); + void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); + void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); +}; + +struct phytium_plane_state { + struct drm_plane_state base; +}; + +#define to_phytium_plane(x) container_of(x, struct phytium_plane, base) +#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base) + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe); +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_PLANE_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c new file mode 100644 index 000000000000..d28aadba7c30 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display engine DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_platform.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "pe220x_dc.h" +#include "pe220x_dp.h" + +int phytium_platform_carveout_mem_init(struct platform_device *pdev, + struct phytium_display_private *priv) +{ + struct resource *res; + int ret = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + priv->pool_size = resource_size(res); + priv->pool_phys_addr = res->start; + } + + if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { + priv->pool_virt_addr = ioremap_cache(priv->pool_phys_addr, priv->pool_size); + if (priv->pool_virt_addr == NULL) { + DRM_ERROR("failed to remap carveout mem(0x%llx)\n", priv->pool_phys_addr); + ret = -EINVAL; + goto failed_ioremap; + } + ret = phytium_memory_pool_init(&pdev->dev, priv); + if (ret) + goto failed_init_memory_pool; + + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = priv->pool_size; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->vram_hw_init = NULL; + } else { + DRM_DEBUG_KMS("not support carveout memory\n"); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = 0; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->vram_hw_init = NULL; + } + + return 0; + +failed_init_memory_pool: + iounmap(priv->pool_virt_addr); +failed_ioremap: + return ret; +} + +void phytium_platform_carveout_mem_fini(struct platform_device *pdev, + struct phytium_display_private *priv) +{ + if (priv->support_memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + phytium_memory_pool_fini(&pdev->dev, priv); + iounmap(priv->pool_virt_addr); + } +} + +static struct phytium_display_private * +phytium_platform_private_init(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct device_node *node; + struct fwnode_handle *np; + struct phytium_display_private *priv = NULL; + struct phytium_platform_private *platform_priv = NULL; + struct phytium_device_info *phytium_info = NULL; + int i = 0, ret = 0; + struct resource *res; + + platform_priv = devm_kzalloc(&pdev->dev, sizeof(*platform_priv), GFP_KERNEL); + if (!platform_priv) { + DRM_ERROR("no memory to allocate for phytium_platform_private\n"); + goto exit; + } + + memset(platform_priv, 0, sizeof(*platform_priv)); + priv = &platform_priv->base; + phytium_display_private_init(priv, dev); + + if (pdev->dev.of_node) { + phytium_info = (struct phytium_device_info *)of_device_get_match_data(&pdev->dev); + if (!phytium_info) { + DRM_ERROR("failed to get dts id data(phytium_info)\n"); + goto failed; + } + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + node = pdev->dev.of_node; + ret = of_property_read_u8(node, "pipe_mask", &priv->info.pipe_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing pipe_mask property from dts\n"); + goto failed; + } + + ret = of_property_read_u8(node, "edp_mask", &priv->info.edp_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing edp_mask property from dts\n"); + goto failed; + } + } else if (has_acpi_companion(&pdev->dev)) { + phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev); + if (!phytium_info) { + DRM_ERROR("failed to get acpi id data(phytium_info)\n"); + goto failed; + } + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + np = dev_fwnode(&(pdev->dev)); + ret = fwnode_property_read_u8(np, "pipe_mask", &priv->info.pipe_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing pipe_mask property from 
acpi\n");
+			goto failed;
+		}
+		ret = fwnode_property_read_u8(np, "edp_mask", &priv->info.edp_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing edp_mask property from acpi\n");
+			goto failed;
+		}
+	}
+
+	priv->info.num_pipes = 0;
+	for_each_pipe_masked(priv, i)
+		priv->info.num_pipes++;
+	if (priv->info.num_pipes == 0) {
+		DRM_ERROR("num_pipes is zero, so exit init\n");
+		goto failed;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->regs)) {
+		/* devm_ioremap_resource() returns ERR_PTR(), never NULL */
+		DRM_ERROR("failed to map display registers: %ld\n", PTR_ERR(priv->regs));
+		goto failed;
+	}
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		goto failed;
+	}
+
+	if (IS_PE220X(priv)) {
+		priv->dc_hw_clear_msi_irq = NULL;
+		priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check;
+	}
+
+	return priv;
+
+failed:
+	devm_kfree(&pdev->dev, platform_priv);
+exit:
+	return NULL;
+}
+
+static void phytium_platform_private_fini(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_platform_private *platform_priv = to_platform_priv(priv);
+
+	devm_kfree(&pdev->dev, platform_priv);
+}
+
+static int phytium_platform_probe(struct platform_device *pdev)
+{
+	struct phytium_display_private *priv = NULL;
+	struct drm_device *dev = NULL;
+	int ret = 0;
+
+	dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		DRM_ERROR("failed to allocate drm_device\n");
+		return PTR_ERR(dev);
+	}
+
+	dev_set_drvdata(&pdev->dev, dev);
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+	priv = phytium_platform_private_init(pdev);
+	if (priv)
+		dev->dev_private = priv;
+	else
+		goto failed_platform_private_init;
+
+	ret = phytium_platform_carveout_mem_init(pdev, priv);
+	if (ret) {
+		DRM_ERROR("failed to init system carveout memory\n");
+		goto failed_carveout_mem_init;
+	}
+
+	ret = drm_dev_register(dev, 0);
+	if (ret) {
+		DRM_ERROR("failed to register drm dev\n");
+		goto failed_register_drm;
+	}
+
+	phytium_dp_hpd_irq_setup(dev, true);
+
+	return 0;
+
+failed_register_drm:
+	phytium_platform_carveout_mem_fini(pdev, priv);
+failed_carveout_mem_init:
+	phytium_platform_private_fini(pdev);
+failed_platform_private_init:
+	dev_set_drvdata(&pdev->dev, NULL);
+	drm_dev_put(dev);
+	return -1;
+}
+
+static int phytium_platform_remove(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	phytium_dp_hpd_irq_setup(dev, false);
+	cancel_work_sync(&priv->hotplug_work);
+	drm_dev_unregister(dev);
+	/* mirror the probe error path: release carveout memory before priv */
+	phytium_platform_carveout_mem_fini(pdev, priv);
+	phytium_platform_private_fini(pdev);
+	dev_set_drvdata(&pdev->dev, NULL);
+	drm_dev_put(dev);
+
+	return 0;
+}
+
+static void phytium_platform_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	priv->display_shutdown(dev);
+}
+
+static int phytium_platform_pm_suspend(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+
+	return priv->display_pm_suspend(drm_dev);
+}
+
+static int phytium_platform_pm_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+
+	return priv->display_pm_resume(drm_dev);
+}
+
+static const struct 
dev_pm_ops phytium_platform_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_platform_pm_suspend, phytium_platform_pm_resume) +}; + +static const struct phytium_device_info pe220x_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PE220X), + .total_pipes = 2, + .crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX, + .hdisplay_max = PE220X_DC_HDISPLAY_MAX, + .vdisplay_max = PE220X_DC_VDISPLAY_MAX, + .address_mask = PE220X_DC_ADDRESS_MASK, + .backlight_max = PE220X_DP_BACKLIGHT_MAX, +}; + +static const struct of_device_id display_of_match[] = { + { + .compatible = "phytium,dc", + .data = &pe220x_info, + }, + { } +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id display_acpi_ids[] = { + { + .id = "PHYT0015", + .driver_data = (kernel_ulong_t)&pe220x_info, + }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, display_acpi_ids); +#else +#define display_acpi_ids NULL +#endif + +struct platform_driver phytium_platform_driver = { + .driver = { + .name = "phytium_display_platform", + .of_match_table = of_match_ptr(display_of_match), + .acpi_match_table = ACPI_PTR(display_acpi_ids), + }, + .probe = phytium_platform_probe, + .remove = phytium_platform_remove, + .shutdown = phytium_platform_shutdown, + .driver.pm = &phytium_platform_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_platform.h b/drivers/gpu/drm/phytium/phytium_platform.h new file mode 100644 index 000000000000..42f6570b476f --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLATFORM_H__ +#define __PHYTIUM_PLATFORM_H__ + +struct phytium_platform_private { + struct phytium_display_private base; +}; + +#define to_platform_priv(priv) container_of(priv, struct phytium_platform_private, base) + +extern struct platform_driver phytium_platform_driver; + +#endif /* __PHYTIUM_PLATFORM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h new file mode 100644 index 000000000000..99ac9d4cb4d9 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_reg.h @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_REG_H__ +#define __PHYTIUM_REG_H__ + +/******************************register base******************************************/ +#define PX210_PIPE_BASE(pipe) (0x8000*pipe) +#define PX210_DC_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x0000) +#define PX210_DCREQ_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x2000) +#define PX210_DP_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x3000) +#define PX210_ADDRESS_TRANSFORM_BASE 0x4000 +#define PX210_PHY_ACCESS_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x5000) + +#define PE220X_DC_BASE(pipe) (0x1000*pipe) +#define PE220X_DP_BASE(pipe) (0x4000 + 0x1000*pipe) +#define PE220X_ADDRESS_TRANSFORM_BASE 0x8000 +#define PE220X_PHY_ACCESS_BASE(pipe) (0x6000 + 0x1000*pipe) +/******************************register base end******************************************/ + +/******************************dc register start******************************************/ +#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 + #define ADDRESS_MASK 0xffffff80 +#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 +#define PHYTIUM_DC_PANEL_CONFIG 0x1418 + #define PANEL_DATAENABLE_ENABLE (1<<0) + #define PANEL_DATA_ENABLE (1<<4) + #define PANEL_CLOCK_ENABLE (1<<8) +#define PHYTIUM_DC_HDISPLAY 0x1430 + #define HDISPLAY_END_SHIFT 0 + #define HDISPLAY_END_MASK 0x7fff + #define HDISPLAY_TOTAL_SHIFT 16 + #define HDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_HSYNC 0x1438 + #define HSYNC_START_SHIFT 0 + #define HSYNC_START_MASK 0x7fff + #define HSYNC_END_SHIFT 15 + #define HSYNC_END_MASK 0x7fff + #define HSYNC_PULSE_ENABLED (1<<30) + #define HSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_VDISPLAY 0x1440 + #define VDISPLAY_END_SHIFT 0 + #define VDISPLAY_END_MASK 0x7fff + #define VDISPLAY_TOTAL_SHIFT 16 + #define VDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_VSYNC 0x1448 + #define VSYNC_START_SHIFT 0 + #define VSYNC_START_MASK 0x7fff + #define VSYNC_END_SHIFT 15 + #define VSYNC_END_MASK 0x7fff + #define VSYNC_PULSE_ENABLED (1<<30) + #define VSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 +#define PHYTIUM_DC_GAMMA_INDEX 0x1458 + #define GAMMA_INDEX_MAX 256 +#define PHYTIUM_DC_GAMMA_DATA 0x1460 + #define GAMMA_BLUE_SHIFT 0 + #define GAMMA_BLUE_MASK 0x3ff + #define GAMMA_GREEN_SHIFT 10 + #define GAMMA_GREEN_MASK 0x3ff + #define GAMMA_RED_SHIFT 20 + #define GAMMA_RED_MASK 0x3ff +#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 + #define CURSOR_FORMAT_DISABLED 0x0 + #define CURSOR_FORMAT_MASKMODE 0x3 + #define CURSOR_FORMAT_ARGB8888 0x2 + #define CURSOR_FORMAT_MASK 0x3 + #define CURSOR_HOT_Y_SHIFT 8 + #define CURSOR_HOT_Y_MASK 0x1f + #define CURSOR_HOT_X_SHIFT 16 + #define CURSOR_HOT_X_MASK 0x1f +#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c +#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 + #define CURSOR_X_SHIFT 0 + #define CURSOR_X_MASK 0x7fff + #define CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 +#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 +#define PHYTIUM_DC_INT_STATUS 0x147c + #define INT_STATUS 0x1 +#define PHYTIUM_DC_INT_ENABLE 0x1480 + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 + #define FRAMEBUFFER_OUTPUT BIT(0) + #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) + #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) + #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) + #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) + #define 
FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 + #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_NV12 0x11 + #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 +#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 + #define PX210_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 +#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 + #define CLEAR_VALUE_BLACK 0x00000000 +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) +/******************************dc register end********************************************/ + +/******************************phy access register****************************************/ +#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 +#define PHYTIUM_PHY_WRITE_DATA 0x0004 +#define PHYTIUM_PHY_READ_DATA 0x0008 +#define PHYTIUM_PHY_ACCESS_CTRL 0x000c + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +/******************************phy access register end*************************************/ + +/******************************dp register start******************************************/ +#define PHYTIUM_DP_LINK_BW_SET 0x0000 +#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 +#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define 
PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 +#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C +#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 +#define PHYTIUM_DP_DATA_CONTROL 0x00C8 +#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 +#define PHYTIUM_DP_CORE_ID 0x00FC +#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 +#define PHYTIUM_DP_AUX_ADDRESS 0x0108 +#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 + #define AUX_CLK_DIVIDER_100 100 +#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) + #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 +#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) + #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 +#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define PHYTIUM_DP_AUX_TIMER 0x0158 +#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 +#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 +#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define 
VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 +#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 +#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 +#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C +#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 +#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 +#define PHYTIUM_DP_M_VID 0x01AC +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 +#define PHYTIUM_DP_N_VID 0x01B4 +#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 +#define PHYTIUM_DP_DATA_COUNT 0x01BC +#define PHYTIUM_DP_INTERLACED 0x01C0 +#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL 0x01C8 +#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 + #define SUPPORT_EDP_1_4 BIT(1) +#define PHYTIUM_EDP_CRC_RED 0x01D4 +#define PHYTIUM_EDP_CRC_GREEN 0x01D8 +#define PHYTIUM_EDP_CRC_BLUE 0x01DC +#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 + #define SEC_AUDIO_ENABLE BIT(0) + #define CHANNEL_MUTE_ENABLE BIT(1) +#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c + #define APB_CLOCK 48000000 +#define PHYTIUM_DP_SEC_MAUD 0x0318 +#define PHYTIUM_DP_SEC_NAUD 0x031c +#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + #define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + +#endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dc.c b/drivers/gpu/drm/phytium/px210_dc.c new file mode 100644 index 000000000000..ae022f9fe3fb --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
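+ *
+ * Layout note (a reader's aid, not from the original source): the PX210
+ * display blocks are pipe-relative, with pipes laid out at 0x8000
+ * strides (see phytium_reg.h), so for example PX210_DCREQ_BASE(1)
+ * evaluates to 0x8000 * 1 + 0x2000 = 0xa000.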
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static const unsigned int px210_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, +}; + +static uint64_t px210_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t px210_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int px210_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void px210_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_MSI_CLEAR); +} + +void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void px210_dc_hw_disable(struct drm_crtc *crtc) 
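+/*
+ * Disable path: soft-reset the DC, drop the pixel clock to 0 through the
+ * DCREQ command interface, soft-reset the DC once more, then reset the
+ * DCREQ request engine itself.
+ */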
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int reset_timeout = 100;
+	int config = 0;
+	int phys_pipe = phytium_crtc->phys_pipe;
+
+	// reset dc
+	config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe],
+			   PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	do {
+		config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE);
+		if (config & IS_IDLE)
+			break;
+		mdelay(1);
+		reset_timeout--;
+	} while (reset_timeout);
+
+	/* reset pix clock */
+	px210_dc_hw_config_pix_clock(crtc, 0);
+
+	// reset dc again after the pixel clock is dropped
+	reset_timeout = 100;
+	config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe],
+			   PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	do {
+		config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE);
+		if (config & IS_IDLE)
+			break;
+		mdelay(1);
+		reset_timeout--;
+	} while (reset_timeout);
+
+	/* reset dcreq */
+	phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PLAN);
+	phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_CONTROL);
+	phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET);
+	msleep(20);
+	phytium_writel_reg(priv, (~DCREQ_RESET) & DCREQ_RESET_MASK,
+			   priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET);
+}
+
+int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count)
+{
+	int ret = 0;
+
+	switch (mode_cmd->modifier[count]) {
+	case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC:
+		switch (mode_cmd->pixel_format) {
+		case DRM_FORMAT_ARGB4444:
+		case DRM_FORMAT_ABGR4444:
+		case DRM_FORMAT_RGBA4444:
+		case DRM_FORMAT_BGRA4444:
+		case DRM_FORMAT_XRGB4444:
+		case DRM_FORMAT_XBGR4444:
+		case DRM_FORMAT_RGBX4444:
+		case DRM_FORMAT_BGRX4444:
+		case DRM_FORMAT_ARGB1555:
+		case DRM_FORMAT_ABGR1555:
+		case DRM_FORMAT_RGBA5551:
+		case DRM_FORMAT_BGRA5551:
+		case DRM_FORMAT_XRGB1555:
+		case DRM_FORMAT_XBGR1555:
+		case DRM_FORMAT_RGBX5551:
+		case DRM_FORMAT_BGRX5551:
+		case DRM_FORMAT_RGB565:
+		case DRM_FORMAT_BGR565:
+		case DRM_FORMAT_YUYV:
+		case DRM_FORMAT_UYVY:
+			break;
+		default:
+			DRM_ERROR("TILE_MODE0_FBCDC does not support DRM_FORMAT %d\n",
+				  mode_cmd->pixel_format);
+			ret = -EINVAL;
+			goto error;
+		}
+		break;
+	case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC:
+		switch (mode_cmd->pixel_format) {
+		case DRM_FORMAT_ARGB2101010:
+		case DRM_FORMAT_ABGR2101010:
+		case DRM_FORMAT_RGBA1010102:
+		case DRM_FORMAT_BGRA1010102:
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_ABGR8888:
+		case DRM_FORMAT_RGBA8888:
+		case DRM_FORMAT_BGRA8888:
+		case DRM_FORMAT_XRGB8888:
+		case DRM_FORMAT_XBGR8888:
+		case DRM_FORMAT_RGBX8888:
+		case DRM_FORMAT_BGRX8888:
+			break;
+		default:
+			DRM_ERROR("TILE_MODE3_FBCDC does not support DRM_FORMAT %d\n",
+				  mode_cmd->pixel_format);
+			ret = -EINVAL;
+			goto error;
+		}
+		break;
+	case DRM_FORMAT_MOD_LINEAR:
+		break;
+	default:
+		DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[0]);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	return 0;
+error:
+	return ret;
+}
+
+void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers,
+					  const uint32_t **formats,
+					  uint32_t 
*format_count) +{ + *format_modifiers = px210_primary_formats_modifiers; + *formats = px210_primary_formats; + *format_count = ARRAY_SIZE(px210_primary_formats); +} + +void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = px210_cursor_formats_modifiers; + *formats = px210_cursor_formats; + *format_count = ARRAY_SIZE(px210_cursor_formats); +} + +void px210_dc_hw_update_dcreq(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int config; + + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { + phytium_writel_reg(priv, DCREQ_MODE_LINEAR, + group_offset, PX210_DCREQ_PLANE0_CONFIG); + } else { + config = DCREQ_NO_LOSSY; + if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) + config |= DCREQ_TILE_TYPE_MODE0; + else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) + config |= DCREQ_TILE_TYPE_MODE3; + else + config |= DCREQ_TILE_TYPE_MODE0; + + switch (phytium_plane->format) { + case FRAMEBUFFER_FORMAT_ARGB8888: + case FRAMEBUFFER_FORMAT_XRGB8888: + config |= DCREQ_COLOURFORMAT_BGRA8888; + break; + case FRAMEBUFFER_FORMAT_ARGB2101010: + config |= DCREQ_COLOURFORMAT_ARGB2101010; + break; + case FRAMEBUFFER_FORMAT_XRGB4444: + case FRAMEBUFFER_FORMAT_ARGB4444: + config |= DCREQ_COLOURFORMAT_ARGB4444; + break; + case FRAMEBUFFER_FORMAT_XRGB1555: + case FRAMEBUFFER_FORMAT_ARGB1555: + config |= DCREQ_COLOURFORMAT_ARGB1555; + break; + case FRAMEBUFFER_FORMAT_RGB565: + config |= DCREQ_COLOURFORMAT_RGB565; + break; + case FRAMEBUFFER_FORMAT_YUYV: + config |= DCREQ_COLOURFORMAT_YUYV; + break; + case FRAMEBUFFER_FORMAT_UYVY: + config |= DCREQ_COLOURFORMAT_UYVY; + break; + } + config |= DCREQ_ARGBSWIZZLE_ARGB; + config |= DCREQ_MODE_TILE; + phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, + group_offset, PX210_DCREQ_PLANE0_ADDR_START); + phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & + 0xffffffff, group_offset, PX210_DCREQ_PLANE0_ADDR_END); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_PLANE0_CONFIG); + } +} + +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PIX_DMA_PREFIX); +} diff --git a/drivers/gpu/drm/phytium/px210_dc.h b/drivers/gpu/drm/phytium/px210_dc.h new file mode 100644 index 000000000000..1d8220faadc7 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
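+ *
+ * Address note (a worked example, not from the original comment):
+ * scanout addresses wider than 32 bits are split by the DCREQ helpers
+ * declared below; the low 32 bits go to PX210_DCREQ_PLANE0_ADDR_START
+ * and the upper bits to PX210_DCREQ_PIX_DMA_PREFIX, e.g.
+ * (0x240000000ULL >> 32) & 0xff = 0x2.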
+ */
+
+#ifndef __PX210_DC_H__
+#define __PX210_DC_H__
+
+#define PX210_DC_PIX_CLOCK_MAX		(594000)
+#define PX210_DC_HDISPLAY_MAX		3840
+#define PX210_DC_VDISPLAY_MAX		2160
+#define PX210_DC_ADDRESS_MASK		0x7f
+
+extern void px210_dc_hw_vram_init(struct phytium_display_private *priv,
+				  resource_size_t vram_addr,
+				  resource_size_t vram_size);
+extern void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe);
+extern void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock);
+extern void px210_dc_hw_disable(struct drm_crtc *crtc);
+extern int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count);
+extern void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers,
+						 const uint32_t **formats,
+						 uint32_t *format_count);
+extern void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers,
+						const uint32_t **formats,
+						uint32_t *format_count);
+void px210_dc_hw_update_dcreq(struct drm_plane *plane);
+void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane);
+#endif /* __PX210_DC_H__ */
diff --git a/drivers/gpu/drm/phytium/px210_dp.c b/drivers/gpu/drm/phytium/px210_dp.c
new file mode 100644
index 000000000000..be3c520a3c09
--- /dev/null
+++ b/drivers/gpu/drm/phytium/px210_dp.c
@@ -0,0 +1,920 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include "phytium_display_drv.h"
+#include "px210_reg.h"
+#include "phytium_dp.h"
+#include "px210_dp.h"
+
+static uint8_t px210_dp_source_lane_count[3] = {4, 4, 1};
+
+/* [reg][link_rate 1.62->8.1] */
+static int vco_val[12][4] = {
+	{0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ
+	{0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ
+	{0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ
+	{0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV
+	{0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL
+	{0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH
+	{0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR
+	{0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL
+	{0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START
+	{0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT
+	{0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START
+	{0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR
+};
+
+static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis]
+{
+	/* 1.62Gbps */
+	{
+		{0x0026, 0x001f, 0x0012, 0x0000},
+		{0x0013, 0x0013, 0x0000, 0x0000},
+		{0x0006, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+
+	/* 2.7Gbps */
+	{
+		{0x0026, 0x001f, 0x0012, 0x0000},
+		{0x0013, 0x0013, 0x0000, 0x0000},
+		{0x0006, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+
+	/* 5.4Gbps */
+	{
+		{0x0026, 0x0013, 0x005, 0x0000},
+		{0x0018, 0x006, 0x0000, 0x0000},
+		{0x000c, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+
+	/* 8.1Gbps */
+	{
+		{0x0026, 0x0013, 0x005, 0x0000},
+		{0x0013, 0x006, 0x0000, 0x0000},
+		{0x0006, 0x0000, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+};
+
+static int cpost_val[4][4][4] = // [link_rate][swing][emphasis]
+{
+	/* 1.62Gbps */
+	{
+		{0x0000, 0x0014, 0x0020, 0x002a},
+		{0x0000, 0x0010, 0x001f, 0x0000},
+		{0x0000, 0x0013, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+
+	/* 2.7Gbps */
+	{
+		{0x0000, 0x0014, 0x0020, 0x002a},
+		{0x0000, 0x0010, 0x001f, 0x0000},
+		{0x0000, 0x0013, 0x0000, 0x0000},
+		{0x0000, 0x0000, 0x0000, 0x0000},
+	},
+
+	/* 5.4Gbps */
+	{
+		{0x0000, 0x0014, 0x0022, 0x002e},
+		{0x0000, 0x0013, 0x0020, 0x0000},
+	
{0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int px210_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) +{ + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<port%3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + voltage_swing = 0; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + pre_emphasis = 0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + if (port == 0) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_MGNFS, + 
mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } +} + +static int px210_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_APB_RESET, APB_RESET); + + phytium_phy_writel(phytium_dp, PX210_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (LANE_BIT << i*LANE_BIT_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (BIT_20 << i*BIT_20_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void px210_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void px210_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + uint32_t group_offset = priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void px210_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t px210_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; 
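+	/*
+	 * Readback note: unlike the set path, which encodes a level as
+	 * FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) <<
+	 * BACKLIGHT_SHIFT) into the per-port PX210_DCREQ_CMD_REGISTER
+	 * (levels 101-104 are repurposed as the enable/disable and panel
+	 * power opcodes), the current level is read back from the shared
+	 * address-transform block, packed in bits [22:16].
+	 */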
+ uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int px210_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int config = 0; + int ret = 0; + + if (level > PX210_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); + +out: + return ret; +} + +bool px210_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + + return ((config & DP_SPREAD_ENABLE(port)) ? true:false); +} + +int px210_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int timeout = 100, config, ret = 0; + uint32_t group_offset = priv->address_transform_base; + uint32_t group_offset_dp = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config &= (~DC_DP_RESET_STATUS(port)); + + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, + priv->dcreq_reg_base[port], PX210_DCREQ_CMD_REGISTER); + do { + mdelay(10); + timeout--; + config = phytium_readl_reg(priv, group_offset, + PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + if (config & DC_DP_RESET_STATUS(port)) + break; + } while (timeout); + if (timeout == 0) { + DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); + ret = -1; + } + + phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset_dp, PHYTIUM_DP_AUX_CLK_DIVIDER); + + return ret; +} + +uint8_t px210_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return px210_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func px210_dp_funcs = { + .dp_hw_get_source_lane_count = px210_dp_hw_get_source_lane_count, + .dp_hw_reset = px210_dp_hw_reset, + .dp_hw_spread_is_enable = px210_dp_hw_spread_is_enable, + .dp_hw_set_backlight = px210_dp_hw_set_backlight, + .dp_hw_get_backlight = px210_dp_hw_get_backlight, + .dp_hw_disable_backlight = px210_dp_hw_disable_backlight, + .dp_hw_enable_backlight = px210_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = px210_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = px210_dp_hw_poweron_panel, + .dp_hw_init_phy = px210_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = px210_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = px210_dp_hw_set_phy_lane_and_rate, +}; + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &px210_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/px210_dp.h 
b/drivers/gpu/drm/phytium/px210_dp.h new file mode 100644 index 000000000000..f2436ace1845 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_DP_H__ +#define __PX210_DP_H__ + +#define PX210_DP_BACKLIGHT_MAX 100 + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PX210_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_reg.h b/drivers/gpu/drm/phytium/px210_reg.h new file mode 100644 index 000000000000..e594fbc8d96f --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_reg.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_REG_H__ +#define __PX210_REG_H__ + +#include "phytium_reg.h" + +/******************************dc register start******************************************/ +#define PX210_DC_CLOCK_CONTROL 0x0000 + #define SOFT_RESET (1<<12) +#define PX210_DC_CLOCK_IDLE 0x0004 + #define IS_IDLE (1<<16) +/******************************dc register end********************************************/ + +/******************************dcreq register start**************************************/ +#define PX210_DCREQ_PLANE0_ADDR_START 0x00 +#define PX210_DCREQ_PLANE0_ADDR_END 0x04 +#define PX210_DCREQ_PLANE1_ADDR_START 0x08 +#define PX210_DCREQ_PLANE1_ADDR_END 0x0c +#define PX210_DCREQ_PLANE0_CONFIG 0x10 + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define PX210_DCREQ_PLANE1_CONFIG(pipe) 0x14 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 +#define PX210_DCREQ_CMD_REGISTER 0x38 + #define FLAG_REPLY (1<<31) + #define FLAG_REQUEST (1<<30) + #define CMD_PIXEL_CLOCK (0x0 << 28) + #define CMD_BACKLIGHT (0x1 << 28) + #define CMD_DC_DP_RESET (0x3 << 28) + #define BACKLIGHT_SHIFT 21 + #define BACKLIGHT_MASK 0x7f + #define BACKLIGHT_MAX 100 + #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) + #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) + #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) + #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) + #define PIXEL_CLOCK_MASK (0x1fffff) +#define PX210_DCREQ_FBCD_CLOCK_CONFIG 0x3c +#define PX210_DCREQ_PIX_DMA_PREFIX 0x50 + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT 32 +#define PX210_DCREQ_FRAME_START 0x54 +#define PX210_DCREQ_FILTER_CONFIG 0x58 +#define PX210_DCREQ_CONTROL 0x5C + #define DC_REQ_ENABLE (1<<0) +#define PX210_DCREQ_MSI_CLEAR 0x60 + #define MSI_CLEAR 0x0 +#define PX210_DCREQ_RESET 0x68 + #define DCREQ_RESET (0x3 << 
0) + #define DCREQ_RESET_MASK 0x3 +#define PX210_DCREQ_PLAN 0x94 + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define PX210_GPU_ADDRESS_TRANSFORM_SIZE 0x4 +#define PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 + +#define PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 + #define SRC_ADDR_OFFSET 22 + #define SRC_ADDR_MASK 0xffffffffff +#define PX210_DC_ADDRESS_TRANSFORM_SIZE 0x28 + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c + #define DST_ADDR_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 + #define DC_DP_RESET_STATUS(pipe) (1 << pipe) + #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) +#define PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c + #define BACKLIGHT_VALUE_MASK (0x7f) + #define BACKLIGHT_VALUE_SHIFT 16 +/******************************address transform register end**************************/ + +/******************************phy register start******************************************/ +/* self define */ +#define PX210_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + #define RESET_DEASSERT 0x1 +#define PX210_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define PX210_PHY1_EN_REFCLK 0x100070 + +#define PX210_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define PX210_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define PX210_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define PX210_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define PX210_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define PX210_PHY0_PMA0_POWER 0x40014 +#define PX210_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define PX210_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define PX210_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define PX210_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define PX210_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define PX210_PHY0_PLL_CFG 0x30038 +#define PX210_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define PX210_PHY0_PMA_CONTROL 0x3800c +#define PX210_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define PX210_PHY0_PMA_CONTROL2 0x38004 +#define PX210_PHY1_PMA_CONTROL2 0xb8004 + #define PLL0_LOCK_DONE (0x1 << 6) + #define PLL1_LOCK_DONE (0x1 << 7) + +#define PX210_PHY0_PLL0_CLK_SEL 0X684 +#define PX210_PHY0_PLL1_CLK_SEL 0x704 +#define PX210_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define 
PX210_PHY0_HSCLK0_SEL 0x18398 +#define PX210_PHY0_HSCLK1_SEL 0x1a398 +#define PX210_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define PX210_PHY0_HSCLK0_DIV 0x1839c +#define PX210_PHY0_HSCLK1_DIV 0x1a39c +#define PX210_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define PX210_PHY0_PLLDRC0_CTRL 0x18394 +#define PX210_PHY0_PLLDRC1_CTRL 0x1a394 +#define PX210_PHY1_PLLDRC_CTRL 0x90394 + #define PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define PX210_PHY0_PLL0_DSM_M0 0x250 +#define PX210_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define PX210_PHY0_PLL0_VCOCAL_START 0x218 +#define PX210_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define PX210_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define PX210_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL0_CP_PADJ 0x690 +#define PX210_PHY0_PLL0_CP_IADJ 0x694 +#define PX210_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define PX210_PHY0_PLL0_INTDIV 0x240 +#define PX210_PHY0_PLL0_FRACDIVL 0x244 +#define PX210_PHY0_PLL0_FRACDIVH 0x248 +#define PX210_PHY0_PLL0_HIGH_THR 0x24c +#define PX210_PHY0_PLL0_PDIAG_CTRL 0x680 +#define PX210_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define PX210_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define PX210_PHY0_PLL1_CP_PADJ 0x710 +#define PX210_PHY0_PLL1_CP_IADJ 0x714 +#define PX210_PHY0_PLL1_CP_FILT_PADJ 0x718 +#define PX210_PHY0_PLL1_INTDIV 0x340 +#define PX210_PHY0_PLL1_FRACDIVL 0x344 +#define PX210_PHY0_PLL1_FRACDIVH 0x348 +#define PX210_PHY0_PLL1_HIGH_THR 0x34c +#define PX210_PHY0_PLL1_PDIAG_CTRL 0x700 +#define PX210_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define PX210_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define PX210_PHY1_PLL0_CP_PADJ 0x80690 +#define PX210_PHY1_PLL0_CP_IADJ 0x80694 +#define PX210_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define PX210_PHY1_PLL0_INTDIV 0x80240 +#define PX210_PHY1_PLL0_FRACDIVL 0x80244 +#define PX210_PHY1_PLL0_FRACDIVH 0x80248 +#define PX210_PHY1_PLL0_HIGH_THR 0x8024c +#define PX210_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define PX210_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define PX210_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define PX210_PHY0_PLL0_TX_PSC_A0 0x18400 +#define PX210_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL0_TX_PSC_A2 0x18408 +#define PX210_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL0_TX_PSC_A3 0x1840c +#define PX210_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL0_RX_PSC_A0 0x28000 +#define PX210_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A2 0x28008 +#define PX210_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A3 0x2800C +#define PX210_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define PX210_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + 
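+/*
+ * Programming-pattern sketch, added for clarity; it is not from the
+ * original source, and phytium_phy_update() is a hypothetical helper.
+ * px210_dp.c drives the per-lane fields of the PHY registers above by
+ * building a value/mask pair, one field per source lane:
+ *
+ *	u32 data = 0, mask = 0;
+ *	int i;
+ *
+ *	for (i = 0; i < lane_count; i++) {
+ *		data |= PLL_EN << (i * PLL_EN_SHIFT);
+ *		mask |= ((1 << PLL_EN_SHIFT) - 1) << (i * PLL_EN_SHIFT);
+ *	}
+ *	phytium_phy_update(phytium_dp, PX210_PHY0_PLL_EN, mask, data);
+ */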
+#define PX210_PHY0_PLL1_TX_PSC_A0		0x1a400
+	#define PLL1_TX_PSC_A0			0xfb
+#define PX210_PHY0_PLL1_TX_PSC_A2		0x1a408
+	#define PLL1_TX_PSC_A2			0x4aa
+#define PX210_PHY0_PLL1_TX_PSC_A3		0x1a40c
+	#define PLL1_TX_PSC_A3			0x4aa
+#define PX210_PHY0_PLL1_RX_PSC_A0		0x2a000
+	#define PLL1_RX_PSC_A0			0x0
+#define PX210_PHY0_PLL1_RX_PSC_A2		0x2a008
+	#define PLL1_RX_PSC_A2			0x0
+#define PX210_PHY0_PLL1_RX_PSC_A3		0x2a00C
+	#define PLL1_RX_PSC_A3			0x0
+#define PX210_PHY0_PLL1_RX_PSC_CAL		0x2a018
+	#define PLL1_RX_PSC_CAL			0x0
+
+#define PX210_PHY0_PLL0_XCVR_CTRL		0x183a8
+#define PX210_PHY1_PLL0_XCVR_CTRL		0x903a8
+	#define PLL0_XCVR_CTRL			0xf
+#define PX210_PHY0_PLL1_XCVR_CTRL		0x1a3a8
+	#define PLL1_XCVR_CTRL			0xf
+
+#define PX210_PHY0_PLL0_RX_GCSM1_CTRL		0x28420
+#define PX210_PHY1_PLL0_RX_GCSM1_CTRL		0xa0420
+	#define PLL0_RX_GCSM1_CTRL		0x0
+#define PX210_PHY0_PLL0_RX_GCSM2_CTRL		0x28440
+#define PX210_PHY1_PLL0_RX_GCSM2_CTRL		0xa0440
+	#define PLL0_RX_GCSM2_CTRL		0x0
+#define PX210_PHY0_PLL0_RX_PERGCSM_CTRL		0x28460
+#define PX210_PHY1_PLL0_RX_PERGCSM_CTRL		0xa0460
+	#define PLL0_RX_PERGCSM_CTRL		0x0
+
+#define PX210_PHY0_PLL1_RX_GCSM1_CTRL		0x2a420
+	#define PLL1_RX_GCSM1_CTRL		0x0
+#define PX210_PHY0_PLL1_RX_GCSM2_CTRL		0x2a440
+	#define PLL1_RX_GCSM2_CTRL		0x0
+#define PX210_PHY0_PLL1_RX_PERGCSM_CTRL		0x2a460
+	#define PLL1_RX_PERGCSM_CTRL		0x0
+
+/* swing and emphasis */
+#define PX210_PHY0_PLL0_TX_DIAG_ACYA		0x1879c
+#define PX210_PHY0_PLL1_TX_DIAG_ACYA		0x1a79c
+#define PX210_PHY1_PLL0_TX_DIAG_ACYA		0x9079c
+	#define LOCK				1
+	#define UNLOCK				0
+
+#define PX210_PHY0_PLL0_TX_TXCC_CTRL		0x18100
+#define PX210_PHY0_PLL1_TX_TXCC_CTRL		0x1a100
+#define PX210_PHY1_PLL0_TX_TXCC_CTRL		0x90100
+	#define TX_TXCC_CTRL			0x8a4
+
+#define PX210_PHY0_PLL0_TX_DRV			0x18318
+#define PX210_PHY0_PLL1_TX_DRV			0x1a318
+#define PX210_PHY1_PLL0_TX_DRV			0x90318
+	#define TX_DRV				0x3
+
+#define PX210_PHY0_PLL0_TX_MGNFS		0x18140
+#define PX210_PHY0_PLL1_TX_MGNFS		0x1a140
+#define PX210_PHY1_PLL0_TX_MGNFS		0x90140
+
+#define PX210_PHY0_PLL0_TX_CPOST		0x18130
+#define PX210_PHY0_PLL1_TX_CPOST		0x1a130
+#define PX210_PHY0_PLL1_TX_CPOST1		0x1a13c
+#define PX210_PHY1_PLL0_TX_CPOST		0x90130
+
+/******************************phy register end********************************************/
+#endif /* __PX210_REG_H__ */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 0e90ad63dace..c9820a40b5b7 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3217,4 +3217,6 @@
 #define PCI_VENDOR_ID_NCUBE		0x10ff
 
+#define PCI_VENDOR_ID_PHYTIUM		0x1db7
+
 #endif /* _LINUX_PCI_IDS_H */
-- 
Gitee

From 1f124f2d6141cc4a15293ab09dba21f96e1c705d Mon Sep 17 00:00:00 2001
From: Qiao Ma
Date: Wed, 3 Apr 2024 15:26:36 +0800
Subject: [PATCH 690/953] anolis: configs: adjust loongarch configs

ANBZ: #8598

Enable some important kconfigs for better compatibility with the x86 and
arm architectures.

Here are the adjusted configs:
CONFIG_PSI=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_FUNCTION_TRACER=y
CONFIG_DYNAMIC_FTRACE=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_BPF_JIT=y
CONFIG_CRYPTO_SM4=y
CONFIG_CRYPTO_SM4_GENERIC=y
CONFIG_CRYPTO_SM3_GENERIC=y
CONFIG_KPROBES=y
CONFIG_KRETPROBES=y
CONFIG_SCSI_SAS_ATA=y
CONFIG_KVM=y
CONFIG_ZSMALLOC=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_RTC_SYSTOHC=y
CONFIG_ARCH_IOREMAP=y
CONFIG_ARCH_WRITECOMBINE=y
CONFIG_CPU_HAS_LBT=y
CONFIG_DWMAC_LOONGSON=m

These kconfigs are adjusted by the following steps:
1. 
`ARCH=loongarch CROSS_COMPILE=scripts/dummy-tools/ make anolis_defconfig` 2. `ARCH=loongarch CROSS_COMPILE=scripts/dummy-tools/ ./scripts/kconfig/merge_config.sh .config adjust-configs` Signed-off-by: Qiao Ma Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3002 --- arch/loongarch/configs/anolis-debug_defconfig | 6664 ++++++++++++++++- arch/loongarch/configs/anolis_defconfig | 6664 ++++++++++++++++- 2 files changed, 13312 insertions(+), 16 deletions(-) diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig index db41cbf5efd4..365f27c124b4 100644 --- a/arch/loongarch/configs/anolis-debug_defconfig +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -1,147 +1,904 @@ # -## Automatically generated file; DO NOT EDIT. -## Linux/loongarch 6.6.7 Kernel Configuration -## +# Automatically generated file; DO NOT EDIT. +# Linux/loongarch 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y + +# +# BPF subsystem +# CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set # CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERMODE_DRIVER=y +# CONFIG_BPF_PRELOAD is not set +# CONFIG_BPF_LSM is not set +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# 
CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_SCHED_CORE is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# CONFIG_PSI_DEFAULT_DISABLED is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y +CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_DEBUG_RSEQ is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y 
+CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_LOONGARCH=y +CONFIG_64BIT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_L1_CACHE_SHIFT=6 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MACH_LOONGSON64=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_PGTABLE_3LEVEL=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_AS_HAS_EXPLICIT_RELOCS=y +CONFIG_AS_HAS_FCSR_CLASS=y +CONFIG_AS_HAS_LSX_EXTENSION=y +CONFIG_AS_HAS_LASX_EXTENSION=y +CONFIG_AS_HAS_LBT_EXTENSION=y +CONFIG_AS_HAS_LVZ_EXTENSION=y + +# +# Kernel type and options +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +# CONFIG_4KB_3LEVEL is not set +# CONFIG_4KB_4LEVEL is not set +# CONFIG_16KB_2LEVEL is not set +CONFIG_16KB_3LEVEL=y +# CONFIG_64KB_2LEVEL is not set +# CONFIG_64KB_3LEVEL is not set +CONFIG_CMDLINE="" +CONFIG_CMDLINE_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set +# CONFIG_CMDLINE_FORCE is not set +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_SCHED_SMT=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_FORCE_MAX_ORDER=11 +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SELECTS_CRASH_DUMP=y +CONFIG_RELOCATABLE=y CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000 +CONFIG_SECCOMP=y +# end of Kernel type and options + +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=12 +CONFIG_ARCH_MMAP_RND_BITS_MAX=18 +CONFIG_ARCH_SUPPORTS_UPROBES=y + +# +# Power management options +# +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Frequency scaling +# CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set CONFIG_LOONGSON3_ACPI_CPUFREQ=y +# end of CPU Frequency scaling + +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +# 
CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_PROCESSOR=y CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y CONFIG_ACPI_HOTPLUG_MEMORY=y +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set +CONFIG_ACPI_WATCHDOG=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PPTT=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +# end of Power management options + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m +CONFIG_KVM=y + +# +# General architecture-dependent options +# +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_TIF_NOHZ=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=12 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# 
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# end of GCOV-based kernel profiling
+
+CONFIG_HAVE_GCC_PLUGINS=y
+CONFIG_GCC_PLUGINS=y
+# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set
+CONFIG_FUNCTION_ALIGNMENT=0
+# end of General architecture-dependent options
+
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULE_SIG_FORMAT=y
CONFIG_MODULES=y
+# CONFIG_MODULE_DEBUG is not set
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set
CONFIG_MODVERSIONS=y
+CONFIG_ASM_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG=y
+# CONFIG_MODULE_SIG_FORCE is not set
+CONFIG_MODULE_SIG_ALL=y
+# CONFIG_MODULE_SIG_SHA1 is not set
+# CONFIG_MODULE_SIG_SHA224 is not set
CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_MODULE_SIG_SHA384 is not set
+# CONFIG_MODULE_SIG_SHA512 is not set
+CONFIG_MODULE_SIG_HASH="sha256"
+CONFIG_MODULE_COMPRESS_NONE=y
+# CONFIG_MODULE_COMPRESS_GZIP is not set
+# CONFIG_MODULE_COMPRESS_XZ is not set
+# CONFIG_MODULE_COMPRESS_ZSTD is not set
+# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
+CONFIG_MODPROBE_PATH="/sbin/modprobe"
+# CONFIG_TRIM_UNUSED_KSYMS is not set
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+CONFIG_BLOCK_LEGACY_AUTOLOAD=y
+CONFIG_BLK_CGROUP_RWSTAT=y
+CONFIG_BLK_CGROUP_PUNT_BIO=y
+CONFIG_BLK_DEV_BSG_COMMON=y
+CONFIG_BLK_ICQ=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_INTEGRITY_T10=m
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
+# CONFIG_BLK_DEV_THROTTLING_LOW is not set
CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_MQ=y
+# CONFIG_BLK_CGROUP_IOLATENCY is not set
+# CONFIG_BLK_CGROUP_FC_APPID is not set
+# CONFIG_BLK_CGROUP_IOCOST is not set
+# CONFIG_BLK_CGROUP_IOPRIO is not set
+CONFIG_BLK_DEBUG_FS=y
+CONFIG_BLK_DEBUG_FS_ZONED=y
+# CONFIG_BLK_SED_OPAL is not set
+# CONFIG_BLK_INLINE_ENCRYPTION is not set
+
+#
+# Partition Types
+#
CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_AIX_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
CONFIG_BSD_DISKLABEL=y
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+# end of Partition Types
+
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_BLK_PM=y
+CONFIG_BLOCK_HOLDER_DEPRECATED=y
+CONFIG_BLK_MQ_STACKING=y
+
+#
+# IO Schedulers
+#
+CONFIG_MQ_IOSCHED_DEADLINE=y
+CONFIG_MQ_IOSCHED_KYBER=y
CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+# CONFIG_BFQ_CGROUP_DEBUG is not set
+# end of IO Schedulers
+
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_PADATA=y
+CONFIG_ASN1=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_READ_LOCK=y
+CONFIG_ARCH_INLINE_READ_LOCK_BH=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_READ_UNLOCK=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_WRITE_LOCK=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_SPIN_TRYLOCK=y
+CONFIG_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_INLINE_SPIN_LOCK=y
+CONFIG_INLINE_SPIN_LOCK_BH=y
+CONFIG_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_READ_LOCK=y
+CONFIG_INLINE_READ_LOCK_BH=y
+CONFIG_INLINE_READ_LOCK_IRQ=y
+CONFIG_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_INLINE_READ_UNLOCK=y
+CONFIG_INLINE_READ_UNLOCK_BH=y
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_WRITE_LOCK=y
+CONFIG_INLINE_WRITE_LOCK_BH=y
+CONFIG_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_INLINE_WRITE_UNLOCK=y
+CONFIG_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
+CONFIG_CK_KABI_RESERVE=y
+CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y
+CONFIG_FREEZER=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_STATE=y
+CONFIG_ELFCORE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
CONFIG_BINFMT_MISC=m
+CONFIG_COREDUMP=y
+# end of Executable file formats
+
+#
+# Memory Management options
+#
+CONFIG_ZPOOL=y
+CONFIG_SWAP=y
CONFIG_ZSWAP=y
+# CONFIG_ZSWAP_DEFAULT_ON is not set
+# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd"
+CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
+CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
+CONFIG_ZBUD=y
CONFIG_Z3FOLD=y
+CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
+CONFIG_ZSMALLOC_CHAIN_SIZE=8
+
+#
+# SLAB allocator options
+#
+# CONFIG_SLAB_DEPRECATED is not set
+CONFIG_SLUB=y
+# CONFIG_SLUB_TINY is not set
+CONFIG_SLAB_MERGE_DEFAULT=y
CONFIG_SLAB_FREELIST_RANDOM=y
+# CONFIG_SLAB_FREELIST_HARDENED is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# CONFIG_RANDOM_KMALLOC_CACHES is not set
+# end of SLAB allocator options
+
# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
+CONFIG_HAVE_FAST_GUP=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_NUMA_KEEP_MEMINFO=y
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MEMORY_BALLOON=y
+CONFIG_BALLOON_COMPACTION=y
+CONFIG_COMPACTION=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_PAGE_REPORTING=y
+CONFIG_MIGRATION=y
+CONFIG_CONTIG_ALLOC=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU_NOTIFIER=y
CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+# CONFIG_READ_ONLY_THP_FOR_FS is not set
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+# CONFIG_CMA_DEBUGFS is not set
+# CONFIG_CMA_SYSFS is not set
+CONFIG_CMA_AREAS=19
+# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
+CONFIG_PAGE_IDLE_FLAG=y
CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_ZONE_DMA32=y
+CONFIG_HMM_MIRROR=y
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_TEST is not set
+# CONFIG_DMAPOOL_TEST is not set
+CONFIG_ARCH_HAS_PTE_SPECIAL=y
+CONFIG_MEMFD_CREATE=y
+# CONFIG_ANON_VMA_NAME is not set
CONFIG_USERFAULTFD=y
+# CONFIG_LRU_GEN is not set
+CONFIG_LOCK_MM_AND_FIND_VMA=y
+
+#
+# Data Access Monitoring
+#
+# CONFIG_DAMON is not set
+# end of Data Access Monitoring
+# end of Memory Management options
+
CONFIG_NET=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_XGRESS=y
+CONFIG_NET_REDIRECT=y
+CONFIG_SKB_EXTENSIONS=y
+
+#
+# Networking options
+#
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_SCM=y
+CONFIG_AF_UNIX_OOB=y
CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=m
CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_AH=m
+CONFIG_XFRM_ESP=m
+CONFIG_XFRM_IPCOMP=m
CONFIG_NET_KEY=m
CONFIG_NET_KEY_MIGRATE=y
+CONFIG_XFRM_ESPINTCP=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_NET_HANDSHAKE=y
+CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_FIB_TRIE_STATS=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE_COMMON=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
+CONFIG_NET_UDP_TUNNEL=m
+CONFIG_NET_FOU=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
+CONFIG_INET_TABLE_PERTURB_ORDER=16
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_INET_RAW_DIAG=m
CONFIG_INET_DIAG_DESTROY=y
CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
CONFIG_TCP_CONG_HSTCP=m
CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
CONFIG_TCP_CONG_NV=m
CONFIG_TCP_CONG_SCALABLE=m
CONFIG_TCP_CONG_LP=m
@@ -151,6 +908,8 @@
CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_TCP_CONG_DCTCP=m
CONFIG_TCP_CONG_CDG=m
CONFIG_TCP_CONG_BBR=m
+CONFIG_DEFAULT_RENO=y
+CONFIG_DEFAULT_TCP_CONG="reno"
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
@@ -163,9 +922,17 @@
CONFIG_INET6_ESPINTCP=y
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
CONFIG_IPV6_ILA=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT=m
CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_GRE=m
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
@@ -173,21 +940,53 @@
CONFIG_IPV6_PIMSM_V2=y
CONFIG_IPV6_SEG6_LWTUNNEL=y
CONFIG_IPV6_SEG6_HMAC=y
CONFIG_IPV6_RPL_LWTUNNEL=y
+# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
CONFIG_NETLABEL=y
CONFIG_MPTCP=y
+CONFIG_INET_MPTCP_DIAG=m
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PTP_CLASSIFY=y
CONFIG_NETWORK_PHY_TIMESTAMPING=y
CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
CONFIG_BRIDGE_NETFILTER=m
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_EGRESS=y
+CONFIG_NETFILTER_SKIP_EGRESS=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_FAMILY_BRIDGE=y
+CONFIG_NETFILTER_FAMILY_ARP=y
+CONFIG_NETFILTER_BPF_LINK=y
+# CONFIG_NETFILTER_NETLINK_HOOK is not set
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_NETLINK_OSF=m
CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_SYSLOG=m
+CONFIG_NETFILTER_CONNCOUNT=m
+CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CONNTRACK_OVS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
@@ -198,6 +997,16 @@
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_CT_NETLINK_HELPER=m
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_REDIRECT=y
+CONFIG_NF_NAT_MASQUERADE=y
+CONFIG_NF_NAT_OVS=y
+CONFIG_NETFILTER_SYNPROXY=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
@@ -214,41 +1023,67 @@
CONFIG_NFT_TUNNEL=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
+CONFIG_NFT_REJECT_INET=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
CONFIG_NFT_SYNPROXY=m
+CONFIG_NF_DUP_NETDEV=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
+# CONFIG_NFT_REJECT_NETDEV is not set
CONFIG_NF_FLOW_TABLE_INET=m
CONFIG_NF_FLOW_TABLE=m
+# CONFIG_NF_FLOW_TABLE_PROCFS is not set
CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
CONFIG_NETFILTER_XT_SET=m
+
+#
+# Xtables targets
+#
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LED=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_NAT=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+
+#
+# Xtables matches
+#
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
@@ -260,11 +1095,14 @@
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
@@ -284,6 +1122,7 @@
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -291,7 +1130,10 @@
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+# end of Core Netfilter Configuration
+
CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
CONFIG_IP_SET_BITMAP_IP=m
CONFIG_IP_SET_BITMAP_IPMAC=m
CONFIG_IP_SET_BITMAP_PORT=m
@@ -311,11 +1153,21 @@
CONFIG_IP_SET_LIST_SET=m
CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
CONFIG_IP_VS_DEBUG=y
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
CONFIG_IP_VS_PROTO_ESP=y
CONFIG_IP_VS_PROTO_AH=y
CONFIG_IP_VS_PROTO_SCTP=y
+
+#
+# IPVS scheduler
+#
CONFIG_IP_VS_RR=m
CONFIG_IP_VS_WRR=m
CONFIG_IP_VS_LC=m
@@ -329,13 +1181,43 @@
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_MH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
+# CONFIG_IP_VS_TWOS is not set
+
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
+#
+# IPVS MH scheduler
+#
+CONFIG_IP_VS_MH_TAB_INDEX=12
+
+#
+# IPVS application helper
+#
CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_NFCT=y
CONFIG_IP_VS_PE_SIP=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
+CONFIG_NF_TPROXY_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_DUP_IPV4=m
CONFIG_NF_LOG_ARP=m
CONFIG_NF_LOG_IPV4=m
+CONFIG_NF_REJECT_IPV4=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -356,8 +1238,20 @@
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
+# end of IP: Netfilter Configuration
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_SOCKET_IPV6=m
+CONFIG_NF_TPROXY_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_REJECT_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
+CONFIG_NF_DUP_IPV6=m
+CONFIG_NF_REJECT_IPV6=m
+CONFIG_NF_LOG_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -379,6 +1273,9 @@
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+# end of IPv6: Netfilter Configuration
+
+CONFIG_NF_DEFRAG_IPV6=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
@@ -405,19 +1302,43 @@
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_BPFILTER=y
+CONFIG_BPFILTER_UMH=m
CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+
+#
+# DCCP CCIDs Configuration
+#
CONFIG_IP_DCCP_CCID2_DEBUG=y
+CONFIG_IP_DCCP_CCID3=y
CONFIG_IP_DCCP_CCID3_DEBUG=y
+CONFIG_IP_DCCP_TFRC_LIB=y
+CONFIG_IP_DCCP_TFRC_DEBUG=y
+# end of DCCP CCIDs Configuration
+
+#
+# DCCP Kernel Hacking
+#
CONFIG_IP_DCCP_DEBUG=y
+# end of DCCP Kernel Hacking
+
+CONFIG_IP_SCTP=m
CONFIG_SCTP_DBG_OBJCNT=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_INET_SCTP_DIAG=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
CONFIG_RDS_DEBUG=y
CONFIG_TIPC=m
CONFIG_TIPC_MEDIA_IB=y
+CONFIG_TIPC_MEDIA_UDP=y
+CONFIG_TIPC_CRYPTO=y
+CONFIG_TIPC_DIAG=m
CONFIG_ATM=m
CONFIG_ATM_CLIP=m
CONFIG_ATM_CLIP_NO_ICMP=y
@@ -426,30 +1347,46 @@
CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_ATM_BR2684_IPFILTER=y
CONFIG_L2TP=m
+# CONFIG_L2TP_DEBUGFS is not set
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_MRP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_BRIDGE_MRP=y
+# CONFIG_BRIDGE_CFM is not set
CONFIG_NET_DSA=m
+# CONFIG_NET_DSA_TAG_NONE is not set
CONFIG_NET_DSA_TAG_AR9331=m
+CONFIG_NET_DSA_TAG_BRCM_COMMON=m
CONFIG_NET_DSA_TAG_BRCM=m
+# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set
CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
+# CONFIG_NET_DSA_TAG_HELLCREEK is not set
CONFIG_NET_DSA_TAG_GSWIP=m
+CONFIG_NET_DSA_TAG_DSA_COMMON=m
CONFIG_NET_DSA_TAG_DSA=m
CONFIG_NET_DSA_TAG_EDSA=m
CONFIG_NET_DSA_TAG_MTK=m
CONFIG_NET_DSA_TAG_KSZ=m
CONFIG_NET_DSA_TAG_OCELOT=m
+# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set
CONFIG_NET_DSA_TAG_QCA=m
CONFIG_NET_DSA_TAG_RTL4_A=m
+# CONFIG_NET_DSA_TAG_RTL8_4 is not set
+# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set
CONFIG_NET_DSA_TAG_LAN9303=m
CONFIG_NET_DSA_TAG_SJA1105=m
CONFIG_NET_DSA_TAG_TRAILER=m
+# CONFIG_NET_DSA_TAG_XRS700X is not set
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_LLC=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
@@ -459,12 +1396,18 @@
CONFIG_X25=m
CONFIG_LAPB=m
CONFIG_PHONET=m
CONFIG_6LOWPAN=m
+# CONFIG_6LOWPAN_DEBUGFS is not set
# CONFIG_6LOWPAN_NHC is not set
CONFIG_IEEE802154=m
CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
+CONFIG_IEEE802154_SOCKET=m
CONFIG_IEEE802154_6LOWPAN=m
CONFIG_MAC802154=m
CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
@@ -476,6 +1419,7 @@
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_CBS=m
CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_MQPRIO_LIB=m
CONFIG_NET_SCH_TAPRIO=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_NETEM=m
@@ -495,7 +1439,18 @@
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_DEFAULT=y
+# CONFIG_DEFAULT_FQ is not set
+# CONFIG_DEFAULT_CODEL is not set
CONFIG_DEFAULT_FQ_CODEL=y
+# CONFIG_DEFAULT_FQ_PIE is not set
+# CONFIG_DEFAULT_SFQ is not set
+# CONFIG_DEFAULT_PFIFO_FAST is not set
+CONFIG_DEFAULT_NET_SCH="fq_codel"
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
@@ -508,11 +1463,13 @@
CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=m
CONFIG_NET_EMATCH_NBYTE=m
CONFIG_NET_EMATCH_U32=m
CONFIG_NET_EMATCH_META=m
CONFIG_NET_EMATCH_TEXT=m
+# CONFIG_NET_EMATCH_CANID is not set
CONFIG_NET_EMATCH_IPSET=m
CONFIG_NET_EMATCH_IPT=m
CONFIG_NET_CLS_ACT=y
@@ -541,29 +1498,70 @@
CONFIG_NET_IFE_SKBMARK=m
CONFIG_NET_IFE_SKBPRIO=m
CONFIG_NET_IFE_SKBTCINDEX=m
CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_SCH_FIFO=y
CONFIG_DCB=y
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_BATMAN_V=y
+CONFIG_BATMAN_ADV_BLA=y
+CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
+CONFIG_BATMAN_ADV_MCAST=y
CONFIG_BATMAN_ADV_DEBUG=y
+# CONFIG_BATMAN_ADV_TRACING is not set
CONFIG_OPENVSWITCH=m
+CONFIG_OPENVSWITCH_GRE=m
+CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_OPENVSWITCH_GENEVE=m
CONFIG_VSOCKETS=m
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VSOCKETS_LOOPBACK=m
CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
CONFIG_NETLINK_DIAG=m
+CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=y
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_NSH=y
CONFIG_HSR=m
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_L3_MASTER_DEV=y
CONFIG_QRTR=m
CONFIG_QRTR_TUN=m
CONFIG_NET_NCSI=y
CONFIG_NCSI_OEM_CMD_GET_MAC=y
+# CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set
+CONFIG_PCPU_DEV_REFCNT=y
+CONFIG_MAX_SKB_FRAGS=17
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_XPS=y
CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
CONFIG_BPF_STREAM_PARSER=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
CONFIG_NET_PKTGEN=m
+# CONFIG_NET_DROP_MONITOR is not set
+# end of Network testing
+# end of Networking options
+
+# CONFIG_HAMRADIO is not set
CONFIG_CAN=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_BCM=m
+CONFIG_CAN_GW=m
+# CONFIG_CAN_J1939 is not set
+# CONFIG_CAN_ISOTP is not set
CONFIG_BT=m
+CONFIG_BT_BREDR=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -572,95 +1570,514 @@
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_CMTP=m
CONFIG_BT_HIDP=m
CONFIG_BT_HS=y
+CONFIG_BT_LE=y
+CONFIG_BT_LE_L2CAP_ECRED=y
+# CONFIG_BT_6LOWPAN is not set
+# CONFIG_BT_LEDS is not set
+# CONFIG_BT_MSFTEXT is not set
+# CONFIG_BT_AOSPEXT is not set
+CONFIG_BT_DEBUGFS=y
+# CONFIG_BT_SELFTEST is not set
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_INTEL=m
+CONFIG_BT_RTL=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
+CONFIG_BT_HCIBTUSB_POLL_SYNC=y
# CONFIG_BT_HCIBTUSB_BCM is not set
+# CONFIG_BT_HCIBTUSB_MTK is not set
+CONFIG_BT_HCIBTUSB_RTL=y
CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
CONFIG_BT_HCIUART_ATH3K=y
+# CONFIG_BT_HCIUART_INTEL is not set
+# CONFIG_BT_HCIUART_AG6XX is not set
CONFIG_BT_HCIBCM203X=m
+# CONFIG_BT_HCIBCM4377 is not set
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
CONFIG_BT_ATH3K=m
+# CONFIG_BT_MTKSDIO is not set
+# CONFIG_BT_VIRTIO is not set
+# end of Bluetooth device drivers
+
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AF_KCM is not set
+CONFIG_STREAM_PARSER=y
+# CONFIG_MCTP is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
+CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+CONFIG_CFG80211_CRDA_SUPPORT=y
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
CONFIG_RFKILL=m
+CONFIG_RFKILL_LEDS=y
CONFIG_RFKILL_INPUT=y
+# CONFIG_RFKILL_GPIO is not set
CONFIG_NET_9P=y
+CONFIG_NET_9P_FD=y
CONFIG_NET_9P_VIRTIO=y
+# CONFIG_NET_9P_RDMA is not set
+# CONFIG_NET_9P_DEBUG is not set
+# CONFIG_CAIF is not set
+CONFIG_CEPH_LIB=m
+# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
+# CONFIG_NFC is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
+CONFIG_LWTUNNEL=y
+CONFIG_LWTUNNEL_BPF=y
+CONFIG_DST_CACHE=y
+CONFIG_GRO_CELLS=y
+CONFIG_SOCK_VALIDATE_XMIT=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_SOCK_MSG=y
+CONFIG_NET_DEVLINK=y
+CONFIG_PAGE_POOL=y
+# CONFIG_PAGE_POOL_STATS is not set
+CONFIG_FAILOVER=m
+CONFIG_ETHTOOL_NETLINK=y
+
+#
+# Device Drivers
+#
+CONFIG_HAVE_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCIEAER=y
CONFIG_PCIEAER_INJECT=m
CONFIG_PCIE_ECRC=y
+CONFIG_PCIEASPM=y
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_PCIE_PME=y
CONFIG_PCIE_DPC=y
+# CONFIG_PCIE_PTM is not set
+# CONFIG_PCIE_EDR is not set
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_ARCH_FALLBACKS=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
CONFIG_PCI_STUB=y
CONFIG_PCI_PF_STUB=m
+CONFIG_PCI_ATS=y
+CONFIG_PCI_ECAM=y
CONFIG_PCI_IOV=y
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+# CONFIG_PCI_DYNAMIC_OF_NODES is not set
+# CONFIG_PCIE_BUS_TUNE_OFF is not set
+CONFIG_PCIE_BUS_DEFAULT=y
+# CONFIG_PCIE_BUS_SAFE is not set
+# CONFIG_PCIE_BUS_PERFORMANCE is not set
+# CONFIG_PCIE_BUS_PEER2PEER is not set
+CONFIG_VGA_ARB=y
CONFIG_VGA_ARB_MAX_GPUS=64
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=y
+# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
+# CONFIG_HOTPLUG_PCI_CPCI is not set
CONFIG_HOTPLUG_PCI_SHPC=y
+
+#
+# PCI controller drivers
+#
+# CONFIG_PCI_FTPCI100 is not set
+# CONFIG_PCI_HOST_GENERIC is not set
+CONFIG_PCI_LOONGSON=y
+# CONFIG_PCIE_MICROCHIP_HOST is not set
+# CONFIG_PCIE_XILINX is not set
+
+#
+# Cadence-based PCIe controllers
+#
+# CONFIG_PCIE_CADENCE_PLAT_HOST is not set
+# CONFIG_PCI_J721E_HOST is not set
+# end of Cadence-based PCIe controllers
+
+#
+# DesignWare-based PCIe controllers
+#
+# CONFIG_PCI_MESON is not set
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+# end of DesignWare-based PCIe controllers
+
+#
+# Mobiveil-based PCIe controllers
+#
+# end of Mobiveil-based PCIe controllers
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# CONFIG_PCI_ENDPOINT is not set
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# CONFIG_PCI_SW_SWITCHTEC is not set
+# end of PCI switch controller drivers
+
+# CONFIG_CXL_BUS is not set
CONFIG_PCCARD=m
# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
CONFIG_YENTA=m
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
CONFIG_RAPIDIO=y
CONFIG_RAPIDIO_TSI721=y
+CONFIG_RAPIDIO_DISC_TIMEOUT=30
CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
+# CONFIG_RAPIDIO_DMA_ENGINE is not set
+# CONFIG_RAPIDIO_DEBUG is not set
CONFIG_RAPIDIO_ENUM_BASIC=m
CONFIG_RAPIDIO_CHMAN=m
CONFIG_RAPIDIO_MPORT_CDEV=m
+
+#
+# RapidIO Switch drivers
+#
+# CONFIG_RAPIDIO_CPS_XX is not set
+# CONFIG_RAPIDIO_CPS_GEN2 is not set
+# CONFIG_RAPIDIO_RXS_GEN3 is not set
+# end of RapidIO Switch drivers
+
+#
+# Generic Driver Options
+#
+CONFIG_AUXILIARY_BUS=y
CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH=""
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEVTMPFS_SAFE is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_DEBUG=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_COMPRESS_XZ=y
+# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set
+CONFIG_FW_CACHE=y
+# CONFIG_FW_UPLOAD is not set
+# end of Firmware loader
+
+CONFIG_WANT_DEV_COREDUMP=y
+CONFIG_ALLOW_DEV_COREDUMP=y
+CONFIG_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_SOC_BUS=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=m
+CONFIG_REGMAP_SPI=m
+CONFIG_REGMAP_MMIO=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_MOXTET is not set
+# CONFIG_MHI_BUS is not set
+# CONFIG_MHI_BUS_EP is not set
+# end of Bus devices
+
+#
+# Cache Drivers
+#
+# end of Cache Drivers
+
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+
+#
+# Firmware Drivers
+#
+
+#
+# ARM System Control and Management Interface Protocol
+#
+# end of ARM System Control and Management Interface Protocol
+
+# CONFIG_FIRMWARE_MEMMAP is not set
+CONFIG_DMIID=y
CONFIG_DMI_SYSFS=y
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
CONFIG_ISCSI_IBFT=m
+CONFIG_SYSFB=y
+# CONFIG_SYSFB_SIMPLEFB is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_VARS_PSTORE=m
+# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+CONFIG_EFI_GENERIC_STUB=y
CONFIG_EFI_ZBOOT=y
+# CONFIG_EFI_BOOTLOADER_CONTROL is not set
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
+# CONFIG_RESET_ATTACK_MITIGATION is not set
+# CONFIG_EFI_DISABLE_PCI_DMA is not set
+CONFIG_EFI_EARLYCON=y
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
+# CONFIG_EFI_DISABLE_RUNTIME is not set
+# CONFIG_EFI_COCO_SECRET is not set
+# end of EFI (Extensible Firmware Interface) Support
+
+#
+# Tegra firmware driver
+#
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
+# CONFIG_GNSS is not set
CONFIG_MTD=m
+# CONFIG_MTD_TESTS is not set
+
+#
+# Partition parsers
+#
+# CONFIG_MTD_AR7_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_OF_PARTS=m
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# end of Partition parsers
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=m
CONFIG_MTD_BLOCK=m
+# CONFIG_MTD_BLOCK_RO is not set
+
+#
+# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK.
+#
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_SWAP is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
CONFIG_MTD_CFI=m
CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
+# CONFIG_MTD_ABSENT is not set
+# end of RAM/ROM/Flash chip drivers
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+# end of Mapping drivers for chip access
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_MCHP23K256 is not set
+# CONFIG_MTD_MCHP48L640 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
CONFIG_MTD_BLOCK2MTD=m
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# end of Self-contained MTD device drivers
+
+#
+# NAND
+#
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_RAW_NAND is not set
+# CONFIG_MTD_SPI_NAND is not set
+
+#
+# ECC engine support
+#
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# end of ECC engine support
+# end of NAND
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# end of LPDDR & LPDDR2 PCM memory drivers
+
CONFIG_MTD_SPI_NOR=m
+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
+# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
+CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
+# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_LIMIT=20
+# CONFIG_MTD_UBI_FASTMAP is not set
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_MTD_UBI_BLOCK=y
+# CONFIG_MTD_HYPERBUS is not set
+CONFIG_DTC=y
+CONFIG_OF=y
+# CONFIG_OF_UNITTEST is not set
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_RESERVED_MEM=y
+# CONFIG_OF_OVERLAY is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
CONFIG_PARPORT_SERIAL=m
CONFIG_PARPORT_PC_FIFO=y
+# CONFIG_PARPORT_PC_SUPERIO is not set
CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_PNP=y
# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
CONFIG_BLK_DEV_NULL_BLK=m
+CONFIG_CDROM=m
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
CONFIG_ZRAM=m
+# CONFIG_ZRAM_DEF_COMP_LZORLE is not set
CONFIG_ZRAM_DEF_COMP_ZSTD=y
+# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
+# CONFIG_ZRAM_DEF_COMP_LZO is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
+# CONFIG_ZRAM_DEF_COMP_842 is not set
+CONFIG_ZRAM_DEF_COMP="zstd"
CONFIG_ZRAM_WRITEBACK=y
+# CONFIG_ZRAM_MEMORY_TRACKING is not set
+# CONFIG_ZRAM_MULTI_COMP is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
CONFIG_BLK_DEV_DRBD=m
+# CONFIG_DRBD_FAULT_INJECTION is not set
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_RBD=m
+# CONFIG_BLK_DEV_UBLK is not set
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=m
CONFIG_BLK_DEV_NVME=m
CONFIG_NVME_MULTIPATH=y
+# CONFIG_NVME_VERBOSE_ERRORS is not set
+# CONFIG_NVME_HWMON is not set
+CONFIG_NVME_FABRICS=m
CONFIG_NVME_RDMA=m
CONFIG_NVME_FC=m
CONFIG_NVME_TCP=m
+# CONFIG_NVME_AUTH is not set
CONFIG_NVME_TARGET=m
CONFIG_NVME_TARGET_PASSTHRU=y
CONFIG_NVME_TARGET_LOOP=m
@@ -668,57 +2085,183 @@
CONFIG_NVME_TARGET_RDMA=m
CONFIG_NVME_TARGET_FC=m
CONFIG_NVME_TARGET_FCLOOP=m
CONFIG_NVME_TARGET_TCP=m
+# CONFIG_NVME_TARGET_AUTH is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+CONFIG_SENSORS_LIS3LV02D=m
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+# CONFIG_ICS932S401 is not set
CONFIG_ENCLOSURE_SERVICES=m
+# CONFIG_HP_ILO is not set
CONFIG_APDS9802ALS=m
CONFIG_ISL29003=m
CONFIG_ISL29020=m
CONFIG_SENSORS_TSL2550=m
CONFIG_SENSORS_BH1770=m
CONFIG_SENSORS_APDS990X=m
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+CONFIG_MISC_RTSX=m
+# CONFIG_HISI_HIKEY_USB is not set
+# CONFIG_OPEN_DICE is not set
+# CONFIG_VCPU_STALL_DETECTOR is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
CONFIG_EEPROM_LEGACY=m
CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# end of EEPROM support
+
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# end of Texas Instruments shared transport line discipline
+
CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_ALTERA_STAPL=m
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
CONFIG_MISC_RTSX_PCI=m
CONFIG_MISC_RTSX_USB=m
CONFIG_UACCE=m
CONFIG_PVPANIC=y
+# CONFIG_PVPANIC_MMIO is not set
+# CONFIG_PVPANIC_PCI is not set
+# CONFIG_GP_PCI1XXXX is not set
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
+CONFIG_BLK_DEV_BSG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+# end of SCSI Transports
+
+CONFIG_SCSI_LOWLEVEL=y
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+# CONFIG_SCSI_CXGB3_ISCSI is not set
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
CONFIG_SCSI_BNX2X_FCOE=m
CONFIG_BE2ISCSI=m
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
CONFIG_SCSI_HPSA=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
CONFIG_SCSI_AACRAID=m
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
CONFIG_SCSI_MVSAS=y
# CONFIG_SCSI_MVSAS_DEBUG is not set
CONFIG_SCSI_MVSAS_TASKLET=y
CONFIG_SCSI_MVUMI=y
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
CONFIG_MEGARAID_NEWGEN=y
CONFIG_MEGARAID_MM=y
CONFIG_MEGARAID_MAILBOX=y
CONFIG_MEGARAID_LEGACY=y
CONFIG_MEGARAID_SAS=m
CONFIG_SCSI_MPT3SAS=y
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
+# CONFIG_SCSI_MPI3MR is not set
CONFIG_SCSI_SMARTPQI=m
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_MYRB is not set
+# CONFIG_SCSI_MYRS is not set
CONFIG_LIBFC=m
CONFIG_LIBFCOE=m
CONFIG_FCOE=m
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FDOMAIN_PCI is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
CONFIG_SCSI_QLOGIC_1280=m
CONFIG_SCSI_QLA_FC=m
CONFIG_TCM_QLA2XXX=m
+# CONFIG_TCM_QLA2XXX_DEBUG is not set
CONFIG_SCSI_QLA_ISCSI=m
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_EFCT is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_BFA_FC is not set
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_CHELSIO_FCOE=m
CONFIG_SCSI_DH=y
@@ -726,25 +2269,141 @@
CONFIG_SCSI_DH_RDAC=y
CONFIG_SCSI_DH_HP_SW=y
CONFIG_SCSI_DH_EMC=y
CONFIG_SCSI_DH_ALUA=y
+# end of SCSI device support
+
CONFIG_ATA=y
+CONFIG_SATA_HOST=y
+CONFIG_PATA_TIMINGS=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_FORCE=y
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
CONFIG_SATA_AHCI=y
+CONFIG_SATA_MOBILE_LPM_POLICY=0
CONFIG_SATA_AHCI_PLATFORM=y
+# CONFIG_AHCI_DWC is not set
+# CONFIG_AHCI_CEVA is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
CONFIG_ATA_PIIX=m
+# CONFIG_SATA_DWC is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_ZHAOXIN is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
CONFIG_PATA_ATIIXP=y
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OF_PLATFORM is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_PARPORT is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
CONFIG_ATA_GENERIC=m
+# CONFIG_PATA_LEGACY is not set
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
+# CONFIG_MD_CLUSTER is not set
CONFIG_BCACHE=m
+# CONFIG_BCACHE_DEBUG is not set
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
+# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+# CONFIG_DM_UNSTRIPED is not set
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -752,33 +2411,57 @@
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+# CONFIG_DM_MULTIPATH_IOA is not set
CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_FEC is not set
CONFIG_DM_SWITCH=m
CONFIG_DM_LOG_WRITES=m
CONFIG_DM_INTEGRITY=m
+# CONFIG_DM_ZONED is not set
+CONFIG_DM_AUDIT=y
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
CONFIG_TCM_PSCSI=m
CONFIG_TCM_USER2=m
CONFIG_LOOPBACK_TARGET=m
+# CONFIG_TCM_FC is not set
CONFIG_ISCSI_TARGET=m
CONFIG_ISCSI_TARGET_CXGB4=m
+# CONFIG_SBP_TARGET is not set
+# CONFIG_REMOTE_TARGET is not set
CONFIG_FUSION=y
CONFIG_FUSION_SPI=m
+# CONFIG_FUSION_FC is not set
CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
CONFIG_FUSION_CTL=m
CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
CONFIG_FIREWIRE=m
CONFIG_FIREWIRE_OHCI=m
CONFIG_FIREWIRE_SBP2=m
CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
CONFIG_NET_FC=y
CONFIG_IFB=m
CONFIG_NET_TEAM=m
@@ -789,89 +2472,210 @@
CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
CONFIG_IPVLAN=m
CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
CONFIG_NTB_NETDEV=m
CONFIG_RIONET=m
+CONFIG_RIONET_TX_SIZE=128
+CONFIG_RIONET_RX_SIZE=128
CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
CONFIG_NET_VRF=m
CONFIG_VSOCKMON=m
+# CONFIG_ARCNET is not set
# CONFIG_ATM_DRIVERS is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_B53 is not set
+# CONFIG_NET_DSA_BCM_SF2 is not set
+# CONFIG_NET_DSA_LOOP is not set
+# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set
+# CONFIG_NET_DSA_LANTIQ_GSWIP is not set
+# CONFIG_NET_DSA_MT7530 is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_AR9331 is not set
+# CONFIG_NET_DSA_QCA8K is not set
+# CONFIG_NET_DSA_SJA1105 is not set
+# CONFIG_NET_DSA_XRS700X_I2C is not set
+# CONFIG_NET_DSA_XRS700X_MDIO is not set
+# CONFIG_NET_DSA_REALTEK is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set
+# end of Distributed Switch Architecture drivers
+
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_AGERE is not set
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ASIX=y
+# CONFIG_SPI_AX88796C is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
CONFIG_BNX2=y
+CONFIG_CNIC=m
CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_SYSTEMPORT is not set
CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
CONFIG_BNXT_DCB=y
+CONFIG_BNXT_HWMON=y
+CONFIG_NET_VENDOR_CADENCE=y
+# CONFIG_MACB is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_NET_VENDOR_CHELSIO=y
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T1_1G=y
CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+# CONFIG_CHELSIO_T4_DCB is not set
CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set
CONFIG_CHELSIO_IPSEC_INLINE=m
+# CONFIG_CHELSIO_TLS_DEVICE is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_NET_VENDOR_DAVICOM=y
+# CONFIG_DM9051 is not set
CONFIG_DNET=m
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
+CONFIG_NET_VENDOR_ENGLEDER=y
+# CONFIG_TSNEP is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_NET_VENDOR_FUNGIBLE=y
+# CONFIG_FUN_ETH is not set
+CONFIG_NET_VENDOR_GOOGLE=y
+CONFIG_NET_VENDOR_HUAWEI=y
# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
CONFIG_E1000=m
CONFIG_E1000E=m
CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
CONFIG_IGBVF=m
CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
CONFIG_IXGBE_DCB=y
+CONFIG_IXGBE_IPSEC=y
CONFIG_IXGBEVF=m
+CONFIG_IXGBEVF_IPSEC=y
CONFIG_I40E=m
CONFIG_I40E_DCB=y
+CONFIG_IAVF=m
CONFIG_I40EVF=m
CONFIG_ICE=m
+CONFIG_ICE_SWITCHDEV=y
CONFIG_FM10K=m
+# CONFIG_IGC is not set
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_ADI=y
+# CONFIG_ADIN1110 is not set
+CONFIG_NET_VENDOR_LITEX=y
+# CONFIG_LITEX_LITEETH is not set
# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
CONFIG_MLX4_EN=m
+CONFIG_MLX4_EN_DCB=y
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
# CONFIG_MLX4_CORE_GEN2 is not set
CONFIG_MLX5_CORE=m
CONFIG_MLX5_FPGA=y
CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_BRIDGE=y
+CONFIG_MLX5_CLS_ACT=y
+CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
+CONFIG_MLX5_CORE_EN_DCB=y
CONFIG_MLX5_CORE_IPOIB=y
+# CONFIG_MLX5_MACSEC is not set
+# CONFIG_MLX5_EN_IPSEC is not set
+# CONFIG_MLX5_EN_TLS is not set
+CONFIG_MLX5_SW_STEERING=y
+# CONFIG_MLX5_SF is not set
CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
+CONFIG_NET_VENDOR_MICROSOFT=y
# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_FEALNX is not set
# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_NET_VENDOR_NETERION=y
+# CONFIG_S2IO is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
CONFIG_ETHOC=m
+CONFIG_NET_VENDOR_PACKET_ENGINES=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
CONFIG_8139CP=m
CONFIG_8139TOO=m
# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
CONFIG_R8169=m
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
@@ -882,23 +2686,54 @@
CONFIG_R8169=m
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_NET_VENDOR_STMICRO=y
CONFIG_STMMAC_ETH=y
+# CONFIG_STMMAC_SELFTESTS is not set
+CONFIG_STMMAC_PLATFORM=y
+# CONFIG_DWMAC_DWC_QOS_ETH is not set
+CONFIG_DWMAC_GENERIC=y
+# CONFIG_DWMAC_INTEL_PLAT is not set
+CONFIG_DWMAC_LOONGSON=m
+# CONFIG_STMMAC_PCI is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y
+# CONFIG_MSE102X is not set
# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NET_VENDOR_WANGXUN=y
+CONFIG_LIBWX=m
CONFIG_NGBE=m
CONFIG_TXGBE=m
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLINK=y
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
CONFIG_LED_TRIGGER_PHY=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_FIXED_PHY=y
CONFIG_SFP=y
+
+#
+# MII PHY device drivers
+#
CONFIG_AMD_PHY=m
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
CONFIG_AQUANTIA_PHY=m
+# CONFIG_AX88796B_PHY is not set
CONFIG_BROADCOM_PHY=m
+# CONFIG_BCM54140_PHY is not set
CONFIG_BCM7XXX_PHY=m
+# CONFIG_BCM84881_PHY is not set
CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
+CONFIG_BCM_NET_PHYPTP=m
CONFIG_CICADA_PHY=m
CONFIG_CORTINA_PHY=m
CONFIG_DAVICOM_PHY=m
@@ -908,43 +2743,127 @@
CONFIG_INTEL_XWAY_PHY=m
CONFIG_LSI_ET1011C_PHY=m
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=y
+# CONFIG_MARVELL_88Q2XXX_PHY is not set
+# CONFIG_MARVELL_88X2222_PHY is not set
+# CONFIG_MAXLINEAR_GPHY is not set
+# CONFIG_MEDIATEK_GE_PHY is not set
CONFIG_MICREL_PHY=m
+# CONFIG_MICROCHIP_T1S_PHY is not set
+CONFIG_MICROCHIP_PHY=m
CONFIG_MICROCHIP_T1_PHY=m
CONFIG_MICROSEMI_PHY=m
+# CONFIG_MOTORCOMM_PHY is not set
CONFIG_NATIONAL_PHY=m
+# CONFIG_NXP_CBTX_PHY is not set
+# CONFIG_NXP_C45_TJA11XX_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_NCN26000_PHY is not set
CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
CONFIG_RENESAS_PHY=m
CONFIG_ROCKCHIP_PHY=m
+CONFIG_SMSC_PHY=m
CONFIG_STE10XP=m
CONFIG_TERANETICS_PHY=m
CONFIG_DP83822_PHY=m
CONFIG_DP83TC811_PHY=m
CONFIG_DP83848_PHY=m
CONFIG_DP83867_PHY=m
+# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
CONFIG_VITESSE_PHY=m
CONFIG_XILINX_GMII2RGMII=m
CONFIG_MICREL_KS8995MA=m
+# CONFIG_PSE_CONTROLLER is not set
+CONFIG_CAN_DEV=m
CONFIG_CAN_VCAN=m
+# CONFIG_CAN_VXCAN is not set
+CONFIG_CAN_NETLINK=y
+CONFIG_CAN_CALC_BITTIMING=y
+# CONFIG_CAN_CAN327 is not set
+# CONFIG_CAN_FLEXCAN is not set
+# CONFIG_CAN_GRCAN is not set
+# CONFIG_CAN_KVASER_PCIEFD is not set
CONFIG_CAN_SLCAN=m
CONFIG_CAN_C_CAN=m
CONFIG_CAN_C_CAN_PLATFORM=m
CONFIG_CAN_C_CAN_PCI=m
CONFIG_CAN_CC770=m
+# CONFIG_CAN_CC770_ISA is not set
CONFIG_CAN_CC770_PLATFORM=m
+# CONFIG_CAN_CTUCANFD_PCI is not set
+# CONFIG_CAN_CTUCANFD_PLATFORM is not set
+# CONFIG_CAN_IFI_CANFD is not set
+# CONFIG_CAN_M_CAN is not set
+# CONFIG_CAN_PEAK_PCIEFD is not set
CONFIG_CAN_SJA1000=m
CONFIG_CAN_EMS_PCI=m
+# CONFIG_CAN_F81601 is not set
CONFIG_CAN_KVASER_PCI=m
CONFIG_CAN_PEAK_PCI=m
+CONFIG_CAN_PEAK_PCIEC=y
CONFIG_CAN_PLX_PCI=m
+# CONFIG_CAN_SJA1000_ISA is not set
CONFIG_CAN_SJA1000_PLATFORM=m
CONFIG_CAN_SOFTING=m
+
+#
+# CAN SPI interfaces
+#
+# CONFIG_CAN_HI311X is not set
+# CONFIG_CAN_MCP251X is not set
+# CONFIG_CAN_MCP251XFD is not set
+# end of CAN SPI interfaces
+
+#
+# CAN USB interfaces
+#
CONFIG_CAN_8DEV_USB=m
CONFIG_CAN_EMS_USB=m
+# CONFIG_CAN_ESD_USB is not set
+# CONFIG_CAN_ETAS_ES58X is not set
+# CONFIG_CAN_F81604 is not set
+# CONFIG_CAN_GS_USB is not set
CONFIG_CAN_KVASER_USB=m
+# CONFIG_CAN_MCBA_USB is not set
CONFIG_CAN_PEAK_USB=m
+# CONFIG_CAN_UCAN is not set
+# end of CAN USB interfaces
+
+# CONFIG_CAN_DEBUG_DEVICES is not set
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_BUS=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_OF_MDIO=y
+CONFIG_ACPI_MDIO=y
+CONFIG_MDIO_DEVRES=y
CONFIG_MDIO_BITBANG=m
+# CONFIG_MDIO_BCM_UNIMAC is not set
+CONFIG_MDIO_CAVIUM=m
+# CONFIG_MDIO_GPIO is not set
+# CONFIG_MDIO_HISI_FEMAC is not set
+CONFIG_MDIO_I2C=y
+# CONFIG_MDIO_MVUSB is not set
CONFIG_MDIO_MSCC_MIIM=m
+# CONFIG_MDIO_OCTEON is not set
+# CONFIG_MDIO_IPQ4019 is not set
+# CONFIG_MDIO_IPQ8064 is not set
CONFIG_MDIO_THUNDER=m
+
+#
+# MDIO Multiplexers
+#
+# CONFIG_MDIO_BUS_MUX_GPIO is not set
+# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
+# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
+
+#
+# PCS device drivers
+#
+CONFIG_PCS_XPCS=y
+# end of PCS device drivers
+
+# CONFIG_PLIP is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -953,31 +2872,47 @@
CONFIG_PPP_MPPE=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOATM=m
CONFIG_PPPOE=m
+# CONFIG_PPPOE_HASH_BITS_1 is not set
+# CONFIG_PPPOE_HASH_BITS_2 is not set
+CONFIG_PPPOE_HASH_BITS_4=y
+# CONFIG_PPPOE_HASH_BITS_8 is not set
+CONFIG_PPPOE_HASH_BITS=4
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
+CONFIG_SLHC=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDCETHER=m
CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
CONFIG_USB_NET_HUAWEI_CDC_NCM=m
CONFIG_USB_NET_CDC_MBIM=m
CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SR9700 is not set
+# CONFIG_USB_NET_SR9800 is not set
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_NET_GL620A=m
# CONFIG_USB_NET_NET1080 is not set
CONFIG_USB_NET_PLUSB=m
CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
+CONFIG_USB_NET_CDC_SUBSET=m
CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
# CONFIG_USB_BELKIN is not set
@@ -990,41 +2925,155 @@
CONFIG_USB_NET_KALMIA=m
CONFIG_USB_NET_QMI_WWAN=m
CONFIG_USB_HSO=m
CONFIG_USB_NET_INT51X1=m
+# CONFIG_USB_CDC_PHONET is not set
CONFIG_USB_IPHETH=m
CONFIG_USB_SIERRA_NET=m
CONFIG_USB_VL600=m
CONFIG_USB_NET_CH9200=m
+# CONFIG_USB_NET_AQC111 is not set
+CONFIG_USB_RTL8153_ECM=m
+CONFIG_WLAN=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH_COMMON=m
+CONFIG_WLAN_VENDOR_ATH=y
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
CONFIG_ATH9K_AHB=y
+# CONFIG_ATH9K_DEBUGFS is not set
+# CONFIG_ATH9K_DYNACK is not set
CONFIG_ATH9K_WOW=y
+CONFIG_ATH9K_RFKILL=y
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
+CONFIG_ATH9K_PCOEM=y
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+# CONFIG_ATH9K_HWRNG is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
CONFIG_ATH10K=m
+CONFIG_ATH10K_CE=y
CONFIG_ATH10K_PCI=m
+# CONFIG_ATH10K_AHB is not set
+# CONFIG_ATH10K_SDIO is not set
+# CONFIG_ATH10K_USB is not set
+# CONFIG_ATH10K_DEBUG is not set
+# CONFIG_ATH10K_DEBUGFS is not set
+# CONFIG_ATH10K_TRACING is not set
+# CONFIG_WCN36XX is not set
+# CONFIG_ATH11K is not set
+# CONFIG_ATH12K is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
+CONFIG_WLAN_VENDOR_BROADCOM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_BRCMUTIL=m
CONFIG_BRCMSMAC=m
+CONFIG_BRCMSMAC_LEDS=y
CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_PROTO_BCDC=y
+CONFIG_BRCMFMAC_PROTO_MSGBUF=y
+CONFIG_BRCMFMAC_SDIO=y
CONFIG_BRCMFMAC_USB=y
CONFIG_BRCMFMAC_PCIE=y
+# CONFIG_BRCM_TRACING is not set
+# CONFIG_BRCMDBG is not set
# CONFIG_WLAN_VENDOR_CISCO is not set
+CONFIG_WLAN_VENDOR_INTEL=y
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_LEDS=y
CONFIG_IWLDVM=m
CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_OPMODE_MODULAR=y
+
+#
+# Debugging Options
+#
+# CONFIG_IWLWIFI_DEBUG is not set
+CONFIG_IWLWIFI_DEVICE_TRACING=y
+# end of Debugging Options
+
# CONFIG_WLAN_VENDOR_INTERSIL is not set
+CONFIG_WLAN_VENDOR_MARVELL=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
CONFIG_MWIFIEX_PCIE=m
CONFIG_MWIFIEX_USB=m
+# CONFIG_MWL8K is not set
+CONFIG_WLAN_VENDOR_MEDIATEK=y
CONFIG_MT7601U=m
+CONFIG_MT76_CORE=m
+CONFIG_MT76_LEDS=y
+CONFIG_MT76_USB=m
+CONFIG_MT76x02_LIB=m
+CONFIG_MT76x02_USB=m
+CONFIG_MT76x0_COMMON=m
CONFIG_MT76x0U=m
+# CONFIG_MT76x0E is not set
+CONFIG_MT76x2_COMMON=m
+# CONFIG_MT76x2E is not set
CONFIG_MT76x2U=m
+# CONFIG_MT7603E is not set
+# CONFIG_MT7615E is not set
+# CONFIG_MT7663U is not set
+# CONFIG_MT7663S is not set
+# CONFIG_MT7915E is not set
+# CONFIG_MT7921E is not set
+# CONFIG_MT7921S is not set
+# CONFIG_MT7921U is not set
+# CONFIG_MT7996E is not set
+CONFIG_WLAN_VENDOR_MICROCHIP=y
+# CONFIG_WILC1000_SDIO is not set
+# CONFIG_WILC1000_SPI is not set
CONFIG_WILC1000_SPI is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y CONFIG_RT2800USB_RT3573=y CONFIG_RT2800USB_RT53XX=y CONFIG_RT2800USB_RT55XX=y CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -1034,29 +3083,77 @@ CONFIG_RTL8188EE=m CONFIG_RTL8192EE=m CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m # CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_RTW88 is not set +# CONFIG_RTW89 is not set # CONFIG_WLAN_VENDOR_RSI is not set +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set # CONFIG_WLAN_VENDOR_ST is not set # CONFIG_WLAN_VENDOR_TI is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set CONFIG_USB_NET_RNDIS_WLAN=m CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set CONFIG_WAN=y CONFIG_HDLC=m CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set CONFIG_HDLC_CISCO=m CONFIG_HDLC_FR=m CONFIG_HDLC_PPP=m +# CONFIG_HDLC_X25 is not set +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_LAPBETHER is not set +CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + CONFIG_VMXNET3=m CONFIG_FUJITSU_ES=m CONFIG_USB4_NET=m CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y CONFIG_MISDN=m CONFIG_MISDN_DSP=m CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# CONFIG_MISDN_HFCPCI=m CONFIG_MISDN_HFCMULTI=m CONFIG_MISDN_HFCUSB=m @@ -1065,161 +3162,777 @@ CONFIG_MISDN_SPEEDFAX=m CONFIG_MISDN_INFINEON=m CONFIG_MISDN_W6692=m CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers 
+# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set CONFIG_KEYBOARD_XTKBD=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y CONFIG_MOUSE_SERIAL=m CONFIG_MOUSE_APPLETOUCH=m CONFIG_MOUSE_BCM5974=m CONFIG_MOUSE_CYAPA=m CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y CONFIG_MOUSE_ELAN_I2C_SMBUS=y CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set CONFIG_MOUSE_SYNAPTICS_I2C=m CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set CONFIG_INPUT_TABLET=y CONFIG_TABLET_USB_ACECAD=m CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set CONFIG_TOUCHSCREEN_ELO=m 
CONFIG_TOUCHSCREEN_WACOM_W8001=m CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set CONFIG_INPUT_ATI_REMOTE2=m CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set CONFIG_INPUT_POWERMATE=m CONFIG_INPUT_YEALINK=m CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m CONFIG_RMI4_SPI=m CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +# CONFIG_RMI4_F54 is not set CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_RAW=m CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT 
is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_RUNTIME_UARTS=16 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_PERICOM=y +# CONFIG_SERIAL_OF_PLATFORM is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set CONFIG_N_HDLC=m CONFIG_N_GSM=m CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y CONFIG_IPMI_PANIC_EVENT=y CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m CONFIG_IPMI_SSIF=m CONFIG_IPMI_WATCHDOG=m CONFIG_IPMI_POWEROFF=m CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIRTIO=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +CONFIG_DEVPORT=y +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +# CONFIG_TCG_TIS is not set CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m CONFIG_TCG_TIS_I2C_INFINEON=m CONFIG_TCG_TIS_I2C_NUVOTON=m CONFIG_TCG_ATMEL=m CONFIG_TCG_INFINEON=m +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m CONFIG_TCG_TIS_ST33ZP24_SPI=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y 
+CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set CONFIG_I2C_AMD756=m CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set + +# +# ACPI drivers +# CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=y +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set CONFIG_I2C_LS2X=m +# CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_RK3X is not set CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set CONFIG_I2C_TINY_USB=m CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_CADENCE_XSPI is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +CONFIG_SPI_LOONGSON_CORE=y CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set CONFIG_PPS_CLIENT_LDISC=m CONFIG_PPS_CLIENT_PARPORT=m CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# 
CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set CONFIG_PINCTRL_LOONGSON2=y +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set CONFIG_GPIO_LOONGSON_64BIT=y +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# 
CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m CONFIG_SENSORS_ADM1026=m CONFIG_SENSORS_ADM1029=m CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m CONFIG_SENSORS_ADT7470=m CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set CONFIG_SENSORS_DS620=m CONFIG_SENSORS_DS1621=m CONFIG_SENSORS_I5K_AMB=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set CONFIG_SENSORS_MAX6639=m CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -1237,72 +3950,322 
@@ CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set CONFIG_SENSORS_MAX34440=m CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set CONFIG_SENSORS_UCD9000=m CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set CONFIG_SENSORS_SHT15=m CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set CONFIG_SENSORS_SIS5595=m CONFIG_SENSORS_DME1737=m CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set CONFIG_SENSORS_EMC6W201=m CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m CONFIG_SENSORS_SCH5627=m CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set CONFIG_SENSORS_THMC50=m CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set CONFIG_SENSORS_TMP401=m CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83791D=m CONFIG_SENSORS_W83792D=m CONFIG_SENSORS_W83793=m 
CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m + +# +# ACPI drivers +# CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set CONFIG_THERMAL_EMULATION=y +# CONFIG_THERMAL_MMIO is not set +# CONFIG_GENERIC_ADC_THERMAL is not set CONFIG_LOONGSON2_THERMAL=m CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# CONFIG_SOFT_WATCHDOG=m CONFIG_GPIO_WATCHDOG=m CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set CONFIG_ALIM7101_WDT=m CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# CONFIG_PCIPCWATCHDOG=m CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y CONFIG_BCMA_DRIVER_GMAC_CMN=y CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_LPC_ICH is not set +CONFIG_LPC_SCH=m +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not 
set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set CONFIG_RC_CORE=m CONFIG_LIRC=y +CONFIG_RC_MAP=m CONFIG_RC_DECODERS=y CONFIG_IR_IMON_DECODER=m CONFIG_IR_JVC_DECODER=m @@ -1310,6 +4273,7 @@ CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_NEC_DECODER=m CONFIG_IR_RC5_DECODER=m CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m CONFIG_IR_SONY_DECODER=m @@ -1317,31 +4281,114 @@ CONFIG_IR_XMP_DECODER=m CONFIG_RC_DEVICES=y CONFIG_IR_ENE=m CONFIG_IR_FINTEK=m +# CONFIG_IR_GPIO_CIR is not set +# CONFIG_IR_GPIO_TX is not set +# CONFIG_IR_HIX5HD2 is not set +# CONFIG_IR_IGORPLUGUSB is not set CONFIG_IR_IGUANA=m CONFIG_IR_IMON=m CONFIG_IR_IMON_RAW=m CONFIG_IR_ITE_CIR=m CONFIG_IR_MCEUSB=m CONFIG_IR_NUVOTON=m +# CONFIG_IR_PWM_TX is not set CONFIG_IR_REDRAT3=m CONFIG_IR_SERIAL=m CONFIG_IR_SERIAL_TRANSMITTER=y +# CONFIG_IR_SPI is not set CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set CONFIG_IR_TTUSBIR=m 
CONFIG_RC_ATI_REMOTE=m +# CONFIG_RC_LOOPBACK is not set +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set CONFIG_USB_PULSE8_CEC=m CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + CONFIG_MEDIA_SUPPORT=m -CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_USB_GSPCA=m +# CONFIG_MEDIA_SUPPORT_FILTER is not set +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set + +# +# Media device types +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_SDR_SUPPORT=y +CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_TEST_SUPPORT=y +# end of Media device types + +# +# Media core support +# +CONFIG_VIDEO_DEV=m +CONFIG_MEDIA_CONTROLLER=y +CONFIG_DVB_CORE=m +# end of Media core support + +# +# Video4Linux options +# +CONFIG_VIDEO_V4L2_I2C=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_V4L2_FWNODE=m +CONFIG_V4L2_ASYNC=m +# end of Video4Linux options + +# +# Media controller options +# +CONFIG_MEDIA_CONTROLLER_DVB=y +# end of Media controller options + +# +# Digital TV options +# +# CONFIG_DVB_MMAP is not set +CONFIG_DVB_NET=y +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set +# end of Digital TV options + +# +# Media drivers +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_GSPCA=m CONFIG_USB_GSPCA_BENQ=m CONFIG_USB_GSPCA_CONEX=m CONFIG_USB_GSPCA_CPIA1=m +# CONFIG_USB_GSPCA_DTCS033 is not set CONFIG_USB_GSPCA_ETOMS=m CONFIG_USB_GSPCA_FINEPIX=m CONFIG_USB_GSPCA_JEILINJ=m CONFIG_USB_GSPCA_JL2005BCD=m +# CONFIG_USB_GSPCA_KINECT is not set CONFIG_USB_GSPCA_KONICA=m CONFIG_USB_GSPCA_MARS=m CONFIG_USB_GSPCA_MR97310A=m @@ -1368,10 +4415,12 @@ CONFIG_USB_GSPCA_SQ905=m CONFIG_USB_GSPCA_SQ905C=m CONFIG_USB_GSPCA_SQ930X=m CONFIG_USB_GSPCA_STK014=m +# CONFIG_USB_GSPCA_STK1135 is not set CONFIG_USB_GSPCA_STV0680=m CONFIG_USB_GSPCA_SUNPLUS=m CONFIG_USB_GSPCA_T613=m CONFIG_USB_GSPCA_TOPRO=m +# CONFIG_USB_GSPCA_TOUPTEK is not set CONFIG_USB_GSPCA_TV8532=m CONFIG_USB_GSPCA_VC032X=m CONFIG_USB_GSPCA_VICAM=m @@ -1381,31 +4430,62 @@ CONFIG_USB_GL860=m CONFIG_USB_M5602=m CONFIG_USB_STV06XX=m CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y CONFIG_USB_S2255=m +# CONFIG_VIDEO_USBTV is not set CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y + +# +# Analog TV USB devices +# +# CONFIG_VIDEO_GO7007 is not set CONFIG_VIDEO_HDPVR=m CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +# CONFIG_VIDEO_STK1160 is not set + +# +# Analog/digital TV USB devices +# CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +# CONFIG_VIDEO_AU0828_RC is not set + +# +# Digital TV USB devices +# +# CONFIG_DVB_AS102 is not set CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set CONFIG_DVB_USB_V2=m CONFIG_DVB_USB_AF9035=m CONFIG_DVB_USB_ANYSEE=m CONFIG_DVB_USB_AU6610=m CONFIG_DVB_USB_AZ6007=m CONFIG_DVB_USB_CE6230=m +# CONFIG_DVB_USB_DVBSKY is not set CONFIG_DVB_USB_EC168=m CONFIG_DVB_USB_GL861=m CONFIG_DVB_USB_LME2510=m CONFIG_DVB_USB_MXL111SF=m +# CONFIG_DVB_USB_ZD1301 is not set CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set CONFIG_DVB_USB_A800=m 
CONFIG_DVB_USB_AF9005=m CONFIG_DVB_USB_AF9005_REMOTE=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_CINERGY_T2=m CONFIG_DVB_USB_CXUSB=m +# CONFIG_DVB_USB_CXUSB_ANALOG is not set CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIB3000MC=m CONFIG_DVB_USB_DIBUSB_MB=m +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set CONFIG_DVB_USB_DIBUSB_MC=m CONFIG_DVB_USB_DIGITV=m CONFIG_DVB_USB_DTT200U=m @@ -1424,103 +4504,1042 @@ CONFIG_DVB_USB_VP7045=m CONFIG_SMS_USB_DRV=m CONFIG_DVB_TTUSB_BUDGET=m CONFIG_DVB_TTUSB_DEC=m + +# +# Webcam, TV (analog/digital) USB devices +# CONFIG_VIDEO_EM28XX=m +# CONFIG_VIDEO_EM28XX_V4L2 is not set CONFIG_VIDEO_EM28XX_ALSA=m CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# Software defined radio USB devices +# +# CONFIG_USB_AIRSPY is not set +# CONFIG_USB_HACKRF is not set +# CONFIG_USB_MSI2500 is not set CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set +# CONFIG_VIDEO_ZORAN is not set + +# +# Media capture/analog TV support +# +# CONFIG_VIDEO_DT3155 is not set CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_ALSA is not set CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_MXB is not set + +# +# Media capture/analog/hybrid TV support +# CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m CONFIG_VIDEO_CX18=m +# CONFIG_VIDEO_CX18_ALSA is not set CONFIG_VIDEO_CX23885=m CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set CONFIG_VIDEO_CX88=m CONFIG_VIDEO_CX88_ALSA=m CONFIG_VIDEO_CX88_BLACKBIRD=m CONFIG_VIDEO_CX88_DVB=m # CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_CX88_MPEG=m CONFIG_VIDEO_SAA7134=m CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y CONFIG_VIDEO_SAA7134_DVB=m CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set CONFIG_DVB_DM1105=m CONFIG_MANTIS_CORE=m CONFIG_DVB_MANTIS=m CONFIG_DVB_HOPPER=m +# CONFIG_DVB_NETUP_UNIDVB is not set CONFIG_DVB_NGENE=m CONFIG_DVB_PLUTO2=m CONFIG_DVB_PT1=m +# CONFIG_DVB_PT3 is not set +# CONFIG_DVB_SMIPCIE is not set CONFIG_DVB_BUDGET_CORE=m CONFIG_DVB_BUDGET=m CONFIG_DVB_BUDGET_CI=m CONFIG_DVB_BUDGET_AV=m +# CONFIG_IPU_BRIDGE is not set +CONFIG_RADIO_ADAPTERS=m +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_RADIO_SI4713 is not set +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_MA901 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_RADIO_SI470X is not set +CONFIG_MEDIA_PLATFORM_DRIVERS=y +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_SDR_PLATFORM_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set + +# +# Allegro DVT media platform drivers +# + +# +# Amlogic media platform drivers +# + +# +# Amphion drivers +# + +# +# Aspeed media platform drivers +# + +# +# Atmel media platform drivers +# + +# +# Cadence media platform drivers +# +# CONFIG_VIDEO_CADENCE_CSI2RX is not set +# CONFIG_VIDEO_CADENCE_CSI2TX is not set + +# +# Chips&Media media platform drivers +# + +# +# Intel media platform drivers +# + +# +# Marvell media 
platform drivers +# + +# +# Mediatek media platform drivers +# + +# +# Microchip Technology, Inc. media platform drivers +# + +# +# NVidia media platform drivers +# + +# +# NXP media platform drivers +# + +# +# Qualcomm media platform drivers +# + +# +# Renesas media platform drivers +# + +# +# Rockchip media platform drivers +# + +# +# Samsung media platform drivers +# + +# +# STMicroelectronics media platform drivers +# + +# +# Sunxi media platform drivers +# + +# +# Texas Instruments drivers +# + +# +# Verisilicon media platform drivers +# + +# +# VIA media platform drivers +# + +# +# Xilinx media platform drivers +# + +# +# MMC/SDIO DVB adapters +# CONFIG_SMS_SDIO_DRV=m +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_TEST_DRIVERS is not set + +# +# FireWire (IEEE 1394) Adapters +# CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_TTPCI_EEPROM=m +CONFIG_UVC_COMMON=m +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +# end of Media drivers + +# +# Media ancillary drivers +# +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m +CONFIG_VIDEO_CAMERA_SENSOR=y +# CONFIG_VIDEO_AR0521 is not set +# CONFIG_VIDEO_HI556 is not set +# CONFIG_VIDEO_HI846 is not set +# CONFIG_VIDEO_HI847 is not set +# CONFIG_VIDEO_IMX208 is not set +# CONFIG_VIDEO_IMX214 is not set +# CONFIG_VIDEO_IMX219 is not set +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_IMX290 is not set +# CONFIG_VIDEO_IMX296 is not set +# CONFIG_VIDEO_IMX319 is not set +# CONFIG_VIDEO_IMX334 is not set +# CONFIG_VIDEO_IMX335 is not set +# CONFIG_VIDEO_IMX355 is not set +# CONFIG_VIDEO_IMX412 is not set +# CONFIG_VIDEO_IMX415 is not set +# CONFIG_VIDEO_MT9M001 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T112 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_OG01A1B is not set +# CONFIG_VIDEO_OV01A10 is not set +# CONFIG_VIDEO_OV02A10 is not set +# CONFIG_VIDEO_OV08D10 is not set +# CONFIG_VIDEO_OV08X40 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_OV13B10 is not set +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV2740 is not set +# CONFIG_VIDEO_OV4689 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV5648 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5675 is not set +# CONFIG_VIDEO_OV5693 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV8858 is not set +# CONFIG_VIDEO_OV8865 is not set +# CONFIG_VIDEO_OV9282 is not set +# CONFIG_VIDEO_OV9640 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV9734 is not set +# CONFIG_VIDEO_RDACM20 is not set +# CONFIG_VIDEO_RDACM21 is not set +# 
CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5C73M3 is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_ST_VGXY61 is not set +# CONFIG_VIDEO_CCS is not set +# CONFIG_VIDEO_ET8EK8 is not set + +# +# Lens drivers +# +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9719 is not set +# CONFIG_VIDEO_DW9768 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# end of Lens drivers + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set +# end of Flash devices + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_MSP3400=m +# CONFIG_VIDEO_SONY_BTF_MPX is not set +# CONFIG_VIDEO_TDA1997X is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_UDA1342 is not set +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_WM8775=m +# end of Audio decoders, processors and mixers + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set +# end of RDS decoders + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_ISL7998X is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_SAA7110 is not set +CONFIG_VIDEO_SAA711X=m +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TC358746 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +# end of Video decoders + +# +# Video encoders +# +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AK881X is not set +CONFIG_VIDEO_SAA7127=m +# CONFIG_VIDEO_SAA7185 is not set +# CONFIG_VIDEO_THS8200 is not set +# end of Video encoders + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +# end of Video improvement chips + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set +# end of Audio/Video compression chips + +# +# SDR tuner chips +# +# CONFIG_SDR_MAX2175 is not set +# end of SDR tuner chips + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_I2C is not set +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_ST_MIPID02 is not set +# CONFIG_VIDEO_THS7303 is not set +# end of Miscellaneous helper chips + +# +# Video serializers and deserializers +# +# CONFIG_VIDEO_DS90UB913 is not set +# CONFIG_VIDEO_DS90UB953 is not set +# CONFIG_VIDEO_DS90UB960 is not set +# end of Video serializers and deserializers + +# +# Media SPI Adapters +# +CONFIG_CXD2880_SPI_DRV=m +# CONFIG_VIDEO_GS1662 is not set +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Customize TV tuners +# +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m 
+CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MSI001=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_XC5000=m +# end of Customize TV tuners + +# +# Customise DVB Frontends +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_TDA18271C2DD=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_MT312=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_TDA10071=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_DIB9000=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_EC100=m +CONFIG_DVB_GP8PSK_FE=m +CONFIG_DVB_L64781=m +CONFIG_DVB_MT352=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_S5H1432=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_CXD2880=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_STV0297=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_VES1820=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_MXL692=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m +CONFIG_DVB_S921=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_MN88443X=m +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# 
+CONFIG_DVB_A8293=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_HELENE=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBH29=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_DRX39XYJ=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m +# end of Customise DVB Frontends + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set CONFIG_DRM=y +# CONFIG_DRM_DEBUG_MM is not set +CONFIG_DRM_KMS_HELPER=y +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y CONFIG_DRM_DP_AUX_CHARDEV=y CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=y +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_WERROR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DRM_AMD_DC_SI is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set CONFIG_DRM_VKMS=m CONFIG_DRM_UDL=m CONFIG_DRM_AST=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# 
+# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set
+# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set
+# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set
+# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set
+# CONFIG_DRM_PANEL_TPO_TPG110 is not set
+# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_CHIPONE_ICN6211 is not set
+# CONFIG_DRM_CHRONTEL_CH7033 is not set
+# CONFIG_DRM_DISPLAY_CONNECTOR is not set
+# CONFIG_DRM_ITE_IT6505 is not set
+# CONFIG_DRM_LONTIUM_LT8912B is not set
+# CONFIG_DRM_LONTIUM_LT9211 is not set
+# CONFIG_DRM_LONTIUM_LT9611 is not set
+# CONFIG_DRM_LONTIUM_LT9611UXC is not set
+# CONFIG_DRM_ITE_IT66121 is not set
+# CONFIG_DRM_LVDS_CODEC is not set
+# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set
+# CONFIG_DRM_NWL_MIPI_DSI is not set
+# CONFIG_DRM_NXP_PTN3460 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
+# CONFIG_DRM_PARADE_PS8640 is not set
+# CONFIG_DRM_SAMSUNG_DSIM is not set
+# CONFIG_DRM_SIL_SII8620 is not set
+# CONFIG_DRM_SII902X is not set
+# CONFIG_DRM_SII9234 is not set
+# CONFIG_DRM_SIMPLE_BRIDGE is not set
+# CONFIG_DRM_THINE_THC63LVD1024 is not set
+# CONFIG_DRM_TOSHIBA_TC358762 is not set
+# CONFIG_DRM_TOSHIBA_TC358764 is not set
+# CONFIG_DRM_TOSHIBA_TC358767 is not set
+# CONFIG_DRM_TOSHIBA_TC358768 is not set
+# CONFIG_DRM_TOSHIBA_TC358775 is not set
+# CONFIG_DRM_TI_DLPC3433 is not set
+# CONFIG_DRM_TI_TFP410 is not set
+# CONFIG_DRM_TI_SN65DSI83 is not set
+# CONFIG_DRM_TI_SN65DSI86 is not set
+# CONFIG_DRM_TI_TPD12S015 is not set
+# CONFIG_DRM_ANALOGIX_ANX6345 is not set
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
+# CONFIG_DRM_ANALOGIX_ANX7625 is not set
+# CONFIG_DRM_I2C_ADV7511 is not set
+# CONFIG_DRM_CDNS_DSI is not set
+# CONFIG_DRM_CDNS_MHDP8546 is not set
+# end of Display Interface Bridges
+
 CONFIG_DRM_LOONGSON=y
+# CONFIG_DRM_ETNAVIV is not set
+# CONFIG_DRM_LOGICVC is not set
+# CONFIG_DRM_ARCPGU is not set
 CONFIG_DRM_BOCHS=m
 CONFIG_DRM_CIRRUS_QEMU=m
+# CONFIG_DRM_GM12U320 is not set
+# CONFIG_DRM_PANEL_MIPI_DBI is not set
+# CONFIG_DRM_SIMPLEDRM is not set
+# CONFIG_TINYDRM_HX8357D is not set
+# CONFIG_TINYDRM_ILI9163 is not set
+# CONFIG_TINYDRM_ILI9225 is not set
+# CONFIG_TINYDRM_ILI9341 is not set
+# CONFIG_TINYDRM_ILI9486 is not set
+# CONFIG_TINYDRM_MI0283QT is not set
+# CONFIG_TINYDRM_REPAPER is not set
+# CONFIG_TINYDRM_ST7586 is not set
+# CONFIG_TINYDRM_ST7735R is not set
+# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
+# CONFIG_DRM_LEGACY is not set
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+# CONFIG_HYDCU_FIXUP_HEADER is not set
+
+#
+# Frame buffer Devices
+#
 CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_UVESA is not set
 CONFIG_FB_EFI=y
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
 CONFIG_FB_RADEON=y
+CONFIG_FB_RADEON_I2C=y
+CONFIG_FB_RADEON_BACKLIGHT=y
+# CONFIG_FB_RADEON_DEBUG is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
 CONFIG_FB_LS2K500=m
+CONFIG_FB_CORE=y
+CONFIG_FB_NOTIFY=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_DEVICE=y
+CONFIG_FB_DDC=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
+CONFIG_FB_BACKLIGHT=y
+CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
 CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI922X is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
 CONFIG_LCD_PLATFORM=m
+# CONFIG_LCD_AMS369FG06 is not set
+# CONFIG_LCD_LMS501KF03 is not set
+# CONFIG_LCD_HX8357 is not set
+# CONFIG_LCD_OTM3225A is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630A is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
 CONFIG_BACKLIGHT_LP855X=m
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# CONFIG_BACKLIGHT_LED is not set
+# end of Backlight & LCD device support
+
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
 # CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
+# end of Console display driver support
+
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+# end of Graphics support
+
+# CONFIG_DRM_ACCEL is not set
 CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
 CONFIG_SND=y
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_SEQ_DEVICE=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_JACK_INPUT_DEV=y
 CONFIG_SND_OSSEMUL=y
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+CONFIG_SND_PCM_TIMER=y
 CONFIG_SND_HRTIMER=m
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_MAX_CARDS=32
 # CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_PROC_FS=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_CTL_FAST_LOOKUP=y
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_CTL_INPUT_VALIDATION is not set
+CONFIG_SND_VMASTER=y
+CONFIG_SND_CTL_LED=m
 CONFIG_SND_SEQUENCER=m
 CONFIG_SND_SEQ_DUMMY=m
 CONFIG_SND_SEQUENCER_OSS=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_SEQ_MIDI_EVENT=m
+CONFIG_SND_SEQ_MIDI=m
+CONFIG_SND_SEQ_MIDI_EMUL=m
+CONFIG_SND_SEQ_VIRMIDI=m
+# CONFIG_SND_SEQ_UMP is not set
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
 CONFIG_SND_DUMMY=m
 CONFIG_SND_ALOOP=m
+# CONFIG_SND_PCMTEST is not set
 CONFIG_SND_VIRMIDI=m
 CONFIG_SND_MTPAV=m
+# CONFIG_SND_MTS64 is not set
+# CONFIG_SND_SERIAL_U16550 is not set
 CONFIG_SND_MPU401=m
+# CONFIG_SND_PORTMAN2X4 is not set
 CONFIG_SND_AC97_POWER_SAVE=y
 CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
+CONFIG_SND_PCI=y
 CONFIG_SND_AD1889=m
 CONFIG_SND_ATIIXP=m
 CONFIG_SND_ATIIXP_MODEM=m
 CONFIG_SND_AU8810=m
 CONFIG_SND_AU8820=m
 CONFIG_SND_AU8830=m
+# CONFIG_SND_AW2 is not set
 CONFIG_SND_BT87X=m
 CONFIG_SND_BT87X_OVERCLOCK=y
 CONFIG_SND_CA0106=m
 CONFIG_SND_CMIPCI=m
+CONFIG_SND_OXYGEN_LIB=m
 CONFIG_SND_OXYGEN=m
+# CONFIG_SND_CS4281 is not set
 CONFIG_SND_CS46XX=m
+CONFIG_SND_CS46XX_NEW_DSP=y
 CONFIG_SND_CTXFI=m
 CONFIG_SND_DARLA20=m
 CONFIG_SND_GINA20=m
@@ -1538,6 +5557,7 @@ CONFIG_SND_INDIGOIOX=m
 CONFIG_SND_INDIGODJX=m
 CONFIG_SND_ENS1370=m
 CONFIG_SND_ENS1371=m
+# CONFIG_SND_FM801 is not set
 CONFIG_SND_HDSP=m
 CONFIG_SND_HDSPM=m
 CONFIG_SND_ICE1724=m
@@ -1547,7 +5567,9 @@ CONFIG_SND_KORG1212=m
 CONFIG_SND_LOLA=m
 CONFIG_SND_LX6464ES=m
 CONFIG_SND_MIXART=m
+# CONFIG_SND_NM256 is not set
 CONFIG_SND_PCXHR=m
+# CONFIG_SND_RIPTIDE is not set
 CONFIG_SND_RME32=m
 CONFIG_SND_RME96=m
 CONFIG_SND_RME9652=m
@@ -1555,35 +5577,68 @@ CONFIG_SND_VIA82XX=m
 CONFIG_SND_VIA82XX_MODEM=m
 CONFIG_SND_VIRTUOSO=m
 CONFIG_SND_VX222=m
+# CONFIG_SND_YMFPCI is not set
+
+#
+# HD-Audio
+#
+CONFIG_SND_HDA=m
+CONFIG_SND_HDA_GENERIC_LEDS=y
 CONFIG_SND_HDA_INTEL=m
 CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_RECONFIG=y
 CONFIG_SND_HDA_INPUT_BEEP=y
 CONFIG_SND_HDA_INPUT_BEEP_MODE=0
 CONFIG_SND_HDA_PATCH_LOADER=y
+# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set
+# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set
+# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set
+# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set
+# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set
 CONFIG_SND_HDA_CODEC_REALTEK=m
 CONFIG_SND_HDA_CODEC_ANALOG=m
 CONFIG_SND_HDA_CODEC_SIGMATEL=m
 CONFIG_SND_HDA_CODEC_VIA=m
 CONFIG_SND_HDA_CODEC_HDMI=m
 CONFIG_SND_HDA_CODEC_CIRRUS=m
+# CONFIG_SND_HDA_CODEC_CS8409 is not set
 CONFIG_SND_HDA_CODEC_CONEXANT=m
 CONFIG_SND_HDA_CODEC_CA0110=m
 CONFIG_SND_HDA_CODEC_CA0132=m
+CONFIG_SND_HDA_CODEC_CA0132_DSP=y
 CONFIG_SND_HDA_CODEC_CMEDIA=m
 CONFIG_SND_HDA_CODEC_SI3054=m
+CONFIG_SND_HDA_GENERIC=m
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set
+# CONFIG_SND_HDA_CTL_DEV_ID is not set
+# end of HD-Audio
+
+CONFIG_SND_HDA_CORE=m
+CONFIG_SND_HDA_DSP_LOADER=y
+CONFIG_SND_HDA_COMPONENT=y
 CONFIG_SND_HDA_PREALLOC_SIZE=512
+CONFIG_SND_INTEL_NHLT=y
+CONFIG_SND_INTEL_DSP_CONFIG=m
+CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
 # CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
 CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set
+CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
 CONFIG_SND_USB_UA101=m
 CONFIG_SND_USB_CAIAQ=m
 CONFIG_SND_USB_CAIAQ_INPUT=y
 CONFIG_SND_USB_6FIRE=m
 CONFIG_SND_USB_HIFACE=m
 CONFIG_SND_BCD2000=m
+CONFIG_SND_USB_LINE6=m
 CONFIG_SND_USB_POD=m
 CONFIG_SND_USB_PODHD=m
 CONFIG_SND_USB_TONEPORT=m
 CONFIG_SND_USB_VARIAX=m
+CONFIG_SND_FIREWIRE=y
+CONFIG_SND_FIREWIRE_LIB=m
 CONFIG_SND_DICE=m
 CONFIG_SND_OXFW=m
 CONFIG_SND_ISIGHT=m
@@ -1594,36 +5649,306 @@ CONFIG_SND_FIREWIRE_TASCAM=m
 CONFIG_SND_FIREWIRE_MOTU=m
 CONFIG_SND_FIREFACE=m
 CONFIG_SND_SOC=m
+# CONFIG_SND_SOC_ADI is not set
+# CONFIG_SND_SOC_AMD_ACP is not set
+# CONFIG_SND_AMD_ACP_CONFIG is not set
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set
+# CONFIG_SND_DESIGNWARE_I2S is not set
+
+#
+# SoC Audio for Freescale CPUs
+#
+
+#
+# Common SoC Audio options for Freescale CPUs:
+#
+# CONFIG_SND_SOC_FSL_ASRC is not set
+# CONFIG_SND_SOC_FSL_SAI is not set
+# CONFIG_SND_SOC_FSL_AUDMIX is not set
+# CONFIG_SND_SOC_FSL_SSI is not set
+# CONFIG_SND_SOC_FSL_SPDIF is not set
+# CONFIG_SND_SOC_FSL_ESAI is not set
+# CONFIG_SND_SOC_FSL_MICFIL is not set
+# CONFIG_SND_SOC_FSL_XCVR is not set
+# CONFIG_SND_SOC_IMX_AUDMUX is not set
+# end of SoC Audio for Freescale CPUs
+
+# CONFIG_SND_SOC_CHV3_I2S is not set
+# CONFIG_SND_I2S_HI6210_I2S is not set
+
+#
+# SoC Audio for Loongson CPUs
+#
+# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set
+# CONFIG_SND_SOC_LOONGSON_CARD is not set
+# end of SoC Audio for Loongson CPUs
+
+# CONFIG_SND_SOC_IMG is not set
+# CONFIG_SND_SOC_MTK_BTCVSD is not set
+# CONFIG_SND_SOC_SOF_TOPLEVEL is not set
+
+#
+# STMicroelectronics STM32 SOC audio support
+#
+# end of STMicroelectronics STM32 SOC audio support
+
+# CONFIG_SND_SOC_XILINX_I2S is not set
+# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set
+# CONFIG_SND_SOC_XILINX_SPDIF is not set
+# CONFIG_SND_SOC_XTFPGA_I2S is not set
+CONFIG_SND_SOC_I2C_AND_SPI=m
+
+#
+# CODEC drivers
+#
+# CONFIG_SND_SOC_AC97_CODEC is not set
+# CONFIG_SND_SOC_ADAU1372_I2C is not set
+# CONFIG_SND_SOC_ADAU1372_SPI is not set
+# CONFIG_SND_SOC_ADAU1701 is not set
+# CONFIG_SND_SOC_ADAU1761_I2C is not set
+# CONFIG_SND_SOC_ADAU1761_SPI is not set
+# CONFIG_SND_SOC_ADAU7002 is not set
+# CONFIG_SND_SOC_ADAU7118_HW is not set
+# CONFIG_SND_SOC_ADAU7118_I2C is not set
+# CONFIG_SND_SOC_AK4104 is not set
+# CONFIG_SND_SOC_AK4118 is not set
+# CONFIG_SND_SOC_AK4375 is not set
+# CONFIG_SND_SOC_AK4458 is not set
+# CONFIG_SND_SOC_AK4554 is not set
+# CONFIG_SND_SOC_AK4613 is not set
+# CONFIG_SND_SOC_AK4642 is not set
+# CONFIG_SND_SOC_AK5386 is not set
+# CONFIG_SND_SOC_AK5558 is not set
+# CONFIG_SND_SOC_ALC5623 is not set
+# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set
+# CONFIG_SND_SOC_AW8738 is not set
+# CONFIG_SND_SOC_AW88395 is not set
+# CONFIG_SND_SOC_AW88261 is not set
+# CONFIG_SND_SOC_BD28623 is not set
+# CONFIG_SND_SOC_BT_SCO is not set
+# CONFIG_SND_SOC_CHV3_CODEC is not set
+# CONFIG_SND_SOC_CS35L32 is not set
+# CONFIG_SND_SOC_CS35L33 is not set
+# CONFIG_SND_SOC_CS35L34 is not set
+# CONFIG_SND_SOC_CS35L35 is not set
+# CONFIG_SND_SOC_CS35L36 is not set
+# CONFIG_SND_SOC_CS35L41_SPI is not set
+# CONFIG_SND_SOC_CS35L41_I2C is not set
+# CONFIG_SND_SOC_CS35L45_SPI is not set
+# CONFIG_SND_SOC_CS35L45_I2C is not set
+# CONFIG_SND_SOC_CS35L56_I2C is not set
+# CONFIG_SND_SOC_CS35L56_SPI is not set
+# CONFIG_SND_SOC_CS42L42 is not set
+# CONFIG_SND_SOC_CS42L51_I2C is not set
+# CONFIG_SND_SOC_CS42L52 is not set
+# CONFIG_SND_SOC_CS42L56 is not set
+# CONFIG_SND_SOC_CS42L73 is not set
+# CONFIG_SND_SOC_CS42L83 is not set
+# CONFIG_SND_SOC_CS4234 is not set
+# CONFIG_SND_SOC_CS4265 is not set
+# CONFIG_SND_SOC_CS4270 is not set
+# CONFIG_SND_SOC_CS4271_I2C is not set
+# CONFIG_SND_SOC_CS4271_SPI is not set
+# CONFIG_SND_SOC_CS42XX8_I2C is not set
+# CONFIG_SND_SOC_CS43130 is not set
+# CONFIG_SND_SOC_CS4341 is not set
+# CONFIG_SND_SOC_CS4349 is not set
+# CONFIG_SND_SOC_CS53L30 is not set
+# CONFIG_SND_SOC_CX2072X is not set
+# CONFIG_SND_SOC_DA7213 is not set
+# CONFIG_SND_SOC_DMIC is not set
+# CONFIG_SND_SOC_ES7134 is not set
+# CONFIG_SND_SOC_ES7241 is not set
+# CONFIG_SND_SOC_ES8316 is not set
+# CONFIG_SND_SOC_ES8326 is not set
+# CONFIG_SND_SOC_ES8328_I2C is not set
+# CONFIG_SND_SOC_ES8328_SPI is not set
+# CONFIG_SND_SOC_GTM601 is not set
+# CONFIG_SND_SOC_HDA is not set
+# CONFIG_SND_SOC_ICS43432 is not set
+# CONFIG_SND_SOC_IDT821034 is not set
+# CONFIG_SND_SOC_INNO_RK3036 is not set
+# CONFIG_SND_SOC_MAX98088 is not set
+# CONFIG_SND_SOC_MAX98090 is not set
+# CONFIG_SND_SOC_MAX98357A is not set
+# CONFIG_SND_SOC_MAX98504 is not set
+# CONFIG_SND_SOC_MAX9867 is not set
+# CONFIG_SND_SOC_MAX98927 is not set
+# CONFIG_SND_SOC_MAX98520 is not set
+# CONFIG_SND_SOC_MAX98373_I2C is not set
+# CONFIG_SND_SOC_MAX98388 is not set
+# CONFIG_SND_SOC_MAX98390 is not set
+# CONFIG_SND_SOC_MAX98396 is not set
+# CONFIG_SND_SOC_MAX9860 is not set
+# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set
+# CONFIG_SND_SOC_PCM1681 is not set
+# CONFIG_SND_SOC_PCM1789_I2C is not set
+# CONFIG_SND_SOC_PCM179X_I2C is not set
+# CONFIG_SND_SOC_PCM179X_SPI is not set
+# CONFIG_SND_SOC_PCM186X_I2C is not set
+# CONFIG_SND_SOC_PCM186X_SPI is not set
+# CONFIG_SND_SOC_PCM3060_I2C is not set
+# CONFIG_SND_SOC_PCM3060_SPI is not set
+# CONFIG_SND_SOC_PCM3168A_I2C is not set
+# CONFIG_SND_SOC_PCM3168A_SPI is not set
+# CONFIG_SND_SOC_PCM5102A is not set
+# CONFIG_SND_SOC_PCM512x_I2C is not set
+# CONFIG_SND_SOC_PCM512x_SPI is not set
+# CONFIG_SND_SOC_PEB2466 is not set
+# CONFIG_SND_SOC_RK3328 is not set
+# CONFIG_SND_SOC_RT5616 is not set
+# CONFIG_SND_SOC_RT5631 is not set
+# CONFIG_SND_SOC_RT5640 is not set
+# CONFIG_SND_SOC_RT5659 is not set
+# CONFIG_SND_SOC_RT9120 is not set
+# CONFIG_SND_SOC_SGTL5000 is not set
+# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set
+# CONFIG_SND_SOC_SIMPLE_MUX is not set
+# CONFIG_SND_SOC_SMA1303 is not set
+# CONFIG_SND_SOC_SPDIF is not set
+# CONFIG_SND_SOC_SRC4XXX_I2C is not set
+# CONFIG_SND_SOC_SSM2305 is not set
+# CONFIG_SND_SOC_SSM2518 is not set
+# CONFIG_SND_SOC_SSM2602_SPI is not set
+# CONFIG_SND_SOC_SSM2602_I2C is not set
+# CONFIG_SND_SOC_SSM3515 is not set
+# CONFIG_SND_SOC_SSM4567 is not set
+# CONFIG_SND_SOC_STA32X is not set
+# CONFIG_SND_SOC_STA350 is not set
+# CONFIG_SND_SOC_STI_SAS is not set
+# CONFIG_SND_SOC_TAS2552 is not set
+# CONFIG_SND_SOC_TAS2562 is not set
+# CONFIG_SND_SOC_TAS2764 is not set
+# CONFIG_SND_SOC_TAS2770 is not set
+# CONFIG_SND_SOC_TAS2780 is not set
+# CONFIG_SND_SOC_TAS2781_I2C is not set
+# CONFIG_SND_SOC_TAS5086 is not set
+# CONFIG_SND_SOC_TAS571X is not set
+# CONFIG_SND_SOC_TAS5720 is not set
+# CONFIG_SND_SOC_TAS5805M is not set
+# CONFIG_SND_SOC_TAS6424 is not set
+# CONFIG_SND_SOC_TDA7419 is not set
+# CONFIG_SND_SOC_TFA9879 is not set
+# CONFIG_SND_SOC_TFA989X is not set
+# CONFIG_SND_SOC_TLV320ADC3XXX is not set
+# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC31XX is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set
+# CONFIG_SND_SOC_TLV320ADCX140 is not set
+# CONFIG_SND_SOC_TS3A227E is not set
+# CONFIG_SND_SOC_TSCS42XX is not set
+# CONFIG_SND_SOC_TSCS454 is not set
+# CONFIG_SND_SOC_UDA1334 is not set
+# CONFIG_SND_SOC_WM8510 is not set
+# CONFIG_SND_SOC_WM8523 is not set
+# CONFIG_SND_SOC_WM8524 is not set
+# CONFIG_SND_SOC_WM8580 is not set
+# CONFIG_SND_SOC_WM8711 is not set
+# CONFIG_SND_SOC_WM8728 is not set
+# CONFIG_SND_SOC_WM8731_I2C is not set
+# CONFIG_SND_SOC_WM8731_SPI is not set
+# CONFIG_SND_SOC_WM8737 is not set
+# CONFIG_SND_SOC_WM8741 is not set
+# CONFIG_SND_SOC_WM8750 is not set
+# CONFIG_SND_SOC_WM8753 is not set
+# CONFIG_SND_SOC_WM8770 is not set
+# CONFIG_SND_SOC_WM8776 is not set
+# CONFIG_SND_SOC_WM8782 is not set
+# CONFIG_SND_SOC_WM8804_I2C is not set
+# CONFIG_SND_SOC_WM8804_SPI is not set
+# CONFIG_SND_SOC_WM8903 is not set
+# CONFIG_SND_SOC_WM8904 is not set
+# CONFIG_SND_SOC_WM8940 is not set
+# CONFIG_SND_SOC_WM8960 is not set
+# CONFIG_SND_SOC_WM8961 is not set
+# CONFIG_SND_SOC_WM8962 is not set
+# CONFIG_SND_SOC_WM8974 is not set
+# CONFIG_SND_SOC_WM8978 is not set
+# CONFIG_SND_SOC_WM8985 is not set
+# CONFIG_SND_SOC_ZL38060 is not set
+# CONFIG_SND_SOC_MAX9759 is not set
+# CONFIG_SND_SOC_MT6351 is not set
+# CONFIG_SND_SOC_MT6358 is not set
+# CONFIG_SND_SOC_MT6660 is not set
+# CONFIG_SND_SOC_NAU8315 is not set
+# CONFIG_SND_SOC_NAU8540 is not set
+# CONFIG_SND_SOC_NAU8810 is not set
+# CONFIG_SND_SOC_NAU8821 is not set
+# CONFIG_SND_SOC_NAU8822 is not set
+# CONFIG_SND_SOC_NAU8824 is not set
+# CONFIG_SND_SOC_TPA6130A2 is not set
+# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_VA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_RX_MACRO is not set
+# CONFIG_SND_SOC_LPASS_TX_MACRO is not set
+# end of CODEC drivers
+
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set
+# CONFIG_SND_TEST_COMPONENT is not set
+# CONFIG_SND_VIRTIO is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
 CONFIG_HID_BATTERY_STRENGTH=y
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
 CONFIG_HID_A4TECH=m
+# CONFIG_HID_ACCUTOUCH is not set
 CONFIG_HID_ACRUX=m
+# CONFIG_HID_ACRUX_FF is not set
 CONFIG_HID_APPLE=m
 CONFIG_HID_APPLEIR=m
 CONFIG_HID_ASUS=m
 CONFIG_HID_AUREAL=m
 CONFIG_HID_BELKIN=m
 CONFIG_HID_BETOP_FF=m
+# CONFIG_HID_BIGBEN_FF is not set
 CONFIG_HID_CHERRY=m
 CONFIG_HID_CHICONY=m
 CONFIG_HID_CORSAIR=m
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
 CONFIG_HID_PRODIKEYS=m
 CONFIG_HID_CMEDIA=m
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CREATIVE_SB0540 is not set
 CONFIG_HID_CYPRESS=m
 CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
 CONFIG_HID_ELAN=m
 CONFIG_HID_ELECOM=m
 CONFIG_HID_ELO=m
+# CONFIG_HID_EVISION is not set
 CONFIG_HID_EZKEY=m
+# CONFIG_HID_FT260 is not set
 CONFIG_HID_GEMBIRD=m
 CONFIG_HID_GFRM=m
+# CONFIG_HID_GLORIOUS is not set
 CONFIG_HID_HOLTEK=m
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
 CONFIG_HID_GT683R=m
 CONFIG_HID_KEYTOUCH=m
 CONFIG_HID_KYE=m
 CONFIG_HID_UCLOGIC=m
 CONFIG_HID_WALTOP=m
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
 CONFIG_HID_GYRATION=m
 CONFIG_HID_ICADE=m
 CONFIG_HID_ITE=m
@@ -1631,69 +5956,169 @@ CONFIG_HID_JABRA=m
 CONFIG_HID_TWINHAN=m
 CONFIG_HID_KENSINGTON=m
 CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
 CONFIG_HID_LENOVO=m
+# CONFIG_HID_LETSKETCH is not set
 CONFIG_HID_LOGITECH=m
 CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
 CONFIG_LOGITECH_FF=y
 CONFIG_LOGIRUMBLEPAD2_FF=y
 CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
 CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
+# CONFIG_HID_REDRAGON is not set
 CONFIG_HID_MICROSOFT=m
 CONFIG_HID_MONTEREY=m
 CONFIG_HID_MULTITOUCH=m
+# CONFIG_HID_NINTENDO is not set
 CONFIG_HID_NTI=m
 CONFIG_HID_NTRIG=y
+# CONFIG_HID_NVIDIA_SHIELD is not set
 CONFIG_HID_ORTEK=m
 CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
 CONFIG_HID_PENMOUNT=m
 CONFIG_HID_PETALYNX=m
 CONFIG_HID_PICOLCD=m
+# CONFIG_HID_PICOLCD_FB is not set
+# CONFIG_HID_PICOLCD_BACKLIGHT is not set
+# CONFIG_HID_PICOLCD_LCD is not set
+# CONFIG_HID_PICOLCD_LEDS is not set
+# CONFIG_HID_PICOLCD_CIR is not set
 CONFIG_HID_PLANTRONICS=m
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
 CONFIG_HID_PRIMAX=m
+# CONFIG_HID_RETRODE is not set
 CONFIG_HID_ROCCAT=m
 CONFIG_HID_SAITEK=m
 CONFIG_HID_SAMSUNG=m
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
 CONFIG_HID_SONY=m
 CONFIG_SONY_FF=y
 CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEAM is not set
 CONFIG_HID_STEELSERIES=m
 CONFIG_HID_SUNPLUS=m
 CONFIG_HID_RMI=m
 CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
 CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
 CONFIG_HID_TIVO=m
 CONFIG_HID_TOPSEED=m
+# CONFIG_HID_TOPRE is not set
 CONFIG_HID_THINGM=m
 CONFIG_HID_THRUSTMASTER=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_U2FZERO is not set
 CONFIG_HID_WACOM=m
 CONFIG_HID_WIIMOTE=m
 CONFIG_HID_XINMO=m
 CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
 CONFIG_HID_ZYDACRON=m
 CONFIG_HID_SENSOR_HUB=y
 CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
 CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2221 is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# CONFIG_HID_BPF is not set
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
 CONFIG_HID_PID=y
 CONFIG_USB_HIDDEV=y
+# end of USB HID support
+
 CONFIG_I2C_HID=m
+# CONFIG_I2C_HID_ACPI is not set
+# CONFIG_I2C_HID_OF is not set
+# CONFIG_I2C_HID_OF_ELAN is not set
+# CONFIG_I2C_HID_OF_GOODIX is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
 CONFIG_USB_LED_TRIG=y
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB=y
+CONFIG_USB_PCI=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
 CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_AUTOSUSPEND_DELAY=2
 CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_PCI=y
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
 CONFIG_USB_XHCI_PLATFORM=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
 CONFIG_USB_EHCI_HCD_PLATFORM=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
 CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
 CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
 CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
 CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
 CONFIG_USB_STORAGE_DATAFAB=m
 CONFIG_USB_STORAGE_FREECOM=m
 CONFIG_USB_STORAGE_ISD200=m
@@ -1707,12 +6132,40 @@ CONFIG_USB_STORAGE_KARMA=m
 CONFIG_USB_STORAGE_CYPRESS_ATACB=m
 CONFIG_USB_STORAGE_ENE_UB6250=m
 CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
 CONFIG_USB_MDC800=m
 CONFIG_USB_MICROTEK=m
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
 CONFIG_USB_DWC2=y
 CONFIG_USB_DWC2_HOST=y
+
+#
+# Gadget/Dual-role mode requires USB Gadget support to be enabled
+#
+# CONFIG_USB_DWC2_PERIPHERAL is not set
+# CONFIG_USB_DWC2_DUAL_ROLE is not set
+# CONFIG_USB_DWC2_PCI is not set
+# CONFIG_USB_DWC2_DEBUG is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
 CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_SIMPLE is not set
 CONFIG_USB_SERIAL_AIRCABLE=m
 CONFIG_USB_SERIAL_ARK3116=m
 CONFIG_USB_SERIAL_BELKIN=m
@@ -1728,6 +6181,7 @@ CONFIG_USB_SERIAL_IPAQ=m
 CONFIG_USB_SERIAL_IR=m
 CONFIG_USB_SERIAL_EDGEPORT=m
 CONFIG_USB_SERIAL_EDGEPORT_TI=m
+# CONFIG_USB_SERIAL_F81232 is not set
 CONFIG_USB_SERIAL_F8153X=m
 CONFIG_USB_SERIAL_GARMIN=m
 CONFIG_USB_SERIAL_IPW=m
@@ -1737,6 +6191,7 @@ CONFIG_USB_SERIAL_KEYSPAN=m
 CONFIG_USB_SERIAL_KLSI=m
 CONFIG_USB_SERIAL_KOBIL_SCT=m
 CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
 CONFIG_USB_SERIAL_MOS7720=m
 CONFIG_USB_SERIAL_MOS7715_PARPORT=y
 CONFIG_USB_SERIAL_MOS7840=m
@@ -1753,14 +6208,21 @@ CONFIG_USB_SERIAL_SIERRAWIRELESS=m
 CONFIG_USB_SERIAL_SYMBOL=m
 CONFIG_USB_SERIAL_TI=m
 CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_WWAN=m
 CONFIG_USB_SERIAL_OPTION=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_SERIAL_OPTICON=m
 CONFIG_USB_SERIAL_XSENS_MT=m
+# CONFIG_USB_SERIAL_WISHBONE is not set
 CONFIG_USB_SERIAL_SSU100=m
 CONFIG_USB_SERIAL_QT2=m
 CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_XR is not set
 CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
 CONFIG_USB_USS720=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
@@ -1768,138 +6230,541 @@ CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
 CONFIG_USB_IDMOUSE=m
 CONFIG_USB_APPLEDISPLAY=m
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
 CONFIG_USB_SISUSBVGA=m
 CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
 CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
 CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_EZUSB_FX2=m
+# CONFIG_USB_HUB_USB251XB is not set
 CONFIG_USB_HSIC_USB3503=m
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_ONBOARD_HUB is not set
 CONFIG_USB_ATM=m
 CONFIG_USB_SPEEDTOUCH=m
 CONFIG_USB_CXACRU=m
 CONFIG_USB_UEAGLEATM=m
 CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# end of USB Physical Layer drivers
+
 CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_GR_UDC is not set
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_SNP_UDC_PLAT is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_BDC_UDC is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_GADGET_XILINX is not set
+# CONFIG_USB_MAX3420_UDC is not set
+# CONFIG_USB_CDNS2_UDC is not set
+# CONFIG_USB_DUMMY_HCD is not set
+# end of USB Peripheral Controller
+
+# CONFIG_USB_CONFIGFS is not set
+
+#
+# USB Gadget precomposed configurations
+#
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_GADGET_TARGET is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_USB_RAW_GADGET is not set
+# end of USB Gadget precomposed configurations
+
 CONFIG_TYPEC=m
 CONFIG_TYPEC_TCPM=m
 CONFIG_TYPEC_TCPCI=m
 CONFIG_TYPEC_RT1711H=m
+# CONFIG_TYPEC_TCPCI_MAXIM is not set
 CONFIG_TYPEC_FUSB302=m
 CONFIG_TYPEC_UCSI=m
+# CONFIG_UCSI_CCG is not set
 CONFIG_UCSI_ACPI=m
+# CONFIG_UCSI_STM32G0 is not set
 CONFIG_TYPEC_TPS6598X=m
+# CONFIG_TYPEC_ANX7411 is not set
+# CONFIG_TYPEC_RT1719 is not set
+# CONFIG_TYPEC_HD3SS3220 is not set
+# CONFIG_TYPEC_STUSB160X is not set
+# CONFIG_TYPEC_WUSB3801 is not set
+
+#
+# USB Type-C Multiplexer/DeMultiplexer Switch support
+#
+# CONFIG_TYPEC_MUX_FSA4480 is not set
+# CONFIG_TYPEC_MUX_GPIO_SBU is not set
 CONFIG_TYPEC_MUX_PI3USB30532=m
+# CONFIG_TYPEC_MUX_NB7VPQ904M is not set
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support
+
+#
+# USB Type-C Alternate Mode drivers
+#
 CONFIG_TYPEC_DP_ALTMODE=m
+# CONFIG_TYPEC_NVIDIA_ALTMODE is not set
+# end of USB Type-C Alternate Mode drivers
+
+CONFIG_USB_ROLE_SWITCH=y
 CONFIG_MMC=m
+CONFIG_PWRSEQ_EMMC=m
+# CONFIG_PWRSEQ_SD8787 is not set
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
 CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
 CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
 CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
 CONFIG_MMC_SDHCI_ACPI=m
 CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set
+# CONFIG_MMC_SDHCI_CADENCE is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_SDHCI_MILBEAUT is not set
 CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SPI is not set
 CONFIG_MMC_CB710=m
 CONFIG_MMC_VIA_SDMMC=m
 CONFIG_MMC_VUB300=m
 CONFIG_MMC_USHC=m
+# CONFIG_MMC_USDHI6ROL0 is not set
 CONFIG_MMC_REALTEK_PCI=m
 CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_CQHCI=m
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
 CONFIG_MMC_SDHCI_XENON=m
+# CONFIG_MMC_SDHCI_OMAP is not set
+# CONFIG_MMC_SDHCI_AM654 is not set
+# CONFIG_SCSI_UFSHCD is not set
 CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
 CONFIG_MSPRO_BLOCK=m
+# CONFIG_MS_BLOCK is not set
+
+#
+# MemoryStick Host Controller Drivers
+#
 CONFIG_MEMSTICK_TIFM_MS=m
 CONFIG_MEMSTICK_JMICRON_38X=m
 CONFIG_MEMSTICK_R592=m
 CONFIG_MEMSTICK_REALTEK_PCI=m
 CONFIG_MEMSTICK_REALTEK_USB=m
+CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_AN30259A is not set
+# CONFIG_LEDS_AW200XX is not set
+# CONFIG_LEDS_AW2013 is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_CR0014114 is not set
+# CONFIG_LEDS_EL15203000 is not set
 CONFIG_LEDS_LM3530=m
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_LM3692X is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
 CONFIG_LEDS_LP3944=m
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP50XX is not set
+# CONFIG_LEDS_LP55XX_COMMON is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_PCA995X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+# CONFIG_LEDS_IS31FL32XX is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
 CONFIG_LEDS_BLINKM=m
+# CONFIG_LEDS_SYSCON is not set
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_SPI_BYTE is not set
+# CONFIG_LEDS_LM3697 is not set
+
+#
+# Flash and Torch LED drivers
+#
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_ONESHOT=m
 CONFIG_LEDS_TRIGGER_DISK=y
+# CONFIG_LEDS_TRIGGER_MTD is not set
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
 CONFIG_LEDS_TRIGGER_TRANSIENT=m
 CONFIG_LEDS_TRIGGER_CAMERA=m
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
 CONFIG_LEDS_TRIGGER_AUDIO=y
+# CONFIG_LEDS_TRIGGER_TTY is not set
+
+#
+# Simple LED drivers
+#
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_MAD=m
 CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_USER_MEM=y
+CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
+CONFIG_INFINIBAND_ADDR_TRANS=y
+CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
+CONFIG_INFINIBAND_VIRT_DMA=y
 CONFIG_INFINIBAND_BNXT_RE=m
 CONFIG_INFINIBAND_CXGB4=m
+# CONFIG_INFINIBAND_EFA is not set
+# CONFIG_INFINIBAND_ERDMA is not set
+# CONFIG_INFINIBAND_IRDMA is not set
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+# CONFIG_INFINIBAND_MTHCA is not set
+# CONFIG_INFINIBAND_OCRDMA is not set
 CONFIG_INFINIBAND_VMWARE_PVRDMA=m
 CONFIG_RDMA_RXE=m
+# CONFIG_RDMA_SIW is not set
 CONFIG_INFINIBAND_IPOIB=m
 CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
 CONFIG_INFINIBAND_SRP=m
 CONFIG_INFINIBAND_SRPT=m
 CONFIG_INFINIBAND_ISER=m
 CONFIG_INFINIBAND_ISERT=m
+# CONFIG_INFINIBAND_RTRS_CLIENT is not set
+# CONFIG_INFINIBAND_RTRS_SERVER is not set
+CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_SYSTOHC is not set
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+CONFIG_RTC_NVMEM=y
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_ABB5ZES3 is not set
+# CONFIG_RTC_DRV_ABEOZ9 is not set
+# CONFIG_RTC_DRV_ABX80X is not set
 CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1307_CENTURY is not set
 CONFIG_RTC_DRV_DS1374=m
+# CONFIG_RTC_DRV_DS1374_WDT is not set
 CONFIG_RTC_DRV_DS1672=m
+# CONFIG_RTC_DRV_HYM8563 is not set
 CONFIG_RTC_DRV_MAX6900=m
+# CONFIG_RTC_DRV_NCT3018Y is not set
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_ISL1208=m
 CONFIG_RTC_DRV_ISL12022=m
+# CONFIG_RTC_DRV_ISL12026 is not set
 CONFIG_RTC_DRV_X1205=m
 CONFIG_RTC_DRV_PCF8523=m
+# CONFIG_RTC_DRV_PCF85063 is not set
+# CONFIG_RTC_DRV_PCF85363 is not set
 CONFIG_RTC_DRV_PCF8563=m
 CONFIG_RTC_DRV_PCF8583=m
 CONFIG_RTC_DRV_M41T80=m
 CONFIG_RTC_DRV_M41T80_WDT=y
 CONFIG_RTC_DRV_BQ32K=m
+# CONFIG_RTC_DRV_S35390A is not set
 CONFIG_RTC_DRV_FM3130=m
+# CONFIG_RTC_DRV_RX8010 is not set
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_RX8025=m
 CONFIG_RTC_DRV_EM3027=m
+# CONFIG_RTC_DRV_RV3028 is not set
+# CONFIG_RTC_DRV_RV3032 is not set
 CONFIG_RTC_DRV_RV8803=m
+# CONFIG_RTC_DRV_SD3078 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1302 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1343 is not set
+# CONFIG_RTC_DRV_DS1347 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6916 is not set
+# CONFIG_RTC_DRV_R9701 is not set
 CONFIG_RTC_DRV_RX4581=m
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_MCP795 is not set
+CONFIG_RTC_I2C_AND_SPI=y
+
+#
+# SPI and I2C RTC drivers
+#
 CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_DS3232_HWMON=y
+# CONFIG_RTC_DRV_PCF2127 is not set
 CONFIG_RTC_DRV_RV3029C2=m
 # CONFIG_RTC_DRV_RV3029_HWMON is not set
+# CONFIG_RTC_DRV_RX6110 is not set
+
+#
+# Platform RTC drivers
+#
 CONFIG_RTC_DRV_DS1286=m
 CONFIG_RTC_DRV_DS1511=m
 CONFIG_RTC_DRV_DS1553=m
+# CONFIG_RTC_DRV_DS1685_FAMILY is not set
 CONFIG_RTC_DRV_DS1742=m
 CONFIG_RTC_DRV_DS2404=m
 CONFIG_RTC_DRV_EFI=m
 CONFIG_RTC_DRV_STK17TA8=m
+# CONFIG_RTC_DRV_M48T86 is not set
 CONFIG_RTC_DRV_M48T35=m
 CONFIG_RTC_DRV_M48T59=m
 CONFIG_RTC_DRV_MSM6242=m
 CONFIG_RTC_DRV_RP5C01=m
+# CONFIG_RTC_DRV_ZYNQMP is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_CADENCE is not set
+# CONFIG_RTC_DRV_FTRTC010 is not set
 CONFIG_RTC_DRV_LOONGSON=y
+# CONFIG_RTC_DRV_R7301 is not set
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_RTC_DRV_GOLDFISH is not set
 CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_OF=y
+# CONFIG_ALTERA_MSGDMA is not set
+# CONFIG_DW_AXI_DMAC is not set
+# CONFIG_FSL_EDMA is not set
+# CONFIG_INTEL_IDMA64 is not set
+# CONFIG_PLX_DMA is not set
+# CONFIG_XILINX_DMA is not set
+# CONFIG_XILINX_XDMA is not set
+# CONFIG_XILINX_ZYNQMP_DPDMA is not set
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
+CONFIG_DW_DMAC_CORE=m
 CONFIG_DW_DMAC=m
+# CONFIG_DW_DMAC_PCI is not set
+# CONFIG_DW_EDMA is not set
+# CONFIG_SF_PDMA is not set
+
+#
+# DMA Clients
+#
 CONFIG_ASYNC_TX_DMA=y
+# CONFIG_DMATEST is not set
+
+#
+# DMABUF options
+#
+CONFIG_SYNC_FILE=y
+# CONFIG_SW_SYNC is not set
+# CONFIG_UDMABUF is not set
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
+# CONFIG_DMABUF_DEBUG is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_DMABUF_HEAPS is not set
+# CONFIG_DMABUF_SYSFS_STATS is not set
+# end of DMABUF options
+
+CONFIG_UIO=m
 CONFIG_UIO_CIF=m
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_DMEM_GENIRQ=m
 CONFIG_UIO_AEC=m
 CONFIG_UIO_SERCOS3=m
 CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_UIO_NETX is not set
+# CONFIG_UIO_PRUSS is not set
+# CONFIG_UIO_MF624 is not set
 CONFIG_VFIO=m
+CONFIG_VFIO_GROUP=y
+CONFIG_VFIO_CONTAINER=y
 CONFIG_VFIO_NOIOMMU=y
+CONFIG_VFIO_VIRQFD=y
+
+#
+# VFIO support for PCI devices
+#
+CONFIG_VFIO_PCI_CORE=m
+CONFIG_VFIO_PCI_MMAP=y
+CONFIG_VFIO_PCI_INTX=y
 CONFIG_VFIO_PCI=m
+# CONFIG_MLX5_VFIO_PCI is not set
+# end of VFIO support for PCI devices
+
+CONFIG_IRQ_BYPASS_MANAGER=m
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO_ANCHOR=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI_LIB=y
+CONFIG_VIRTIO_PCI_LIB_LEGACY=y
+CONFIG_VIRTIO_MENU=y
 CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=m
 CONFIG_VIRTIO_MMIO=m
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
+# CONFIG_VDPA is not set
+CONFIG_VHOST_IOTLB=m
+CONFIG_VHOST_TASK=y
+CONFIG_VHOST=m
+CONFIG_VHOST_MENU=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_SCSI=m
 CONFIG_VHOST_VSOCK=m
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# end of Microsoft Hyper-V guest support
+
+# CONFIG_GREYBUS is not set
 CONFIG_COMEDI=m
+# CONFIG_COMEDI_DEBUG is not set
+CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
+CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
+# CONFIG_COMEDI_MISC_DRIVERS is not set
+# CONFIG_COMEDI_ISA_DRIVERS is not set
 CONFIG_COMEDI_PCI_DRIVERS=m
 CONFIG_COMEDI_8255_PCI=m
+# CONFIG_COMEDI_ADDI_APCI_1032 is not set
+# CONFIG_COMEDI_ADDI_APCI_1500 is not set
+# CONFIG_COMEDI_ADDI_APCI_1516 is not set
+# CONFIG_COMEDI_ADDI_APCI_1564 is not set
+# CONFIG_COMEDI_ADDI_APCI_16XX is not set
+# CONFIG_COMEDI_ADDI_APCI_2032 is not set
+# CONFIG_COMEDI_ADDI_APCI_2200 is not set
+# CONFIG_COMEDI_ADDI_APCI_3120 is not set
+# CONFIG_COMEDI_ADDI_APCI_3501 is not set
+# CONFIG_COMEDI_ADDI_APCI_3XXX is not set
 CONFIG_COMEDI_ADL_PCI6208=m
 CONFIG_COMEDI_ADL_PCI7X3X=m
 CONFIG_COMEDI_ADL_PCI8164=m
@@ -1911,39 +6776,916 @@ CONFIG_COMEDI_ADV_PCI1723=m
 CONFIG_COMEDI_ADV_PCI1724=m
 CONFIG_COMEDI_ADV_PCI1760=m
 CONFIG_COMEDI_ADV_PCI_DIO=m
+# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set
+# CONFIG_COMEDI_AMPLC_PC236_PCI is not set
+# CONFIG_COMEDI_AMPLC_PC263_PCI is not set
+# CONFIG_COMEDI_AMPLC_PCI224 is not set
+# CONFIG_COMEDI_AMPLC_PCI230 is not set
+# CONFIG_COMEDI_CONTEC_PCI_DIO is not set
+# CONFIG_COMEDI_DAS08_PCI is not set
+# CONFIG_COMEDI_DT3000 is not set
+# CONFIG_COMEDI_DYNA_PCI10XX is not set
+# CONFIG_COMEDI_GSC_HPDI is not set
+# CONFIG_COMEDI_MF6X4 is not set
+# CONFIG_COMEDI_ICP_MULTI is not set
+# CONFIG_COMEDI_DAQBOARD2000 is not set
+# CONFIG_COMEDI_JR3_PCI is not set
+# CONFIG_COMEDI_KE_COUNTER is not set
+# CONFIG_COMEDI_CB_PCIDAS64 is not set
+# CONFIG_COMEDI_CB_PCIDAS is not set
+# CONFIG_COMEDI_CB_PCIDDA is not set
+# CONFIG_COMEDI_CB_PCIMDAS is not set
+# CONFIG_COMEDI_CB_PCIMDDA is not set
+# CONFIG_COMEDI_ME4000 is not set
+# CONFIG_COMEDI_ME_DAQ is not set
+# CONFIG_COMEDI_NI_6527 is not set
+# CONFIG_COMEDI_NI_65XX is not set
+# CONFIG_COMEDI_NI_660X is not set
+# CONFIG_COMEDI_NI_670X is not set
 CONFIG_COMEDI_NI_LABPC_PCI=m
 CONFIG_COMEDI_NI_PCIDIO=m
 CONFIG_COMEDI_NI_PCIMIO=m
+# CONFIG_COMEDI_RTD520 is not set
+# CONFIG_COMEDI_S626 is not set
+CONFIG_COMEDI_MITE=m
+CONFIG_COMEDI_NI_TIOCMD=m
+# CONFIG_COMEDI_USB_DRIVERS is not set
+CONFIG_COMEDI_8254=m
+CONFIG_COMEDI_8255=m
+# CONFIG_COMEDI_8255_SA is not set
+# CONFIG_COMEDI_KCOMEDILIB is not set
+CONFIG_COMEDI_NI_LABPC=m
+CONFIG_COMEDI_NI_TIO=m
+CONFIG_COMEDI_NI_ROUTING=m
+# CONFIG_COMEDI_TESTS is not set
 CONFIG_STAGING=y
+# CONFIG_PRISM2_USB is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_RTL8723BS is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5208 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16240 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7816 is not set
+# end of Analog to digital converters
+
+#
+# Analog digital bi-direction converters
+#
+# CONFIG_ADT7316 is not set
+# end of Analog digital bi-direction converters
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# end of Direct Digital Synthesis
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+# end of Network Analyzer, Impedance Converters
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S1210 is not set
+# end of Resolver to digital converters
+# end of IIO staging drivers
+
+# CONFIG_FB_SM750 is not set
+# CONFIG_STAGING_MEDIA is not set
+# CONFIG_STAGING_BOARD is not set
+# CONFIG_LTE_GDM724X is not set
+# CONFIG_FB_TFT is not set
+# CONFIG_KS7010 is not set
+# CONFIG_PI433 is not set
+# CONFIG_XIL_AXIS_FIFO is not set
+# CONFIG_FIELDBUS_DEV is not set
+# CONFIG_QLGE is not set
+# CONFIG_VME_BUS is not set
+CONFIG_LOONGARCH_PLATFORM_DEVICES=y
+CONFIG_LOONGSON_LAPTOP=y
+# CONFIG_GOLDFISH is not set
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+# CONFIG_LMK04832 is not set
+# CONFIG_COMMON_CLK_MAX9485 is not set
+# CONFIG_COMMON_CLK_SI5341 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI514 is not set
+# CONFIG_COMMON_CLK_SI544 is not set
+# CONFIG_COMMON_CLK_SI570 is not set
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CDCE925 is not set
+# CONFIG_COMMON_CLK_CS2000_CP is not set
+# CONFIG_COMMON_CLK_AXI_CLKGEN is not set
 CONFIG_COMMON_CLK_LOONGSON2=y
+# CONFIG_COMMON_CLK_PWM is not set
+# CONFIG_COMMON_CLK_RS9_PCIE is not set
+# CONFIG_COMMON_CLK_SI521XX is not set
+# CONFIG_COMMON_CLK_VC3 is not set
+# CONFIG_COMMON_CLK_VC5 is not set
+# CONFIG_COMMON_CLK_VC7 is not set
+# CONFIG_COMMON_CLK_FIXED_MMIO is not set
+# CONFIG_XILINX_VCU is not set
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
+# CONFIG_HWSPINLOCK is not set
+
+#
+# Clock Source drivers
+#
+# end of Clock Source drivers
+
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+# end of Generic IOMMU Pagetable Support
+
+# CONFIG_IOMMU_DEBUGFS is not set
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
+# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
+# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
+CONFIG_OF_IOMMU=y
+# CONFIG_IOMMUFD is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_REMOTEPROC is not set
+# end of Remoteproc drivers
+
+#
+# Rpmsg drivers
+#
+# CONFIG_RPMSG_VIRTIO is not set
+# end of Rpmsg drivers
+
+# CONFIG_SOUNDWIRE is not set
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+# end of Amlogic SoC drivers
+
+#
+# Broadcom SoC drivers
+#
+# end of Broadcom SoC drivers
+
+#
+# NXP/Freescale QorIQ SoC drivers
+#
+# end of NXP/Freescale QorIQ SoC drivers
+
+#
+# fujitsu SoC drivers
+#
+# end of fujitsu SoC drivers
+
+#
+# i.MX SoC drivers
+#
+# end of i.MX SoC drivers
+
+#
+# Enable LiteX SoC Builder specific drivers
+#
+# CONFIG_LITEX_SOC_CONTROLLER is not set
+# end of Enable LiteX SoC Builder specific drivers
+
 CONFIG_LOONGSON2_GUTS=y
 CONFIG_LOONGSON2_PM=y
+# CONFIG_WPCM450_SOC is not set
+
+#
+# Qualcomm SoC drivers
+#
+# end of Qualcomm SoC drivers
+
+# CONFIG_SOC_TI is not set
+
+#
+# Xilinx SoC drivers
+#
+# end of Xilinx SoC drivers
+# end of SOC (System On Chip) specific Drivers
+
 CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_DEVFREQ_GOV_PERFORMANCE=y
 CONFIG_DEVFREQ_GOV_POWERSAVE=y
 CONFIG_DEVFREQ_GOV_USERSPACE=y
+# CONFIG_DEVFREQ_GOV_PASSIVE is not set
+
+#
+# DEVFREQ Drivers
+#
+# CONFIG_PM_DEVFREQ_EVENT is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
 CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+# CONFIG_IIO_BUFFER_DMA is not set
+# CONFIG_IIO_BUFFER_DMAENGINE is not set
+# CONFIG_IIO_BUFFER_HW_CONSUMER is not set
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGERED_BUFFER=m
+# CONFIG_IIO_CONFIGFS is not set
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+# CONFIG_IIO_SW_DEVICE is not set
+# CONFIG_IIO_SW_TRIGGER is not set
+# CONFIG_IIO_TRIGGERED_EVENT is not set
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADXL313_I2C is not set
+# CONFIG_ADXL313_SPI is not set
+# CONFIG_ADXL345_I2C is not set
+# CONFIG_ADXL345_SPI is not set
+# CONFIG_ADXL355_I2C is not set
+# CONFIG_ADXL355_SPI is not set
+# CONFIG_ADXL367_SPI is not set
+# CONFIG_ADXL367_I2C is not set
+# CONFIG_ADXL372_SPI is not set
+# CONFIG_ADXL372_I2C is not set
+# CONFIG_BMA180 is not set
+# CONFIG_BMA220 is not set
+# CONFIG_BMA400 is not set
+# CONFIG_BMC150_ACCEL is not set
+# CONFIG_BMI088_ACCEL is not set
+# CONFIG_DA280 is not set
+# CONFIG_DA311 is not set
+# CONFIG_DMARD06 is not set
+# CONFIG_DMARD09 is not set
+# CONFIG_DMARD10 is not set
+# CONFIG_FXLS8962AF_I2C is not set
+# CONFIG_FXLS8962AF_SPI is not set
 CONFIG_HID_SENSOR_ACCEL_3D=m
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+# CONFIG_IIO_KX022A_SPI is not set
+# CONFIG_IIO_KX022A_I2C is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_KXCJK1013 is not set
+# CONFIG_MC3230 is not set
+# CONFIG_MMA7455_I2C is not set
+# CONFIG_MMA7455_SPI is not set
+# CONFIG_MMA7660 is not set
+# CONFIG_MMA8452 is not set
+# CONFIG_MMA9551 is not set
+# CONFIG_MMA9553 is not set
+# CONFIG_MSA311 is not set
+# CONFIG_MXC4005 is not set
+# CONFIG_MXC6255 is not set
+# CONFIG_SCA3000 is not set
+# CONFIG_SCA3300 is not set
+# CONFIG_STK8312 is not set
+# CONFIG_STK8BA50 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD4130 is not set
+# CONFIG_AD7091R5 is not set
+# CONFIG_AD7124 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7266 is not set
+# CONFIG_AD7280 is not set
+# CONFIG_AD7291 is not set
+# CONFIG_AD7292 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7606_IFACE_PARALLEL is not set
+# CONFIG_AD7606_IFACE_SPI is not set
+# CONFIG_AD7766 is not set
+# CONFIG_AD7768_1 is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7949 is not set
+# CONFIG_AD799X is not set
+# CONFIG_ADI_AXI_ADC is not set
+# CONFIG_ENVELOPE_DETECTOR is not set
+# CONFIG_HI8435 is not set
+# CONFIG_HX711 is not set
+# CONFIG_INA2XX_ADC is not set
+# CONFIG_LTC2471 is not set
+# CONFIG_LTC2485 is not set
+# CONFIG_LTC2496 is not set
+# CONFIG_LTC2497 is not set
+# CONFIG_MAX1027 is not set
+# CONFIG_MAX11100 is not set
+# CONFIG_MAX1118 is not set
+# CONFIG_MAX11205 is not set
+# CONFIG_MAX11410 is not set
+# CONFIG_MAX1241 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_MAX9611 is not set
+# CONFIG_MCP320X is not set
+# CONFIG_MCP3422 is not set
+# CONFIG_MCP3911 is not set
+# CONFIG_NAU7802 is not set
+# CONFIG_RICHTEK_RTQ6056 is not set
+# CONFIG_SD_ADC_MODULATOR is not set
+# CONFIG_TI_ADC081C is not set
+# CONFIG_TI_ADC0832 is not set
+# CONFIG_TI_ADC084S021 is not set
+# CONFIG_TI_ADC12138 is not set
+# CONFIG_TI_ADC108S102 is not set
+# CONFIG_TI_ADC128S052 is not set
+# CONFIG_TI_ADC161S626 is not set
+# CONFIG_TI_ADS1015 is not set
+# CONFIG_TI_ADS7924 is not set
+# CONFIG_TI_ADS1100 is not set
+# CONFIG_TI_ADS7950 is not set
+# CONFIG_TI_ADS8344 is not set
+# CONFIG_TI_ADS8688 is not set
+# CONFIG_TI_ADS124S08 is not set
+# CONFIG_TI_ADS131E08 is not set
+# CONFIG_TI_LMP92064 is not set
+# CONFIG_TI_TLC4541 is not set
+# CONFIG_TI_TSC2046 is not set
+# CONFIG_VF610_ADC is not set
+# CONFIG_VIPERBOARD_ADC is not set
+# CONFIG_XILINX_XADC is not set
+# end of Analog to digital converters
+
+#
+# Analog to digital and digital to analog converters
+#
+# CONFIG_AD74115 is not set
+# CONFIG_AD74413R is not set
+# end of Analog to digital and digital to analog converters
+
+#
+# Analog Front Ends
+#
+# CONFIG_IIO_RESCALE is not set
+# end of Analog Front Ends
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+# CONFIG_ADA4250 is not set
+# CONFIG_HMC425 is not set
+# end of Amplifiers
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7746 is not set
+# end of Capacitance to digital converters
+
+#
+# Chemical Sensors
+#
+# CONFIG_ATLAS_PH_SENSOR is not set
+# CONFIG_ATLAS_EZO_SENSOR is not set
+# CONFIG_BME680 is not set
+# CONFIG_CCS811 is not set
+# CONFIG_IAQCORE is not set
+# CONFIG_SCD30_CORE is not set
+# CONFIG_SCD4X is not set
+# CONFIG_SENSIRION_SGP30 is not set
+# CONFIG_SENSIRION_SGP40 is not set
+# CONFIG_SPS30_I2C is not set
+# CONFIG_SENSEAIR_SUNRISE_CO2 is not set
+# CONFIG_VZ89X is not set
+# end of Chemical Sensors
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+# end of Hid Sensor IIO Common
+
+#
+# IIO SCMI Sensors
+#
+# end of IIO SCMI Sensors
+
+#
+# SSP Sensor Common
+#
+# CONFIG_IIO_SSP_SENSORHUB is not set
+# end of SSP Sensor Common
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD3552R is not set
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5592R is not set
+# CONFIG_AD5593R is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_LTC2688 is not set
+# CONFIG_AD5686_SPI is not set
+# CONFIG_AD5696_I2C is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5758 is not set
+# CONFIG_AD5761 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5766 is not set
+# CONFIG_AD5770R is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD7293 is not set
+# CONFIG_AD7303 is not set
+# CONFIG_AD8801 is not set
+# CONFIG_DPOT_DAC is not set
+# CONFIG_DS4424 is not set
+# CONFIG_LTC1660 is not set
+# CONFIG_LTC2632 is not set
+# CONFIG_M62332 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MAX5522 is not set
+# CONFIG_MAX5821 is not set
+# CONFIG_MCP4725 is not set
+# CONFIG_MCP4728 is not set
+# CONFIG_MCP4922 is not set
+# CONFIG_TI_DAC082S085 is not set
+# CONFIG_TI_DAC5571 is not set
+# CONFIG_TI_DAC7311 is not set
+# CONFIG_TI_DAC7612 is not set
+# CONFIG_VF610_DAC is not set
+# end of Digital to analog converters
+
+#
+# IIO dummy driver
+#
+# end of IIO dummy driver
+
+#
+# Filters
+#
+# CONFIG_ADMV8818 is not set
+# end of Filters
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+# end of Clock Generator/Distribution
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+# CONFIG_ADF4371 is not set
+# CONFIG_ADF4377 is not set
+# CONFIG_ADMV1013 is not set
+# CONFIG_ADMV1014 is not set
+# CONFIG_ADMV4420 is not set
+# CONFIG_ADRF6780 is not set
+# end of Phase-Locked Loop (PLL) frequency synthesizers
+# end of Frequency Synthesizers DDS/PLL
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADIS16260 is not set
+# CONFIG_ADXRS290 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_BMG160 is not set
+# CONFIG_FXAS21002C is not set
 CONFIG_HID_SENSOR_GYRO_3D=m
+# CONFIG_MPU3050_I2C is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+# end of Digital gyroscope sensors
+
+#
+# Health Sensors
+#
+
+#
+# Heart Rate Monitors
+#
+# CONFIG_AFE4403 is not set
+# CONFIG_AFE4404 is not set
+# CONFIG_MAX30100 is not set
+# CONFIG_MAX30102 is not set
+# end of Heart Rate Monitors
+# end of Health Sensors
+
+#
+# Humidity sensors
+#
+# CONFIG_AM2315 is not set
+# CONFIG_DHT11 is not set
+# CONFIG_HDC100X is not set
+# CONFIG_HDC2010 is not set
 CONFIG_HID_SENSOR_HUMIDITY=m
+# CONFIG_HTS221 is not set
+# CONFIG_HTU21 is not set
+# CONFIG_SI7005 is not set
+# CONFIG_SI7020 is not set
+# end of Humidity sensors
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16460 is not set
+# CONFIG_ADIS16475 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_BMI160_I2C is not set
+# CONFIG_BMI160_SPI is not set
+# CONFIG_BOSCH_BNO055_I2C is not set
+# CONFIG_FXOS8700_I2C is not set
+# CONFIG_FXOS8700_SPI is not set
+# CONFIG_KMX61 is not set
+# CONFIG_INV_ICM42600_I2C is not set
+# CONFIG_INV_ICM42600_SPI is not set
+# CONFIG_INV_MPU6050_I2C is not set
+# CONFIG_INV_MPU6050_SPI is not set
+# CONFIG_IIO_ST_LSM6DSX is not set
+# CONFIG_IIO_ST_LSM9DS0 is not set
+# end of Inertial measurement units
+
+#
+# Light sensors
+#
+# CONFIG_ACPI_ALS is not set
+# CONFIG_ADJD_S311 is not set
+# CONFIG_ADUX1020 is not set
+# CONFIG_AL3010 is not set
+# CONFIG_AL3320A is not set
+# CONFIG_APDS9300 is not set
+# CONFIG_APDS9960 is not set
+# CONFIG_AS73211 is not set
+# CONFIG_BH1750 is not set
+# CONFIG_BH1780 is not set
+# CONFIG_CM32181 is not set
+# CONFIG_CM3232 is not set
+# CONFIG_CM3323 is not set
+# CONFIG_CM3605 is not set
+# CONFIG_CM36651 is not set
+# CONFIG_GP2AP002 is not set
+# CONFIG_GP2AP020A00F is not set
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_ISL29125 is not set
 CONFIG_HID_SENSOR_ALS=m
 CONFIG_HID_SENSOR_PROX=m
+# CONFIG_JSA1212 is not set
+# CONFIG_ROHM_BU27008 is not set
+# CONFIG_ROHM_BU27034 is not set
+# CONFIG_RPR0521 is not set
+# CONFIG_LTR501 is not set
+# CONFIG_LTRF216A is not set
+# CONFIG_LV0104CS is not set
+# CONFIG_MAX44000 is not set
+# CONFIG_MAX44009 is not set
+# CONFIG_NOA1305 is not set
+# CONFIG_OPT3001 is not set
+# CONFIG_OPT4001 is not set
+# CONFIG_PA12203001 is not set
+# CONFIG_SI1133 is not set
+# CONFIG_SI1145 is not set
+# CONFIG_STK3310 is not set
+# CONFIG_ST_UVIS25 is not set
+# CONFIG_TCS3414 is not set
+# CONFIG_TCS3472 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2591 is not set
+# CONFIG_TSL2772 is not set
+# CONFIG_TSL4531 is not set
+# CONFIG_US5182D is not set
+# CONFIG_VCNL4000 is not set
+# CONFIG_VCNL4035 is not set
+# CONFIG_VEML6030 is not set
+# CONFIG_VEML6070 is not set
+# CONFIG_VL6180 is not set
+# CONFIG_ZOPT2201 is not set
+# end of Light sensors
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8974 is not set
+# CONFIG_AK8975 is not set
+# CONFIG_AK09911 is not set
+# CONFIG_BMC150_MAGN_I2C is not set
+# CONFIG_BMC150_MAGN_SPI is not set
+# CONFIG_MAG3110 is not set
 CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
+# CONFIG_MMC35240 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_SENSORS_HMC5843_I2C is not set
+# CONFIG_SENSORS_HMC5843_SPI is not set
+# CONFIG_SENSORS_RM3100_I2C is not set
+# CONFIG_SENSORS_RM3100_SPI is not set
+# CONFIG_TI_TMAG5273 is not set
+# CONFIG_YAMAHA_YAS530 is not set
+# end of Magnetometer sensors
+
+#
+# Multiplexers
+#
+# CONFIG_IIO_MUX is not set
+# end of Multiplexers
+
+#
+# Inclinometer sensors
+#
 CONFIG_HID_SENSOR_INCLINOMETER_3D=m
 CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+# end of Inclinometer sensors
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_INTERRUPT_TRIGGER is not set
+# CONFIG_IIO_SYSFS_TRIGGER is not set
+# end of Triggers - standalone
+
+#
+# Linear and angular position sensors
+#
+# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set
+# end of Linear and angular position sensors
+
+#
+# Digital potentiometers
+#
+# CONFIG_AD5110 is not set
+# CONFIG_AD5272 is not set
+# CONFIG_DS1803 is not set
+# CONFIG_MAX5432 is not set
+# CONFIG_MAX5481 is not set
+# CONFIG_MAX5487 is not set
+# CONFIG_MCP4018 is not set
+# CONFIG_MCP4131 is not set
+# CONFIG_MCP4531 is not set
+# CONFIG_MCP41010 is not set
+# CONFIG_TPL0102 is not set
+# CONFIG_X9250 is not set
+# end of Digital potentiometers
+
+#
+# Digital potentiostats
+#
+# CONFIG_LMP91000 is not set
+# end of Digital potentiostats
+
+#
+# Pressure sensors
+#
+# CONFIG_ABP060MG is not set
+# CONFIG_BMP280 is not set
+# CONFIG_DLHL60D is not set
+# CONFIG_DPS310 is not set
 CONFIG_HID_SENSOR_PRESS=m
+# CONFIG_HP03 is not set
+# CONFIG_ICP10100 is not set
+# CONFIG_MPL115_I2C is not set
+# CONFIG_MPL115_SPI is not set
+# CONFIG_MPL3115 is not set
+# CONFIG_MPRLS0025PA is not set
+# CONFIG_MS5611 is not set
+# CONFIG_MS5637 is not set
+# CONFIG_IIO_ST_PRESS is not set
+# CONFIG_T5403 is not set
+# CONFIG_HP206C is not set
+# CONFIG_ZPA2326 is not set
+# end of Pressure sensors
+
+#
+# Lightning sensors
+#
+# CONFIG_AS3935 is not set
+# end of Lightning sensors
+
+#
+# Proximity and distance sensors
+#
+# CONFIG_IRSD200 is not set
+# CONFIG_ISL29501 is not set
+# CONFIG_LIDAR_LITE_V2 is not set
+# CONFIG_MB1232 is not set
+# CONFIG_PING is not set
+# CONFIG_RFD77402 is not set
+# CONFIG_SRF04 is not set
+# CONFIG_SX9310 is not set
+# CONFIG_SX9324 is not set
+# CONFIG_SX9360 is not set
+# CONFIG_SX9500 is not set
+# CONFIG_SRF08 is not set
+# CONFIG_VCNL3020 is not set
+# CONFIG_VL53L0X_I2C is not set
+# end of Proximity and distance sensors
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# end of Resolver to digital converters
+
+#
+# Temperature sensors
+#
+# CONFIG_LTC2983 is not set
+# CONFIG_MAXIM_THERMOCOUPLE is not set
 CONFIG_HID_SENSOR_TEMP=m
+# CONFIG_MLX90614 is not set
+# CONFIG_MLX90632 is not set
+# CONFIG_TMP006 is not set
+# CONFIG_TMP007 is not set
+# CONFIG_TMP117 is not set
+# CONFIG_TSYS01 is not set
+# CONFIG_TSYS02D is not set
+# CONFIG_MAX30208 is not set
+# CONFIG_MAX31856 is not set
+# CONFIG_MAX31865 is not set
+# end of Temperature sensors
+
 CONFIG_NTB=m
+# CONFIG_NTB_MSI is not set
+# CONFIG_NTB_IDT is not set
+# CONFIG_NTB_EPF is not set
+# CONFIG_NTB_SWITCHTEC is not set
 CONFIG_NTB_PINGPONG=m
 CONFIG_NTB_TOOL=m
 CONFIG_NTB_PERF=m
 CONFIG_NTB_TRANSPORT=m
 CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+# CONFIG_PWM_DEBUG is not set
+# CONFIG_PWM_ATMEL_TCB is not set
+# CONFIG_PWM_CLK is not set
+# CONFIG_PWM_DWC is not set
+# CONFIG_PWM_FSL_FTM is not set
+# CONFIG_PWM_PCA9685 is not set
+# CONFIG_PWM_XILINX is not set
+
+#
+# IRQ chip support
+#
+CONFIG_IRQCHIP=y
+# CONFIG_AL_FIC is not set
+# CONFIG_XILINX_INTC is not set
+CONFIG_IRQ_LOONGARCH_CPU=y
+CONFIG_LOONGSON_LIOINTC=y
+CONFIG_LOONGSON_EIOINTC=y
+CONFIG_LOONGSON_HTVEC=y
+CONFIG_LOONGSON_PCH_PIC=y
+CONFIG_LOONGSON_PCH_MSI=y
+CONFIG_LOONGSON_PCH_LPC=y
+# end of IRQ chip support
+
+# CONFIG_IPACK_BUS is not set
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_SIMPLE is not set
+# CONFIG_RESET_TI_SYSCON is not set
+# CONFIG_RESET_TI_TPS380X is not set
+
+#
+# PHY Subsystem
+#
+# CONFIG_GENERIC_PHY is not set
+# CONFIG_PHY_CAN_TRANSCEIVER is not set
+
+#
+# PHY drivers for Broadcom platforms
+#
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# end of PHY drivers for Broadcom platforms
+
+# CONFIG_PHY_CADENCE_TORRENT is not set
+# CONFIG_PHY_CADENCE_DPHY is not set
+# CONFIG_PHY_CADENCE_DPHY_RX is not set
+# CONFIG_PHY_CADENCE_SIERRA is not set
+# CONFIG_PHY_CADENCE_SALVO is not set
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_LAN966X_SERDES is not set
+# CONFIG_PHY_CPCAP_USB is not set
+# CONFIG_PHY_MAPPHONE_MDM6600 is not set
+# CONFIG_PHY_OCELOT_SERDES is not set
+# CONFIG_PHY_SAMSUNG_USB2 is not set
+# end of PHY Subsystem
+
 CONFIG_POWERCAP=y
+# CONFIG_DTPM is not set
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+# CONFIG_DWC_PCIE_PMU is not set
+# end of Performance monitor support
+
+CONFIG_RAS=y
 CONFIG_USB4=m
+# CONFIG_USB4_DEBUGFS_WRITE is not set
+# CONFIG_USB4_DMA_TEST is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID_BINDER_IPC is not set
+# end of Android
+
+# CONFIG_LIBNVDIMM is not set
 CONFIG_DAX=y
 CONFIG_DEV_DAX=m
+CONFIG_DEV_DAX_KMEM=m
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+
+#
+# Layout Types
+#
+# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set
+# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set
+# end of Layout Types
+
+# CONFIG_NVMEM_RMEM is not set
+# CONFIG_NVMEM_U_BOOT_ENV is not set
+
+#
+# HW tracing support
+#
+# CONFIG_STM is not set
+# CONFIG_INTEL_TH is not set
+# end of HW tracing support
+
+# CONFIG_FPGA is not set
+# CONFIG_FSI is not set
+CONFIG_PM_OPP=y
+# CONFIG_SIOX is not set
+# CONFIG_SLIMBUS is not set
+# CONFIG_INTERCONNECT is not set
+# CONFIG_COUNTER is not set
+# CONFIG_MOST is not set
+# CONFIG_PECI is not set
+# CONFIG_HTE is not set
+# end of Device Drivers
+
+#
+# File systems
+#
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_FS_IOMAP=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_LEGACY_DIRECT_IO=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -1951,105 +7693,289 @@ CONFIG_EXT2_FS_SECURITY=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
 CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_ONLINE_SCRUB is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
 CONFIG_GFS2_FS=m
 CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
+CONFIG_OCFS2_FS_O2CB=m
+CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+CONFIG_OCFS2_FS_STATS=y
+CONFIG_OCFS2_DEBUG_MASKLOG=y
+# CONFIG_OCFS2_DEBUG_FS is not set
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
+# CONFIG_BTRFS_DEBUG is not set
+# CONFIG_BTRFS_ASSERT is not set
+# CONFIG_BTRFS_FS_REF_VERIFY is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_ZONEFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_FS_ENCRYPTION is not set
+# CONFIG_FS_VERITY is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
 CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
+# CONFIG_VIRT_FUSE is not set
 CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 # CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set
 CONFIG_OVERLAY_FS_INDEX=y
 CONFIG_OVERLAY_FS_XINO_AUTO=y
 CONFIG_OVERLAY_FS_METACOPY=y
+# CONFIG_OVERLAY_FS_DEBUG is not set
+
+#
+# Caches
+#
+CONFIG_NETFS_SUPPORT=y
+CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=m
 CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_DEBUG is not set
 CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_ERROR_INJECTION is not set
+# CONFIG_CACHEFILES_ONDEMAND is not set
+# end of Caches
+
+#
+# CD-ROM/DVD Filesystems
+#
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
+# end of CD-ROM/DVD Filesystems
+
+#
+# DOS/FAT/EXFAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=936
 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+# CONFIG_FAT_DEFAULT_UTF8 is not set
 CONFIG_EXFAT_FS=m
+CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
 CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
 CONFIG_NTFS3_FS=m
 CONFIG_NTFS3_64BIT_CLUSTER=y
 CONFIG_NTFS3_LZX_XPRESS=y
+# CONFIG_NTFS3_FS_POSIX_ACL is not set
+# end of DOS/FAT/EXFAT/NT Filesystems
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
 CONFIG_PROC_VMCORE_DEVICE_DUMP=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_TMPFS_INODE64 is not set
+# CONFIG_TMPFS_QUOTA is not set
+CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
 CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
+# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_EFIVAR_FS=y
+# end of Pseudo filesystems
+
+CONFIG_MISC_FILESYSTEMS=y
 CONFIG_ORANGEFS_FS=m
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
 CONFIG_UBIFS_FS=m
 CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+CONFIG_UBIFS_FS_ZSTD=y
+# CONFIG_UBIFS_ATIME_SUPPORT is not set
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_SECURITY=y
+# CONFIG_UBIFS_FS_AUTHENTICATION is not set
 CONFIG_CRAMFS=m
+CONFIG_CRAMFS_BLOCKDEV=y
+# CONFIG_CRAMFS_MTD is not set
 CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_FILE_CACHE is not set
 CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
 CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
 CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
 CONFIG_MINIX_FS=m
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
 CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
 CONFIG_PSTORE=m
+CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
+CONFIG_PSTORE_COMPRESS=y
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_PMSG is not set
+# CONFIG_PSTORE_FTRACE is not set
+# CONFIG_PSTORE_RAM is not set
+# CONFIG_PSTORE_BLK is not set
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
 CONFIG_EROFS_FS=m
+# CONFIG_EROFS_FS_DEBUG is not set
+CONFIG_EROFS_FS_XATTR=y
+CONFIG_EROFS_FS_POSIX_ACL=y
+CONFIG_EROFS_FS_SECURITY=y
+CONFIG_EROFS_FS_ZIP=y
 CONFIG_EROFS_FS_ZIP_LZMA=y
+# CONFIG_EROFS_FS_ZIP_DEFLATE is not set
 CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y
+CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 # CONFIG_NFS_V2 is not set
 CONFIG_NFS_V3=m
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=m
+# CONFIG_NFS_SWAP is not set
 CONFIG_NFS_V4_1=y
 CONFIG_NFS_V4_2=y
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_FLEXFILE_LAYOUT=m
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
+# CONFIG_NFS_V4_1_MIGRATION is not set
+CONFIG_NFS_V4_SECURITY_LABEL=y
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFS_DEBUG=y
 # CONFIG_NFS_DISABLE_UDP_SUPPORT is not set
+CONFIG_NFS_V4_2_READ_PLUS=y
 CONFIG_NFSD=y
+# CONFIG_NFSD_V2 is not set
 CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
+CONFIG_NFSD_PNFS=y
 CONFIG_NFSD_BLOCKLAYOUT=y
 CONFIG_NFSD_SCSILAYOUT=y
 CONFIG_NFSD_FLEXFILELAYOUT=y
 CONFIG_NFSD_V4_2_INTER_SSC=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_GRACE_PERIOD=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_NFS_V4_2_SSC_HELPER=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_SUNRPC_BACKCHANNEL=y
+CONFIG_RPCSEC_GSS_KRB5=y
+CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set
 CONFIG_SUNRPC_DEBUG=y
+CONFIG_SUNRPC_XPRT_RDMA=m
 CONFIG_CEPH_FS=m
 CONFIG_CEPH_FSCACHE=y
 CONFIG_CEPH_FS_POSIX_ACL=y
 CONFIG_CEPH_FS_SECURITY_LABEL=y
 CONFIG_CIFS=m
 # CONFIG_CIFS_STATS2 is not set
+CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CIFS_DFS_UPCALL=y
+# CONFIG_CIFS_SWN_UPCALL is not set
+# CONFIG_CIFS_SMB_DIRECT is not set
+# CONFIG_CIFS_FSCACHE is not set
+# CONFIG_SMB_SERVER is not set
+CONFIG_SMBFS=m
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
 CONFIG_9P_FS=y
+# CONFIG_9P_FS_POSIX_ACL is not set
+# CONFIG_9P_FS_SECURITY is not set
+CONFIG_NLS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -2100,44 +8026,207 @@ CONFIG_NLS_MAC_INUIT=m
 CONFIG_NLS_MAC_ROMANIAN=m
 CONFIG_NLS_MAC_TURKISH=m
 CONFIG_NLS_UTF8=y
+CONFIG_NLS_UCS2_UTILS=m
 CONFIG_DLM=m
 CONFIG_DLM_DEBUG=y
+# CONFIG_UNICODE is not set
+CONFIG_IO_WQ=y
+# end of File systems
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_REQUEST_CACHE is not set
 CONFIG_PERSISTENT_KEYRINGS=y
 CONFIG_TRUSTED_KEYS=y
+CONFIG_TRUSTED_KEYS_TPM=y
+CONFIG_ENCRYPTED_KEYS=y
+# CONFIG_USER_DECRYPTED_DATA is not set
 CONFIG_KEY_DH_OPERATIONS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
 CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_INFINIBAND=y
 CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_PATH=y
 CONFIG_LSM_MMAP_MIN_ADDR=65535
 CONFIG_HARDENED_USERCOPY=y
+# CONFIG_FORTIFY_SOURCE is not set
+# CONFIG_STATIC_USERMODEHELPER is not set
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
+CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
+# CONFIG_SECURITY_SELINUX_DEBUG is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
 CONFIG_SECURITY_APPARMOR=y
+# CONFIG_SECURITY_APPARMOR_DEBUG is not set
+CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y
+CONFIG_SECURITY_APPARMOR_HASH=y
+CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
+CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y
+CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y
+# CONFIG_SECURITY_LOADPIN is not set
 CONFIG_SECURITY_YAMA=y
+# CONFIG_SECURITY_SAFESETID is not set
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
+# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
+# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
+# CONFIG_SECURITY_LANDLOCK is not set
+CONFIG_INTEGRITY=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_TRUSTED_KEYRING=y
 CONFIG_INTEGRITY_PLATFORM_KEYRING=y
+# CONFIG_INTEGRITY_MACHINE_KEYRING is not set
+CONFIG_LOAD_UEFI_KEYS=y
+CONFIG_INTEGRITY_AUDIT=y
 CONFIG_IMA=y
+CONFIG_IMA_MEASURE_PCR_IDX=10
+CONFIG_IMA_LSM_RULES=y
+CONFIG_IMA_NG_TEMPLATE=y
+# CONFIG_IMA_SIG_TEMPLATE is not set
+CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
+# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
+# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
+# CONFIG_IMA_DEFAULT_HASH_SM3 is not set
+CONFIG_IMA_DEFAULT_HASH="sha256"
+# CONFIG_IMA_WRITE_POLICY is not set
 CONFIG_IMA_READ_POLICY=y
 CONFIG_IMA_APPRAISE=y
+# CONFIG_IMA_ARCH_POLICY is not set
+# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set
+CONFIG_IMA_APPRAISE_BOOTPARAM=y
+# CONFIG_IMA_APPRAISE_MODSIG is not set
+# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set
+# CONFIG_IMA_BLACKLIST_KEYRING is not set
 CONFIG_IMA_LOAD_X509=y
+CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der"
+# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set
+CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
+CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
+# CONFIG_IMA_DISABLE_HTABLE is not set
 CONFIG_EVM=y
+CONFIG_EVM_ATTR_FSUUID=y
+# CONFIG_EVM_ADD_XATTRS is not set
 CONFIG_EVM_LOAD_X509=y
+CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der"
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_APPARMOR is not set
 CONFIG_DEFAULT_SECURITY_DAC=y
 CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf"
+
+#
+# Kernel hardening options
+#
+
+#
+# Memory initialization
+#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
+# CONFIG_INIT_STACK_NONE is not set
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+CONFIG_INIT_STACK_ALL_ZERO=y
+# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
+# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
+# end of Memory initialization
+
+#
+# Hardening of kernel data structures
+#
+CONFIG_LIST_HARDENED=y
+# CONFIG_BUG_ON_DATA_CORRUPTION is not set
+# end of Hardening of kernel data structures
+
+CONFIG_CC_HAS_RANDSTRUCT=y
+CONFIG_RANDSTRUCT_NONE=y
+# CONFIG_RANDSTRUCT_FULL is not set
+# CONFIG_RANDSTRUCT_PERFORMANCE is not set
+# end of Kernel hardening options
+# end of Security options
+
+CONFIG_XOR_BLOCKS=y
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
 CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API"
+# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG2=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_SKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_KPP=y
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ENGINE=m
+# end of Crypto core or helper
+
+#
+# Public-key cryptography
+#
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_DH=y
+# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set
+CONFIG_CRYPTO_ECC=m
+CONFIG_CRYPTO_ECDH=m
+# CONFIG_CRYPTO_ECDSA is not set
+# CONFIG_CRYPTO_ECRDSA is not set
 CONFIG_CRYPTO_SM2=y
+# CONFIG_CRYPTO_CURVE25519 is not set
+# end of Public-key cryptography
+
+#
+# Block ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES_TI is not set
 CONFIG_CRYPTO_ANUBIS=m
+# CONFIG_CRYPTO_ARIA is not set
 CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
 CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST_COMMON=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
@@ -2145,59 +8234,618 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=y
+CONFIG_CRYPTO_SM4_GENERIC=y
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+# end of Block ciphers
+
+#
+# Length-preserving ciphers and modes
+#
+# CONFIG_CRYPTO_ADIANTUM is not set
 CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CHACHA20=m
+CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_HCTR2 is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
 CONFIG_CRYPTO_LRW=m
+# CONFIG_CRYPTO_OFB is not set
 CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+# end of Length-preserving ciphers and modes
+
+#
+# AEAD (authenticated encryption with associated data) ciphers
+#
+# CONFIG_CRYPTO_AEGIS128 is not set
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_GENIV=y
 CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_ESSIV=m
+# end of AEAD (authenticated encryption with associated data) ciphers
+
+#
+# Hashes, digests, and MACs
+#
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_POLY1305=m
 CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+CONFIG_CRYPTO_SM3=y
+CONFIG_CRYPTO_SM3_GENERIC=y
+# CONFIG_CRYPTO_STREEBOG is not set
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_XXHASH=y
+# end of Hashes, digests, and MACs
+
+#
+# CRCs (cyclic redundancy checks)
+#
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRC64_ROCKSOFT=m
+# end of CRCs (cyclic redundancy checks)
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=y
+# end of Compression
+
+#
+# Random number generation
+#
 CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set
+CONFIG_CRYPTO_KDF800108_CTR=y
+# end of Random number generation
+
+#
+# Userspace interface
+#
+CONFIG_CRYPTO_USER_API=y
 CONFIG_CRYPTO_USER_API_HASH=y
 CONFIG_CRYPTO_USER_API_SKCIPHER=y
 CONFIG_CRYPTO_USER_API_RNG=y
+# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
 CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y
+# CONFIG_CRYPTO_STATS is not set
+# end of Userspace interface
+
+CONFIG_CRYPTO_HASH_INFO=y
+
+#
+# Accelerated Cryptographic Algorithms for CPU (loongarch)
+#
 CONFIG_CRYPTO_CRC32_LOONGARCH=m
+# end of Accelerated Cryptographic Algorithms for CPU (loongarch)
+
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
+# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
+CONFIG_CRYPTO_DEV_NITROX=m
 CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set
+# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set
+# CONFIG_CRYPTO_DEV_QAT_C62X is not set
+# CONFIG_CRYPTO_DEV_QAT_4XXX is not set
+# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set
+# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set
+# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set
 CONFIG_CRYPTO_DEV_CHELSIO=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
+# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
+# CONFIG_CRYPTO_DEV_CCREE is not set
+# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_PKCS7_TEST_KEY is not set
 CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
+
+#
+# Certificates for signature checking
+#
+CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
+CONFIG_MODULE_SIG_KEY_TYPE_RSA=y
+# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
 CONFIG_SECONDARY_TRUSTED_KEYRING=y
 CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
 CONFIG_SYSTEM_REVOCATION_LIST=y
+CONFIG_SYSTEM_REVOCATION_KEYS=""
+# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set
+# end of Certificates for signature checking
+
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_RAID6_PQ=y
+CONFIG_RAID6_PQ_BENCHMARK=y
+CONFIG_PACKING=y
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+CONFIG_CORDIC=m
+# CONFIG_PRIME_NUMBERS is not set
+CONFIG_RATIONAL=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+
+#
+# Crypto library routines
+#
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LIB_AES=y
+CONFIG_CRYPTO_LIB_ARC4=m
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m
+CONFIG_CRYPTO_LIB_CHACHA=m
+CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_DES=m
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1
+CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
+CONFIG_CRYPTO_LIB_POLY1305=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+# end of Crypto library routines
+
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
 CONFIG_CRC_T10DIF=y
+CONFIG_CRC64_ROCKSOFT=m
 CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+CONFIG_CRC64=m
+# CONFIG_CRC4 is not set
 CONFIG_CRC7=m
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_XXHASH=y
+CONFIG_AUDIT_GENERIC=y
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_842_COMPRESS=m
+CONFIG_842_DECOMPRESS=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_LZ4_COMPRESS=m
+CONFIG_LZ4HC_COMPRESS=m
+CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_MICROLZMA=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_DECOMPRESS_LZ4=y
+CONFIG_DECOMPRESS_ZSTD=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_BTREE=y
+CONFIG_INTERVAL_TREE=y
+CONFIG_XARRAY_MULTI=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_DMA_DECLARE_COHERENT=y
+CONFIG_SWIOTLB=y
+# CONFIG_SWIOTLB_DYNAMIC is not set
+# CONFIG_DMA_RESTRICTED_POOL is not set
 CONFIG_DMA_CMA=y
+# CONFIG_DMA_NUMA_CMA is not set
+
+#
+# Default contiguous memory area size:
+#
+CONFIG_CMA_SIZE_MBYTES=16
+CONFIG_CMA_SIZE_SEL_MBYTES=y
+# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
+# CONFIG_CMA_SIZE_SEL_MIN is not set
+# CONFIG_CMA_SIZE_SEL_MAX is not set
+CONFIG_CMA_ALIGNMENT=8
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_DMA_MAP_BENCHMARK is not set
+CONFIG_SGL_ALLOC=y
+CONFIG_CHECK_SIGNATURE=y
+# CONFIG_CPUMASK_OFFSTACK is not set
+# CONFIG_FORCE_NR_CPUS is not set
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_GLOB=y
+# CONFIG_GLOB_SELFTEST is not set
+CONFIG_NLATTR=y
+CONFIG_LRU_CACHE=m
+CONFIG_CLZ_TAB=y
+CONFIG_IRQ_POLL=y
+CONFIG_MPILIB=y
+CONFIG_SIGNATURE=y
+CONFIG_DIMLIB=y
+CONFIG_LIBFDT=y
+CONFIG_OID_REGISTRY=y
+CONFIG_UCS2_STRING=y
+CONFIG_HAVE_GENERIC_VDSO=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_VDSO_TIME_NS=y
+CONFIG_FONT_SUPPORT=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_SG_POOL=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_STACKDEPOT=y
+CONFIG_SBITMAP=y
+CONFIG_PARMAN=m
+CONFIG_OBJAGG=m
+# end of Library routines
+
+CONFIG_GENERIC_LIB_ASHLDI3=y
+CONFIG_GENERIC_LIB_ASHRDI3=y
+CONFIG_GENERIC_LIB_LSHRDI3=y
+CONFIG_GENERIC_LIB_CMPDI2=y
+CONFIG_GENERIC_LIB_UCMPDI2=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+CONFIG_PLDMFW=y
+CONFIG_ASN1_ENCODER=y
+
+#
+# Kernel hacking
+#
+
+#
+# printk and dmesg options
+#
 CONFIG_PRINTK_TIME=y
 CONFIG_PRINTK_CALLER=y
+# CONFIG_STACKTRACE_BUILD_ID is not set
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
+CONFIG_CONSOLE_LOGLEVEL_QUIET=4
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
 CONFIG_BOOT_PRINTK_DELAY=y
 CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_SYMBOLIC_ERRNAME=y
+CONFIG_DEBUG_BUGVERBOSE=y
+# end of printk and dmesg options
+
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MISC=y
+
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+CONFIG_AS_HAS_NON_CONST_LEB128=y
+# CONFIG_DEBUG_INFO_NONE is not set
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+# CONFIG_DEBUG_INFO_DWARF4 is not set
+# CONFIG_DEBUG_INFO_DWARF5 is not set
+# CONFIG_DEBUG_INFO_REDUCED is not set
+CONFIG_DEBUG_INFO_COMPRESSED_NONE=y
+# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set
+# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set
+# CONFIG_DEBUG_INFO_SPLIT is not set
+CONFIG_DEBUG_INFO_BTF=y
+# CONFIG_GDB_SCRIPTS is not set
 CONFIG_FRAME_WARN=4096
 CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
+# CONFIG_HEADERS_INSTALL is not set
 CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+# CONFIG_VMLINUX_MAP is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# end of Compile-time checks and compiler options
+
+#
+# Generic Kernel Debugging Instruments
+#
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_MAGIC_SYSRQ_SERIAL=y
+CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
+# CONFIG_DEBUG_FS_ALLOW_NONE is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_UBSAN is not set
+CONFIG_HAVE_KCSAN_COMPILER=y
+# end of Generic Kernel Debugging Instruments
+
+#
+# Networking Debugging
+#
+# CONFIG_NET_DEV_REFCNT_TRACKER is not set
+# CONFIG_NET_NS_REFCNT_TRACKER is not set
+# CONFIG_DEBUG_NET is not set
+# end of Networking Debugging
+
+#
+# Memory Debugging
+#
+# CONFIG_PAGE_EXTENSION is not set
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_PAGE_OWNER is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_DEBUG_PAGE_REF is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SHRINKER_DEBUG is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_SCHED_STACK_END_CHECK is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_HAVE_ARCH_KASAN=y
+CONFIG_ARCH_DISABLE_KASAN_INLINE=y
+CONFIG_CC_HAS_KASAN_GENERIC=y
+CONFIG_CC_HAS_KASAN_SW_TAGS=y
+CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
+# CONFIG_KASAN is not set
+CONFIG_HAVE_ARCH_KFENCE=y
+# CONFIG_KFENCE is not set
+# end of Memory Debugging
+
 CONFIG_DEBUG_SHIRQ=y
+
+#
+# Debug Oops, Lockups and Hangs
+#
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_ON_OOPS_VALUE=1
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y
+# CONFIG_SDEI_WATCHDOG is not set
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set
+CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y
+# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set
+CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y
+# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+# CONFIG_WQ_WATCHDOG is not set
+# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set
+# CONFIG_TEST_LOCKUP is not set
+# end of Debug Oops, Lockups and Hangs
+
+#
+# Scheduler Debugging
+#
 # CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHED_INFO=y
 CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_ACPU=y
+# end of Scheduler Debugging
+
+# CONFIG_DEBUG_TIMEKEEPING is not set
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+# CONFIG_WW_MUTEX_SELFTEST is not set
+# CONFIG_SCF_TORTURE_TEST is not set
+# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
+# end of Lock Debugging (spinlocks, mutexes, etc...)
+
+# CONFIG_DEBUG_IRQFLAGS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
+# CONFIG_DEBUG_KOBJECT is not set
+
+#
+# Debug kernel data structures
+#
 CONFIG_DEBUG_LIST=y
+# CONFIG_DEBUG_PLIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_MAPLE_TREE is not set
+# end of Debug kernel data structures
+
+# CONFIG_DEBUG_CREDENTIALS is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_RCU_SCALE_TEST is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_REF_SCALE_TEST is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0
+# CONFIG_RCU_CPU_STALL_CPUTIME is not set
 # CONFIG_RCU_TRACE is not set
+# CONFIG_RCU_EQS_DEBUG is not set
+# end of RCU Debugging
+
+# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
+# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
+# CONFIG_DEBUG_CGROUP_REF is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_RETHOOK=y
+CONFIG_RETHOOK=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACE_CLOCK=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_BOOTTIME_TRACING is not set
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+# CONFIG_FUNCTION_GRAPH_RETVAL is not set
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
+CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
+CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y
+# CONFIG_FPROBE is not set
+# CONFIG_FUNCTION_PROFILER is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_HWLAT_TRACER is not set
+# CONFIG_OSNOISE_TRACER is not set
+# CONFIG_TIMERLAT_TRACER is not set
+CONFIG_FTRACE_SYSCALLS=y
+# CONFIG_TRACER_SNAPSHOT is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_PROBE_EVENTS_BTF_ARGS=y
+CONFIG_KPROBE_EVENTS=y
+# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set
+CONFIG_UPROBE_EVENTS=y
+CONFIG_BPF_EVENTS=y
+CONFIG_DYNAMIC_EVENTS=y
+CONFIG_PROBE_EVENTS=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+CONFIG_FTRACE_MCOUNT_USE_CC=y
+# CONFIG_SYNTH_EVENTS is not set
+# CONFIG_USER_EVENTS is not set
+# CONFIG_TRACE_EVENT_INJECT is not set
+# CONFIG_TRACEPOINT_BENCHMARK is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_TRACE_EVAL_MAP_FILE is not set
+# CONFIG_FTRACE_RECORD_RECURSION is not set
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set
+# CONFIG_PREEMPTIRQ_DELAY_TEST is not set
+# CONFIG_KPROBE_EVENT_GEN_TEST is not set
+# CONFIG_RV is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y
 # CONFIG_STRICT_DEVMEM is not set
+
+#
+# loongarch Debugging
+#
+# CONFIG_UNWINDER_GUESS is not set
+CONFIG_UNWINDER_PROLOGUE=y
+# end of loongarch Debugging
+
+#
+# Kernel Testing and Coverage
+#
+# CONFIG_KUNIT is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FUNCTION_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_ARCH_HAS_KCOV=y
+CONFIG_CC_HAS_SANCOV_TRACE_PC=y
+# CONFIG_KCOV is not set
 # CONFIG_RUNTIME_TESTING_MENU is not set
+# end of Kernel Testing and Coverage
+
+#
+# Rust hacking
+#
+# end of Rust hacking
+# end of Kernel hacking
diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig
index db41cbf5efd4..365f27c124b4 100644
--- a/arch/loongarch/configs/anolis_defconfig
+++ b/arch/loongarch/configs/anolis_defconfig
@@ -1,147 +1,904 @@
 #
-## Automatically generated file; DO NOT EDIT.
-## Linux/loongarch 6.6.7 Kernel Configuration
-##
+# Automatically generated file; DO NOT EDIT.
+# Linux/loongarch 6.6.7 Kernel Configuration
+#
+CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)"
+CONFIG_CC_IS_GCC=y
+CONFIG_GCC_VERSION=200000
+CONFIG_CLANG_VERSION=0
+CONFIG_AS_IS_GNU=y
+CONFIG_AS_VERSION=25000
+CONFIG_LD_IS_BFD=y
+CONFIG_LD_VERSION=25000
+CONFIG_LLD_VERSION=0
+CONFIG_CC_CAN_LINK=y
+CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
+CONFIG_TOOLS_SUPPORT_RELR=y
+CONFIG_CC_HAS_ASM_INLINE=y
+CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
+CONFIG_PAHOLE_VERSION=117
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_TABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+# CONFIG_COMPILE_TEST is not set
+# CONFIG_WERROR is not set
+CONFIG_LOCALVERSION=""
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_BUILD_SALT=""
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_XZ=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_HAVE_KERNEL_LZ4=y
+CONFIG_HAVE_KERNEL_ZSTD=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_XZ is not set
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
+# CONFIG_KERNEL_ZSTD is not set
+CONFIG_DEFAULT_INIT=""
+CONFIG_DEFAULT_HOSTNAME="(none)"
 CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_WATCH_QUEUE is not set
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_INJECTION=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+# CONFIG_GENERIC_IRQ_DEBUGFS is not set
+# end of IRQ subsystem
+
+CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
+CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+# CONFIG_NO_HZ_FULL is not set
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
+# end of Timers subsystem
+
+CONFIG_BPF=y
+CONFIG_HAVE_EBPF_JIT=y
+
+#
+# BPF subsystem
+#
 CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+# CONFIG_BPF_JIT_ALWAYS_ON is not set
 # CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_USERMODE_DRIVER=y
+# CONFIG_BPF_PRELOAD is not set
+# CONFIG_BPF_LSM is not set
+# end of BPF subsystem
+
+CONFIG_PREEMPT_VOLUNTARY_BUILD=y
+# CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+# CONFIG_SCHED_CORE is not set
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
 CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_SCHED_AVG_IRQ=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
 CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+# CONFIG_PSI_DEFAULT_DISABLED is not set
+# end of CPU/Task time and stats accounting
+
+CONFIG_CPU_ISOLATION=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_RCU_EXPERT is not set
+CONFIG_TREE_SRCU=y
+CONFIG_TASKS_RCU_GENERIC=y
+CONFIG_TASKS_RUDE_RCU=y
+CONFIG_TASKS_TRACE_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_NEED_SEGCBLIST=y
+# end of RCU Subsystem
+
+# CONFIG_IKCONFIG is not set
+# CONFIG_IKHEADERS is not set
 CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
+# CONFIG_PRINTK_INDEX is not set
+CONFIG_GENERIC_SCHED_CLOCK=y
+
+#
+# Scheduler features
+#
+# end of Scheduler features
+
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_CC_HAS_INT128=y
+CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+CONFIG_GCC11_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
 CONFIG_NUMA_BALANCING=y
+CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
+CONFIG_CGROUPS=y
+CONFIG_PAGE_COUNTER=y
+# CONFIG_CGROUP_FAVOR_DYNMODS is not set
 CONFIG_MEMCG=y
+CONFIG_MEMCG_KMEM=y
 CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_WRITEBACK=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_MM_CID=y
 CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_CGROUP_BPF=y
+# CONFIG_CGROUP_MISC is not set
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_SOCK_CGROUP_DATA=y
 CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_TIME_NS=y
+CONFIG_IPC_NS=y
 CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_RD_LZ4=y
+CONFIG_RD_ZSTD=y
+# CONFIG_BOOT_CONFIG is not set
+CONFIG_INITRAMFS_PRESERVE_MTIME=y
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_LD_ORPHAN_WARN=y
+CONFIG_LD_ORPHAN_WARN_LEVEL="warn"
+CONFIG_SYSCTL=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y
+CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y
 CONFIG_EXPERT=y
+CONFIG_MULTIUSER=y
+# CONFIG_SGETMASK_SYSCALL is not set
+CONFIG_SYSFS_SYSCALL=y
+CONFIG_FHANDLE=y
+CONFIG_POSIX_TIMERS=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_FUTEX_PI=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_IO_URING=y
+CONFIG_ADVISE_SYSCALLS=y
+CONFIG_MEMBARRIER=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_SELFTEST is not set
 CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_BASE_RELATIVE=y
+CONFIG_KCMP=y
+CONFIG_RSEQ=y
+CONFIG_CACHESTAT_SYSCALL=y
+# CONFIG_DEBUG_RSEQ is not set
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+# CONFIG_PC104 is not set
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# end of Kernel Performance Events And Counters
+
+CONFIG_SYSTEM_DATA_VERIFICATION=y
 CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+
+#
+# Kexec and crash features
+#
+CONFIG_CRASH_CORE=y
+CONFIG_KEXEC_CORE=y
 CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
+# end of Kexec and crash features
+# end of General setup
+
+CONFIG_LOONGARCH=y
+CONFIG_64BIT=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_L1_CACHE_SHIFT=6
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_MACH_LOONGSON64=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_PGTABLE_3LEVEL=y
+CONFIG_PGTABLE_LEVELS=3
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_AS_HAS_EXPLICIT_RELOCS=y
+CONFIG_AS_HAS_FCSR_CLASS=y
+CONFIG_AS_HAS_LSX_EXTENSION=y
+CONFIG_AS_HAS_LASX_EXTENSION=y
+CONFIG_AS_HAS_LBT_EXTENSION=y
+CONFIG_AS_HAS_LVZ_EXTENSION=y
+
+#
+# Kernel type and options
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+# CONFIG_4KB_3LEVEL is not set
+# CONFIG_4KB_4LEVEL is not set
+# CONFIG_16KB_2LEVEL is not set
+CONFIG_16KB_3LEVEL=y
+# CONFIG_64KB_2LEVEL is not set
+# CONFIG_64KB_3LEVEL is not set
+CONFIG_CMDLINE=""
+CONFIG_CMDLINE_BOOTLOADER=y
+# CONFIG_CMDLINE_EXTEND is not set
+# CONFIG_CMDLINE_FORCE is not set
+CONFIG_DMI=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_SCHED_SMT=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=6
+CONFIG_ARCH_FORCE_MAX_ORDER=11
+CONFIG_ARCH_IOREMAP=y
+CONFIG_ARCH_WRITECOMBINE=y
+CONFIG_ARCH_STRICT_ALIGN=y
+CONFIG_CPU_HAS_FPU=y
 CONFIG_CPU_HAS_LSX=y
 CONFIG_CPU_HAS_LASX=y
+CONFIG_CPU_HAS_LBT=y
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_ARCH_SUPPORTS_KEXEC=y
+CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y
+CONFIG_ARCH_SELECTS_CRASH_DUMP=y
+CONFIG_RELOCATABLE=y
 CONFIG_RANDOMIZE_BASE=y
+CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000
+CONFIG_SECCOMP=y
+# end of Kernel type and options
+
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_ENABLE_THP_MIGRATION=y
+CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_MMU=y
+CONFIG_ARCH_MMAP_RND_BITS_MIN=12
+CONFIG_ARCH_MMAP_RND_BITS_MAX=18
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+
+#
+# Power management options
+#
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set
+
+#
+# CPU frequency scaling drivers
+#
+# CONFIG_CPUFREQ_DT is not set
+# CONFIG_CPUFREQ_DT_PLATDEV is not set
 CONFIG_LOONGSON3_ACPI_CPUFREQ=y
+# end of CPU Frequency scaling
+
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_SUSPEND_SKIP_SYNC is not set
+CONFIG_HIBERNATE_CALLBACKS=y
 CONFIG_HIBERNATION=y
+CONFIG_HIBERNATION_SNAPSHOT_DEV=y
+CONFIG_PM_STD_PARTITION=""
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_USERSPACE_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_CLK=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+CONFIG_CPU_PM=y
+# CONFIG_ENERGY_MODEL is not set
+CONFIG_ARCH_SUPPORTS_ACPI=y
+CONFIG_ACPI=y
+CONFIG_ACPI_GENERIC_GSI=y
+CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
+# CONFIG_ACPI_DEBUGGER is not set
 CONFIG_ACPI_SPCR_TABLE=y
+CONFIG_ACPI_SLEEP=y
+# CONFIG_ACPI_EC_DEBUGFS is not set
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=y
+CONFIG_ACPI_FAN=y
 CONFIG_ACPI_TAD=y
 CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_CPU_FREQ_PSS=y
+CONFIG_ACPI_PROCESSOR_IDLE=y
+CONFIG_ACPI_MCFG=y
+CONFIG_ACPI_PROCESSOR=y
 CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_THERMAL=y
+CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
+CONFIG_ACPI_TABLE_UPGRADE=y
+# CONFIG_ACPI_DEBUG is not set
 CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_CONTAINER=y
 CONFIG_ACPI_HOTPLUG_MEMORY=y
+# CONFIG_ACPI_HED is not set
+# CONFIG_ACPI_CUSTOM_METHOD is not set
+# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
+CONFIG_ACPI_NUMA=y
+# CONFIG_ACPI_HMAT is not set
+CONFIG_ACPI_WATCHDOG=y
+# CONFIG_ACPI_CONFIGFS is not set
+# CONFIG_ACPI_PFRUT is not set
+CONFIG_ACPI_PPTT=y
+# CONFIG_ACPI_FFH is not set
+# CONFIG_PMIC_OPREGION is not set
+# end of Power management options
+
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_DIRTY_RING=y
+CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_MMIO=y
+CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
+CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y
+CONFIG_KVM_XFER_TO_GUEST_WORK=y
+CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
 CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
+CONFIG_KVM=y
+
+#
+# General architecture-dependent options
+#
+CONFIG_GENERIC_ENTRY=y
+CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
+# CONFIG_STATIC_KEYS_SELFTEST is not set
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_UPROBES=y
+CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_KRETPROBE_ON_RETHOOK=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
+CONFIG_HAVE_NMI=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
+CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y
+CONFIG_ARCH_WANTS_NO_INSTR=y
+CONFIG_HAVE_ASM_MODVERSIONS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_RSEQ=y
+CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
+CONFIG_MMU_GATHER_MERGE_VMAS=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y
+CONFIG_HAVE_ARCH_SECCOMP=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP_FILTER=y
+# CONFIG_SECCOMP_CACHE_DEBUG is not set
+CONFIG_HAVE_STACKPROTECTOR=y
+CONFIG_STACKPROTECTOR=y
+CONFIG_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
+CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
+CONFIG_LTO_NONE=y
+CONFIG_HAVE_CONTEXT_TRACKING_USER=y
+CONFIG_HAVE_TIF_NOHZ=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_ARCH_WANT_PMD_MKWRITE=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
+CONFIG_HAVE_EXIT_THREAD=y
+CONFIG_ARCH_MMAP_RND_BITS=12
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y
+# CONFIG_COMPAT_32BIT_TIME is not set
+CONFIG_ARCH_HAS_PHYS_TO_DMA=y
+CONFIG_ARCH_USE_MEMREMAP_PROT=y
+# CONFIG_LOCK_EVENT_COUNTS is not set
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# end of GCOV-based kernel profiling
+
+CONFIG_HAVE_GCC_PLUGINS=y
+CONFIG_GCC_PLUGINS=y
+# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set
+CONFIG_FUNCTION_ALIGNMENT=0
+# end of General architecture-dependent options
+
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULE_SIG_FORMAT=y
 CONFIG_MODULES=y
+# CONFIG_MODULE_DEBUG is not set
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set
 CONFIG_MODVERSIONS=y
+CONFIG_ASM_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG=y
+# CONFIG_MODULE_SIG_FORCE is not set
+CONFIG_MODULE_SIG_ALL=y
+# CONFIG_MODULE_SIG_SHA1 is not set
+# CONFIG_MODULE_SIG_SHA224 is not set
 CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_MODULE_SIG_SHA384 is not set
+# CONFIG_MODULE_SIG_SHA512 is not set
+CONFIG_MODULE_SIG_HASH="sha256"
+CONFIG_MODULE_COMPRESS_NONE=y
+# CONFIG_MODULE_COMPRESS_GZIP is not set
+# CONFIG_MODULE_COMPRESS_XZ is not set
+# CONFIG_MODULE_COMPRESS_ZSTD is not set
+# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
+CONFIG_MODPROBE_PATH="/sbin/modprobe"
+# CONFIG_TRIM_UNUSED_KSYMS is not set
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+CONFIG_BLOCK_LEGACY_AUTOLOAD=y
+CONFIG_BLK_CGROUP_RWSTAT=y
+CONFIG_BLK_CGROUP_PUNT_BIO=y
+CONFIG_BLK_DEV_BSG_COMMON=y
+CONFIG_BLK_ICQ=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_INTEGRITY_T10=m
 CONFIG_BLK_DEV_ZONED=y
 CONFIG_BLK_DEV_THROTTLING=y
+# CONFIG_BLK_DEV_THROTTLING_LOW is not set
 CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_MQ=y
+# CONFIG_BLK_CGROUP_IOLATENCY is not set
+# CONFIG_BLK_CGROUP_FC_APPID is not set
+# CONFIG_BLK_CGROUP_IOCOST is not set
+# CONFIG_BLK_CGROUP_IOPRIO is not set
+CONFIG_BLK_DEBUG_FS=y
+CONFIG_BLK_DEBUG_FS_ZONED=y
+# CONFIG_BLK_SED_OPAL is not set
+# CONFIG_BLK_INLINE_ENCRYPTION is not set
+
+#
+# Partition Types
+#
 CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_AIX_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
 CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+# end of Partition Types
+
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_BLK_PM=y
+CONFIG_BLOCK_HOLDER_DEPRECATED=y
+CONFIG_BLK_MQ_STACKING=y
+
+#
+# IO Schedulers
+#
+CONFIG_MQ_IOSCHED_DEADLINE=y
+CONFIG_MQ_IOSCHED_KYBER=y
 CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+# CONFIG_BFQ_CGROUP_DEBUG is not set
+# end of IO Schedulers
+
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_PADATA=y
+CONFIG_ASN1=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_READ_LOCK=y
+CONFIG_ARCH_INLINE_READ_LOCK_BH=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_READ_UNLOCK=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_WRITE_LOCK=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_SPIN_TRYLOCK=y
+CONFIG_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_INLINE_SPIN_LOCK=y
+CONFIG_INLINE_SPIN_LOCK_BH=y
+CONFIG_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_READ_LOCK=y
+CONFIG_INLINE_READ_LOCK_BH=y
+CONFIG_INLINE_READ_LOCK_IRQ=y
+CONFIG_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_INLINE_READ_UNLOCK=y
+CONFIG_INLINE_READ_UNLOCK_BH=y
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_WRITE_LOCK=y
+CONFIG_INLINE_WRITE_LOCK_BH=y
+CONFIG_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_INLINE_WRITE_UNLOCK=y
+CONFIG_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
+CONFIG_CK_KABI_RESERVE=y
+CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y
+CONFIG_FREEZER=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_STATE=y
+CONFIG_ELFCORE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
 CONFIG_BINFMT_MISC=m
+CONFIG_COREDUMP=y
+# end of Executable file formats
+
+#
+# Memory Management options
+#
+CONFIG_ZPOOL=y
+CONFIG_SWAP=y
 CONFIG_ZSWAP=y
+# CONFIG_ZSWAP_DEFAULT_ON is not set
+# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
 CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd"
+CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
+CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
+CONFIG_ZBUD=y
 CONFIG_Z3FOLD=y
+CONFIG_ZSMALLOC=y
 CONFIG_ZSMALLOC_STAT=y
+CONFIG_ZSMALLOC_CHAIN_SIZE=8
+
+#
+# SLAB allocator options
+#
+# CONFIG_SLAB_DEPRECATED is not set
+CONFIG_SLUB=y
+# CONFIG_SLUB_TINY is not set
+CONFIG_SLAB_MERGE_DEFAULT=y
 CONFIG_SLAB_FREELIST_RANDOM=y
+# CONFIG_SLAB_FREELIST_HARDENED is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# CONFIG_RANDOM_KMALLOC_CACHES is not set
+# end of SLAB allocator options
+
 # CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
+CONFIG_HAVE_FAST_GUP=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_NUMA_KEEP_MEMINFO=y
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
 CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MEMORY_BALLOON=y
+CONFIG_BALLOON_COMPACTION=y
+CONFIG_COMPACTION=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_PAGE_REPORTING=y
+CONFIG_MIGRATION=y
+CONFIG_CONTIG_ALLOC=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU_NOTIFIER=y
 CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+# CONFIG_READ_ONLY_THP_FOR_FS is not set
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+# CONFIG_CMA_DEBUGFS is not set
+# CONFIG_CMA_SYSFS is not set
+CONFIG_CMA_AREAS=19
+# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
+CONFIG_PAGE_IDLE_FLAG=y
 CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_ZONE_DMA32=y
+CONFIG_HMM_MIRROR=y
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_TEST is not set
+# CONFIG_DMAPOOL_TEST is not set
+CONFIG_ARCH_HAS_PTE_SPECIAL=y
+CONFIG_MEMFD_CREATE=y
+# CONFIG_ANON_VMA_NAME is not set
 CONFIG_USERFAULTFD=y
+# CONFIG_LRU_GEN is not set
+CONFIG_LOCK_MM_AND_FIND_VMA=y
+
+#
+# Data Access Monitoring
+#
+# CONFIG_DAMON is not set
+# end of Data Access Monitoring
+# end of Memory Management options
+
 CONFIG_NET=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_XGRESS=y
+CONFIG_NET_REDIRECT=y
+CONFIG_SKB_EXTENSIONS=y
+
+#
+# Networking options
+#
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_SCM=y
+CONFIG_AF_UNIX_OOB=y
 CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_TLS_DEVICE=y
 CONFIG_TLS_TOE=y
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=y
 CONFIG_XFRM_USER=y
 CONFIG_XFRM_INTERFACE=m
 CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
 CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_AH=m
+CONFIG_XFRM_ESP=m
+CONFIG_XFRM_IPCOMP=m
 CONFIG_NET_KEY=m
 CONFIG_NET_KEY_MIGRATE=y
+CONFIG_XFRM_ESPINTCP=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_XDP_SOCKETS=y
 CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_NET_HANDSHAKE=y
+CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_FIB_TRIE_STATS=y
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE_COMMON=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
+CONFIG_NET_UDP_TUNNEL=m
+CONFIG_NET_FOU=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
+CONFIG_INET_TABLE_PERTURB_ORDER=16
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
 CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_INET_RAW_DIAG=m
 CONFIG_INET_DIAG_DESTROY=y
 CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
 CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
 CONFIG_TCP_CONG_HSTCP=m
 CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
 CONFIG_TCP_CONG_NV=m
 CONFIG_TCP_CONG_SCALABLE=m
 CONFIG_TCP_CONG_LP=m
@@ -151,6 +908,8 @@ CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_TCP_CONG_DCTCP=m
 CONFIG_TCP_CONG_CDG=m
 CONFIG_TCP_CONG_BBR=m
+CONFIG_DEFAULT_RENO=y
+CONFIG_DEFAULT_TCP_CONG="reno"
 CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
@@ -163,9 +922,17 @@ CONFIG_INET6_ESPINTCP=y
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_ILA=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
 CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT=m
 CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
 CONFIG_IPV6_GRE=m
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
@@ -173,21 +940,53 @@ CONFIG_IPV6_PIMSM_V2=y
 CONFIG_IPV6_SEG6_LWTUNNEL=y
 CONFIG_IPV6_SEG6_HMAC=y
 CONFIG_IPV6_RPL_LWTUNNEL=y
+# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
 CONFIG_NETLABEL=y
 CONFIG_MPTCP=y
+CONFIG_INET_MPTCP_DIAG=m
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PTP_CLASSIFY=y
 CONFIG_NETWORK_PHY_TIMESTAMPING=y
 CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
 CONFIG_BRIDGE_NETFILTER=m
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_EGRESS=y
+CONFIG_NETFILTER_SKIP_EGRESS=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_FAMILY_BRIDGE=y
+CONFIG_NETFILTER_FAMILY_ARP=y
+CONFIG_NETFILTER_BPF_LINK=y
+# CONFIG_NETFILTER_NETLINK_HOOK is not set
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_NETLINK_OSF=m
 CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_SYSLOG=m
+CONFIG_NETFILTER_CONNCOUNT=m
+CONFIG_NF_CONNTRACK_MARK=y
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_ZONES=y
+# CONFIG_NF_CONNTRACK_PROCFS is not set
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CONNTRACK_OVS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
 CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
 CONFIG_NF_CONNTRACK_NETBIOS_NS=m
 CONFIG_NF_CONNTRACK_SNMP=m
 CONFIG_NF_CONNTRACK_PPTP=m
@@ -198,6 +997,16 @@ CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
 CONFIG_NF_CT_NETLINK_HELPER=m
 CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_REDIRECT=y
+CONFIG_NF_NAT_MASQUERADE=y
+CONFIG_NF_NAT_OVS=y
+CONFIG_NETFILTER_SYNPROXY=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
@@ -214,41 +1023,67 @@ CONFIG_NFT_TUNNEL=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_QUOTA=m
 CONFIG_NFT_REJECT=m
+CONFIG_NFT_REJECT_INET=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB=m
 CONFIG_NFT_FIB_INET=m
 CONFIG_NFT_XFRM=m
 CONFIG_NFT_SOCKET=m
 CONFIG_NFT_OSF=m
 CONFIG_NFT_TPROXY=m
 CONFIG_NFT_SYNPROXY=m
+CONFIG_NF_DUP_NETDEV=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
+# CONFIG_NFT_REJECT_NETDEV is not set
 CONFIG_NF_FLOW_TABLE_INET=m
 CONFIG_NF_FLOW_TABLE=m
+# CONFIG_NF_FLOW_TABLE_PROCFS is not set
 CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
 CONFIG_NETFILTER_XT_SET=m
+
+#
+# Xtables targets
+#
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
 CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
 CONFIG_NETFILTER_XT_TARGET_HMARK=m
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
 CONFIG_NETFILTER_XT_TARGET_LED=m
 CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_NAT=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
 CONFIG_NETFILTER_XT_TARGET_TEE=m
 CONFIG_NETFILTER_XT_TARGET_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_TRACE=m
 CONFIG_NETFILTER_XT_TARGET_SECMARK=m
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
 CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+
+#
+# Xtables matches
+#
 CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
 CONFIG_NETFILTER_XT_MATCH_BPF=m
 CONFIG_NETFILTER_XT_MATCH_CGROUP=m
@@ -260,11 +1095,14 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
 CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
@@ -284,6 +1122,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -291,7 +1130,10 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
+# end of Core Netfilter Configuration
+
CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
CONFIG_IP_SET_BITMAP_IP=m
CONFIG_IP_SET_BITMAP_IPMAC=m
CONFIG_IP_SET_BITMAP_PORT=m
@@ -311,11 +1153,21 @@ CONFIG_IP_SET_LIST_SET=m
CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
CONFIG_IP_VS_DEBUG=y
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
CONFIG_IP_VS_PROTO_ESP=y
CONFIG_IP_VS_PROTO_AH=y
CONFIG_IP_VS_PROTO_SCTP=y
+
+#
+# IPVS scheduler
+#
CONFIG_IP_VS_RR=m
CONFIG_IP_VS_WRR=m
CONFIG_IP_VS_LC=m
@@ -329,13 +1181,43 @@ CONFIG_IP_VS_SH=m
CONFIG_IP_VS_MH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
+# CONFIG_IP_VS_TWOS is not set
+
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
+#
+# IPVS MH scheduler
+#
+CONFIG_IP_VS_MH_TAB_INDEX=12
+
+#
+# IPVS application helper
+#
CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_NFCT=y
CONFIG_IP_VS_PE_SIP=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
+CONFIG_NF_TPROXY_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_REJECT_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_DUP_IPV4=m
CONFIG_NF_LOG_ARP=m
CONFIG_NF_LOG_IPV4=m
+CONFIG_NF_REJECT_IPV4=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -356,8 +1238,20 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
+# end of IP: Netfilter Configuration
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_SOCKET_IPV6=m
+CONFIG_NF_TPROXY_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_REJECT_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
+CONFIG_NF_DUP_IPV6=m
+CONFIG_NF_REJECT_IPV6=m
+CONFIG_NF_LOG_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -379,6 +1273,9 @@ CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
+# end of IPv6: Netfilter Configuration
+
+CONFIG_NF_DEFRAG_IPV6=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
@@ -405,19 +1302,43 @@ CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_BPFILTER=y
+CONFIG_BPFILTER_UMH=m
CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+
+#
+# DCCP CCIDs Configuration
+#
CONFIG_IP_DCCP_CCID2_DEBUG=y
+CONFIG_IP_DCCP_CCID3=y
CONFIG_IP_DCCP_CCID3_DEBUG=y
+CONFIG_IP_DCCP_TFRC_LIB=y
+CONFIG_IP_DCCP_TFRC_DEBUG=y
+# end of DCCP CCIDs Configuration
+
+#
+# DCCP Kernel Hacking
+#
CONFIG_IP_DCCP_DEBUG=y
+# end of DCCP Kernel Hacking
+
+CONFIG_IP_SCTP=m
CONFIG_SCTP_DBG_OBJCNT=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_INET_SCTP_DIAG=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
CONFIG_RDS_DEBUG=y
CONFIG_TIPC=m
CONFIG_TIPC_MEDIA_IB=y
+CONFIG_TIPC_MEDIA_UDP=y
+CONFIG_TIPC_CRYPTO=y
+CONFIG_TIPC_DIAG=m
CONFIG_ATM=m
CONFIG_ATM_CLIP=m
CONFIG_ATM_CLIP_NO_ICMP=y
@@ -426,30 +1347,46 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_ATM_BR2684_IPFILTER=y
CONFIG_L2TP=m
+# CONFIG_L2TP_DEBUGFS is not set
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_MRP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_BRIDGE_MRP=y
+# CONFIG_BRIDGE_CFM is not set
CONFIG_NET_DSA=m
+# CONFIG_NET_DSA_TAG_NONE is not set
CONFIG_NET_DSA_TAG_AR9331=m
+CONFIG_NET_DSA_TAG_BRCM_COMMON=m
CONFIG_NET_DSA_TAG_BRCM=m
+# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set
CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
+# CONFIG_NET_DSA_TAG_HELLCREEK is not set
CONFIG_NET_DSA_TAG_GSWIP=m
+CONFIG_NET_DSA_TAG_DSA_COMMON=m
CONFIG_NET_DSA_TAG_DSA=m
CONFIG_NET_DSA_TAG_EDSA=m
CONFIG_NET_DSA_TAG_MTK=m
CONFIG_NET_DSA_TAG_KSZ=m
CONFIG_NET_DSA_TAG_OCELOT=m
+# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set
CONFIG_NET_DSA_TAG_QCA=m
CONFIG_NET_DSA_TAG_RTL4_A=m
+# CONFIG_NET_DSA_TAG_RTL8_4 is not set
+# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set
CONFIG_NET_DSA_TAG_LAN9303=m
CONFIG_NET_DSA_TAG_SJA1105=m
CONFIG_NET_DSA_TAG_TRAILER=m
+# CONFIG_NET_DSA_TAG_XRS700X is not set
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_LLC=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
@@ -459,12 +1396,18 @@ CONFIG_X25=m
CONFIG_LAPB=m
CONFIG_PHONET=m
CONFIG_6LOWPAN=m
+# CONFIG_6LOWPAN_DEBUGFS is not set
# CONFIG_6LOWPAN_NHC is not set
CONFIG_IEEE802154=m
CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
+CONFIG_IEEE802154_SOCKET=m
CONFIG_IEEE802154_6LOWPAN=m
CONFIG_MAC802154=m
CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
@@ -476,6 +1419,7 @@ CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_CBS=m
CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_MQPRIO_LIB=m
CONFIG_NET_SCH_TAPRIO=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_NETEM=m
@@ -495,7 +1439,18 @@ CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_DEFAULT=y
+# CONFIG_DEFAULT_FQ is not set
+# CONFIG_DEFAULT_CODEL is not set
CONFIG_DEFAULT_FQ_CODEL=y
+# CONFIG_DEFAULT_FQ_PIE is not set
+# CONFIG_DEFAULT_SFQ is not set
+# CONFIG_DEFAULT_PFIFO_FAST is not set
+CONFIG_DEFAULT_NET_SCH="fq_codel"
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
@@ -508,11 +1463,13 @@ CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=m
CONFIG_NET_EMATCH_NBYTE=m
CONFIG_NET_EMATCH_U32=m
CONFIG_NET_EMATCH_META=m
CONFIG_NET_EMATCH_TEXT=m
+# CONFIG_NET_EMATCH_CANID is not set
CONFIG_NET_EMATCH_IPSET=m
CONFIG_NET_EMATCH_IPT=m
CONFIG_NET_CLS_ACT=y
@@ -541,29 +1498,70 @@ CONFIG_NET_IFE_SKBMARK=m
CONFIG_NET_IFE_SKBPRIO=m
CONFIG_NET_IFE_SKBTCINDEX=m
CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_SCH_FIFO=y
CONFIG_DCB=y
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
+CONFIG_BATMAN_ADV_BATMAN_V=y
+CONFIG_BATMAN_ADV_BLA=y
+CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
+CONFIG_BATMAN_ADV_MCAST=y
CONFIG_BATMAN_ADV_DEBUG=y
+# CONFIG_BATMAN_ADV_TRACING is not set
CONFIG_OPENVSWITCH=m
+CONFIG_OPENVSWITCH_GRE=m
+CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_OPENVSWITCH_GENEVE=m
CONFIG_VSOCKETS=m
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VSOCKETS_LOOPBACK=m
CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
CONFIG_NETLINK_DIAG=m
+CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=y
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_NSH=y
CONFIG_HSR=m
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_L3_MASTER_DEV=y
CONFIG_QRTR=m
CONFIG_QRTR_TUN=m
CONFIG_NET_NCSI=y
CONFIG_NCSI_OEM_CMD_GET_MAC=y
+# CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set
+CONFIG_PCPU_DEV_REFCNT=y
+CONFIG_MAX_SKB_FRAGS=17
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_XPS=y
CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
CONFIG_BPF_STREAM_PARSER=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
CONFIG_NET_PKTGEN=m
+# CONFIG_NET_DROP_MONITOR is not set
+# end of Network testing
+# end of Networking options
+
+# CONFIG_HAMRADIO is not set
CONFIG_CAN=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_BCM=m
+CONFIG_CAN_GW=m
+# CONFIG_CAN_J1939 is not set
+# CONFIG_CAN_ISOTP is not set
CONFIG_BT=m
+CONFIG_BT_BREDR=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -572,95 +1570,514 @@ CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_CMTP=m
CONFIG_BT_HIDP=m
CONFIG_BT_HS=y
+CONFIG_BT_LE=y
+CONFIG_BT_LE_L2CAP_ECRED=y
+# CONFIG_BT_6LOWPAN is not set
+# CONFIG_BT_LEDS is not set
+# CONFIG_BT_MSFTEXT is not set
+# CONFIG_BT_AOSPEXT is not set
+CONFIG_BT_DEBUGFS=y
+# CONFIG_BT_SELFTEST is not set
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_INTEL=m
+CONFIG_BT_RTL=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
+CONFIG_BT_HCIBTUSB_POLL_SYNC=y
# CONFIG_BT_HCIBTUSB_BCM is not set
+# CONFIG_BT_HCIBTUSB_MTK is not set
+CONFIG_BT_HCIBTUSB_RTL=y
CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
CONFIG_BT_HCIUART_ATH3K=y
+# CONFIG_BT_HCIUART_INTEL is not set
+# CONFIG_BT_HCIUART_AG6XX is not set
CONFIG_BT_HCIBCM203X=m
+# CONFIG_BT_HCIBCM4377 is not set
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
CONFIG_BT_ATH3K=m
+# CONFIG_BT_MTKSDIO is not set
+# CONFIG_BT_VIRTIO is not set
+# end of Bluetooth device drivers
+
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AF_KCM is not set
+CONFIG_STREAM_PARSER=y
+# CONFIG_MCTP is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
+CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+CONFIG_CFG80211_CRDA_SUPPORT=y
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
CONFIG_RFKILL=m
+CONFIG_RFKILL_LEDS=y
CONFIG_RFKILL_INPUT=y
+# CONFIG_RFKILL_GPIO is not set
CONFIG_NET_9P=y
+CONFIG_NET_9P_FD=y
CONFIG_NET_9P_VIRTIO=y
+# CONFIG_NET_9P_RDMA is not set
+# CONFIG_NET_9P_DEBUG is not set
+# CONFIG_CAIF is not set
+CONFIG_CEPH_LIB=m
+# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
+# CONFIG_NFC is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
+CONFIG_LWTUNNEL=y
+CONFIG_LWTUNNEL_BPF=y
+CONFIG_DST_CACHE=y
+CONFIG_GRO_CELLS=y
+CONFIG_SOCK_VALIDATE_XMIT=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_SOCK_MSG=y
+CONFIG_NET_DEVLINK=y
+CONFIG_PAGE_POOL=y
+# CONFIG_PAGE_POOL_STATS is not set
+CONFIG_FAILOVER=m
+CONFIG_ETHTOOL_NETLINK=y
+
+#
+# Device Drivers
+#
+CONFIG_HAVE_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCIEAER=y
CONFIG_PCIEAER_INJECT=m
CONFIG_PCIE_ECRC=y
+CONFIG_PCIEASPM=y
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_PCIE_PME=y
CONFIG_PCIE_DPC=y
+# CONFIG_PCIE_PTM is not set
+# CONFIG_PCIE_EDR is not set
+CONFIG_PCI_MSI=y
+CONFIG_PCI_MSI_ARCH_FALLBACKS=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
CONFIG_PCI_STUB=y
CONFIG_PCI_PF_STUB=m
+CONFIG_PCI_ATS=y
+CONFIG_PCI_ECAM=y
CONFIG_PCI_IOV=y
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+CONFIG_PCI_LABEL=y
+# CONFIG_PCI_DYNAMIC_OF_NODES is not set
+# CONFIG_PCIE_BUS_TUNE_OFF is not set
+CONFIG_PCIE_BUS_DEFAULT=y
+# CONFIG_PCIE_BUS_SAFE is not set
+# CONFIG_PCIE_BUS_PERFORMANCE is not set
+# CONFIG_PCIE_BUS_PEER2PEER is not set
+CONFIG_VGA_ARB=y
CONFIG_VGA_ARB_MAX_GPUS=64
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=y
+# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
+# CONFIG_HOTPLUG_PCI_CPCI is not set
CONFIG_HOTPLUG_PCI_SHPC=y
+
+#
+# PCI controller drivers
+#
+# CONFIG_PCI_FTPCI100 is not set
+# CONFIG_PCI_HOST_GENERIC is not set
+CONFIG_PCI_LOONGSON=y
+# CONFIG_PCIE_MICROCHIP_HOST is not set
+# CONFIG_PCIE_XILINX is not set
+
+#
+# Cadence-based PCIe controllers
+#
+# CONFIG_PCIE_CADENCE_PLAT_HOST is not set
+# CONFIG_PCI_J721E_HOST is not set
+# end of Cadence-based PCIe controllers
+
+#
+# DesignWare-based PCIe controllers
+#
+# CONFIG_PCI_MESON is not set
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+# end of DesignWare-based PCIe controllers
+
+#
+# Mobiveil-based PCIe controllers
+#
+# end of Mobiveil-based PCIe controllers
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# CONFIG_PCI_ENDPOINT is not set
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# CONFIG_PCI_SW_SWITCHTEC is not set
+# end of PCI switch controller drivers
+
+# CONFIG_CXL_BUS is not set
CONFIG_PCCARD=m
# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
CONFIG_YENTA=m
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
CONFIG_RAPIDIO=y
CONFIG_RAPIDIO_TSI721=y
+CONFIG_RAPIDIO_DISC_TIMEOUT=30
CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
+# CONFIG_RAPIDIO_DMA_ENGINE is not set
+# CONFIG_RAPIDIO_DEBUG is not set
CONFIG_RAPIDIO_ENUM_BASIC=m
CONFIG_RAPIDIO_CHMAN=m
CONFIG_RAPIDIO_MPORT_CDEV=m
+
+#
+# RapidIO Switch drivers
+#
+# CONFIG_RAPIDIO_CPS_XX is not set
+# CONFIG_RAPIDIO_CPS_GEN2 is not set
+# CONFIG_RAPIDIO_RXS_GEN3 is not set
+# end of RapidIO Switch drivers
+
+#
+# Generic Driver Options
+#
+CONFIG_AUXILIARY_BUS=y
CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH=""
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEVTMPFS_SAFE is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_DEBUG=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_FW_LOADER_USER_HELPER is not set
CONFIG_FW_LOADER_COMPRESS=y
+CONFIG_FW_LOADER_COMPRESS_XZ=y
+# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set
+CONFIG_FW_CACHE=y
+# CONFIG_FW_UPLOAD is not set
+# end of Firmware loader
+
+CONFIG_WANT_DEV_COREDUMP=y
+CONFIG_ALLOW_DEV_COREDUMP=y
+CONFIG_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_SOC_BUS=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=m
+CONFIG_REGMAP_SPI=m
+CONFIG_REGMAP_MMIO=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_MOXTET is not set
+# CONFIG_MHI_BUS is not set
+# CONFIG_MHI_BUS_EP is not set
+# end of Bus devices
+
+#
+# Cache Drivers
+#
+# end of Cache Drivers
+
CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+
+#
+# Firmware Drivers
+#
+
+#
+# ARM System Control and Management Interface Protocol
+#
+# end of ARM System Control and Management Interface Protocol
+
+# CONFIG_FIRMWARE_MEMMAP is not set
+CONFIG_DMIID=y
CONFIG_DMI_SYSFS=y
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
CONFIG_ISCSI_IBFT=m
+CONFIG_SYSFB=y
+# CONFIG_SYSFB_SIMPLEFB is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_VARS_PSTORE=m
+# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+CONFIG_EFI_GENERIC_STUB=y
CONFIG_EFI_ZBOOT=y
+# CONFIG_EFI_BOOTLOADER_CONTROL is not set
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
+# CONFIG_RESET_ATTACK_MITIGATION is not set
+# CONFIG_EFI_DISABLE_PCI_DMA is not set
+CONFIG_EFI_EARLYCON=y
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
+# CONFIG_EFI_DISABLE_RUNTIME is not set
+# CONFIG_EFI_COCO_SECRET is not set
+# end of EFI (Extensible Firmware Interface) Support
+
+#
+# Tegra firmware driver
+#
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
+# CONFIG_GNSS is not set
CONFIG_MTD=m
+# CONFIG_MTD_TESTS is not set
+
+#
+# Partition parsers
+#
+# CONFIG_MTD_AR7_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_OF_PARTS=m
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# end of Partition parsers
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=m
CONFIG_MTD_BLOCK=m
+# CONFIG_MTD_BLOCK_RO is not set
+
+#
+# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK.
+#
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_SWAP is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
CONFIG_MTD_CFI=m
CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
+# CONFIG_MTD_ABSENT is not set
+# end of RAM/ROM/Flash chip drivers
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+# end of Mapping drivers for chip access
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_MCHP23K256 is not set
+# CONFIG_MTD_MCHP48L640 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
CONFIG_MTD_BLOCK2MTD=m
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# end of Self-contained MTD device drivers
+
+#
+# NAND
+#
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_RAW_NAND is not set
+# CONFIG_MTD_SPI_NAND is not set
+
+#
+# ECC engine support
+#
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# end of ECC engine support
+# end of NAND
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# end of LPDDR & LPDDR2 PCM memory drivers
+
CONFIG_MTD_SPI_NOR=m
+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
+# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
+CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
+# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_LIMIT=20
+# CONFIG_MTD_UBI_FASTMAP is not set
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_MTD_UBI_BLOCK=y
+# CONFIG_MTD_HYPERBUS is not set
+CONFIG_DTC=y
+CONFIG_OF=y
+# CONFIG_OF_UNITTEST is not set
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_RESERVED_MEM=y
+# CONFIG_OF_OVERLAY is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
CONFIG_PARPORT_SERIAL=m
CONFIG_PARPORT_PC_FIFO=y
+# CONFIG_PARPORT_PC_SUPERIO is not set
CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_PNP=y
# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
CONFIG_BLK_DEV_NULL_BLK=m
+CONFIG_CDROM=m
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
CONFIG_ZRAM=m
+# CONFIG_ZRAM_DEF_COMP_LZORLE is not set
CONFIG_ZRAM_DEF_COMP_ZSTD=y
+# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
+# CONFIG_ZRAM_DEF_COMP_LZO is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
+# CONFIG_ZRAM_DEF_COMP_842 is not set
+CONFIG_ZRAM_DEF_COMP="zstd"
CONFIG_ZRAM_WRITEBACK=y
+# CONFIG_ZRAM_MEMORY_TRACKING is not set
+# CONFIG_ZRAM_MULTI_COMP is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
CONFIG_BLK_DEV_DRBD=m
+# CONFIG_DRBD_FAULT_INJECTION is not set
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_RBD=m
+# CONFIG_BLK_DEV_UBLK is not set
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=m
CONFIG_BLK_DEV_NVME=m
CONFIG_NVME_MULTIPATH=y
+# CONFIG_NVME_VERBOSE_ERRORS is not set
+# CONFIG_NVME_HWMON is not set
+CONFIG_NVME_FABRICS=m
CONFIG_NVME_RDMA=m
CONFIG_NVME_FC=m
CONFIG_NVME_TCP=m
+# CONFIG_NVME_AUTH is not set
CONFIG_NVME_TARGET=m
CONFIG_NVME_TARGET_PASSTHRU=y
CONFIG_NVME_TARGET_LOOP=m
@@ -668,57 +2085,183 @@ CONFIG_NVME_TARGET_RDMA=m
CONFIG_NVME_TARGET_FC=m
CONFIG_NVME_TARGET_FCLOOP=m
CONFIG_NVME_TARGET_TCP=m
+# CONFIG_NVME_TARGET_AUTH is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+CONFIG_SENSORS_LIS3LV02D=m
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+# CONFIG_ICS932S401 is not set
CONFIG_ENCLOSURE_SERVICES=m
+# CONFIG_HP_ILO is not set
CONFIG_APDS9802ALS=m
CONFIG_ISL29003=m
CONFIG_ISL29020=m
CONFIG_SENSORS_TSL2550=m
CONFIG_SENSORS_BH1770=m
CONFIG_SENSORS_APDS990X=m
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+CONFIG_MISC_RTSX=m
+# CONFIG_HISI_HIKEY_USB is not set
+# CONFIG_OPEN_DICE is not set
+# CONFIG_VCPU_STALL_DETECTOR is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
CONFIG_EEPROM_LEGACY=m
CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# end of EEPROM support
+
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# end of Texas Instruments shared transport line discipline
+
CONFIG_SENSORS_LIS3_I2C=m
+CONFIG_ALTERA_STAPL=m
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
CONFIG_MISC_RTSX_PCI=m
CONFIG_MISC_RTSX_USB=m
CONFIG_UACCE=m
CONFIG_PVPANIC=y
+# CONFIG_PVPANIC_MMIO is not set
+# CONFIG_PVPANIC_PCI is not set
+# CONFIG_GP_PCI1XXXX is not set
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
+CONFIG_BLK_DEV_BSG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+# end of SCSI Transports
+
+CONFIG_SCSI_LOWLEVEL=y
CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+# CONFIG_SCSI_CXGB3_ISCSI is not set
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
CONFIG_SCSI_BNX2X_FCOE=m
CONFIG_BE2ISCSI=m
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
CONFIG_SCSI_HPSA=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
CONFIG_SCSI_AACRAID=m
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
CONFIG_SCSI_MVSAS=y
# CONFIG_SCSI_MVSAS_DEBUG is not set
CONFIG_SCSI_MVSAS_TASKLET=y
CONFIG_SCSI_MVUMI=y
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
CONFIG_MEGARAID_NEWGEN=y
CONFIG_MEGARAID_MM=y
CONFIG_MEGARAID_MAILBOX=y
CONFIG_MEGARAID_LEGACY=y
CONFIG_MEGARAID_SAS=m
CONFIG_SCSI_MPT3SAS=y
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
+# CONFIG_SCSI_MPI3MR is not set
CONFIG_SCSI_SMARTPQI=m
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_MYRB is not set
+# CONFIG_SCSI_MYRS is not set
CONFIG_LIBFC=m
CONFIG_LIBFCOE=m
CONFIG_FCOE=m
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FDOMAIN_PCI is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
CONFIG_SCSI_QLOGIC_1280=m
CONFIG_SCSI_QLA_FC=m
CONFIG_TCM_QLA2XXX=m
+# CONFIG_TCM_QLA2XXX_DEBUG is not set
CONFIG_SCSI_QLA_ISCSI=m
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_EFCT is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_BFA_FC is not set
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_CHELSIO_FCOE=m
CONFIG_SCSI_DH=y
@@ -726,25 +2269,141 @@ CONFIG_SCSI_DH_RDAC=y
CONFIG_SCSI_DH_HP_SW=y
CONFIG_SCSI_DH_EMC=y
CONFIG_SCSI_DH_ALUA=y
+# end of SCSI device support
+
CONFIG_ATA=y
+CONFIG_SATA_HOST=y
+CONFIG_PATA_TIMINGS=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_FORCE=y
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
CONFIG_SATA_AHCI=y
+CONFIG_SATA_MOBILE_LPM_POLICY=0
CONFIG_SATA_AHCI_PLATFORM=y
+# CONFIG_AHCI_DWC is not set
+# CONFIG_AHCI_CEVA is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
CONFIG_ATA_PIIX=m
+# CONFIG_SATA_DWC is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_ZHAOXIN is not set
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
CONFIG_PATA_ATIIXP=y
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OF_PLATFORM is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_PARPORT is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
CONFIG_ATA_GENERIC=m
+# CONFIG_PATA_LEGACY is not set
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_BITMAP_FILE=y
CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
+# CONFIG_MD_CLUSTER is not set
CONFIG_BCACHE=m
+# CONFIG_BCACHE_DEBUG is not set
+# CONFIG_BCACHE_CLOSURES_DEBUG is not set
+# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+# CONFIG_DM_UNSTRIPED is not set
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -752,33 +2411,57 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+# CONFIG_DM_MULTIPATH_IOA is not set
CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_FEC is not set
CONFIG_DM_SWITCH=m
CONFIG_DM_LOG_WRITES=m
CONFIG_DM_INTEGRITY=m
+# CONFIG_DM_ZONED is not set
+CONFIG_DM_AUDIT=y
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
CONFIG_TCM_PSCSI=m
CONFIG_TCM_USER2=m
CONFIG_LOOPBACK_TARGET=m
+# CONFIG_TCM_FC is not set
CONFIG_ISCSI_TARGET=m
CONFIG_ISCSI_TARGET_CXGB4=m
+# CONFIG_SBP_TARGET is not set
+# CONFIG_REMOTE_TARGET is not set
CONFIG_FUSION=y
CONFIG_FUSION_SPI=m
+# CONFIG_FUSION_FC is not set
CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
CONFIG_FUSION_CTL=m
CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
CONFIG_FIREWIRE=m
CONFIG_FIREWIRE_OHCI=m
CONFIG_FIREWIRE_SBP2=m
CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_WIREGUARD=m
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
CONFIG_NET_FC=y
CONFIG_IFB=m
CONFIG_NET_TEAM=m
@@ -789,89 +2472,210 @@ CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
CONFIG_IPVLAN=m
CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
CONFIG_NTB_NETDEV=m
CONFIG_RIONET=m
+CONFIG_RIONET_TX_SIZE=128
+CONFIG_RIONET_RX_SIZE=128
CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m CONFIG_NLMON=m CONFIG_NET_VRF=m CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set # CONFIG_ATM_DRIVERS is not set + +# +# Distributed Switch Architecture drivers +# +# CONFIG_B53 is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +# CONFIG_NET_DSA_LOOP is not set +# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set +# CONFIG_NET_DSA_LANTIQ_GSWIP is not set +# CONFIG_NET_DSA_MT7530 is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_AR9331 is not set +# CONFIG_NET_DSA_QCA8K is not set +# CONFIG_NET_DSA_SJA1105 is not set +# CONFIG_NET_DSA_XRS700X_I2C is not set +# CONFIG_NET_DSA_XRS700X_MDIO is not set +# CONFIG_NET_DSA_REALTEK is not set +# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set +# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set # CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set CONFIG_BNX2=y +CONFIG_CNIC=m CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set # CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_NET_VENDOR_CHELSIO=y CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set # CONFIG_NET_VENDOR_CISCO is not set # CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set CONFIG_DNET=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set # CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_NET_VENDOR_HUAWEI=y # CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m +CONFIG_IGB_HWMON=y CONFIG_IGBVF=m CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y CONFIG_I40E=m CONFIG_I40E_DCB=y +CONFIG_IAVF=m CONFIG_I40EVF=m CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y CONFIG_FM10K=m +# CONFIG_IGC is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set # CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y # CONFIG_MLX4_CORE_GEN2 is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y 
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_BRIDGE=y
+CONFIG_MLX5_CLS_ACT=y
+CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
+CONFIG_MLX5_CORE_EN_DCB=y
CONFIG_MLX5_CORE_IPOIB=y
+# CONFIG_MLX5_MACSEC is not set
+# CONFIG_MLX5_EN_IPSEC is not set
+# CONFIG_MLX5_EN_TLS is not set
+CONFIG_MLX5_SW_STEERING=y
+# CONFIG_MLX5_SF is not set
CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
+CONFIG_NET_VENDOR_MICROSOFT=y
# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_FEALNX is not set
# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_NET_VENDOR_NETERION=y
+# CONFIG_S2IO is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
CONFIG_ETHOC=m
+CONFIG_NET_VENDOR_PACKET_ENGINES=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
CONFIG_8139CP=m
CONFIG_8139TOO=m
# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
CONFIG_R8169=m
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
@@ -882,23 +2686,54 @@ CONFIG_R8169=m
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_NET_VENDOR_STMICRO=y
CONFIG_STMMAC_ETH=y
+# CONFIG_STMMAC_SELFTESTS is not set
+CONFIG_STMMAC_PLATFORM=y
+# CONFIG_DWMAC_DWC_QOS_ETH is not set
+CONFIG_DWMAC_GENERIC=y
+# CONFIG_DWMAC_INTEL_PLAT is not set
+CONFIG_DWMAC_LOONGSON=m
+# CONFIG_STMMAC_PCI is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y
+# CONFIG_MSE102X is not set
# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NET_VENDOR_WANGXUN=y
+CONFIG_LIBWX=m
CONFIG_NGBE=m
CONFIG_TXGBE=m
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLINK=y
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
CONFIG_LED_TRIGGER_PHY=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_FIXED_PHY=y
CONFIG_SFP=y
+
+#
+# MII PHY device drivers
+#
CONFIG_AMD_PHY=m
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
CONFIG_AQUANTIA_PHY=m
+# CONFIG_AX88796B_PHY is not set
CONFIG_BROADCOM_PHY=m
+# CONFIG_BCM54140_PHY is not set
CONFIG_BCM7XXX_PHY=m
+# CONFIG_BCM84881_PHY is not set
CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
+CONFIG_BCM_NET_PHYPTP=m
CONFIG_CICADA_PHY=m
CONFIG_CORTINA_PHY=m
CONFIG_DAVICOM_PHY=m
@@ -908,43 +2743,127 @@ CONFIG_INTEL_XWAY_PHY=m
CONFIG_LSI_ET1011C_PHY=m
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=y
+# CONFIG_MARVELL_88Q2XXX_PHY is not set
+# CONFIG_MARVELL_88X2222_PHY is not set
+# CONFIG_MAXLINEAR_GPHY is not set
+# CONFIG_MEDIATEK_GE_PHY is not set
CONFIG_MICREL_PHY=m
+# CONFIG_MICROCHIP_T1S_PHY is not set
+CONFIG_MICROCHIP_PHY=m
CONFIG_MICROCHIP_T1_PHY=m
CONFIG_MICROSEMI_PHY=m
+# CONFIG_MOTORCOMM_PHY is not set
CONFIG_NATIONAL_PHY=m
+# CONFIG_NXP_CBTX_PHY is not set
+# CONFIG_NXP_C45_TJA11XX_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_NCN26000_PHY is not set
CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
CONFIG_RENESAS_PHY=m
CONFIG_ROCKCHIP_PHY=m
+CONFIG_SMSC_PHY=m
CONFIG_STE10XP=m
CONFIG_TERANETICS_PHY=m
CONFIG_DP83822_PHY=m
CONFIG_DP83TC811_PHY=m
CONFIG_DP83848_PHY=m
CONFIG_DP83867_PHY=m
+# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
CONFIG_VITESSE_PHY=m
CONFIG_XILINX_GMII2RGMII=m
CONFIG_MICREL_KS8995MA=m
+# CONFIG_PSE_CONTROLLER is not set
+CONFIG_CAN_DEV=m
CONFIG_CAN_VCAN=m
+# CONFIG_CAN_VXCAN is not set
+CONFIG_CAN_NETLINK=y
+CONFIG_CAN_CALC_BITTIMING=y
+# CONFIG_CAN_CAN327 is not set
+# CONFIG_CAN_FLEXCAN is not set
+# CONFIG_CAN_GRCAN is not set
+# CONFIG_CAN_KVASER_PCIEFD is not set
CONFIG_CAN_SLCAN=m
CONFIG_CAN_C_CAN=m
CONFIG_CAN_C_CAN_PLATFORM=m
CONFIG_CAN_C_CAN_PCI=m
CONFIG_CAN_CC770=m
+# CONFIG_CAN_CC770_ISA is not set
CONFIG_CAN_CC770_PLATFORM=m
+# CONFIG_CAN_CTUCANFD_PCI is not set
+# CONFIG_CAN_CTUCANFD_PLATFORM is not set
+# CONFIG_CAN_IFI_CANFD is not set
+# CONFIG_CAN_M_CAN is not set
+# CONFIG_CAN_PEAK_PCIEFD is not set
CONFIG_CAN_SJA1000=m
CONFIG_CAN_EMS_PCI=m
+# CONFIG_CAN_F81601 is not set
CONFIG_CAN_KVASER_PCI=m
CONFIG_CAN_PEAK_PCI=m
+CONFIG_CAN_PEAK_PCIEC=y
CONFIG_CAN_PLX_PCI=m
+# CONFIG_CAN_SJA1000_ISA is not set
CONFIG_CAN_SJA1000_PLATFORM=m
CONFIG_CAN_SOFTING=m
+
+#
+# CAN SPI interfaces
+#
+# CONFIG_CAN_HI311X is not set
+# CONFIG_CAN_MCP251X is not set
+# CONFIG_CAN_MCP251XFD is not set
+# end of CAN SPI interfaces
+
+#
+# CAN USB interfaces
+#
CONFIG_CAN_8DEV_USB=m
CONFIG_CAN_EMS_USB=m
+# CONFIG_CAN_ESD_USB is not set
+# CONFIG_CAN_ETAS_ES58X is not set
+# CONFIG_CAN_F81604 is not set
+# CONFIG_CAN_GS_USB is not set
CONFIG_CAN_KVASER_USB=m
+# CONFIG_CAN_MCBA_USB is not set
CONFIG_CAN_PEAK_USB=m
+# CONFIG_CAN_UCAN is not set
+# end of CAN USB interfaces
+
+# CONFIG_CAN_DEBUG_DEVICES is not set
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_BUS=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_OF_MDIO=y
+CONFIG_ACPI_MDIO=y
+CONFIG_MDIO_DEVRES=y
CONFIG_MDIO_BITBANG=m
+# CONFIG_MDIO_BCM_UNIMAC is not set
+CONFIG_MDIO_CAVIUM=m
+# CONFIG_MDIO_GPIO is not set
+# CONFIG_MDIO_HISI_FEMAC is not set
+CONFIG_MDIO_I2C=y
+# CONFIG_MDIO_MVUSB is not set
CONFIG_MDIO_MSCC_MIIM=m
+# CONFIG_MDIO_OCTEON is not set
+# CONFIG_MDIO_IPQ4019 is not set
+# CONFIG_MDIO_IPQ8064 is not set
CONFIG_MDIO_THUNDER=m
+
+#
+# MDIO Multiplexers
+#
+# CONFIG_MDIO_BUS_MUX_GPIO is not set
+# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
+# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
+
+#
+# PCS device drivers
+#
+CONFIG_PCS_XPCS=y
+# end of PCS device drivers
+
+# CONFIG_PLIP is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -953,31 +2872,47 @@ CONFIG_PPP_MPPE=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOATM=m
CONFIG_PPPOE=m
+# CONFIG_PPPOE_HASH_BITS_1 is not set
+# CONFIG_PPPOE_HASH_BITS_2 is not set
+CONFIG_PPPOE_HASH_BITS_4=y
+# CONFIG_PPPOE_HASH_BITS_8 is not set
+CONFIG_PPPOE_HASH_BITS=4
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
+CONFIG_SLHC=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDCETHER=m
CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
CONFIG_USB_NET_HUAWEI_CDC_NCM=m
CONFIG_USB_NET_CDC_MBIM=m
CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SR9700 is not set
+# CONFIG_USB_NET_SR9800 is not set
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_NET_GL620A=m
# CONFIG_USB_NET_NET1080 is not set
CONFIG_USB_NET_PLUSB=m
CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
+CONFIG_USB_NET_CDC_SUBSET=m
CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
# CONFIG_USB_BELKIN is not set
@@ -990,41 +2925,155 @@ CONFIG_USB_NET_KALMIA=m
CONFIG_USB_NET_QMI_WWAN=m
CONFIG_USB_HSO=m
CONFIG_USB_NET_INT51X1=m
+# CONFIG_USB_CDC_PHONET is not set
CONFIG_USB_IPHETH=m
CONFIG_USB_SIERRA_NET=m
CONFIG_USB_VL600=m
CONFIG_USB_NET_CH9200=m
+# CONFIG_USB_NET_AQC111 is not set
+CONFIG_USB_RTL8153_ECM=m
+CONFIG_WLAN=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH_COMMON=m
+CONFIG_WLAN_VENDOR_ATH=y
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
CONFIG_ATH9K_AHB=y
+# CONFIG_ATH9K_DEBUGFS is not set
+# CONFIG_ATH9K_DYNACK is not set
CONFIG_ATH9K_WOW=y
+CONFIG_ATH9K_RFKILL=y
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
+CONFIG_ATH9K_PCOEM=y
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+# CONFIG_ATH9K_HWRNG is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
CONFIG_ATH10K=m
+CONFIG_ATH10K_CE=y
CONFIG_ATH10K_PCI=m
+# CONFIG_ATH10K_AHB is not set
+# CONFIG_ATH10K_SDIO is not set
+# CONFIG_ATH10K_USB is not set
+# CONFIG_ATH10K_DEBUG is not set
+# CONFIG_ATH10K_DEBUGFS is not set
+# CONFIG_ATH10K_TRACING is not set
+# CONFIG_WCN36XX is not set
+# CONFIG_ATH11K is not set
+# CONFIG_ATH12K is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
+CONFIG_WLAN_VENDOR_BROADCOM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_BRCMUTIL=m
CONFIG_BRCMSMAC=m
+CONFIG_BRCMSMAC_LEDS=y
CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_PROTO_BCDC=y
+CONFIG_BRCMFMAC_PROTO_MSGBUF=y
+CONFIG_BRCMFMAC_SDIO=y
CONFIG_BRCMFMAC_USB=y
CONFIG_BRCMFMAC_PCIE=y
+# CONFIG_BRCM_TRACING is not set
+# CONFIG_BRCMDBG is not set
# CONFIG_WLAN_VENDOR_CISCO is not set
+CONFIG_WLAN_VENDOR_INTEL=y
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_LEDS=y
CONFIG_IWLDVM=m
CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_OPMODE_MODULAR=y
+
+#
+# Debugging Options
+#
+# CONFIG_IWLWIFI_DEBUG is not set
+CONFIG_IWLWIFI_DEVICE_TRACING=y
+# end of Debugging Options
+
# CONFIG_WLAN_VENDOR_INTERSIL is not set
+CONFIG_WLAN_VENDOR_MARVELL=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
CONFIG_MWIFIEX_PCIE=m
CONFIG_MWIFIEX_USB=m
+# CONFIG_MWL8K is not set
+CONFIG_WLAN_VENDOR_MEDIATEK=y
CONFIG_MT7601U=m
+CONFIG_MT76_CORE=m
+CONFIG_MT76_LEDS=y
+CONFIG_MT76_USB=m
+CONFIG_MT76x02_LIB=m
+CONFIG_MT76x02_USB=m
+CONFIG_MT76x0_COMMON=m
CONFIG_MT76x0U=m
+# CONFIG_MT76x0E is not set
+CONFIG_MT76x2_COMMON=m
+# CONFIG_MT76x2E is not set
CONFIG_MT76x2U=m
+# CONFIG_MT7603E is not set
+# CONFIG_MT7615E is not set
+# CONFIG_MT7663U is not set
+# CONFIG_MT7663S is not set
+# CONFIG_MT7915E is not set
+# CONFIG_MT7921E is not set
+# CONFIG_MT7921S is not set
+# CONFIG_MT7921U is not set
+# CONFIG_MT7996E is not set
+CONFIG_WLAN_VENDOR_MICROCHIP=y
+# CONFIG_WILC1000_SDIO is not set
+# CONFIG_WILC1000_SPI is not set
+CONFIG_WLAN_VENDOR_PURELIFI=y
+# CONFIG_PLFXLC is not set
+CONFIG_WLAN_VENDOR_RALINK=y
CONFIG_RT2X00=m
+# CONFIG_RT2400PCI is not set
+# CONFIG_RT2500PCI is not set
+# CONFIG_RT61PCI is not set
CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT33XX=y
+CONFIG_RT2800PCI_RT35XX=y
+CONFIG_RT2800PCI_RT53XX=y
+CONFIG_RT2800PCI_RT3290=y
+# CONFIG_RT2500USB is not set
+# CONFIG_RT73USB is not set
CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
CONFIG_RT2800USB_RT3573=y
CONFIG_RT2800USB_RT53XX=y
CONFIG_RT2800USB_RT55XX=y
CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2800_LIB_MMIO=m
+CONFIG_RT2X00_LIB_MMIO=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WLAN_VENDOR_REALTEK=y
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+CONFIG_RTL_CARDS=m
CONFIG_RTL8192CE=m
CONFIG_RTL8192SE=m
CONFIG_RTL8192DE=m
@@ -1034,29 +3083,77 @@ CONFIG_RTL8188EE=m
CONFIG_RTL8192EE=m
CONFIG_RTL8821AE=m
CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTLWIFI_PCI=m
+CONFIG_RTLWIFI_USB=m
# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8192C_COMMON=m
+CONFIG_RTL8723_COMMON=m
+CONFIG_RTLBTCOEXIST=m
CONFIG_RTL8XXXU=m
+# CONFIG_RTL8XXXU_UNTESTED is not set
+# CONFIG_RTW88 is not set
+# CONFIG_RTW89 is not set
# CONFIG_WLAN_VENDOR_RSI is not set
+CONFIG_WLAN_VENDOR_SILABS=y
+# CONFIG_WFX is not set
# CONFIG_WLAN_VENDOR_ST is not set
# CONFIG_WLAN_VENDOR_TI is not set
+CONFIG_WLAN_VENDOR_ZYDAS=y
+# CONFIG_USB_ZD1201 is not set
CONFIG_ZD1211RW=m
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_WLAN_VENDOR_QUANTENNA=y
+# CONFIG_QTNFMAC_PCIE is not set
CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_MAC80211_HWSIM=m
+# CONFIG_VIRT_WIFI is not set
CONFIG_WAN=y
CONFIG_HDLC=m
CONFIG_HDLC_RAW=m
+# CONFIG_HDLC_RAW_ETH is not set
CONFIG_HDLC_CISCO=m
CONFIG_HDLC_FR=m
CONFIG_HDLC_PPP=m
+# CONFIG_HDLC_X25 is not set
+# CONFIG_PCI200SYN is not set
+# CONFIG_WANXL is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_FARSYNC is not set
+# CONFIG_LAPBETHER is not set
+CONFIG_IEEE802154_DRIVERS=m
CONFIG_IEEE802154_FAKELB=m
+# CONFIG_IEEE802154_AT86RF230 is not set
+# CONFIG_IEEE802154_MRF24J40 is not set
+# CONFIG_IEEE802154_CC2520 is not set
+# CONFIG_IEEE802154_ATUSB is not set
+# CONFIG_IEEE802154_ADF7242 is not set
+# CONFIG_IEEE802154_CA8210 is not set
+# CONFIG_IEEE802154_MCR20A is not set
+# CONFIG_IEEE802154_HWSIM is not set
+
+#
+# Wireless WAN
+#
+# CONFIG_WWAN is not set
+# end of Wireless WAN
+
CONFIG_VMXNET3=m
CONFIG_FUJITSU_ES=m
CONFIG_USB4_NET=m
CONFIG_NETDEVSIM=m
+CONFIG_NET_FAILOVER=m
CONFIG_ISDN=y
+CONFIG_ISDN_CAPI=y
+CONFIG_CAPI_TRACE=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
CONFIG_MISDN=m
CONFIG_MISDN_DSP=m
CONFIG_MISDN_L1OIP=m
+
+#
+# mISDN hardware drivers
+#
CONFIG_MISDN_HFCPCI=m
CONFIG_MISDN_HFCMULTI=m
CONFIG_MISDN_HFCUSB=m
@@ -1065,161 +3162,777 @@ CONFIG_MISDN_SPEEDFAX=m
CONFIG_MISDN_INFINEON=m
CONFIG_MISDN_W6692=m
CONFIG_MISDN_NETJET=m
+CONFIG_MISDN_HDLC=m
+CONFIG_MISDN_IPAC=m
+CONFIG_MISDN_ISAR=m
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MATRIXKMAP is not set
+CONFIG_INPUT_VIVALDIFMAP=y
+
+#
+# Userland interfaces
+#
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set CONFIG_KEYBOARD_XTKBD=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y CONFIG_MOUSE_SERIAL=m CONFIG_MOUSE_APPLETOUCH=m CONFIG_MOUSE_BCM5974=m CONFIG_MOUSE_CYAPA=m CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y CONFIG_MOUSE_ELAN_I2C_SMBUS=y CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set CONFIG_MOUSE_SYNAPTICS_I2C=m CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set CONFIG_INPUT_TABLET=y CONFIG_TABLET_USB_ACECAD=m CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set CONFIG_TOUCHSCREEN_ELO=m 
CONFIG_TOUCHSCREEN_WACOM_W8001=m
CONFIG_TOUCHSCREEN_WACOM_I2C=m
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
+# CONFIG_TOUCHSCREEN_MSG2638 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set
+# CONFIG_TOUCHSCREEN_IMAGIS is not set
+# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
+# CONFIG_TOUCHSCREEN_WM97XX is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2004 is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_RM_TS is not set
+# CONFIG_TOUCHSCREEN_SILEAD is not set
+# CONFIG_TOUCHSCREEN_SIS_I2C is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_STMFTS is not set
+# CONFIG_TOUCHSCREEN_SUR40 is not set
+# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set
+# CONFIG_TOUCHSCREEN_SX8654 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_ZET6223 is not set
+# CONFIG_TOUCHSCREEN_ZFORCE is not set
+# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set
+# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set
+# CONFIG_TOUCHSCREEN_IQS5XX is not set
+# CONFIG_TOUCHSCREEN_IQS7211 is not set
+# CONFIG_TOUCHSCREEN_ZINITIX is not set
+# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ATMEL_CAPTOUCH is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_DECODER is not set
+# CONFIG_INPUT_GPIO_VIBRA is not set
CONFIG_INPUT_ATI_REMOTE2=m
CONFIG_INPUT_KEYSPAN_REMOTE=m
+# CONFIG_INPUT_KXTJ9 is not set
CONFIG_INPUT_POWERMATE=m
CONFIG_INPUT_YEALINK=m
CONFIG_INPUT_CM109=m
CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_PWM_VIBRA is not set
CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+# CONFIG_INPUT_DA7280_HAPTICS is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_IQS269A is not set
+# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+CONFIG_RMI4_CORE=m
CONFIG_RMI4_I2C=m
CONFIG_RMI4_SPI=m
CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
CONFIG_RMI4_F34=y
+# CONFIG_RMI4_F3A is not set
+# CONFIG_RMI4_F54 is not set
CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=m
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_SERIO_PS2MULT is not set
CONFIG_SERIO_ARC_PS2=m
+# CONFIG_SERIO_APBPS2 is not set
+# CONFIG_SERIO_GPIO_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support

+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_LEGACY_TIOCSTI=y
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_16550A_VARIANTS=y
+# CONFIG_SERIAL_8250_FINTEK is not set
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCILIB=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=y
CONFIG_SERIAL_8250_NR_UARTS=16
CONFIG_SERIAL_8250_RUNTIME_UARTS=16
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_DWLIB=y
CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_RT288X is not set
+CONFIG_SERIAL_8250_PERICOM=y
+# CONFIG_SERIAL_OF_PLATFORM is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_SIFIVE is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+CONFIG_SERIAL_MCTRL_GPIO=y
CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
CONFIG_N_HDLC=m
CONFIG_N_GSM=m
CONFIG_NOZOMI=m
+# CONFIG_NULL_TTY is not set
+CONFIG_HVC_DRIVER=y
+# CONFIG_SERIAL_DEV_BUS is not set
+# CONFIG_TTY_PRINTK is not set
CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
CONFIG_PPDEV=m
CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_PLAT_DATA=y
CONFIG_IPMI_PANIC_EVENT=y
CONFIG_IPMI_PANIC_STRING=y
CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
CONFIG_IPMI_SSIF=m
CONFIG_IPMI_WATCHDOG=m
CONFIG_IPMI_POWEROFF=m
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_TIMERIOMEM=m
+# CONFIG_HW_RANDOM_BA431 is not set
CONFIG_HW_RANDOM_VIRTIO=m
+# CONFIG_HW_RANDOM_CCTRNG is not set
+# CONFIG_HW_RANDOM_XIPHERA is not set
+# CONFIG_APPLICOM is not set
+CONFIG_DEVMEM=y
+CONFIG_DEVPORT=y
+CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
+CONFIG_TCG_TIS_CORE=m
+# CONFIG_TCG_TIS is not set
CONFIG_TCG_TIS_SPI=m
+# CONFIG_TCG_TIS_SPI_CR50 is not set
+# CONFIG_TCG_TIS_I2C is not set
+# CONFIG_TCG_TIS_I2C_CR50 is not set
CONFIG_TCG_TIS_I2C_ATMEL=m
CONFIG_TCG_TIS_I2C_INFINEON=m
CONFIG_TCG_TIS_I2C_NUVOTON=m
CONFIG_TCG_ATMEL=m
CONFIG_TCG_INFINEON=m
+CONFIG_TCG_CRB=y
+# CONFIG_TCG_VTPM_PROXY is not set
+CONFIG_TCG_TIS_ST33ZP24=m
CONFIG_TCG_TIS_ST33ZP24_I2C=m
CONFIG_TCG_TIS_ST33ZP24_SPI=m
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_ACPI_I2C_OPREGION=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set CONFIG_I2C_AMD756=m CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set + +# +# ACPI drivers +# CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=y +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set CONFIG_I2C_LS2X=m +# CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_RK3X is not set CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set CONFIG_I2C_TINY_USB=m CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_CADENCE_XSPI is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +CONFIG_SPI_LOONGSON_CORE=y CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set CONFIG_PPS_CLIENT_LDISC=m CONFIG_PPS_CLIENT_PARPORT=m CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# 
CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set CONFIG_PINCTRL_LOONGSON2=y +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set CONFIG_GPIO_LOONGSON_64BIT=y +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# 
CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m CONFIG_SENSORS_ADM1026=m CONFIG_SENSORS_ADM1029=m CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m CONFIG_SENSORS_ADT7470=m CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set CONFIG_SENSORS_DS620=m CONFIG_SENSORS_DS1621=m CONFIG_SENSORS_I5K_AMB=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set CONFIG_SENSORS_MAX6639=m CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -1237,72 +3950,322 
@@ CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set CONFIG_SENSORS_MAX34440=m CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set CONFIG_SENSORS_UCD9000=m CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set CONFIG_SENSORS_SHT15=m CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set CONFIG_SENSORS_SIS5595=m CONFIG_SENSORS_DME1737=m CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set CONFIG_SENSORS_EMC6W201=m CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m CONFIG_SENSORS_SCH5627=m CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set CONFIG_SENSORS_THMC50=m CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set CONFIG_SENSORS_TMP401=m CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83791D=m CONFIG_SENSORS_W83792D=m CONFIG_SENSORS_W83793=m 
 CONFIG_SENSORS_W83795=m
+# CONFIG_SENSORS_W83795_FANCTRL is not set
 CONFIG_SENSORS_W83L785TS=m
 CONFIG_SENSORS_W83L786NG=m
 CONFIG_SENSORS_W83627HF=m
 CONFIG_SENSORS_W83627EHF=m
+
+#
+# ACPI drivers
+#
 CONFIG_SENSORS_ACPI_POWER=m
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_NETLINK is not set
+# CONFIG_THERMAL_STATISTICS is not set
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+# CONFIG_THERMAL_WRITABLE_TRIPS is not set
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
 CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_BANG_BANG is not set
+# CONFIG_THERMAL_GOV_USER_SPACE is not set
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_DEVFREQ_THERMAL is not set
 CONFIG_THERMAL_EMULATION=y
+# CONFIG_THERMAL_MMIO is not set
+# CONFIG_GENERIC_ADC_THERMAL is not set
 CONFIG_LOONGSON2_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
+CONFIG_WATCHDOG_OPEN_TIMEOUT=0
 CONFIG_WATCHDOG_SYSFS=y
+# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set
+
+#
+# Watchdog Pretimeout Governors
+#
+# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set
+
+#
+# Watchdog Device Drivers
+#
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_GPIO_WATCHDOG=m
 CONFIG_WDAT_WDT=m
+# CONFIG_XILINX_WATCHDOG is not set
+# CONFIG_ZIIRAVE_WATCHDOG is not set
+# CONFIG_CADENCE_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
 CONFIG_ALIM7101_WDT=m
 CONFIG_I6300ESB_WDT=m
+# CONFIG_MEN_A21_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
 CONFIG_PCIPCWATCHDOG=m
 CONFIG_WDTPCI=m
+
+#
+# USB-based Watchdog Cards
+#
 CONFIG_USBPCWATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
+CONFIG_BCMA_HOST_PCI=y
+# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_BCMA_DRIVER_PCI=y
 CONFIG_BCMA_DRIVER_GMAC_CMN=y
 CONFIG_BCMA_DRIVER_GPIO=y
+# CONFIG_BCMA_DEBUG is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_ACT8945A is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_MFD_SMPRO is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_CS42L43_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_MFD_MAX5970 is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_GATEWORKS_GSC is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_HI6421_PMIC is not set
+# CONFIG_LPC_ICH is not set
+CONFIG_LPC_SCH=m
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77541 is not set
+# CONFIG_MFD_MAX77620 is not set
+# CONFIG_MFD_MAX77650 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77714 is not set
+# CONFIG_MFD_MAX77843 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_OCELOT is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_CPCAP is not set
 CONFIG_MFD_VIPERBOARD=m
+# CONFIG_MFD_NTXEC is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_SY7636A is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT4831 is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_RK8XX_I2C is not set
+# CONFIG_MFD_RK8XX_SPI is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
 CONFIG_MFD_SM501=m
 CONFIG_MFD_SM501_GPIO=y
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_STMPE is not set
+CONFIG_MFD_SYSCON=y
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TI_LP87565 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS65219 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS6594_I2C is not set
+# CONFIG_MFD_TPS6594_SPI is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TQMX86 is not set
 CONFIG_MFD_VX855=m
+# CONFIG_MFD_LOCHNAGAR is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ROHM_BD718XX is not set
+# CONFIG_MFD_ROHM_BD71828 is not set
+# CONFIG_MFD_ROHM_BD957XMUF is not set
+# CONFIG_MFD_STPMIC1 is not set
+# CONFIG_MFD_STMFX is not set
+# CONFIG_MFD_ATC260X_I2C is not set
+# CONFIG_MFD_QCOM_PM8008 is not set
+# CONFIG_MFD_INTEL_M10_BMC_SPI is not set
+# CONFIG_MFD_RSMU_I2C is not set
+# CONFIG_MFD_RSMU_SPI is not set
+# end of Multifunction device drivers
+
+# CONFIG_REGULATOR is not set
 CONFIG_RC_CORE=m
 CONFIG_LIRC=y
+CONFIG_RC_MAP=m
 CONFIG_RC_DECODERS=y
 CONFIG_IR_IMON_DECODER=m
 CONFIG_IR_JVC_DECODER=m
@@ -1310,6 +4273,7 @@
 CONFIG_IR_MCE_KBD_DECODER=m
 CONFIG_IR_NEC_DECODER=m
 CONFIG_IR_RC5_DECODER=m
 CONFIG_IR_RC6_DECODER=m
+# CONFIG_IR_RCMM_DECODER is not set
 CONFIG_IR_SANYO_DECODER=m
 CONFIG_IR_SHARP_DECODER=m
 CONFIG_IR_SONY_DECODER=m
@@ -1317,31 +4281,114 @@
 CONFIG_IR_XMP_DECODER=m
 CONFIG_RC_DEVICES=y
 CONFIG_IR_ENE=m
 CONFIG_IR_FINTEK=m
+# CONFIG_IR_GPIO_CIR is not set
+# CONFIG_IR_GPIO_TX is not set
+# CONFIG_IR_HIX5HD2 is not set
+# CONFIG_IR_IGORPLUGUSB is not set
 CONFIG_IR_IGUANA=m
 CONFIG_IR_IMON=m
 CONFIG_IR_IMON_RAW=m
 CONFIG_IR_ITE_CIR=m
 CONFIG_IR_MCEUSB=m
 CONFIG_IR_NUVOTON=m
+# CONFIG_IR_PWM_TX is not set
 CONFIG_IR_REDRAT3=m
 CONFIG_IR_SERIAL=m
 CONFIG_IR_SERIAL_TRANSMITTER=y
+# CONFIG_IR_SPI is not set
 CONFIG_IR_STREAMZAP=m
+# CONFIG_IR_TOY is not set
 CONFIG_IR_TTUSBIR=m
 CONFIG_RC_ATI_REMOTE=m
+# CONFIG_RC_LOOPBACK is not set
+# CONFIG_RC_XBOX_DVD is not set
+CONFIG_CEC_CORE=m
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_RC is not set
+CONFIG_MEDIA_CEC_SUPPORT=y
+# CONFIG_CEC_CH7322 is not set
 CONFIG_USB_PULSE8_CEC=m
 CONFIG_USB_RAINSHADOW_CEC=m
+# end of CEC support
+
 CONFIG_MEDIA_SUPPORT=m
-CONFIG_DVB_MAX_ADAPTERS=8
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_GSPCA=m
+# CONFIG_MEDIA_SUPPORT_FILTER is not set
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Media device types
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_MEDIA_TEST_SUPPORT=y
+# end of Media device types
+
+#
+# Media core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_DVB_CORE=m
+# end of Media core support
+
+#
+# Video4Linux options
+#
+CONFIG_VIDEO_V4L2_I2C=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_FWNODE=m
+CONFIG_V4L2_ASYNC=m
+# end of Video4Linux options
+
+#
+# Media controller options
+#
+CONFIG_MEDIA_CONTROLLER_DVB=y
+# end of Media controller options
+
+#
+# Digital TV options
+#
+# CONFIG_DVB_MMAP is not set
+CONFIG_DVB_NET=y
+CONFIG_DVB_MAX_ADAPTERS=8
+CONFIG_DVB_DYNAMIC_MINORS=y
+# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
+# CONFIG_DVB_ULE_DEBUG is not set
+# end of Digital TV options
+
+#
+# Media drivers
+#
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_GSPCA=m
 CONFIG_USB_GSPCA_BENQ=m
 CONFIG_USB_GSPCA_CONEX=m
 CONFIG_USB_GSPCA_CPIA1=m
+# CONFIG_USB_GSPCA_DTCS033 is not set
 CONFIG_USB_GSPCA_ETOMS=m
 CONFIG_USB_GSPCA_FINEPIX=m
 CONFIG_USB_GSPCA_JEILINJ=m
 CONFIG_USB_GSPCA_JL2005BCD=m
+# CONFIG_USB_GSPCA_KINECT is not set
 CONFIG_USB_GSPCA_KONICA=m
 CONFIG_USB_GSPCA_MARS=m
 CONFIG_USB_GSPCA_MR97310A=m
@@ -1368,10 +4415,12 @@
 CONFIG_USB_GSPCA_SQ905=m
 CONFIG_USB_GSPCA_SQ905C=m
 CONFIG_USB_GSPCA_SQ930X=m
 CONFIG_USB_GSPCA_STK014=m
+# CONFIG_USB_GSPCA_STK1135 is not set
 CONFIG_USB_GSPCA_STV0680=m
 CONFIG_USB_GSPCA_SUNPLUS=m
 CONFIG_USB_GSPCA_T613=m
 CONFIG_USB_GSPCA_TOPRO=m
+# CONFIG_USB_GSPCA_TOUPTEK is not set
 CONFIG_USB_GSPCA_TV8532=m
 CONFIG_USB_GSPCA_VC032X=m
 CONFIG_USB_GSPCA_VICAM=m
@@ -1381,31 +4430,62 @@
 CONFIG_USB_GL860=m
 CONFIG_USB_M5602=m
 CONFIG_USB_STV06XX=m
 CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
 CONFIG_USB_S2255=m
+# CONFIG_VIDEO_USBTV is not set
 CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+
+#
+# Analog TV USB devices
+#
+# CONFIG_VIDEO_GO7007 is not set
 CONFIG_VIDEO_HDPVR=m
 CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+# CONFIG_VIDEO_STK1160 is not set
+
+#
+# Analog/digital TV USB devices
+#
 CONFIG_VIDEO_AU0828=m
+CONFIG_VIDEO_AU0828_V4L2=y
+# CONFIG_VIDEO_AU0828_RC is not set
+
+#
+# Digital TV USB devices
+#
+# CONFIG_DVB_AS102 is not set
 CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
 CONFIG_DVB_USB_V2=m
 CONFIG_DVB_USB_AF9035=m
 CONFIG_DVB_USB_ANYSEE=m
 CONFIG_DVB_USB_AU6610=m
 CONFIG_DVB_USB_AZ6007=m
 CONFIG_DVB_USB_CE6230=m
+# CONFIG_DVB_USB_DVBSKY is not set
 CONFIG_DVB_USB_EC168=m
 CONFIG_DVB_USB_GL861=m
 CONFIG_DVB_USB_LME2510=m
 CONFIG_DVB_USB_MXL111SF=m
+# CONFIG_DVB_USB_ZD1301 is not set
 CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
 CONFIG_DVB_USB_A800=m
CONFIG_DVB_USB_AF9005=m CONFIG_DVB_USB_AF9005_REMOTE=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_CINERGY_T2=m CONFIG_DVB_USB_CXUSB=m +# CONFIG_DVB_USB_CXUSB_ANALOG is not set CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIB3000MC=m CONFIG_DVB_USB_DIBUSB_MB=m +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set CONFIG_DVB_USB_DIBUSB_MC=m CONFIG_DVB_USB_DIGITV=m CONFIG_DVB_USB_DTT200U=m @@ -1424,103 +4504,1042 @@ CONFIG_DVB_USB_VP7045=m CONFIG_SMS_USB_DRV=m CONFIG_DVB_TTUSB_BUDGET=m CONFIG_DVB_TTUSB_DEC=m + +# +# Webcam, TV (analog/digital) USB devices +# CONFIG_VIDEO_EM28XX=m +# CONFIG_VIDEO_EM28XX_V4L2 is not set CONFIG_VIDEO_EM28XX_ALSA=m CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# Software defined radio USB devices +# +# CONFIG_USB_AIRSPY is not set +# CONFIG_USB_HACKRF is not set +# CONFIG_USB_MSI2500 is not set CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set +# CONFIG_VIDEO_ZORAN is not set + +# +# Media capture/analog TV support +# +# CONFIG_VIDEO_DT3155 is not set CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_ALSA is not set CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_MXB is not set + +# +# Media capture/analog/hybrid TV support +# CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m CONFIG_VIDEO_CX18=m +# CONFIG_VIDEO_CX18_ALSA is not set CONFIG_VIDEO_CX23885=m CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set CONFIG_VIDEO_CX88=m CONFIG_VIDEO_CX88_ALSA=m CONFIG_VIDEO_CX88_BLACKBIRD=m CONFIG_VIDEO_CX88_DVB=m # CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_CX88_MPEG=m CONFIG_VIDEO_SAA7134=m CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y CONFIG_VIDEO_SAA7134_DVB=m CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set CONFIG_DVB_DM1105=m CONFIG_MANTIS_CORE=m CONFIG_DVB_MANTIS=m CONFIG_DVB_HOPPER=m +# CONFIG_DVB_NETUP_UNIDVB is not set CONFIG_DVB_NGENE=m CONFIG_DVB_PLUTO2=m CONFIG_DVB_PT1=m +# CONFIG_DVB_PT3 is not set +# CONFIG_DVB_SMIPCIE is not set CONFIG_DVB_BUDGET_CORE=m CONFIG_DVB_BUDGET=m CONFIG_DVB_BUDGET_CI=m CONFIG_DVB_BUDGET_AV=m +# CONFIG_IPU_BRIDGE is not set +CONFIG_RADIO_ADAPTERS=m +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_RADIO_SI4713 is not set +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_MA901 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_RADIO_SI470X is not set +CONFIG_MEDIA_PLATFORM_DRIVERS=y +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_SDR_PLATFORM_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set + +# +# Allegro DVT media platform drivers +# + +# +# Amlogic media platform drivers +# + +# +# Amphion drivers +# + +# +# Aspeed media platform drivers +# + +# +# Atmel media platform drivers +# + +# +# Cadence media platform drivers +# +# CONFIG_VIDEO_CADENCE_CSI2RX is not set +# CONFIG_VIDEO_CADENCE_CSI2TX is not set + +# +# Chips&Media media platform drivers +# + +# +# Intel media platform drivers +# + +# +# Marvell media 
platform drivers +# + +# +# Mediatek media platform drivers +# + +# +# Microchip Technology, Inc. media platform drivers +# + +# +# NVidia media platform drivers +# + +# +# NXP media platform drivers +# + +# +# Qualcomm media platform drivers +# + +# +# Renesas media platform drivers +# + +# +# Rockchip media platform drivers +# + +# +# Samsung media platform drivers +# + +# +# STMicroelectronics media platform drivers +# + +# +# Sunxi media platform drivers +# + +# +# Texas Instruments drivers +# + +# +# Verisilicon media platform drivers +# + +# +# VIA media platform drivers +# + +# +# Xilinx media platform drivers +# + +# +# MMC/SDIO DVB adapters +# CONFIG_SMS_SDIO_DRV=m +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_TEST_DRIVERS is not set + +# +# FireWire (IEEE 1394) Adapters +# CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_TTPCI_EEPROM=m +CONFIG_UVC_COMMON=m +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +# end of Media drivers + +# +# Media ancillary drivers +# +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m +CONFIG_VIDEO_CAMERA_SENSOR=y +# CONFIG_VIDEO_AR0521 is not set +# CONFIG_VIDEO_HI556 is not set +# CONFIG_VIDEO_HI846 is not set +# CONFIG_VIDEO_HI847 is not set +# CONFIG_VIDEO_IMX208 is not set +# CONFIG_VIDEO_IMX214 is not set +# CONFIG_VIDEO_IMX219 is not set +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_IMX290 is not set +# CONFIG_VIDEO_IMX296 is not set +# CONFIG_VIDEO_IMX319 is not set +# CONFIG_VIDEO_IMX334 is not set +# CONFIG_VIDEO_IMX335 is not set +# CONFIG_VIDEO_IMX355 is not set +# CONFIG_VIDEO_IMX412 is not set +# CONFIG_VIDEO_IMX415 is not set +# CONFIG_VIDEO_MT9M001 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T112 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_OG01A1B is not set +# CONFIG_VIDEO_OV01A10 is not set +# CONFIG_VIDEO_OV02A10 is not set +# CONFIG_VIDEO_OV08D10 is not set +# CONFIG_VIDEO_OV08X40 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_OV13B10 is not set +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV2740 is not set +# CONFIG_VIDEO_OV4689 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV5648 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5675 is not set +# CONFIG_VIDEO_OV5693 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV8858 is not set +# CONFIG_VIDEO_OV8865 is not set +# CONFIG_VIDEO_OV9282 is not set +# CONFIG_VIDEO_OV9640 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV9734 is not set +# CONFIG_VIDEO_RDACM20 is not set +# CONFIG_VIDEO_RDACM21 is not set +# 
CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5C73M3 is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_ST_VGXY61 is not set +# CONFIG_VIDEO_CCS is not set +# CONFIG_VIDEO_ET8EK8 is not set + +# +# Lens drivers +# +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9719 is not set +# CONFIG_VIDEO_DW9768 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# end of Lens drivers + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set +# end of Flash devices + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_MSP3400=m +# CONFIG_VIDEO_SONY_BTF_MPX is not set +# CONFIG_VIDEO_TDA1997X is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_UDA1342 is not set +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_WM8775=m +# end of Audio decoders, processors and mixers + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set +# end of RDS decoders + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_ISL7998X is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_SAA7110 is not set +CONFIG_VIDEO_SAA711X=m +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TC358746 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +# end of Video decoders + +# +# Video encoders +# +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AK881X is not set +CONFIG_VIDEO_SAA7127=m +# CONFIG_VIDEO_SAA7185 is not set +# CONFIG_VIDEO_THS8200 is not set +# end of Video encoders + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +# end of Video improvement chips + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set +# end of Audio/Video compression chips + +# +# SDR tuner chips +# +# CONFIG_SDR_MAX2175 is not set +# end of SDR tuner chips + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_I2C is not set +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_ST_MIPID02 is not set +# CONFIG_VIDEO_THS7303 is not set +# end of Miscellaneous helper chips + +# +# Video serializers and deserializers +# +# CONFIG_VIDEO_DS90UB913 is not set +# CONFIG_VIDEO_DS90UB953 is not set +# CONFIG_VIDEO_DS90UB960 is not set +# end of Video serializers and deserializers + +# +# Media SPI Adapters +# +CONFIG_CXD2880_SPI_DRV=m +# CONFIG_VIDEO_GS1662 is not set +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Customize TV tuners +# +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m 
+CONFIG_MEDIA_TUNER_FC0012=m
+CONFIG_MEDIA_TUNER_FC0013=m
+CONFIG_MEDIA_TUNER_FC2580=m
+CONFIG_MEDIA_TUNER_IT913X=m
+CONFIG_MEDIA_TUNER_M88RS6000T=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MSI001=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2063=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MXL301RF=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_QM1D1B0004=m
+CONFIG_MEDIA_TUNER_QM1D1C0042=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_R820T=m
+CONFIG_MEDIA_TUNER_SI2157=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA18212=m
+CONFIG_MEDIA_TUNER_TDA18218=m
+CONFIG_MEDIA_TUNER_TDA18250=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_TUA9001=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_XC5000=m
+# end of Customize TV tuners
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Multistandard (satellite) frontends
+#
+CONFIG_DVB_MXL5XX=m
+CONFIG_DVB_STB0899=m
+CONFIG_DVB_STB6100=m
+CONFIG_DVB_STV090x=m
+CONFIG_DVB_STV0910=m
+CONFIG_DVB_STV6110x=m
+CONFIG_DVB_STV6111=m
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+CONFIG_DVB_DRXK=m
+CONFIG_DVB_MN88472=m
+CONFIG_DVB_MN88473=m
+CONFIG_DVB_SI2165=m
+CONFIG_DVB_TDA18271C2DD=m
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24116=m
+CONFIG_DVB_CX24117=m
+CONFIG_DVB_CX24120=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_DS3000=m
+CONFIG_DVB_MB86A16=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_SI21XX=m
+CONFIG_DVB_STB6000=m
+CONFIG_DVB_STV0288=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_STV0900=m
+CONFIG_DVB_STV6110=m
+CONFIG_DVB_TDA10071=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA8261=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TS2020=m
+CONFIG_DVB_TUA6100=m
+CONFIG_DVB_TUNER_CX24113=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_ZL10036=m
+CONFIG_DVB_ZL10039=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_CXD2820R=m
+CONFIG_DVB_CXD2841ER=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_DIB9000=m
+CONFIG_DVB_DRXD=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_GP8PSK_FE=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_S5H1432=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_STV0367=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_ZD1301_DEMOD=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_CXD2880=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_STV0297=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_VES1820=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_AU8522_DTV=m
+CONFIG_DVB_AU8522_V4L=m
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LG2160=m
+CONFIG_DVB_LGDT3305=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_MXL692=m
+CONFIG_DVB_NXT200X=m
+CONFIG_DVB_OR51132=m
+CONFIG_DVB_OR51211=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_S5H1411=m
+
+#
+# ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
+CONFIG_DVB_S921=m
+
+#
+# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_MN88443X=m
+CONFIG_DVB_TC90522=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_A8293=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_HELENE=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBH29=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_DRX39XYJ=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m +# end of Customise DVB Frontends + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set CONFIG_DRM=y +# CONFIG_DRM_DEBUG_MM is not set +CONFIG_DRM_KMS_HELPER=y +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y CONFIG_DRM_DP_AUX_CHARDEV=y CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=y +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_WERROR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DRM_AMD_DC_SI is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set CONFIG_DRM_VKMS=m CONFIG_DRM_UDL=m CONFIG_DRM_AST=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# 
CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + CONFIG_DRM_LOONGSON=y +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set + +# +# Frame buffer Devices +# CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not 
set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set CONFIG_FB_LS2K500=m +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_DDC=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# # CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=y +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y CONFIG_SND_OSSEMUL=y +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +CONFIG_SND_PCM_TIMER=y CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 # CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +CONFIG_SND_CTL_FAST_LOOKUP=y +# CONFIG_SND_DEBUG 
is not set +# CONFIG_SND_CTL_INPUT_VALIDATION is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_CTL_LED=m CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m +# CONFIG_SND_SEQ_UMP is not set +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y CONFIG_SND_DUMMY=m CONFIG_SND_ALOOP=m +# CONFIG_SND_PCMTEST is not set CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set CONFIG_SND_MPU401=m +# CONFIG_SND_PORTMAN2X4 is not set CONFIG_SND_AC97_POWER_SAVE=y CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_PCI=y CONFIG_SND_AD1889=m CONFIG_SND_ATIIXP=m CONFIG_SND_ATIIXP_MODEM=m CONFIG_SND_AU8810=m CONFIG_SND_AU8820=m CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y CONFIG_SND_CA0106=m CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m CONFIG_SND_OXYGEN=m +# CONFIG_SND_CS4281 is not set CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y CONFIG_SND_CTXFI=m CONFIG_SND_DARLA20=m CONFIG_SND_GINA20=m @@ -1538,6 +5557,7 @@ CONFIG_SND_INDIGOIOX=m CONFIG_SND_INDIGODJX=m CONFIG_SND_ENS1370=m CONFIG_SND_ENS1371=m +# CONFIG_SND_FM801 is not set CONFIG_SND_HDSP=m CONFIG_SND_HDSPM=m CONFIG_SND_ICE1724=m @@ -1547,7 +5567,9 @@ CONFIG_SND_KORG1212=m CONFIG_SND_LOLA=m CONFIG_SND_LX6464ES=m CONFIG_SND_MIXART=m +# CONFIG_SND_NM256 is not set CONFIG_SND_PCXHR=m +# CONFIG_SND_RIPTIDE is not set CONFIG_SND_RME32=m CONFIG_SND_RME96=m CONFIG_SND_RME9652=m @@ -1555,35 +5577,68 @@ CONFIG_SND_VIA82XX=m CONFIG_SND_VIA82XX_MODEM=m CONFIG_SND_VIRTUOSO=m CONFIG_SND_VX222=m +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_GENERIC_LEDS=y CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y +# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set +# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set CONFIG_SND_HDA_CODEC_REALTEK=m CONFIG_SND_HDA_CODEC_ANALOG=m CONFIG_SND_HDA_CODEC_SIGMATEL=m CONFIG_SND_HDA_CODEC_VIA=m CONFIG_SND_HDA_CODEC_HDMI=m CONFIG_SND_HDA_CODEC_CIRRUS=m +# CONFIG_SND_HDA_CODEC_CS8409 is not set CONFIG_SND_HDA_CODEC_CONEXANT=m CONFIG_SND_HDA_CODEC_CA0110=m CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y CONFIG_SND_HDA_CODEC_CMEDIA=m CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set +# CONFIG_SND_HDA_CTL_DEV_ID is not set +# end of HD-Audio + +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_PREALLOC_SIZE=512 +CONFIG_SND_INTEL_NHLT=y +CONFIG_SND_INTEL_DSP_CONFIG=m +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m # CONFIG_SND_SPI is not set +CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y CONFIG_SND_USB_UA101=m CONFIG_SND_USB_CAIAQ=m CONFIG_SND_USB_CAIAQ_INPUT=y CONFIG_SND_USB_6FIRE=m CONFIG_SND_USB_HIFACE=m CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m CONFIG_SND_USB_POD=m CONFIG_SND_USB_PODHD=m CONFIG_SND_USB_TONEPORT=m CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y 
+CONFIG_SND_FIREWIRE_LIB=m CONFIG_SND_DICE=m CONFIG_SND_OXFW=m CONFIG_SND_ISIGHT=m @@ -1594,36 +5649,306 @@ CONFIG_SND_FIREWIRE_TASCAM=m CONFIG_SND_FIREWIRE_MOTU=m CONFIG_SND_FIREFACE=m CONFIG_SND_SOC=m +# CONFIG_SND_SOC_ADI is not set +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_AMD_ACP_CONFIG is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_AUDMIX is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_FSL_MICFIL is not set +# CONFIG_SND_SOC_FSL_XCVR is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# end of SoC Audio for Freescale CPUs + +# CONFIG_SND_SOC_CHV3_I2S is not set +# CONFIG_SND_I2S_HI6210_I2S is not set + +# +# SoC Audio for Loongson CPUs +# +# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set +# CONFIG_SND_SOC_LOONGSON_CARD is not set +# end of SoC Audio for Loongson CPUs + +# CONFIG_SND_SOC_IMG is not set +# CONFIG_SND_SOC_MTK_BTCVSD is not set +# CONFIG_SND_SOC_SOF_TOPLEVEL is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# end of STMicroelectronics STM32 SOC audio support + +# CONFIG_SND_SOC_XILINX_I2S is not set +# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set +# CONFIG_SND_SOC_XILINX_SPDIF is not set +# CONFIG_SND_SOC_XTFPGA_I2S is not set +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1372_I2C is not set +# CONFIG_SND_SOC_ADAU1372_SPI is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_ADAU7118_HW is not set +# CONFIG_SND_SOC_ADAU7118_I2C is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4118 is not set +# CONFIG_SND_SOC_AK4375 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set +# CONFIG_SND_SOC_AW8738 is not set +# CONFIG_SND_SOC_AW88395 is not set +# CONFIG_SND_SOC_AW88261 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CHV3_CODEC is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS35L36 is not set +# CONFIG_SND_SOC_CS35L41_SPI is not set +# CONFIG_SND_SOC_CS35L41_I2C is not set +# CONFIG_SND_SOC_CS35L45_SPI is not set +# CONFIG_SND_SOC_CS35L45_I2C is not set +# CONFIG_SND_SOC_CS35L56_I2C is not set +# CONFIG_SND_SOC_CS35L56_SPI is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS42L83 is not set +# CONFIG_SND_SOC_CS4234 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# 
CONFIG_SND_SOC_CS4341 is not set
+# CONFIG_SND_SOC_CS4349 is not set
+# CONFIG_SND_SOC_CS53L30 is not set
+# CONFIG_SND_SOC_CX2072X is not set
+# CONFIG_SND_SOC_DA7213 is not set
+# CONFIG_SND_SOC_DMIC is not set
+# CONFIG_SND_SOC_ES7134 is not set
+# CONFIG_SND_SOC_ES7241 is not set
+# CONFIG_SND_SOC_ES8316 is not set
+# CONFIG_SND_SOC_ES8326 is not set
+# CONFIG_SND_SOC_ES8328_I2C is not set
+# CONFIG_SND_SOC_ES8328_SPI is not set
+# CONFIG_SND_SOC_GTM601 is not set
+# CONFIG_SND_SOC_HDA is not set
+# CONFIG_SND_SOC_ICS43432 is not set
+# CONFIG_SND_SOC_IDT821034 is not set
+# CONFIG_SND_SOC_INNO_RK3036 is not set
+# CONFIG_SND_SOC_MAX98088 is not set
+# CONFIG_SND_SOC_MAX98090 is not set
+# CONFIG_SND_SOC_MAX98357A is not set
+# CONFIG_SND_SOC_MAX98504 is not set
+# CONFIG_SND_SOC_MAX9867 is not set
+# CONFIG_SND_SOC_MAX98927 is not set
+# CONFIG_SND_SOC_MAX98520 is not set
+# CONFIG_SND_SOC_MAX98373_I2C is not set
+# CONFIG_SND_SOC_MAX98388 is not set
+# CONFIG_SND_SOC_MAX98390 is not set
+# CONFIG_SND_SOC_MAX98396 is not set
+# CONFIG_SND_SOC_MAX9860 is not set
+# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set
+# CONFIG_SND_SOC_PCM1681 is not set
+# CONFIG_SND_SOC_PCM1789_I2C is not set
+# CONFIG_SND_SOC_PCM179X_I2C is not set
+# CONFIG_SND_SOC_PCM179X_SPI is not set
+# CONFIG_SND_SOC_PCM186X_I2C is not set
+# CONFIG_SND_SOC_PCM186X_SPI is not set
+# CONFIG_SND_SOC_PCM3060_I2C is not set
+# CONFIG_SND_SOC_PCM3060_SPI is not set
+# CONFIG_SND_SOC_PCM3168A_I2C is not set
+# CONFIG_SND_SOC_PCM3168A_SPI is not set
+# CONFIG_SND_SOC_PCM5102A is not set
+# CONFIG_SND_SOC_PCM512x_I2C is not set
+# CONFIG_SND_SOC_PCM512x_SPI is not set
+# CONFIG_SND_SOC_PEB2466 is not set
+# CONFIG_SND_SOC_RK3328 is not set
+# CONFIG_SND_SOC_RT5616 is not set
+# CONFIG_SND_SOC_RT5631 is not set
+# CONFIG_SND_SOC_RT5640 is not set
+# CONFIG_SND_SOC_RT5659 is not set
+# CONFIG_SND_SOC_RT9120 is not set
+# CONFIG_SND_SOC_SGTL5000 is not set
+# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set
+# CONFIG_SND_SOC_SIMPLE_MUX is not set
+# CONFIG_SND_SOC_SMA1303 is not set
+# CONFIG_SND_SOC_SPDIF is not set
+# CONFIG_SND_SOC_SRC4XXX_I2C is not set
+# CONFIG_SND_SOC_SSM2305 is not set
+# CONFIG_SND_SOC_SSM2518 is not set
+# CONFIG_SND_SOC_SSM2602_SPI is not set
+# CONFIG_SND_SOC_SSM2602_I2C is not set
+# CONFIG_SND_SOC_SSM3515 is not set
+# CONFIG_SND_SOC_SSM4567 is not set
+# CONFIG_SND_SOC_STA32X is not set
+# CONFIG_SND_SOC_STA350 is not set
+# CONFIG_SND_SOC_STI_SAS is not set
+# CONFIG_SND_SOC_TAS2552 is not set
+# CONFIG_SND_SOC_TAS2562 is not set
+# CONFIG_SND_SOC_TAS2764 is not set
+# CONFIG_SND_SOC_TAS2770 is not set
+# CONFIG_SND_SOC_TAS2780 is not set
+# CONFIG_SND_SOC_TAS2781_I2C is not set
+# CONFIG_SND_SOC_TAS5086 is not set
+# CONFIG_SND_SOC_TAS571X is not set
+# CONFIG_SND_SOC_TAS5720 is not set
+# CONFIG_SND_SOC_TAS5805M is not set
+# CONFIG_SND_SOC_TAS6424 is not set
+# CONFIG_SND_SOC_TDA7419 is not set
+# CONFIG_SND_SOC_TFA9879 is not set
+# CONFIG_SND_SOC_TFA989X is not set
+# CONFIG_SND_SOC_TLV320ADC3XXX is not set
+# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC31XX is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set
+# CONFIG_SND_SOC_TLV320ADCX140 is not set
+# CONFIG_SND_SOC_TS3A227E is not set
+# CONFIG_SND_SOC_TSCS42XX is not set
+# CONFIG_SND_SOC_TSCS454 is not set
+# CONFIG_SND_SOC_UDA1334 is not set
+# CONFIG_SND_SOC_WM8510 is not set
+# CONFIG_SND_SOC_WM8523 is not set
+# CONFIG_SND_SOC_WM8524 is not set
+# CONFIG_SND_SOC_WM8580 is not set
+# CONFIG_SND_SOC_WM8711 is not set
+# CONFIG_SND_SOC_WM8728 is not set
+# CONFIG_SND_SOC_WM8731_I2C is not set
+# CONFIG_SND_SOC_WM8731_SPI is not set
+# CONFIG_SND_SOC_WM8737 is not set
+# CONFIG_SND_SOC_WM8741 is not set
+# CONFIG_SND_SOC_WM8750 is not set
+# CONFIG_SND_SOC_WM8753 is not set
+# CONFIG_SND_SOC_WM8770 is not set
+# CONFIG_SND_SOC_WM8776 is not set
+# CONFIG_SND_SOC_WM8782 is not set
+# CONFIG_SND_SOC_WM8804_I2C is not set
+# CONFIG_SND_SOC_WM8804_SPI is not set
+# CONFIG_SND_SOC_WM8903 is not set
+# CONFIG_SND_SOC_WM8904 is not set
+# CONFIG_SND_SOC_WM8940 is not set
+# CONFIG_SND_SOC_WM8960 is not set
+# CONFIG_SND_SOC_WM8961 is not set
+# CONFIG_SND_SOC_WM8962 is not set
+# CONFIG_SND_SOC_WM8974 is not set
+# CONFIG_SND_SOC_WM8978 is not set
+# CONFIG_SND_SOC_WM8985 is not set
+# CONFIG_SND_SOC_ZL38060 is not set
+# CONFIG_SND_SOC_MAX9759 is not set
+# CONFIG_SND_SOC_MT6351 is not set
+# CONFIG_SND_SOC_MT6358 is not set
+# CONFIG_SND_SOC_MT6660 is not set
+# CONFIG_SND_SOC_NAU8315 is not set
+# CONFIG_SND_SOC_NAU8540 is not set
+# CONFIG_SND_SOC_NAU8810 is not set
+# CONFIG_SND_SOC_NAU8821 is not set
+# CONFIG_SND_SOC_NAU8822 is not set
+# CONFIG_SND_SOC_NAU8824 is not set
+# CONFIG_SND_SOC_TPA6130A2 is not set
+# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_VA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_RX_MACRO is not set
+# CONFIG_SND_SOC_LPASS_TX_MACRO is not set
+# end of CODEC drivers
+
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set
+# CONFIG_SND_TEST_COMPONENT is not set
+# CONFIG_SND_VIRTIO is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
 CONFIG_HID_BATTERY_STRENGTH=y
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
 CONFIG_HID_A4TECH=m
+# CONFIG_HID_ACCUTOUCH is not set
 CONFIG_HID_ACRUX=m
+# CONFIG_HID_ACRUX_FF is not set
 CONFIG_HID_APPLE=m
 CONFIG_HID_APPLEIR=m
 CONFIG_HID_ASUS=m
 CONFIG_HID_AUREAL=m
 CONFIG_HID_BELKIN=m
 CONFIG_HID_BETOP_FF=m
+# CONFIG_HID_BIGBEN_FF is not set
 CONFIG_HID_CHERRY=m
 CONFIG_HID_CHICONY=m
 CONFIG_HID_CORSAIR=m
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
 CONFIG_HID_PRODIKEYS=m
 CONFIG_HID_CMEDIA=m
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CREATIVE_SB0540 is not set
 CONFIG_HID_CYPRESS=m
 CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
 CONFIG_HID_ELAN=m
 CONFIG_HID_ELECOM=m
 CONFIG_HID_ELO=m
+# CONFIG_HID_EVISION is not set
 CONFIG_HID_EZKEY=m
+# CONFIG_HID_FT260 is not set
 CONFIG_HID_GEMBIRD=m
 CONFIG_HID_GFRM=m
+# CONFIG_HID_GLORIOUS is not set
 CONFIG_HID_HOLTEK=m
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
 CONFIG_HID_GT683R=m
 CONFIG_HID_KEYTOUCH=m
 CONFIG_HID_KYE=m
 CONFIG_HID_UCLOGIC=m
 CONFIG_HID_WALTOP=m
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
 CONFIG_HID_GYRATION=m
 CONFIG_HID_ICADE=m
 CONFIG_HID_ITE=m
@@ -1631,69 +5956,169 @@ CONFIG_HID_JABRA=m
 CONFIG_HID_TWINHAN=m
 CONFIG_HID_KENSINGTON=m
 CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
 CONFIG_HID_LENOVO=m
+# CONFIG_HID_LETSKETCH is not set
 CONFIG_HID_LOGITECH=m
 CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
 CONFIG_LOGITECH_FF=y
 CONFIG_LOGIRUMBLEPAD2_FF=y
 CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
 CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
+# CONFIG_HID_REDRAGON is not set
 CONFIG_HID_MICROSOFT=m
 CONFIG_HID_MONTEREY=m
 CONFIG_HID_MULTITOUCH=m
+# CONFIG_HID_NINTENDO is not set
 CONFIG_HID_NTI=m
 CONFIG_HID_NTRIG=y
+# CONFIG_HID_NVIDIA_SHIELD is not set
 CONFIG_HID_ORTEK=m
 CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
 CONFIG_HID_PENMOUNT=m
 CONFIG_HID_PETALYNX=m
 CONFIG_HID_PICOLCD=m
+# CONFIG_HID_PICOLCD_FB is not set
+# CONFIG_HID_PICOLCD_BACKLIGHT is not set
+# CONFIG_HID_PICOLCD_LCD is not set
+# CONFIG_HID_PICOLCD_LEDS is not set
+# CONFIG_HID_PICOLCD_CIR is not set
 CONFIG_HID_PLANTRONICS=m
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
 CONFIG_HID_PRIMAX=m
+# CONFIG_HID_RETRODE is not set
 CONFIG_HID_ROCCAT=m
 CONFIG_HID_SAITEK=m
 CONFIG_HID_SAMSUNG=m
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
 CONFIG_HID_SONY=m
 CONFIG_SONY_FF=y
 CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEAM is not set
 CONFIG_HID_STEELSERIES=m
 CONFIG_HID_SUNPLUS=m
 CONFIG_HID_RMI=m
 CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
 CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
 CONFIG_HID_TIVO=m
 CONFIG_HID_TOPSEED=m
+# CONFIG_HID_TOPRE is not set
 CONFIG_HID_THINGM=m
 CONFIG_HID_THRUSTMASTER=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_U2FZERO is not set
 CONFIG_HID_WACOM=m
 CONFIG_HID_WIIMOTE=m
 CONFIG_HID_XINMO=m
 CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
 CONFIG_HID_ZYDACRON=m
 CONFIG_HID_SENSOR_HUB=y
 CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
 CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2221 is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# CONFIG_HID_BPF is not set
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
 CONFIG_HID_PID=y
 CONFIG_USB_HIDDEV=y
+# end of USB HID support
+
 CONFIG_I2C_HID=m
+# CONFIG_I2C_HID_ACPI is not set
+# CONFIG_I2C_HID_OF is not set
+# CONFIG_I2C_HID_OF_ELAN is not set
+# CONFIG_I2C_HID_OF_GOODIX is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
 CONFIG_USB_LED_TRIG=y
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB=y
+CONFIG_USB_PCI=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
 CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_AUTOSUSPEND_DELAY=2
 CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_PCI=y
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
 CONFIG_USB_XHCI_PLATFORM=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
 CONFIG_USB_EHCI_HCD_PLATFORM=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
 CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
 CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
 CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
 CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
 CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
 CONFIG_USB_STORAGE_DATAFAB=m
 CONFIG_USB_STORAGE_FREECOM=m
 CONFIG_USB_STORAGE_ISD200=m
@@ -1707,12 +6132,40 @@ CONFIG_USB_STORAGE_KARMA=m
 CONFIG_USB_STORAGE_CYPRESS_ATACB=m
 CONFIG_USB_STORAGE_ENE_UB6250=m
 CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
 CONFIG_USB_MDC800=m
 CONFIG_USB_MICROTEK=m
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
 CONFIG_USB_DWC2=y
 CONFIG_USB_DWC2_HOST=y
+
+#
+# Gadget/Dual-role mode requires USB Gadget support to be enabled
+#
+# CONFIG_USB_DWC2_PERIPHERAL is not set
+# CONFIG_USB_DWC2_DUAL_ROLE is not set
+# CONFIG_USB_DWC2_PCI is not set
+# CONFIG_USB_DWC2_DEBUG is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
 CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_SIMPLE is not set
 CONFIG_USB_SERIAL_AIRCABLE=m
 CONFIG_USB_SERIAL_ARK3116=m
 CONFIG_USB_SERIAL_BELKIN=m
@@ -1728,6 +6181,7 @@ CONFIG_USB_SERIAL_IPAQ=m
 CONFIG_USB_SERIAL_IR=m
 CONFIG_USB_SERIAL_EDGEPORT=m
 CONFIG_USB_SERIAL_EDGEPORT_TI=m
+# CONFIG_USB_SERIAL_F81232 is not set
 CONFIG_USB_SERIAL_F8153X=m
 CONFIG_USB_SERIAL_GARMIN=m
 CONFIG_USB_SERIAL_IPW=m
@@ -1737,6 +6191,7 @@ CONFIG_USB_SERIAL_KEYSPAN=m
 CONFIG_USB_SERIAL_KLSI=m
 CONFIG_USB_SERIAL_KOBIL_SCT=m
 CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
 CONFIG_USB_SERIAL_MOS7720=m
 CONFIG_USB_SERIAL_MOS7715_PARPORT=y
 CONFIG_USB_SERIAL_MOS7840=m
@@ -1753,14 +6208,21 @@ CONFIG_USB_SERIAL_SIERRAWIRELESS=m
 CONFIG_USB_SERIAL_SYMBOL=m
 CONFIG_USB_SERIAL_TI=m
 CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_WWAN=m
 CONFIG_USB_SERIAL_OPTION=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_SERIAL_OPTICON=m
 CONFIG_USB_SERIAL_XSENS_MT=m
+# CONFIG_USB_SERIAL_WISHBONE is not set
 CONFIG_USB_SERIAL_SSU100=m
 CONFIG_USB_SERIAL_QT2=m
 CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_XR is not set
 CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
 CONFIG_USB_USS720=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
@@ -1768,138 +6230,541 @@ CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
 CONFIG_USB_IDMOUSE=m
 CONFIG_USB_APPLEDISPLAY=m
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
 CONFIG_USB_SISUSBVGA=m
 CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
 CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
 CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_EZUSB_FX2=m
+# CONFIG_USB_HUB_USB251XB is not set
 CONFIG_USB_HSIC_USB3503=m
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_ONBOARD_HUB is not set
 CONFIG_USB_ATM=m
 CONFIG_USB_SPEEDTOUCH=m
 CONFIG_USB_CXACRU=m
 CONFIG_USB_UEAGLEATM=m
 CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# end of USB Physical Layer drivers
+
 CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_GR_UDC is not set
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_SNP_UDC_PLAT is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_BDC_UDC is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_GADGET_XILINX is not set
+# CONFIG_USB_MAX3420_UDC is not set
+# CONFIG_USB_CDNS2_UDC is not set
+# CONFIG_USB_DUMMY_HCD is not set
+# end of USB Peripheral Controller
+
+# CONFIG_USB_CONFIGFS is not set
+
+#
+# USB Gadget precomposed configurations
+#
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_GADGET_TARGET is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_USB_RAW_GADGET is not set
+# end of USB Gadget precomposed configurations
+
 CONFIG_TYPEC=m
 CONFIG_TYPEC_TCPM=m
 CONFIG_TYPEC_TCPCI=m
 CONFIG_TYPEC_RT1711H=m
+# CONFIG_TYPEC_TCPCI_MAXIM is not set
 CONFIG_TYPEC_FUSB302=m
 CONFIG_TYPEC_UCSI=m
+# CONFIG_UCSI_CCG is not set
 CONFIG_UCSI_ACPI=m
+# CONFIG_UCSI_STM32G0 is not set
 CONFIG_TYPEC_TPS6598X=m
+# CONFIG_TYPEC_ANX7411 is not set
+# CONFIG_TYPEC_RT1719 is not set
+# CONFIG_TYPEC_HD3SS3220 is not set
+# CONFIG_TYPEC_STUSB160X is not set
+# CONFIG_TYPEC_WUSB3801 is not set
+
+#
+# USB Type-C Multiplexer/DeMultiplexer Switch support
+#
+# CONFIG_TYPEC_MUX_FSA4480 is not set
+# CONFIG_TYPEC_MUX_GPIO_SBU is not set
 CONFIG_TYPEC_MUX_PI3USB30532=m
+# CONFIG_TYPEC_MUX_NB7VPQ904M is not set
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support
+
+#
+# USB Type-C Alternate Mode drivers
+#
 CONFIG_TYPEC_DP_ALTMODE=m
+# CONFIG_TYPEC_NVIDIA_ALTMODE is not set
+# end of USB Type-C Alternate Mode drivers
+
+CONFIG_USB_ROLE_SWITCH=y
 CONFIG_MMC=m
+CONFIG_PWRSEQ_EMMC=m
+# CONFIG_PWRSEQ_SD8787 is not set
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
 CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
 CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
 CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
 CONFIG_MMC_SDHCI_ACPI=m
 CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set
+# CONFIG_MMC_SDHCI_CADENCE is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_SDHCI_MILBEAUT is not set
 CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SPI is not set
 CONFIG_MMC_CB710=m
 CONFIG_MMC_VIA_SDMMC=m
 CONFIG_MMC_VUB300=m
 CONFIG_MMC_USHC=m
+# CONFIG_MMC_USDHI6ROL0 is not set
 CONFIG_MMC_REALTEK_PCI=m
 CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_CQHCI=m
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
 CONFIG_MMC_SDHCI_XENON=m
+# CONFIG_MMC_SDHCI_OMAP is not set
+# CONFIG_MMC_SDHCI_AM654 is not set
+# CONFIG_SCSI_UFSHCD is not set
 CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
 CONFIG_MSPRO_BLOCK=m
+# CONFIG_MS_BLOCK is not set
+
+#
+# MemoryStick Host Controller Drivers
+#
 CONFIG_MEMSTICK_TIFM_MS=m
 CONFIG_MEMSTICK_JMICRON_38X=m
 CONFIG_MEMSTICK_R592=m
 CONFIG_MEMSTICK_REALTEK_PCI=m
 CONFIG_MEMSTICK_REALTEK_USB=m
+CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_AN30259A is not set
+# CONFIG_LEDS_AW200XX is not set
+# CONFIG_LEDS_AW2013 is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_CR0014114 is not set
+# CONFIG_LEDS_EL15203000 is not set
 CONFIG_LEDS_LM3530=m
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_LM3692X is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
 CONFIG_LEDS_LP3944=m
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP50XX is not set
+# CONFIG_LEDS_LP55XX_COMMON is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_PCA995X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+# CONFIG_LEDS_IS31FL32XX is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
 CONFIG_LEDS_BLINKM=m
+# CONFIG_LEDS_SYSCON is not set
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_SPI_BYTE is not set
+# CONFIG_LEDS_LM3697 is not set
+
+#
+# Flash and Torch LED drivers
+#
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_ONESHOT=m
 CONFIG_LEDS_TRIGGER_DISK=y
+# CONFIG_LEDS_TRIGGER_MTD is not set
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
 CONFIG_LEDS_TRIGGER_TRANSIENT=m
 CONFIG_LEDS_TRIGGER_CAMERA=m
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
 CONFIG_LEDS_TRIGGER_AUDIO=y
+# CONFIG_LEDS_TRIGGER_TTY is not set
+
+#
+# Simple LED drivers
+#
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_MAD=m
 CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_USER_MEM=y
+CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
+CONFIG_INFINIBAND_ADDR_TRANS=y
+CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
+CONFIG_INFINIBAND_VIRT_DMA=y
 CONFIG_INFINIBAND_BNXT_RE=m
 CONFIG_INFINIBAND_CXGB4=m
+# CONFIG_INFINIBAND_EFA is not set
+# CONFIG_INFINIBAND_ERDMA is not set
+# CONFIG_INFINIBAND_IRDMA is not set
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+# CONFIG_INFINIBAND_MTHCA is not set
+# CONFIG_INFINIBAND_OCRDMA is not set
 CONFIG_INFINIBAND_VMWARE_PVRDMA=m
 CONFIG_RDMA_RXE=m
+# CONFIG_RDMA_SIW is not set
 CONFIG_INFINIBAND_IPOIB=m
 CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
 CONFIG_INFINIBAND_SRP=m
 CONFIG_INFINIBAND_SRPT=m
 CONFIG_INFINIBAND_ISER=m
 CONFIG_INFINIBAND_ISERT=m
+# CONFIG_INFINIBAND_RTRS_CLIENT is not set
+# CONFIG_INFINIBAND_RTRS_SERVER is not set
+CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_SYSTOHC is not set
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+CONFIG_RTC_NVMEM=y
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_ABB5ZES3 is not set
+# CONFIG_RTC_DRV_ABEOZ9 is not set
+# CONFIG_RTC_DRV_ABX80X is not set
 CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1307_CENTURY is not set
 CONFIG_RTC_DRV_DS1374=m
+# CONFIG_RTC_DRV_DS1374_WDT is not set
 CONFIG_RTC_DRV_DS1672=m
+# CONFIG_RTC_DRV_HYM8563 is not set
 CONFIG_RTC_DRV_MAX6900=m
+# CONFIG_RTC_DRV_NCT3018Y is not set
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_ISL1208=m
 CONFIG_RTC_DRV_ISL12022=m
+# CONFIG_RTC_DRV_ISL12026 is not set
 CONFIG_RTC_DRV_X1205=m
 CONFIG_RTC_DRV_PCF8523=m
+# CONFIG_RTC_DRV_PCF85063 is not set
+# CONFIG_RTC_DRV_PCF85363 is not set
 CONFIG_RTC_DRV_PCF8563=m
 CONFIG_RTC_DRV_PCF8583=m
 CONFIG_RTC_DRV_M41T80=m
 CONFIG_RTC_DRV_M41T80_WDT=y
 CONFIG_RTC_DRV_BQ32K=m
+# CONFIG_RTC_DRV_S35390A is not set
 CONFIG_RTC_DRV_FM3130=m
+# CONFIG_RTC_DRV_RX8010 is not set
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_RX8025=m
 CONFIG_RTC_DRV_EM3027=m
+# CONFIG_RTC_DRV_RV3028 is not set
+# CONFIG_RTC_DRV_RV3032 is not set
 CONFIG_RTC_DRV_RV8803=m
+# CONFIG_RTC_DRV_SD3078 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T93 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1302 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1343 is not set
+# CONFIG_RTC_DRV_DS1347 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6916 is not set
+# CONFIG_RTC_DRV_R9701 is not set
 CONFIG_RTC_DRV_RX4581=m
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+# CONFIG_RTC_DRV_MCP795 is not set
+CONFIG_RTC_I2C_AND_SPI=y
+
+#
+# SPI and I2C RTC drivers
+#
 CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_DS3232_HWMON=y
+# CONFIG_RTC_DRV_PCF2127 is not set
 CONFIG_RTC_DRV_RV3029C2=m
 # CONFIG_RTC_DRV_RV3029_HWMON is not set
+# CONFIG_RTC_DRV_RX6110 is not set
+
+#
+# Platform RTC drivers
+#
 CONFIG_RTC_DRV_DS1286=m
 CONFIG_RTC_DRV_DS1511=m
 CONFIG_RTC_DRV_DS1553=m
+# CONFIG_RTC_DRV_DS1685_FAMILY is not set
 CONFIG_RTC_DRV_DS1742=m
 CONFIG_RTC_DRV_DS2404=m
 CONFIG_RTC_DRV_EFI=m
 CONFIG_RTC_DRV_STK17TA8=m
+# CONFIG_RTC_DRV_M48T86 is not set
 CONFIG_RTC_DRV_M48T35=m
 CONFIG_RTC_DRV_M48T59=m
 CONFIG_RTC_DRV_MSM6242=m
 CONFIG_RTC_DRV_RP5C01=m
+# CONFIG_RTC_DRV_ZYNQMP is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_CADENCE is not set
+# CONFIG_RTC_DRV_FTRTC010 is not set
 CONFIG_RTC_DRV_LOONGSON=y
+# CONFIG_RTC_DRV_R7301 is not set
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_RTC_DRV_GOLDFISH is not set
 CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_OF=y
+# CONFIG_ALTERA_MSGDMA is not set
+# CONFIG_DW_AXI_DMAC is not set
+# CONFIG_FSL_EDMA is not set
+# CONFIG_INTEL_IDMA64 is not set
+# CONFIG_PLX_DMA is not set
+# CONFIG_XILINX_DMA is not set
+# CONFIG_XILINX_XDMA is not set
+# CONFIG_XILINX_ZYNQMP_DPDMA is not set
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
+CONFIG_DW_DMAC_CORE=m
 CONFIG_DW_DMAC=m
+# CONFIG_DW_DMAC_PCI is not set
+# CONFIG_DW_EDMA is not set
+# CONFIG_SF_PDMA is not set
+
+#
+# DMA Clients
+#
 CONFIG_ASYNC_TX_DMA=y
+# CONFIG_DMATEST is not set
+
+#
+# DMABUF options
+#
+CONFIG_SYNC_FILE=y
+# CONFIG_SW_SYNC is not set
+# CONFIG_UDMABUF is not set
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
+# CONFIG_DMABUF_DEBUG is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_DMABUF_HEAPS is not set
+# CONFIG_DMABUF_SYSFS_STATS is not set
+# end of DMABUF options
+
+CONFIG_UIO=m
 CONFIG_UIO_CIF=m
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_DMEM_GENIRQ=m
 CONFIG_UIO_AEC=m
 CONFIG_UIO_SERCOS3=m
 CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_UIO_NETX is not set
+# CONFIG_UIO_PRUSS is not set
+# CONFIG_UIO_MF624 is not set
 CONFIG_VFIO=m
+CONFIG_VFIO_GROUP=y
+CONFIG_VFIO_CONTAINER=y
 CONFIG_VFIO_NOIOMMU=y
+CONFIG_VFIO_VIRQFD=y
+
+#
+# VFIO support for PCI devices
+#
+CONFIG_VFIO_PCI_CORE=m
+CONFIG_VFIO_PCI_MMAP=y
+CONFIG_VFIO_PCI_INTX=y
 CONFIG_VFIO_PCI=m
+# CONFIG_MLX5_VFIO_PCI is not set
+# end of VFIO support for PCI devices
+
+CONFIG_IRQ_BYPASS_MANAGER=m
+# CONFIG_VIRT_DRIVERS is not set
+CONFIG_VIRTIO_ANCHOR=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI_LIB=y
+CONFIG_VIRTIO_PCI_LIB_LEGACY=y
+CONFIG_VIRTIO_MENU=y
 CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=m
 CONFIG_VIRTIO_MMIO=m
 CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
+# CONFIG_VDPA is not set
+CONFIG_VHOST_IOTLB=m
+CONFIG_VHOST_TASK=y
+CONFIG_VHOST=m
+CONFIG_VHOST_MENU=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_SCSI=m
 CONFIG_VHOST_VSOCK=m
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# end of Microsoft Hyper-V guest support
+
+# CONFIG_GREYBUS is not set
 CONFIG_COMEDI=m
+# CONFIG_COMEDI_DEBUG is not set
+CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
+CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
+# CONFIG_COMEDI_MISC_DRIVERS is not set
+# CONFIG_COMEDI_ISA_DRIVERS is not set
 CONFIG_COMEDI_PCI_DRIVERS=m
 CONFIG_COMEDI_8255_PCI=m
+# CONFIG_COMEDI_ADDI_APCI_1032 is not set
+# CONFIG_COMEDI_ADDI_APCI_1500 is not set
+# CONFIG_COMEDI_ADDI_APCI_1516 is not set
+# CONFIG_COMEDI_ADDI_APCI_1564 is not set
+# CONFIG_COMEDI_ADDI_APCI_16XX is not set
+# CONFIG_COMEDI_ADDI_APCI_2032 is not set
+# CONFIG_COMEDI_ADDI_APCI_2200 is not set
+# CONFIG_COMEDI_ADDI_APCI_3120 is not set
+# CONFIG_COMEDI_ADDI_APCI_3501 is not set
+# CONFIG_COMEDI_ADDI_APCI_3XXX is not set
 CONFIG_COMEDI_ADL_PCI6208=m
 CONFIG_COMEDI_ADL_PCI7X3X=m
 CONFIG_COMEDI_ADL_PCI8164=m
@@ -1911,39 +6776,916 @@ CONFIG_COMEDI_ADV_PCI1723=m
 CONFIG_COMEDI_ADV_PCI1724=m
 CONFIG_COMEDI_ADV_PCI1760=m
 CONFIG_COMEDI_ADV_PCI_DIO=m
+# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set
+# CONFIG_COMEDI_AMPLC_PC236_PCI is not set
+# CONFIG_COMEDI_AMPLC_PC263_PCI is not set
+# CONFIG_COMEDI_AMPLC_PCI224 is not set
+# CONFIG_COMEDI_AMPLC_PCI230 is not set
+# CONFIG_COMEDI_CONTEC_PCI_DIO is not set
+# CONFIG_COMEDI_DAS08_PCI is not set
+# CONFIG_COMEDI_DT3000 is not set
+# CONFIG_COMEDI_DYNA_PCI10XX is not set
+# CONFIG_COMEDI_GSC_HPDI is not set
+# CONFIG_COMEDI_MF6X4 is not set
+# CONFIG_COMEDI_ICP_MULTI is not set
+# CONFIG_COMEDI_DAQBOARD2000 is not set
+# CONFIG_COMEDI_JR3_PCI is not set
+# CONFIG_COMEDI_KE_COUNTER is not set
+# CONFIG_COMEDI_CB_PCIDAS64 is not set
+# CONFIG_COMEDI_CB_PCIDAS is not set
+# CONFIG_COMEDI_CB_PCIDDA is not set
+# CONFIG_COMEDI_CB_PCIMDAS is not set
+# CONFIG_COMEDI_CB_PCIMDDA is not set
+# CONFIG_COMEDI_ME4000 is not set
+# CONFIG_COMEDI_ME_DAQ is not set
+# CONFIG_COMEDI_NI_6527 is not set
+# CONFIG_COMEDI_NI_65XX is not set
+# CONFIG_COMEDI_NI_660X is not set
+# CONFIG_COMEDI_NI_670X is not set
 CONFIG_COMEDI_NI_LABPC_PCI=m
 CONFIG_COMEDI_NI_PCIDIO=m
 CONFIG_COMEDI_NI_PCIMIO=m
+# CONFIG_COMEDI_RTD520 is not set
+# CONFIG_COMEDI_S626 is not set
+CONFIG_COMEDI_MITE=m
+CONFIG_COMEDI_NI_TIOCMD=m
+# CONFIG_COMEDI_USB_DRIVERS is not set
+CONFIG_COMEDI_8254=m
+CONFIG_COMEDI_8255=m
+# CONFIG_COMEDI_8255_SA is not set
+# CONFIG_COMEDI_KCOMEDILIB is not set
+CONFIG_COMEDI_NI_LABPC=m
+CONFIG_COMEDI_NI_TIO=m
+CONFIG_COMEDI_NI_ROUTING=m
+# CONFIG_COMEDI_TESTS is not set
 CONFIG_STAGING=y
+# CONFIG_PRISM2_USB is not set
+# CONFIG_RTL8192U is not set
+# CONFIG_RTLLIB is not set
+# CONFIG_RTL8723BS is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5208 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+
+#
+# IIO staging drivers
+#
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16203 is not set
+# CONFIG_ADIS16240 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD7816 is not set
+# end of Analog to digital converters
+
+#
+# Analog digital bi-direction converters
+#
+# CONFIG_ADT7316 is not set
+# end of Analog digital bi-direction converters
+
+#
+# Direct Digital Synthesis
+#
+# CONFIG_AD9832 is not set
+# CONFIG_AD9834 is not set
+# end of Direct Digital Synthesis
+
+#
+# Network Analyzer, Impedance Converters
+#
+# CONFIG_AD5933 is not set
+# end of Network Analyzer, Impedance Converters
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S1210 is not set
+# end of Resolver to digital converters
+# end of IIO staging drivers
+
+# CONFIG_FB_SM750 is not set
+# CONFIG_STAGING_MEDIA is not set
+# CONFIG_STAGING_BOARD is not set
+# CONFIG_LTE_GDM724X is not set
+# CONFIG_FB_TFT is not set
+# CONFIG_KS7010 is not set
+# CONFIG_PI433 is not set
+# CONFIG_XIL_AXIS_FIFO is not set
+# CONFIG_FIELDBUS_DEV is not set
+# CONFIG_QLGE is not set
+# CONFIG_VME_BUS is not set
+CONFIG_LOONGARCH_PLATFORM_DEVICES=y
+CONFIG_LOONGSON_LAPTOP=y
+# CONFIG_GOLDFISH is not set
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+# CONFIG_LMK04832 is not set
+# CONFIG_COMMON_CLK_MAX9485 is not set
+# CONFIG_COMMON_CLK_SI5341 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI514 is not set
+# CONFIG_COMMON_CLK_SI544 is not set
+# CONFIG_COMMON_CLK_SI570 is not set
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CDCE925 is not set
+# CONFIG_COMMON_CLK_CS2000_CP is not set
+# CONFIG_COMMON_CLK_AXI_CLKGEN is not set
 CONFIG_COMMON_CLK_LOONGSON2=y
+# CONFIG_COMMON_CLK_PWM is not set
+# CONFIG_COMMON_CLK_RS9_PCIE is not set
+# CONFIG_COMMON_CLK_SI521XX is not set
+# CONFIG_COMMON_CLK_VC3 is not set
+# CONFIG_COMMON_CLK_VC5 is not set
+# CONFIG_COMMON_CLK_VC7 is not set
+# CONFIG_COMMON_CLK_FIXED_MMIO is not set
+# CONFIG_XILINX_VCU is not set
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
+# CONFIG_HWSPINLOCK is not set
+
+#
+# Clock Source drivers
+#
+# end of Clock Source drivers
+
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+# end of Generic IOMMU Pagetable Support
+
+# CONFIG_IOMMU_DEBUGFS is not set
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
+# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
+# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
+CONFIG_OF_IOMMU=y
+# CONFIG_IOMMUFD is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_REMOTEPROC is not set
+# end of Remoteproc drivers
+
+#
+# Rpmsg drivers
+#
+# CONFIG_RPMSG_VIRTIO is not set
+# end of Rpmsg drivers
+
+# CONFIG_SOUNDWIRE is not set
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+# end of Amlogic SoC drivers
+
+#
+# Broadcom SoC drivers
+#
+# end of Broadcom SoC drivers
+
+#
+# NXP/Freescale QorIQ SoC drivers
+#
+# end of NXP/Freescale QorIQ SoC drivers
+
+#
+# fujitsu SoC drivers
+#
+# end of fujitsu SoC drivers
+
+#
+# i.MX SoC drivers
+#
+# end of i.MX SoC drivers
+
+#
+# Enable LiteX SoC Builder specific drivers
+#
+# CONFIG_LITEX_SOC_CONTROLLER is not set
+# end of Enable LiteX SoC Builder specific drivers
+
 CONFIG_LOONGSON2_GUTS=y
 CONFIG_LOONGSON2_PM=y
+# CONFIG_WPCM450_SOC is not set
+
+#
+# Qualcomm SoC drivers
+#
+# end of Qualcomm SoC drivers
+
+# CONFIG_SOC_TI is not set
+
+#
+# Xilinx SoC drivers
+#
+# end of Xilinx SoC drivers
+# end of SOC (System On Chip) specific Drivers
+
 CONFIG_PM_DEVFREQ=y
+
+#
+# DEVFREQ Governors
+#
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_DEVFREQ_GOV_PERFORMANCE=y
 CONFIG_DEVFREQ_GOV_POWERSAVE=y
 CONFIG_DEVFREQ_GOV_USERSPACE=y
+# CONFIG_DEVFREQ_GOV_PASSIVE is not set
+
+#
+# DEVFREQ Drivers
+#
+# CONFIG_PM_DEVFREQ_EVENT is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
 CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+# CONFIG_IIO_BUFFER_DMA is not set
+# CONFIG_IIO_BUFFER_DMAENGINE is not set
+# CONFIG_IIO_BUFFER_HW_CONSUMER is not set
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGERED_BUFFER=m
+# CONFIG_IIO_CONFIGFS is not set
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+# CONFIG_IIO_SW_DEVICE is not set
+# CONFIG_IIO_SW_TRIGGER is not set
+# CONFIG_IIO_TRIGGERED_EVENT is not set
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADXL313_I2C is not set
+# CONFIG_ADXL313_SPI is not set
+# CONFIG_ADXL345_I2C is not set
+# CONFIG_ADXL345_SPI is not set
+# CONFIG_ADXL355_I2C is not set
+# CONFIG_ADXL355_SPI is not set
+# CONFIG_ADXL367_SPI is not set
+# CONFIG_ADXL367_I2C is not set
+# CONFIG_ADXL372_SPI is not set
+# CONFIG_ADXL372_I2C is not set
+# CONFIG_BMA180 is not set
+# CONFIG_BMA220 is not set
+# CONFIG_BMA400 is not set
+# CONFIG_BMC150_ACCEL is not set
+# CONFIG_BMI088_ACCEL is not set
+# CONFIG_DA280 is not set
+# CONFIG_DA311 is not set
+# CONFIG_DMARD06 is not set
+# CONFIG_DMARD09 is not set
+# CONFIG_DMARD10 is not set
+# CONFIG_FXLS8962AF_I2C is not set
+# CONFIG_FXLS8962AF_SPI is not set
 CONFIG_HID_SENSOR_ACCEL_3D=m
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+# CONFIG_IIO_KX022A_SPI is not set
+# CONFIG_IIO_KX022A_I2C is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_KXCJK1013 is not set
+# CONFIG_MC3230 is not set
+# CONFIG_MMA7455_I2C is not set
+# CONFIG_MMA7455_SPI is not set
+# CONFIG_MMA7660 is not set
+# CONFIG_MMA8452 is not set
+# CONFIG_MMA9551 is not set
+# CONFIG_MMA9553 is not set
+# CONFIG_MSA311 is not set
+# CONFIG_MXC4005 is not set
+# CONFIG_MXC6255 is not set
+# CONFIG_SCA3000 is not set
+# CONFIG_SCA3300 is not set
+# CONFIG_STK8312 is not set
+# CONFIG_STK8BA50 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD4130 is not set
+# CONFIG_AD7091R5 is not set
+# CONFIG_AD7124 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7266 is not set
+# CONFIG_AD7280 is not set
+# CONFIG_AD7291 is not set
+# CONFIG_AD7292 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7606_IFACE_PARALLEL is not set
+# CONFIG_AD7606_IFACE_SPI is not set
+# CONFIG_AD7766 is not set
+# CONFIG_AD7768_1 is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7949 is not set
+# CONFIG_AD799X is not set
+# CONFIG_ADI_AXI_ADC is not set
+# CONFIG_ENVELOPE_DETECTOR is not set
+# CONFIG_HI8435 is not set
+# CONFIG_HX711 is not set
+# CONFIG_INA2XX_ADC is not set
+# CONFIG_LTC2471 is not set
+# CONFIG_LTC2485 is not set
+# CONFIG_LTC2496 is not set
+# CONFIG_LTC2497 is not set
+# CONFIG_MAX1027 is not set
+# CONFIG_MAX11100 is not set
+# CONFIG_MAX1118 is not set
+# CONFIG_MAX11205 is not set
+# CONFIG_MAX11410 is not set
+# CONFIG_MAX1241 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_MAX9611 is not set
+# CONFIG_MCP320X is not set
+# CONFIG_MCP3422 is not set
+# CONFIG_MCP3911 is not set
+# CONFIG_NAU7802 is not set
+# CONFIG_RICHTEK_RTQ6056 is not set
+# CONFIG_SD_ADC_MODULATOR is not set
+# CONFIG_TI_ADC081C is not set
+# CONFIG_TI_ADC0832 is not set
+# CONFIG_TI_ADC084S021 is not set
+# CONFIG_TI_ADC12138 is not set
+# CONFIG_TI_ADC108S102 is not set
+# CONFIG_TI_ADC128S052 is not set
+# CONFIG_TI_ADC161S626 is not set
+# CONFIG_TI_ADS1015 is not set
+# CONFIG_TI_ADS7924 is not set
+# CONFIG_TI_ADS1100 is not set
+# CONFIG_TI_ADS7950 is not set
+# CONFIG_TI_ADS8344 is not set
+# CONFIG_TI_ADS8688 is not set
+# CONFIG_TI_ADS124S08 is not set
+# CONFIG_TI_ADS131E08 is not set
+# CONFIG_TI_LMP92064 is not set
+# CONFIG_TI_TLC4541 is not set
+# CONFIG_TI_TSC2046 is not set
+# CONFIG_VF610_ADC is not set
+# CONFIG_VIPERBOARD_ADC is not set
+# CONFIG_XILINX_XADC is not set
+# end of Analog to digital converters
+
+#
+# Analog to digital and digital to analog converters
+#
+# CONFIG_AD74115 is not set
+# CONFIG_AD74413R is not set
+# end of Analog to digital and digital to analog converters
+
+#
+# Analog Front Ends
+#
+# CONFIG_IIO_RESCALE is not set
+# end of Analog Front Ends
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+# CONFIG_ADA4250 is not set
+# CONFIG_HMC425 is not set
+# end of Amplifiers
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7746 is not set
+# end of Capacitance to digital converters
+
+#
+# Chemical Sensors
+#
+# CONFIG_ATLAS_PH_SENSOR is not set
+# CONFIG_ATLAS_EZO_SENSOR is not set
+# CONFIG_BME680 is not set
+# CONFIG_CCS811 is not set
+# CONFIG_IAQCORE is not set
+# CONFIG_SCD30_CORE is not set
+# CONFIG_SCD4X is not set
+# CONFIG_SENSIRION_SGP30 is not set
+# CONFIG_SENSIRION_SGP40 is not set
+# CONFIG_SPS30_I2C is not set
+# CONFIG_SENSEAIR_SUNRISE_CO2 is not set
+# CONFIG_VZ89X is not set
+# end of Chemical Sensors
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+# end of Hid Sensor IIO Common
+
+#
+# IIO SCMI Sensors
+#
+# end of IIO SCMI Sensors
+
+#
+# SSP Sensor Common
+#
+# CONFIG_IIO_SSP_SENSORHUB is not set
+# end of SSP Sensor Common
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD3552R is not set
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5592R is not set
+# CONFIG_AD5593R is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_LTC2688 is not set
+# CONFIG_AD5686_SPI is not set
+# CONFIG_AD5696_I2C is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5758 is not set
+# CONFIG_AD5761 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5766 is not set
+# CONFIG_AD5770R is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD7293 is not set
+# CONFIG_AD7303 is not set
+# CONFIG_AD8801 is not set
+# CONFIG_DPOT_DAC is not set
+# CONFIG_DS4424 is not set
+# CONFIG_LTC1660 is not set
+# CONFIG_LTC2632 is not set
+# CONFIG_M62332 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MAX5522 is not set
+# CONFIG_MAX5821 is not set
+# CONFIG_MCP4725 is not set
+# CONFIG_MCP4728 is not set
+# CONFIG_MCP4922 is not set
+# CONFIG_TI_DAC082S085 is not set
+# CONFIG_TI_DAC5571 is not set
+# CONFIG_TI_DAC7311 is not set
+# CONFIG_TI_DAC7612 is not set
+# CONFIG_VF610_DAC is not set
+# end of Digital to analog converters
+
+#
+# IIO dummy driver
+#
+# end of IIO dummy driver
+
+#
+# Filters
+#
+# CONFIG_ADMV8818 is not set
+# end of Filters
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+# end of Clock Generator/Distribution
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+# CONFIG_ADF4371 is not set
+# CONFIG_ADF4377 is not set
+# CONFIG_ADMV1013 is not set
+# CONFIG_ADMV1014 is not set
+# CONFIG_ADMV4420 is not set
+# CONFIG_ADRF6780 is not set
+# end of Phase-Locked Loop (PLL) frequency synthesizers
+# end of Frequency Synthesizers DDS/PLL
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADIS16260 is not set
+# CONFIG_ADXRS290 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_BMG160 is not set
+# CONFIG_FXAS21002C is not set
 CONFIG_HID_SENSOR_GYRO_3D=m
+# CONFIG_MPU3050_I2C is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+# end of Digital gyroscope sensors
+
+#
+# Health Sensors
+#
+
+#
+# Heart Rate Monitors
+#
+# CONFIG_AFE4403 is not set
+# CONFIG_AFE4404 is not set
+# CONFIG_MAX30100 is not set
+# CONFIG_MAX30102 is not set
+# end of Heart Rate Monitors
+# end of Health Sensors
+
+#
+# Humidity sensors
+#
+# CONFIG_AM2315 is not set
+# CONFIG_DHT11 is not set
+# CONFIG_HDC100X is not set
+# CONFIG_HDC2010 is not set
 CONFIG_HID_SENSOR_HUMIDITY=m
+# CONFIG_HTS221 is not set
+# CONFIG_HTU21 is not set
+# CONFIG_SI7005 is not set
+# CONFIG_SI7020 is not set
+# end of Humidity sensors
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16460 is not set
+# CONFIG_ADIS16475 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_BMI160_I2C is not set
+# CONFIG_BMI160_SPI is not set
+# CONFIG_BOSCH_BNO055_I2C is not set
+# CONFIG_FXOS8700_I2C is not set
+# CONFIG_FXOS8700_SPI is not set
+# CONFIG_KMX61 is not set
+# CONFIG_INV_ICM42600_I2C is not set
+# CONFIG_INV_ICM42600_SPI is not set
+# CONFIG_INV_MPU6050_I2C is not set
+# CONFIG_INV_MPU6050_SPI is not set
+# CONFIG_IIO_ST_LSM6DSX is not set
+# CONFIG_IIO_ST_LSM9DS0 is not set
+# end of Inertial measurement units
+
+#
+# Light sensors
+#
+# CONFIG_ACPI_ALS is not set
+# CONFIG_ADJD_S311 is not set
+# CONFIG_ADUX1020 is not set
+# CONFIG_AL3010 is not set
+# CONFIG_AL3320A is not set
+# CONFIG_APDS9300 is not set
+# CONFIG_APDS9960 is not set
+# CONFIG_AS73211 is not set
+# CONFIG_BH1750 is not set
+# CONFIG_BH1780 is not set
+# CONFIG_CM32181 is not set
+# CONFIG_CM3232 is not set
+# CONFIG_CM3323 is not set
+# CONFIG_CM3605 is not set
+# CONFIG_CM36651 is not set
+# CONFIG_GP2AP002 is not set
+# CONFIG_GP2AP020A00F is not set
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_ISL29125 is not set
 CONFIG_HID_SENSOR_ALS=m
 CONFIG_HID_SENSOR_PROX=m
+# CONFIG_JSA1212 is not set
+# CONFIG_ROHM_BU27008 is not set
+# CONFIG_ROHM_BU27034 is not set
+# CONFIG_RPR0521 is not set
+# CONFIG_LTR501 is not set
+# CONFIG_LTRF216A is not set
+# CONFIG_LV0104CS is not set
+# CONFIG_MAX44000 is not set
+# CONFIG_MAX44009 is not set
+# CONFIG_NOA1305 is not set
+# CONFIG_OPT3001 is not set
+# CONFIG_OPT4001 is not set
+# CONFIG_PA12203001 is not set
+# CONFIG_SI1133 is not set
+# CONFIG_SI1145 is not set
+# CONFIG_STK3310 is not set
+# CONFIG_ST_UVIS25 is not set
+# CONFIG_TCS3414 is not set
+# CONFIG_TCS3472 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2591 is not set
+# CONFIG_TSL2772 is not set
+# CONFIG_TSL4531 is not set
+# CONFIG_US5182D is not set
+# CONFIG_VCNL4000 is not set
+# CONFIG_VCNL4035 is not set
+# CONFIG_VEML6030 is not set
+# CONFIG_VEML6070 is not set
+# CONFIG_VL6180 is not set
+# CONFIG_ZOPT2201 is not set
+# end of Light sensors
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8974 is not set
+# CONFIG_AK8975 is not set
+# CONFIG_AK09911 is not set
+# CONFIG_BMC150_MAGN_I2C is not set
+# CONFIG_BMC150_MAGN_SPI is not set
+# CONFIG_MAG3110 is not set
 CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
+# CONFIG_MMC35240 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_SENSORS_HMC5843_I2C is not set
+# CONFIG_SENSORS_HMC5843_SPI is not set
+# CONFIG_SENSORS_RM3100_I2C is not set
+# CONFIG_SENSORS_RM3100_SPI is not set
+# CONFIG_TI_TMAG5273 is not set
+# CONFIG_YAMAHA_YAS530 is not set
+# end of Magnetometer sensors
+
+#
+# Multiplexers
+#
+# CONFIG_IIO_MUX is not set
+# end of Multiplexers
+
+#
+# Inclinometer sensors
+#
 CONFIG_HID_SENSOR_INCLINOMETER_3D=m
 CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+# end of Inclinometer sensors
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_INTERRUPT_TRIGGER is not set
+# CONFIG_IIO_SYSFS_TRIGGER is not set
+# end of Triggers - standalone
+
+#
+# Linear and angular position sensors
+#
+# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set
+# end of Linear and angular position sensors
+
+#
+# Digital potentiometers
+#
+# CONFIG_AD5110 is not set
+# CONFIG_AD5272 is not set
+# CONFIG_DS1803 is not set
+# CONFIG_MAX5432 is not set
+# CONFIG_MAX5481 is not set
+# CONFIG_MAX5487 is not set
+# CONFIG_MCP4018 is not set
+# CONFIG_MCP4131 is not set
+# CONFIG_MCP4531 is not set
+# CONFIG_MCP41010 is not set
+# CONFIG_TPL0102 is not set
+# CONFIG_X9250 is not set
+# end of Digital potentiometers
+
+#
+# Digital potentiostats
+#
+# CONFIG_LMP91000 is not set
+# end of Digital potentiostats
+
+#
+# Pressure sensors
+#
+# CONFIG_ABP060MG is not set
+# CONFIG_BMP280 is not set
+# CONFIG_DLHL60D is not set
+# CONFIG_DPS310 is not set
 CONFIG_HID_SENSOR_PRESS=m
+# CONFIG_HP03 is not set
+# CONFIG_ICP10100 is not set
+# CONFIG_MPL115_I2C is not set
+# CONFIG_MPL115_SPI is not set
+# CONFIG_MPL3115 is not set
+# CONFIG_MPRLS0025PA is not set
+# CONFIG_MS5611 is not set
+# CONFIG_MS5637 is not set
+# CONFIG_IIO_ST_PRESS is not set
+# CONFIG_T5403 is not set
+# CONFIG_HP206C is not set
+# CONFIG_ZPA2326 is not set
+# end of Pressure sensors
+
+#
+# Lightning sensors
+#
+# CONFIG_AS3935 is not set
+# end of Lightning sensors
+
+#
+# Proximity and distance sensors
+#
+# CONFIG_IRSD200 is not set
+# CONFIG_ISL29501 is not set
+# CONFIG_LIDAR_LITE_V2 is not set
+# CONFIG_MB1232 is not set
+# CONFIG_PING is not set
+# CONFIG_RFD77402 is not set
+# CONFIG_SRF04 is not set
+# CONFIG_SX9310 is not set
+# CONFIG_SX9324 is not set
+# CONFIG_SX9360 is not set
+# CONFIG_SX9500 is not set
+# CONFIG_SRF08 is not set
+# CONFIG_VCNL3020 is not set
+# CONFIG_VL53L0X_I2C is not set
+# end of Proximity and distance sensors
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# end of Resolver to digital converters
+
+#
+# Temperature sensors
+#
+# CONFIG_LTC2983 is not set
+# CONFIG_MAXIM_THERMOCOUPLE is not set
 CONFIG_HID_SENSOR_TEMP=m
+# CONFIG_MLX90614 is not set
+# CONFIG_MLX90632 is not set
+# CONFIG_TMP006 is not set
+# CONFIG_TMP007 is not set
+# CONFIG_TMP117 is not set
+# CONFIG_TSYS01 is not set
+# CONFIG_TSYS02D is not set
+# CONFIG_MAX30208 is not set
+# CONFIG_MAX31856 is not set
+# CONFIG_MAX31865 is not set
+# end of Temperature sensors
+
 CONFIG_NTB=m
+# CONFIG_NTB_MSI is not set
+# CONFIG_NTB_IDT is not set
+# CONFIG_NTB_EPF is not set
+# CONFIG_NTB_SWITCHTEC is not set
 CONFIG_NTB_PINGPONG=m
 CONFIG_NTB_TOOL=m
 CONFIG_NTB_PERF=m
 CONFIG_NTB_TRANSPORT=m
 CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+# CONFIG_PWM_DEBUG is not set
+# CONFIG_PWM_ATMEL_TCB is not set
+# CONFIG_PWM_CLK is not set
+# CONFIG_PWM_DWC is not set
+# CONFIG_PWM_FSL_FTM is not set
+# CONFIG_PWM_PCA9685 is not set
+# CONFIG_PWM_XILINX is not set
+
+#
+# IRQ chip support
+#
+CONFIG_IRQCHIP=y
+# CONFIG_AL_FIC is not set
+# CONFIG_XILINX_INTC is not set
+CONFIG_IRQ_LOONGARCH_CPU=y
+CONFIG_LOONGSON_LIOINTC=y
+CONFIG_LOONGSON_EIOINTC=y
+CONFIG_LOONGSON_HTVEC=y
+CONFIG_LOONGSON_PCH_PIC=y
+CONFIG_LOONGSON_PCH_MSI=y
+CONFIG_LOONGSON_PCH_LPC=y
+# end of IRQ chip support
+
+# CONFIG_IPACK_BUS is not set
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_SIMPLE is not set
+# CONFIG_RESET_TI_SYSCON is not set
+# CONFIG_RESET_TI_TPS380X is not set
+
+#
+# PHY Subsystem
+#
+# CONFIG_GENERIC_PHY is not set
+# CONFIG_PHY_CAN_TRANSCEIVER is not set
+
+#
+# PHY drivers for Broadcom platforms
+#
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# end of PHY drivers for Broadcom platforms
+
+# CONFIG_PHY_CADENCE_TORRENT is not set
+# CONFIG_PHY_CADENCE_DPHY is not set
+# CONFIG_PHY_CADENCE_DPHY_RX is not set
+# CONFIG_PHY_CADENCE_SIERRA is not set
+# CONFIG_PHY_CADENCE_SALVO is not set
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_LAN966X_SERDES is not set
+# CONFIG_PHY_CPCAP_USB is not set
+# CONFIG_PHY_MAPPHONE_MDM6600 is not set
+# CONFIG_PHY_OCELOT_SERDES is not set
+# CONFIG_PHY_SAMSUNG_USB2 is not set
+# end of PHY Subsystem
+
 CONFIG_POWERCAP=y
+# CONFIG_DTPM is not set
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+# CONFIG_DWC_PCIE_PMU is not set
+# end of Performance monitor support
+
+CONFIG_RAS=y
 CONFIG_USB4=m
+# CONFIG_USB4_DEBUGFS_WRITE is not set
+# CONFIG_USB4_DMA_TEST is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID_BINDER_IPC is not set
+# end of Android
+
+# CONFIG_LIBNVDIMM is not set
 CONFIG_DAX=y
 CONFIG_DEV_DAX=m
+CONFIG_DEV_DAX_KMEM=m
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+
+#
+# Layout Types
+#
+# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set
+# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set
+# end of Layout Types
+
+# CONFIG_NVMEM_RMEM is not set
+# CONFIG_NVMEM_U_BOOT_ENV is not set
+
+#
+# HW tracing support
+#
+# CONFIG_STM is not set
+# CONFIG_INTEL_TH is not set
+# end of HW tracing support
+
+# CONFIG_FPGA is not set
+# CONFIG_FSI is not set
+CONFIG_PM_OPP=y
+# CONFIG_SIOX is not set
+# CONFIG_SLIMBUS is not set
+# CONFIG_INTERCONNECT is not set
+# CONFIG_COUNTER is not set
+# CONFIG_MOST is not set
+# CONFIG_PECI is not set
+# CONFIG_HTE is not set
+# end of Device Drivers
+
+#
+# File systems
+#
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_FS_IOMAP=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_LEGACY_DIRECT_IO=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -1951,105 +7693,289 @@ CONFIG_EXT2_FS_SECURITY=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
 CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
 CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_ONLINE_SCRUB is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
 CONFIG_GFS2_FS=m
 CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
+CONFIG_OCFS2_FS_O2CB=m
+CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
+CONFIG_OCFS2_FS_STATS=y
+CONFIG_OCFS2_DEBUG_MASKLOG=y
+# CONFIG_OCFS2_DEBUG_FS is not set
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
+# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
+# CONFIG_BTRFS_DEBUG is not set
+# CONFIG_BTRFS_ASSERT is not set
+# CONFIG_BTRFS_FS_REF_VERIFY is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_ZONEFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_FS_ENCRYPTION is not set
+# CONFIG_FS_VERITY is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
 CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
+# CONFIG_VIRT_FUSE is not set
 CONFIG_OVERLAY_FS=y
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 # CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set
 CONFIG_OVERLAY_FS_INDEX=y
 CONFIG_OVERLAY_FS_XINO_AUTO=y
 CONFIG_OVERLAY_FS_METACOPY=y
+# CONFIG_OVERLAY_FS_DEBUG is not set
+
+#
+# Caches
+#
+CONFIG_NETFS_SUPPORT=y
+CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=m
 CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_DEBUG is not set
 CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_ERROR_INJECTION is not set
+# CONFIG_CACHEFILES_ONDEMAND is not set
+# end of Caches
+
+#
+# CD-ROM/DVD Filesystems
+#
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
+# end of CD-ROM/DVD Filesystems
+
+#
+# DOS/FAT/EXFAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=936
 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+# CONFIG_FAT_DEFAULT_UTF8 is not set
 CONFIG_EXFAT_FS=m
+CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
 CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
 CONFIG_NTFS3_FS=m
 CONFIG_NTFS3_64BIT_CLUSTER=y
 CONFIG_NTFS3_LZX_XPRESS=y
+# CONFIG_NTFS3_FS_POSIX_ACL is not set
+# end of DOS/FAT/EXFAT/NT Filesystems
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
 CONFIG_PROC_VMCORE_DEVICE_DUMP=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_TMPFS_INODE64 is not set
+# CONFIG_TMPFS_QUOTA is not set
+CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
 CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
+# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set
 CONFIG_CONFIGFS_FS=y
 CONFIG_EFIVAR_FS=y
+# end of Pseudo filesystems
+
+CONFIG_MISC_FILESYSTEMS=y
 CONFIG_ORANGEFS_FS=m
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
 CONFIG_UBIFS_FS=m
 CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+CONFIG_UBIFS_FS_ZSTD=y
+# CONFIG_UBIFS_ATIME_SUPPORT is not set
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_SECURITY=y
+# CONFIG_UBIFS_FS_AUTHENTICATION is not set
 CONFIG_CRAMFS=m
+CONFIG_CRAMFS_BLOCKDEV=y
+# CONFIG_CRAMFS_MTD is not set
 CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_FILE_CACHE is not set
 CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
 CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
 CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
 CONFIG_MINIX_FS=m
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
 CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
 CONFIG_PSTORE=m
+CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
+CONFIG_PSTORE_COMPRESS=y
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_PMSG is not set
+# CONFIG_PSTORE_FTRACE is not set
+# CONFIG_PSTORE_RAM is not set
+# CONFIG_PSTORE_BLK is not set
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
 CONFIG_EROFS_FS=m
+# CONFIG_EROFS_FS_DEBUG is not set
+CONFIG_EROFS_FS_XATTR=y
+CONFIG_EROFS_FS_POSIX_ACL=y
+CONFIG_EROFS_FS_SECURITY=y
+CONFIG_EROFS_FS_ZIP=y
 CONFIG_EROFS_FS_ZIP_LZMA=y
+# CONFIG_EROFS_FS_ZIP_DEFLATE is not set
 CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y
+CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 # CONFIG_NFS_V2 is not set
 CONFIG_NFS_V3=m
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=m
+# CONFIG_NFS_SWAP is not set
 CONFIG_NFS_V4_1=y
 CONFIG_NFS_V4_2=y
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_FLEXFILE_LAYOUT=m
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
+# CONFIG_NFS_V4_1_MIGRATION is not set
+CONFIG_NFS_V4_SECURITY_LABEL=y
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFS_DEBUG=y
 # CONFIG_NFS_DISABLE_UDP_SUPPORT is not set
+CONFIG_NFS_V4_2_READ_PLUS=y
 CONFIG_NFSD=y
+# CONFIG_NFSD_V2 is not set
 CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
+CONFIG_NFSD_PNFS=y
 CONFIG_NFSD_BLOCKLAYOUT=y
 CONFIG_NFSD_SCSILAYOUT=y
 CONFIG_NFSD_FLEXFILELAYOUT=y
 CONFIG_NFSD_V4_2_INTER_SSC=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_GRACE_PERIOD=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_NFS_V4_2_SSC_HELPER=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_SUNRPC_BACKCHANNEL=y
+CONFIG_RPCSEC_GSS_KRB5=y
+CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set
 CONFIG_SUNRPC_DEBUG=y
+CONFIG_SUNRPC_XPRT_RDMA=m
 CONFIG_CEPH_FS=m
 CONFIG_CEPH_FSCACHE=y
 CONFIG_CEPH_FS_POSIX_ACL=y
 CONFIG_CEPH_FS_SECURITY_LABEL=y
 CONFIG_CIFS=m
 # CONFIG_CIFS_STATS2 is not set
+CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
 # CONFIG_CIFS_DEBUG is not set
 CONFIG_CIFS_DFS_UPCALL=y
+# CONFIG_CIFS_SWN_UPCALL is not set
+# CONFIG_CIFS_SMB_DIRECT is not set
+# CONFIG_CIFS_FSCACHE is not set
+# CONFIG_SMB_SERVER is not set
+CONFIG_SMBFS=m
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
 CONFIG_9P_FS=y
+# CONFIG_9P_FS_POSIX_ACL is not set
+# CONFIG_9P_FS_SECURITY is not set
+CONFIG_NLS=y
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_CODEPAGE_737=m
@@ -2100,44 +8026,207 @@ CONFIG_NLS_MAC_INUIT=m
 CONFIG_NLS_MAC_ROMANIAN=m
 CONFIG_NLS_MAC_TURKISH=m
 CONFIG_NLS_UTF8=y
+CONFIG_NLS_UCS2_UTILS=m
 CONFIG_DLM=m
 CONFIG_DLM_DEBUG=y
+# CONFIG_UNICODE is not set
+CONFIG_IO_WQ=y
+# end of File systems
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_REQUEST_CACHE is not set
 CONFIG_PERSISTENT_KEYRINGS=y
 CONFIG_TRUSTED_KEYS=y
+CONFIG_TRUSTED_KEYS_TPM=y
+CONFIG_ENCRYPTED_KEYS=y
+# CONFIG_USER_DECRYPTED_DATA is not set
 CONFIG_KEY_DH_OPERATIONS=y
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
 CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_INFINIBAND=y
 CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_PATH=y
 CONFIG_LSM_MMAP_MIN_ADDR=65535
 CONFIG_HARDENED_USERCOPY=y
+# CONFIG_FORTIFY_SOURCE is not set
+# CONFIG_STATIC_USERMODEHELPER is not set
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
+CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
+# CONFIG_SECURITY_SELINUX_DEBUG is not set
+# CONFIG_SECURITY_SMACK is not set
+# CONFIG_SECURITY_TOMOYO is not set
 CONFIG_SECURITY_APPARMOR=y
+# CONFIG_SECURITY_APPARMOR_DEBUG is not set
+CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y
+CONFIG_SECURITY_APPARMOR_HASH=y
+CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
+CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y
+CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y
+# CONFIG_SECURITY_LOADPIN is not set
 CONFIG_SECURITY_YAMA=y
+# CONFIG_SECURITY_SAFESETID is not set
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
+# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
+# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
+# CONFIG_SECURITY_LANDLOCK is not set
+CONFIG_INTEGRITY=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_TRUSTED_KEYRING=y
 CONFIG_INTEGRITY_PLATFORM_KEYRING=y
+# CONFIG_INTEGRITY_MACHINE_KEYRING is not set
+CONFIG_LOAD_UEFI_KEYS=y
+CONFIG_INTEGRITY_AUDIT=y
 CONFIG_IMA=y
+CONFIG_IMA_MEASURE_PCR_IDX=10
+CONFIG_IMA_LSM_RULES=y
+CONFIG_IMA_NG_TEMPLATE=y
+# CONFIG_IMA_SIG_TEMPLATE is not set
+CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
+# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
+# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
+# CONFIG_IMA_DEFAULT_HASH_SM3 is not set
+CONFIG_IMA_DEFAULT_HASH="sha256"
+# CONFIG_IMA_WRITE_POLICY is not set
 CONFIG_IMA_READ_POLICY=y
 CONFIG_IMA_APPRAISE=y
+# CONFIG_IMA_ARCH_POLICY is not set
+# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set
+CONFIG_IMA_APPRAISE_BOOTPARAM=y
+# CONFIG_IMA_APPRAISE_MODSIG is not set
+# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set
+# CONFIG_IMA_BLACKLIST_KEYRING is not set
 CONFIG_IMA_LOAD_X509=y
+CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der"
+# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set
+CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
+CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
+# CONFIG_IMA_DISABLE_HTABLE is not set
 CONFIG_EVM=y
+CONFIG_EVM_ATTR_FSUUID=y
+# CONFIG_EVM_ADD_XATTRS is not set
 CONFIG_EVM_LOAD_X509=y
+CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der"
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_APPARMOR is not set
 CONFIG_DEFAULT_SECURITY_DAC=y
 CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf"
+
+#
+# Kernel hardening options
+#
+
+#
+# Memory initialization
+#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
+# CONFIG_INIT_STACK_NONE is not set
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+CONFIG_INIT_STACK_ALL_ZERO=y
+# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
+# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
+# end of Memory initialization
+
+#
+# Hardening of kernel data structures
+#
+CONFIG_LIST_HARDENED=y
+# CONFIG_BUG_ON_DATA_CORRUPTION is not set
+# end of Hardening of kernel data structures
+
+CONFIG_CC_HAS_RANDSTRUCT=y
+CONFIG_RANDSTRUCT_NONE=y
+# CONFIG_RANDSTRUCT_FULL is not set
+# CONFIG_RANDSTRUCT_PERFORMANCE is not set
+# end of Kernel hardening options
+# end of Security options
+
+CONFIG_XOR_BLOCKS=y
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
 CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API"
+# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG2=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_SKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_KPP=y
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ENGINE=m
+# end of Crypto core or helper
+
+#
+# Public-key cryptography
+#
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_DH=y
+# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set
+CONFIG_CRYPTO_ECC=m
+CONFIG_CRYPTO_ECDH=m
+# CONFIG_CRYPTO_ECDSA is not set
+# CONFIG_CRYPTO_ECRDSA is not set
 CONFIG_CRYPTO_SM2=y
+# CONFIG_CRYPTO_CURVE25519 is not set
+# end of Public-key cryptography
+
+#
+# Block ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES_TI is not set
 CONFIG_CRYPTO_ANUBIS=m
+# CONFIG_CRYPTO_ARIA is not set
 CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
 CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST_COMMON=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
@@ -2145,59 +8234,618 @@ CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=y
+CONFIG_CRYPTO_SM4_GENERIC=y
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+# end of Block ciphers
+
+#
+# Length-preserving ciphers and modes
+#
+# CONFIG_CRYPTO_ADIANTUM is not set
 CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CHACHA20=m
+CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_HCTR2 is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
 CONFIG_CRYPTO_LRW=m
+# CONFIG_CRYPTO_OFB is not set
 CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+# end of Length-preserving ciphers and modes
+
+#
+# AEAD (authenticated encryption with associated data) ciphers
+#
+# CONFIG_CRYPTO_AEGIS128 is not set
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_GENIV=y
 CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_ESSIV=m
+# end of AEAD (authenticated encryption with associated data) ciphers
+
+#
+# Hashes, digests, and MACs
+#
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_POLY1305=m
 CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+CONFIG_CRYPTO_SM3=y
+CONFIG_CRYPTO_SM3_GENERIC=y
+# CONFIG_CRYPTO_STREEBOG is not set
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_XXHASH=y
+# end of Hashes, digests, and MACs
+
+#
+# CRCs (cyclic redundancy checks)
+#
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRC64_ROCKSOFT=m
+# end of CRCs (cyclic redundancy checks)
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=y
+# end of Compression
+
+#
+# Random number generation
+#
 CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set
+CONFIG_CRYPTO_KDF800108_CTR=y
+# end of Random number generation
+
+#
+# Userspace interface
+#
+CONFIG_CRYPTO_USER_API=y
 CONFIG_CRYPTO_USER_API_HASH=y
 CONFIG_CRYPTO_USER_API_SKCIPHER=y
 CONFIG_CRYPTO_USER_API_RNG=y
+# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
 CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y
+# CONFIG_CRYPTO_STATS is not set
+# end of Userspace interface
+
+CONFIG_CRYPTO_HASH_INFO=y
+
+#
+# Accelerated Cryptographic Algorithms for CPU (loongarch)
+#
 CONFIG_CRYPTO_CRC32_LOONGARCH=m
+# end of Accelerated Cryptographic Algorithms for CPU (loongarch)
+
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
+# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
+CONFIG_CRYPTO_DEV_NITROX=m
 CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set
+# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set
+# CONFIG_CRYPTO_DEV_QAT_C62X is not set
+# CONFIG_CRYPTO_DEV_QAT_4XXX is not set
+# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set
+# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set
+# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set
 CONFIG_CRYPTO_DEV_CHELSIO=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
+# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
+# CONFIG_CRYPTO_DEV_CCREE is not set
+# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_PKCS7_TEST_KEY is not set
 CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
+
+#
+# Certificates for signature checking
+#
+CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
+CONFIG_MODULE_SIG_KEY_TYPE_RSA=y
+# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
 CONFIG_SECONDARY_TRUSTED_KEYRING=y
CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_SYSTEM_REVOCATION_KEYS="" +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +# CONFIG_DMA_RESTRICTED_POOL is not set CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +# CONFIG_FORCE_NR_CPUS is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y 
+CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_LIB_ASHLDI3=y +CONFIG_GENERIC_LIB_ASHRDI3=y +CONFIG_GENERIC_LIB_LSHRDI3=y +CONFIG_GENERIC_LIB_CMPDI2=y +CONFIG_GENERIC_LIB_UCMPDI2=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# CONFIG_PRINTK_TIME=y CONFIG_PRINTK_CALLER=y +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 CONFIG_BOOT_PRINTK_DELAY=y CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_VMLINUX_MAP is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_ARCH_DISABLE_KASAN_INLINE=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +# CONFIG_KFENCE is not set +# end of Memory Debugging + CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# CONFIG_PANIC_ON_OOPS=y 
+CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set +CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# # CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set # CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +# CONFIG_FUNCTION_PROFILER is not set +# CONFIG_STACK_TRACER is not set +# 
CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y # CONFIG_STRICT_DEVMEM is not set + +# +# loongarch Debugging +# +# CONFIG_UNWINDER_GUESS is not set +CONFIG_UNWINDER_PROLOGUE=y +# end of loongarch Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FUNCTION_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set # CONFIG_RUNTIME_TESTING_MENU is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking -- Gitee From 2a94b1471e2fb2387f8199c8e7fbbd150da587af Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Fri, 22 Jul 2022 16:23:56 +0800 Subject: [PATCH 691/953] anolis: arm_mpam: Fix L3 cache size display error in resctrl fs ANBZ: #8686 The value of L3 cache in the size file is 0 now. $ cat /sys/fs/resctrl/size MB:0=100;1=100 L3:0=0;1=0 We fix it in this patch. Signed-off-by: Xin Hao Tested-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index c7ebb4d37b47..6d25e98a757d 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -739,6 +739,8 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) res->resctrl_res.rid == RDT_RESOURCE_L3) { bool has_csu = cache_has_usable_csu(class); + r->cache_level = class->level; + /* TODO: Scaling is not yet supported */ r->cache.cbm_len = class->props.cpbm_wd; r->cache.arch_has_sparse_bitmasks = true; -- Gitee From 1355123a48e6e2ff1b6677871fae26570ed7661d Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 25 Jul 2022 11:33:06 +0800 Subject: [PATCH 692/953] anolis: arm_mpam: Remove the zero padding for MB percentage showing in schemata ANBZ: #8686 Zero padding is used for MB percentage showing in the schemata file on ARM64 machines, which is different from the x86_64 machines. This patch removes it. 
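For reference, the patch below swaps the MB format string from "%d=%0*u"
to "%d=%*u"; the difference is plain printf-style format semantics:
"%0*u" zero-pads to the field width taken from the argument list, while
"%*u" pads with spaces. A minimal user-space illustration (not driver
code):

  #include <stdio.h>

  int main(void)
  {
          /* field width 3 (as in r->data_width), value 5 */
          printf("%d=%0*u\n", 0, 3, 5);   /* old format: "0=005" */
          printf("%d=%*u\n", 0, 3, 5);    /* new format: "0=  5" */
          return 0;
  }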
Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 6d25e98a757d..41ae848b6f61 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -790,7 +790,7 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) struct mpam_props *cprops = &class->props; /* TODO: kill these properties off as they are derivatives */ - r->format_str = "%d=%0*u"; + r->format_str = "%d=%*u"; r->fflags = RFTYPE_RES_MB; r->default_ctrl = MAX_MBA_BW; r->data_width = 3; -- Gitee From 320f9b9ce63ffe4454a7310bf0fcb44bf730fccc Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Fri, 22 Jul 2022 17:36:53 +0800 Subject: [PATCH 693/953] anolis: arm_mpam: Fix MB default percentage error in schemata ANBZ: #8686 When creating a new group, the default percentage of MB resource in schemata is 97 instead of 100. This patch fixes it. Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 41ae848b6f61..dfecac759f41 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -539,10 +539,17 @@ static u32 mbw_max_to_percent(u16 mbw_max, struct mpam_props *cprops) for (bit = 15; bit; bit--) { if (mbw_max & BIT(bit)) - value += MAX_MBA_BW / divisor; + /* + * Left shift by 16 bits to preserve the precision of + * the division operation. + */ + value += (MAX_MBA_BW << 16) / divisor; divisor <<= 1; } + /* Use the upper bound of the fixed-point fraction. */ + value = (value + (MAX_MBA_BW << (16 - cprops->bwa_wd))) >> 16; + return value; } @@ -561,23 +568,29 @@ static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops) static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) { u8 bit; - u32 divisor = 2, value = 0; + u32 pc_ls, divisor = 2, value = 0; if (WARN_ON_ONCE(cprops->bwa_wd > 15)) return MAX_MBA_BW; + /* + * Left shift by 16 bits to preserve the precision of the division + * operation. + */ + pc_ls = (u32) pc << 16; + for (bit = 15; bit; bit--) { - if (pc >= MAX_MBA_BW / divisor) { - pc -= MAX_MBA_BW / divisor; + if (pc_ls >= (MAX_MBA_BW << 16) / divisor) { + pc_ls -= (MAX_MBA_BW << 16) / divisor; value |= BIT(bit); } divisor <<= 1; - if (!pc || !(MAX_MBA_BW / divisor)) + if (!pc_ls || !((MAX_MBA_BW << 16) / divisor)) break; } - value &= GENMASK(15, 15 - cprops->bwa_wd); + value &= GENMASK(15, 15 - cprops->bwa_wd + 1); return value; } -- Gitee From 433e333fd01b387786354e261f7c840e53932645 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 25 Jul 2022 18:00:07 +0800 Subject: [PATCH 694/953] anolis: arm_mpam: Fix the inaccurate MB granularity in resctrl info directory ANBZ: #8686 There is a problem with how the current MB granularity is calculated. By definition, cprops->bwa_wd should be used as an exponential rather than a linear value. 
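As a worked example (a sketch, assuming MAX_MBA_BW is the 100% scale):
a 2-bit fixed-point fraction expresses steps of 1/4, so the granularity
should be 100 / 2^2 = 25, while the old linear formula returned
100 / (2 + 1) = 33:

  /* Sketch, assuming MAX_MBA_BW == 100 and cprops->bwa_wd == 2 */
  u32 old_gran = MAX_MBA_BW / (cprops->bwa_wd + 1); /* 100 / 3 = 33, wrong */
  u32 new_gran = MAX_MBA_BW / BIT(cprops->bwa_wd);  /* 100 / 4 = 25, right */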
Besides, since the minimum granularity cannot be less than 1% with current representation, when the granularity is too fine, 1 is returned. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index dfecac759f41..c53b89557b4d 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -515,7 +515,7 @@ static u32 get_mba_granularity(struct mpam_props *cprops) * bwa_wd is the number of bits implemented in the 0.xxx * fixed point fraction. 1 bit is 50%, 2 is 25% etc. */ - return MAX_MBA_BW / (cprops->bwa_wd + 1); + return max_t(u32, 1, (MAX_MBA_BW / BIT(cprops->bwa_wd))); } return 0; -- Gitee From 3aacac8022028ce1e3d833b8c87714d30393eaf6 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 27 Jul 2022 11:13:26 +0800 Subject: [PATCH 695/953] anolis: arm_mpam: Limit MB percentage in schemata to multiples of granularity ANBZ: #8686 The current setting of MB percentage is not tied to MB granularity. This patch sets MB percentage to a multiple of the bandwidth granularity, and limits the minimum bandwidth to the granularity value. Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index c53b89557b4d..06af2d77b93e 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -568,11 +568,17 @@ static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops) static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) { u8 bit; - u32 pc_ls, divisor = 2, value = 0; + u32 granularity, pc_ls, divisor = 2, value = 0; if (WARN_ON_ONCE(cprops->bwa_wd > 15)) return MAX_MBA_BW; + /* Set the pc value to be a multiple of granularity. */ + granularity = get_mba_granularity(cprops); + pc = roundup(pc, (u8) granularity); + if (pc > 100) + pc = 100; + /* * Left shift by 16 bits to preserve the precision of the division * operation. @@ -811,6 +817,7 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) r->membw.delay_linear = true; r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; r->membw.bw_gran = get_mba_granularity(cprops); + r->membw.min_bw = r->membw.bw_gran; /* Round up to at least 1% */ if (!r->membw.bw_gran) -- Gitee From e647511070b54094253de8164220dde6b9c38257 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 24 Aug 2022 16:41:57 +0800 Subject: [PATCH 696/953] anolis: arm_mpam: Fix the problem that error interrupt handling cannot exit normally ANBZ: #8686 Current MPAM driver uses threaded irq when handling shared error interrupt. When an error occurs, the handler thread calls free_irq() to unregister MPAM's irqs while free_irq() needs to wait for the handler thread to exit, which causes a deadlock. To fix this problem, we change the bottom half from threaded irq to workqueue. 
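A minimal sketch of the resulting top/bottom-half split (the handler
name is simplified here; the real handler also decodes and reports the
error first):

  static irqreturn_t msc_error_handler(int irq, void *dev_id)
  {
          struct mpam_msc *msc = dev_id;

          /* Hard-IRQ context: mask the source so the error cannot
           * re-fire and re-queue the work while it is being handled. */
          mpam_disable_msc_ecr(msc);

          /* Defer to process context. Unlike a threaded handler, the
           * work item is not what free_irq() waits for, so it can call
           * free_irq() without deadlocking against itself. */
          schedule_work(&mpam_broken_work);

          return IRQ_HANDLED;
  }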
Signed-off-by: Shawn Wang
Signed-off-by: Xin Hao
Reviewed-by: Xin Hao
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/656
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/platform/mpam/mpam_devices.c | 33 ++++++++++------------------
 1 file changed, 12 insertions(+), 21 deletions(-)

diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index 32532a918115..f5f23725d13a 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -1964,13 +1964,15 @@ static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc)
 	pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n",
 	       msc->id, mpam_errcode_names[errcode], partid, pmg, ris);

-	if (irq_is_percpu(irq)) {
-		mpam_disable_msc_ecr(msc);
-		schedule_work(&mpam_broken_work);
-		return IRQ_HANDLED;
-	}
+	/*
+	 * To prevent this interrupt from repeatedly cancelling the scheduled
+	 * work to disable mpam, disable the error interrupt.
+	 */
+	mpam_disable_msc_ecr(msc);

-	return IRQ_WAKE_THREAD;
+	schedule_work(&mpam_broken_work);
+
+	return IRQ_HANDLED;
 }

 static irqreturn_t mpam_ppi_handler(int irq, void *dev_id)
@@ -1987,8 +1989,6 @@ static irqreturn_t mpam_spi_handler(int irq, void *dev_id)
 	return __mpam_irq_handler(irq, msc);
 }

-static irqreturn_t mpam_disable_thread(int irq, void *dev_id);
-
 static int mpam_register_irqs(void)
 {
 	int err, irq;
@@ -2018,11 +2018,9 @@ static int mpam_register_irqs(void)
 						   true);
 			mutex_unlock(&msc->lock);
 		} else {
-			err = devm_request_threaded_irq(&msc->pdev->dev, irq,
-							&mpam_spi_handler,
-							&mpam_disable_thread,
-							IRQF_SHARED,
-							"mpam:msc:error", msc);
+			err = devm_request_irq(&msc->pdev->dev, irq,
+					       &mpam_spi_handler, IRQF_SHARED,
+					       "mpam:msc:error", msc);
 			if (err)
 				return err;
 		}
@@ -2228,7 +2226,7 @@ void mpam_reset_class(struct mpam_class *class)
  * All of MPAMs errors indicate a software bug, restore any modified
  * controls to their reset values.
  */
-static irqreturn_t mpam_disable_thread(int irq, void *dev_id)
+void mpam_disable(struct work_struct *ignored)
 {
 	int idx;
 	struct mpam_class *class;
@@ -2250,13 +2248,6 @@
 	list_for_each_entry_rcu(class, &mpam_classes, classes_list)
 		mpam_reset_class(class);
 	srcu_read_unlock(&mpam_srcu, idx);
-
-	return IRQ_HANDLED;
-}
-
-void mpam_disable(struct work_struct *ignored)
-{
-	mpam_disable_thread(0, NULL);
 }

 /*
--
Gitee


From 929621a52ae7ef79012bf7e7788b3beaab6a7a6e Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Mon, 28 Nov 2022 20:22:26 +0800
Subject: [PATCH 697/953] anolis: arm_mpam: Clear mpam_msc properly when
 driver probing failed

ANBZ: #8686

When the mpam_acpi_parse_resources() call in mpam_msc_drv_probe() fails,
the current implementation does not delete the failed msc device from
the mpam_all_msc list. This causes the following error when a new msc
device is added to mpam_all_msc:

  list_add corruption. next->prev should be prev (ffff8000120a3228),
  but was ffff040000d99080. (next=ffff040000d99080)
  ...
  Call trace:
   __list_add_valid+0x98/0xb0
   mpam_msc_drv_probe+0x374/0x508
   platform_drv_probe+0x58/0xa8
   really_probe+0xc0/0x420
   __driver_probe_device+0x114/0x188
   driver_probe_device+0x44/0xf8
  ...

Fix it by calling mpam_msc_drv_remove() when driver probing failed.
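In other words, the old error path freed the msc while leaving it
linked (a sketch of the bug, not driver code):

  /* probe: */
  list_add_rcu(&msc->glbl_list, &mpam_all_msc);
  ...
  /* old error path: msc freed but still on mpam_all_msc */
  devm_kfree(&pdev->dev, msc);
  /* the next probe then does a list_add() past a dangling entry and
   * trips the list debug check quoted above */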
Signed-off-by: Shawn Wang
Reviewed-by: Xin Hao
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/925
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/platform/mpam/mpam_devices.c | 43 ++++++++++++++--------------
 1 file changed, 21 insertions(+), 22 deletions(-)

diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index f5f23725d13a..12015951a63d 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -1688,6 +1688,24 @@ static void mpam_pcc_rx_callback(struct mbox_client *cl, void *msg)
 	/* TODO: wake up tasks blocked on this MSC's PCC channel */
 }

+static int mpam_msc_drv_remove(struct platform_device *pdev)
+{
+	struct mpam_msc *msc = platform_get_drvdata(pdev);
+
+	if (!msc)
+		return 0;
+
+	mutex_lock(&mpam_list_lock);
+	mpam_num_msc--;
+	platform_set_drvdata(pdev, NULL);
+	list_del_rcu(&msc->glbl_list);
+	mpam_msc_destroy(msc);
+	synchronize_srcu(&mpam_srcu);
+	mutex_unlock(&mpam_list_lock);
+
+	return 0;
+}
+
 static int mpam_msc_drv_probe(struct platform_device *pdev)
 {
 	int err;
@@ -1733,7 +1751,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 		err = mpam_msc_setup_error_irq(msc);
 		if (err) {
-			devm_kfree(&pdev->dev, msc);
 			msc = ERR_PTR(err);
 			break;
 		}
@@ -1749,7 +1766,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 						   &msc_res);
 		if (IS_ERR(io)) {
 			pr_err("Failed to map MSC base address\n");
-			devm_kfree(&pdev->dev, msc);
 			err = PTR_ERR(io);
 			break;
 		}
@@ -1766,7 +1782,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 						  msc->pcc_subspace_id);
 		if (IS_ERR(msc->pcc_chan)) {
 			pr_err("Failed to request MSC PCC channel\n");
-			devm_kfree(&pdev->dev, msc);
 			err = PTR_ERR(msc->pcc_chan);
 			break;
 		}
@@ -1777,7 +1792,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 		if (IS_ERR(io)) {
 			pr_err("Failed to map MSC base address\n");
 			pcc_mbox_free_channel(msc->pcc_chan);
-			devm_kfree(&pdev->dev, msc);
 			err = PTR_ERR(io);
 			break;
 		}
@@ -1801,6 +1815,9 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 			err = mpam_dt_parse_resources(msc, plat_data);
 	}

+	if (err)
+		mpam_msc_drv_remove(pdev);
+
 	if (!err && fw_num_msc == mpam_num_msc)
 		mpam_register_cpuhp_callbacks(&mpam_discovery_cpu_online);

@@ -2277,24 +2294,6 @@ void mpam_enable(struct work_struct *work)
 	mpam_enable_once();
 }

-static int mpam_msc_drv_remove(struct platform_device *pdev)
-{
-	struct mpam_msc *msc = platform_get_drvdata(pdev);
-
-	if (!msc)
-		return 0;
-
-	mutex_lock(&mpam_list_lock);
-	mpam_num_msc--;
-	platform_set_drvdata(pdev, NULL);
-	list_del_rcu(&msc->glbl_list);
-	mpam_msc_destroy(msc);
-	synchronize_srcu(&mpam_srcu);
-	mutex_unlock(&mpam_list_lock);
-
-	return 0;
-}
-
 struct mpam_write_config_arg {
 	struct mpam_msc_ris *ris;
 	struct mpam_component *comp;
--
Gitee


From db6057b2adcf3df18705b2935c121bd8903f8f5f Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Mon, 28 Nov 2022 19:34:01 +0800
Subject: [PATCH 698/953] anolis: arm_mpam: Fix wrong MMIO space size for msc
 platform device

ANBZ: #8686

The end field in struct resource for the MPAM MSC's MMIO space is
currently assigned a wrong value, which makes resource_size() return a
size that is 1 greater than the real size. Fix it.

However, the MSC MMIO space size in Yitian-710's BIOS is currently
0x1000, which leaves some registers, such as MPAMCFG_CPBM, inaccessible.
To avoid register access errors, force the MMIO space size to be equal
to or greater than MPAM_MIN_MMIO_SIZE.
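For reference, a struct resource spans [start, end] inclusive, which is
where both the off-by-one and its fix come from:

  /* include/linux/ioport.h */
  static inline resource_size_t resource_size(const struct resource *res)
  {
          return res->end - res->start + 1;
  }

  /* so a 0x1000-byte region at 0x10000000 must set end = 0x10000fff,
   * and the mapped size is end - start + 1, not end - start */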
Since the current maximum register offset is 0x2000, assign
MPAM_MIN_MMIO_SIZE to 0x3000.

Note that this patch will make MPAM on Yitian-710 unavailable, so you
need to upgrade the BIOS to B137 or above.

Signed-off-by: Shawn Wang
Reviewed-by: Xin Hao
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/925
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/platform/mpam/mpam_devices.c  | 7 ++++++-
 drivers/platform/mpam/mpam_internal.h | 1 +
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index 12015951a63d..ad641f21d301 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -1769,8 +1769,13 @@ static int mpam_msc_drv_probe(struct platform_device *pdev)
 			err = PTR_ERR(io);
 			break;
 		}
-		msc->mapped_hwpage_sz = msc_res->end - msc_res->start;
+		msc->mapped_hwpage_sz = msc_res->end - msc_res->start + 1;
 		msc->mapped_hwpage = io;
+		if (msc->mapped_hwpage_sz < MPAM_MIN_MMIO_SIZE) {
+			pr_err("MSC MMIO space size is too small\n");
+			err = -EINVAL;
+			break;
+		}
 	} else if (msc->iface == MPAM_IFACE_PCC) {
 		msc->pcc_cl.dev = &pdev->dev;
 		msc->pcc_cl.rx_callback = mpam_pcc_rx_callback;
diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h
index 48554ff93e09..014524e5fa4f 100644
--- a/drivers/platform/mpam/mpam_internal.h
+++ b/drivers/platform/mpam/mpam_internal.h
@@ -321,6 +321,7 @@ void mpam_resctrl_exit(void);
  * Partitioning and Monitoring (MPAM), for Armv8-A. DDI 0598A.a
  */
 #define MPAM_ARCHITECTURE_V1	0x10
+#define MPAM_MIN_MMIO_SIZE	0x3000

 /* Memory mapped control pages: */
 /* ID Register offsets in the memory mapped page */
--
Gitee


From dfc0c77bf4af0a0af19ab65004e03ea914206ec8 Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Tue, 17 Jan 2023 14:40:01 +0800
Subject: [PATCH 699/953] anolis: arm_mpam: Fix the wrong PARTID_MAX and
 PMG_MAX usage

ANBZ: #8686

The PARTID_MAX and PMG_MAX fields in the MPAM system and MMIO registers
give the largest index that can be used, not the total number. Fix the
wrong usage of these values.

Besides, add READ_ONCE() to prevent the compiler from optimizing the
mpam_pmg_max read.
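As a concrete example (values illustrative):

  /* MPAMF_IDR.PARTID_MAX == 63 means PARTIDs 0..63 exist, 64 in total:
   *   for (partid = 0; partid <  partid_max; ...)  visits 0..62, misses 63
   *   for (partid = 0; partid <= partid_max; ...)  visits 0..63, correct
   * and per-PARTID arrays need partid_max + 1 elements.
   */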
Signed-off-by: Shawn Wang
Reviewed-by: Xin Hao
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/1090
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/platform/mpam/mpam_devices.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index ad641f21d301..e22540111d5b 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -1240,7 +1240,7 @@ static int mpam_reprogram_ris(void *_arg)
 	spin_lock(&partid_max_lock);
 	partid_max = mpam_partid_max;
 	spin_unlock(&partid_max_lock);
-	for (partid = 0; partid < partid_max; partid++)
+	for (partid = 0; partid <= partid_max; partid++)
 		mpam_reprogram_ris_partid(ris, partid, cfg);

 	return 0;
@@ -1396,7 +1396,7 @@ static void mpam_reprogram_msc(struct mpam_msc *msc)
 		}

 		reset = true;
-		for (partid = 0; partid < mpam_partid_max; partid++) {
+		for (partid = 0; partid <= mpam_partid_max; partid++) {
 			cfg = &ris->comp->cfg[partid];
 			if (cfg->features)
 				reset = false;
@@ -2119,7 +2119,7 @@ static int __allocate_component_cfg(struct mpam_component *comp)
 	if (comp->cfg)
 		return 0;

-	comp->cfg = kcalloc(mpam_partid_max, sizeof(*comp->cfg), GFP_KERNEL);
+	comp->cfg = kcalloc(mpam_partid_max + 1, sizeof(*comp->cfg), GFP_KERNEL);
 	if (!comp->cfg)
 		return -ENOMEM;

@@ -2220,7 +2220,7 @@ static void mpam_enable_once(void)
 	mpam_register_cpuhp_callbacks(mpam_cpu_online);

 	pr_info("MPAM enabled with %u partid and %u pmg\n",
-		READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1);
+		READ_ONCE(mpam_partid_max) + 1, READ_ONCE(mpam_pmg_max) + 1);
 }

 void mpam_reset_class(struct mpam_class *class)
@@ -2231,7 +2231,7 @@ void mpam_reset_class(struct mpam_class *class)

 	idx = srcu_read_lock(&mpam_srcu);
 	list_for_each_entry_rcu(comp, &class->components, class_list) {
-		memset(comp->cfg, 0, (mpam_partid_max * sizeof(*comp->cfg)));
+		memset(comp->cfg, 0, ((mpam_partid_max + 1) * sizeof(*comp->cfg)));

 		list_for_each_entry_rcu(ris, &comp->ris, comp_list) {
 			mutex_lock(&ris->msc->lock);
--
Gitee


From 6ac8d70bb1f980992f70dfefecd9ac46dbf396de Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Tue, 1 Aug 2023 22:59:59 +0800
Subject: [PATCH 700/953] anolis: arm_mpam: Fix schemata's display error of MB
 when CDP is enabled

ANBZ: #8686

If the "MB" entry of a non-default ctrlmon group is changed while the
resctrl filesystem is mounted with the cdp option, the schemata file may
display a value that is not consistent with the expected setting.

The reason for this error is that the MB resource only uses CDP_NONE as
its resctrl_conf_type, which makes resctrl_get_config_index() return a
wrong PARTID when CDP is enabled. The right PARTID should be obtained
via CDP_CODE and CDP_DATA. Since the MB setting is kept the same for
both code and data, only the PARTID of CDP_CODE is used here to get the
real setting of the MB resource when CDP is enabled.
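The index arithmetic behind this, sketched (the exact encoding lives in
resctrl_get_config_index(); the even/odd layout below follows the usual
CDP convention and is illustrative only):

  /*
   * CDP disabled: one PARTID per group:        index = closid
   * CDP enabled:  code/data pairs, roughly:    index = 2 * closid (+ 1)
   *
   * MB settings are applied through CDP_CODE/CDP_DATA when CDP is on,
   * so reading them back with CDP_NONE computes an index that was never
   * written, which is the stale value seen in the schemata file.
   */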
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1983 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 06af2d77b93e..bc26aaeebc76 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -931,7 +931,10 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); cprops = &res->class->props; - partid = resctrl_get_config_index(closid, type); + if (mpam_resctrl_hide_cdp(r->rid)) + partid = resctrl_get_config_index(closid, CDP_CODE); + else + partid = resctrl_get_config_index(closid, type); cfg = &dom->comp->cfg[partid]; switch (r->rid) { -- Gitee From 97b48865a985c659f837d285aad8c40eb1b53e97 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 2 Apr 2024 15:06:34 +0800 Subject: [PATCH 701/953] anolis: arm_mpam: Fix the wrong checking when using USE_RMID_IDX ANBZ: #8686 The mon field in struct mon_cfg is of type u16, whose max value is less than USE_RMID_IDX macro. When mon is assigned to USE_RMID_IDX, the value of cfg.mon will always be 0. The if statement will always be false, which is incorrect. Fix it by comparing the original u32 mon to USE_RMID_IDX first. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index bc26aaeebc76..25cf64386b64 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -359,9 +359,10 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return -EINVAL; } - cfg.mon = mon; - if (cfg.mon == USE_RMID_IDX) + if (mon == USE_RMID_IDX) cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + else + cfg.mon = mon; cfg.match_pmg = true; cfg.pmg = rmid; -- Gitee From da145f4b662770f07e87da53fbe777de6d2cda26 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 24 Jan 2024 18:05:47 +0800 Subject: [PATCH 702/953] anolis: ACPI / MPAM: Avoid MPAM MSC has the same identifier ANBZ: #8686 Use an extra msc_num as the MSC device id instead of msc->identifier, since MSC nodes with different types in MPAM ACPI table may have the same identifier value. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/arm64/mpam.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c index 8a63449f27b5..c1f9cafaf1e2 100644 --- a/drivers/acpi/arm64/mpam.c +++ b/drivers/acpi/arm64/mpam.c @@ -194,6 +194,7 @@ static int __init _parse_table(struct acpi_table_header *table) struct resource res[3]; char uid[16]; u32 acpi_id; + int msc_num = 0; table_end = (char *)table + table->length; @@ -220,7 +221,12 @@ static int __init _parse_table(struct acpi_table_header *table) memset(res, 0, sizeof(res)); memset(props, 0, sizeof(props)); - pdev = platform_device_alloc("mpam_msc", tbl_msc->identifier); + /* + * Use an extra msc_num instead of msc->identifier, since MSC + * nodes with different types in MPAM ACPI table may have the + * same id value. 
+		 */
+		pdev = platform_device_alloc("mpam_msc", msc_num++);
 		if (IS_ERR(pdev)) {
 			err = PTR_ERR(pdev);
 			break;
 		}
--
Gitee


From 09642b650bef3353cc772308a5d53e1130117b8e Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Tue, 9 May 2023 15:01:57 +0800
Subject: [PATCH 703/953] anolis: arm_mpam: Identify different types of
 machines for MPAM implementation-specific features

ANBZ: #8686

ARM MPAM allows different machines to have different
implementation-specific MPAM features. To avoid affecting the standard
MPAM features and to distinguish different machines, introduce a new
variable, mpam_current_machine, as a machine identifier based on the
information from the MPAM ACPI table or the device tree. For now only
Yitian710 is supported. Machines without specific features are not
affected.

Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2661
[ refactor the code for kernel-6.6 ]
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/acpi/arm64/mpam.c            | 67 +++++++++++++++++++++++++++-
 drivers/platform/mpam/mpam_devices.c | 13 ++++++
 include/linux/arm_mpam.h             | 15 +++++++
 3 files changed, 93 insertions(+), 2 deletions(-)

diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c
index c1f9cafaf1e2..92ecfc614d14 100644
--- a/drivers/acpi/arm64/mpam.c
+++ b/drivers/acpi/arm64/mpam.c
@@ -22,6 +22,26 @@
 #define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER	(1<<3)
 #define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID		(1<<4)

+/* Use OEM info in MPAM ACPI table to distinguish different machine types */
+struct acpi_mpam_machine_oem_info {
+	enum mpam_machine_type type;
+	char signature[ACPI_NAMESEG_SIZE + 1];
+	u8 revision;
+	char oem_id[ACPI_OEM_ID_SIZE + 1];
+	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+	u32 oem_revision;
+};
+
+static struct acpi_mpam_machine_oem_info acpi_mpam_machines[MPAM_NUM_MACHINE_TYPES] = {
+	[MPAM_YITIAN710] = {
+		.signature = "YMPM",
+		.revision = 0,
+		.oem_id = "PTG ",
+		.oem_table_id = "PTG01 ",
+		.oem_revision = 0,
+	},
+};
+
 static bool frob_irq(struct platform_device *pdev, int intid, u32 flags,
 		     int *irq, u32 processor_container_uid)
 {
@@ -297,18 +317,34 @@
 static struct acpi_table_header *get_table(void)
 {
 	struct acpi_table_header *table;
+	enum mpam_machine_type mtype;
 	acpi_status status;

 	if (acpi_disabled || !mpam_cpus_have_feature())
 		return NULL;

-	status = acpi_get_table(ACPI_SIG_MPAM, 0, &table);
+	mtype = acpi_mpam_get_machine_type();
+
+	if (mtype != MPAM_DEFAULT_MACHINE)
+		status = acpi_get_table(acpi_mpam_machines[mtype].signature, 0, &table);
+	else
+		status = acpi_get_table(ACPI_SIG_MPAM, 0, &table);
 	if (ACPI_FAILURE(status))
 		return NULL;

-	if (table->revision != 1)
+	if (mtype == MPAM_DEFAULT_MACHINE && table->revision != 1)
 		return NULL;

+	/*
+	 * Kunpeng's MPAM ACPI adopts an older version of MPAM ACPI, so
+	 * this MPAM ACPI driver is not suitable for the Kunpeng platform.
+	 * Skip it.
+ */ + if (!strncmp(table->oem_id, "HISI", 4)) { + acpi_put_table(table); + return NULL; + } + return table; } @@ -367,6 +403,33 @@ int acpi_mpam_count_msc(void) return ret; } +enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + struct acpi_table_header *table; + enum mpam_machine_type ret; + acpi_status status; + int i; + + ret = MPAM_DEFAULT_MACHINE; + + for (i = MPAM_DEFAULT_MACHINE + 1; i < MPAM_NUM_MACHINE_TYPES; i++) { + status = acpi_get_table(acpi_mpam_machines[i].signature, 0, &table); + if (ACPI_FAILURE(status)) + continue; + + if (!memcmp(acpi_mpam_machines[i].oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(acpi_mpam_machines[i].oem_table_id, table->oem_table_id, + ACPI_OEM_TABLE_ID_SIZE) && + acpi_mpam_machines[i].oem_revision == table->oem_revision) { + ret = i; + } + + acpi_put_table(table); + } + + return ret; +} + /* * Call after ACPI devices have been created, which happens behind acpi_scan_init() * called from subsys_initcall(). PCC requires the mailbox driver, which is diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index e22540111d5b..3351dc9d1b1b 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -45,6 +45,8 @@ static LIST_HEAD(mpam_all_msc); struct srcu_struct mpam_srcu; +enum mpam_machine_type mpam_current_machine; + /* MPAM isn't available until all the MSC have been probed. */ static u32 mpam_num_msc; @@ -1561,6 +1563,12 @@ static int mpam_msc_setup_error_irq(struct mpam_msc *msc) return 0; } +static enum mpam_machine_type mpam_dt_get_machine_type(void) +{ + /* FIXME: not supported yet */ + return MPAM_DEFAULT_MACHINE; +} + static int mpam_dt_count_msc(void) { int count = 0; @@ -2386,6 +2394,11 @@ static int __init mpam_msc_driver_init(void) init_srcu_struct(&mpam_srcu); + if (!acpi_disabled) + mpam_current_machine = acpi_mpam_get_machine_type(); + else + mpam_current_machine = mpam_dt_get_machine_type(); + /* * If the MPAM CPU interface is not implemented, or reserved by * firmware, there is no point touching the rest of the hardware. diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 239d27af9e32..5423a2eff810 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -31,11 +31,22 @@ enum mpam_class_types { MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */ }; +enum mpam_machine_type { + MPAM_DEFAULT_MACHINE, + MPAM_YITIAN710, + + MPAM_NUM_MACHINE_TYPES, +}; + +/* Machine identifier which can be used for vendor-specific MPAM features */ +extern enum mpam_machine_type mpam_current_machine; + #ifdef CONFIG_ACPI_MPAM /* Parse the ACPI description of resources entries for this MSC. 
*/ int acpi_mpam_parse_resources(struct mpam_msc *msc, struct acpi_mpam_msc_node *tbl_msc); int acpi_mpam_count_msc(void); +enum mpam_machine_type acpi_mpam_get_machine_type(void); #else static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, struct acpi_mpam_msc_node *tbl_msc) @@ -43,6 +54,10 @@ static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, return -EINVAL; } static inline int acpi_mpam_count_msc(void) { return -EINVAL; } +static inline enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + return MPAM_DEFAULT_MACHINE; +} #endif int mpam_register_requestor(u16 partid_max, u8 pmg_max); -- Gitee From 78914ea3aef6db5e96b693cc58bc2c2464eee6a3 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 26 Feb 2021 20:21:43 +0800 Subject: [PATCH 704/953] openEuler: ACPI / PPTT: Filthy hack to find _a_ backwards reference in the PPTT [ROTTEN] ANBZ: #8686 commit 0ed11dc41fe828a3d2b69220347b3a2ed9795ba9 openEuler. hulk inclusion category: feature feature: ARM MPAM support bugzilla: 48265 CVE: NA -------------------------------- The alpha MPAM table contains a pointer to the PPTT cache, which it expects to be unique, which isn't guaranteed. Ideally we'd take a cache-id, but the hardware doesn't have a suitable property, instead arm64 will generate an id from the cpu affinity ids. To find the cache id we need to find the cacheinfo structure, which we can do if we have a pptt cpu_node (different to the cache node), as this is the fw_token used to match the Processor Container that contains all the CPUs that share this cache. How can we find the expected-to-be-unique cpu_node from the cache_node? ... add acpi_pptt_find_cache_backwards() to find a PPTT processor node given a PPTT cache node. This is totally broken as many processor nodes may point at the same PPTT cache indicating different instances of the cache. (e.g. all the L1 caches are the same shape, but they aren't the same cache). This only works if you cooked your PPTT table to look like this. Signed-off-by: James Morse # ... but its still GPLv2 Signed-off-by: Wang ShaoBo Reviewed-by: Xie XiuQi Signed-off-by: Yang Yingliang Reviewed-by: Cheng Jian Signed-off-by: Zheng Zengkai Signed-off-by: Xin Hao Reviewed-by: Baolin Wang Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/pptt.c | 48 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 4 ++++ 2 files changed, 52 insertions(+) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 201df60e4a79..66b637502595 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -296,6 +296,54 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } +/* + * acpi_pptt_find_cache_backwards() - Given a PPTT cache find a processor node + * that points to it. This lets us find a cacheinfo node by fw_token, but + * is totally broken as many processor node may point at the same PPTT + * cache indicating different instances of the cache. (e.g. all the L1 + * caches are the same shape, but they aren't the same cache). + * This only works if you cooked your PPTT table to look like this. 
+ */
+struct acpi_pptt_processor *
+acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr,
+			       struct acpi_pptt_cache *cache)
+{
+	struct acpi_pptt_processor *cpu_node;
+	struct acpi_subtable_header *entry;
+	struct acpi_subtable_header *res;
+	unsigned long table_end;
+	u32 proc_sz;
+	int i;
+
+	table_end = (unsigned long)table_hdr + table_hdr->length;
+	entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+			     sizeof(struct acpi_table_pptt));
+	proc_sz = sizeof(struct acpi_pptt_processor *);
+
+	/* find the processor structure which points at this cache */
+	while ((unsigned long)entry + proc_sz < table_end) {
+		if (entry->length == 0) {
+			pr_warn("Invalid zero length subtable\n");
+			break;
+		}
+
+		cpu_node = (struct acpi_pptt_processor *)entry;
+		entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+				     entry->length);
+
+		if (cpu_node->header.type != ACPI_PPTT_TYPE_PROCESSOR)
+			continue;
+
+		for (i = 0; i < cpu_node->number_of_priv_resources; i++) {
+			res = acpi_get_pptt_resource(table_hdr, cpu_node, i);
+			if (&cache->header == res)
+				return cpu_node;
+		}
+	}
+
+	return NULL;
+}
+
 /* parent_node points into the table, but the table isn't provided. */
 static void acpi_pptt_get_child_cpus(struct acpi_pptt_processor *parent_node,
 				     cpumask_t *cpus)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b14c81759fd8..33dad1d57e11 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1565,4 +1565,8 @@ static inline void acpi_device_notify(struct device *dev) { }
 static inline void acpi_device_notify_remove(struct device *dev) { }
 #endif

+struct acpi_pptt_processor *
+acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr,
+			       struct acpi_pptt_cache *cache);
+
 #endif /*_LINUX_ACPI_H*/
--
Gitee


From 00817449f48a07a0419bc9b6fa71f86d8b7eaad5 Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Thu, 14 Jul 2022 14:53:12 +0800
Subject: [PATCH 705/953] anolis: ACPI / PPTT: Add a helper to fill a cpumask
 from a cache with specific id and level

ANBZ: #8686

The function acpi_pptt_get_cpumask_from_cache_id() assumes each cache
has a different cache id. But in the implementation, caches at
different levels may have the same id. So we add a new helper function
to get the cpumask of a cache with a specific id and level.

Signed-off-by: Shawn Wang
Reviewed-by: Xin Hao
Reviewed-by: Baolin Wang
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/acpi/pptt.c  | 60 ++++++++++++++++++++++++++++++++++++++++++++
 include/linux/acpi.h |  8 ++++++
 2 files changed, 68 insertions(+)

diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 66b637502595..fa40ed82c198 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -1134,3 +1134,63 @@ int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus)
 	acpi_put_table(table);
 	return 0;
 }
+
+/**
+ * acpi_pptt_get_cpumask_from_cache_id_and_level() - Get the cpus associated with the
+ * cache specified by id and level
+ * @cache_id: The id field of the unified cache
+ * @cache_level: The level of the unified cache
+ * @cpus: Where to build the cpumask
+ *
+ * Determine which CPUs are below this cache in the PPTT. This allows the property
+ * to be found even if the CPUs are offline.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found.
+ * Otherwise returns 0 and sets the cpus in the provided cpumask.
+ */ +int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, + cpumask_t *cpus) +{ + u32 acpi_cpu_id; + acpi_status status; + int cpu; + struct acpi_table_header *table; + struct acpi_pptt_cache *cache_node; + struct acpi_pptt_processor *cpu_node; + + cpumask_clear(cpus); + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + for_each_possible_cpu(cpu) { + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + continue; + + cache_node = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + cache_level, &cpu_node); + + if (!cache_node) + continue; + + cpu_node = acpi_pptt_find_cache_backwards(table, cache_node); + if (cpu_node->acpi_processor_id == cache_id) + cpumask_set_cpu(cpu, cpus); + } + + acpi_put_table(table); + return 0; +} diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 33dad1d57e11..827fbbfc1c94 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1498,6 +1498,8 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cache_level_from_id(u32 cache_id); int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, + cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1533,6 +1535,12 @@ static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, { return -EINVAL; } +static inline int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, + u32 cache_level, + cpumask_t *cpus) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 -- Gitee From 00817449f48a07a0419bc9b6fa71f86d8b7eaad5 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 27 Jul 2022 15:42:18 +0800 Subject: [PATCH 706/953] anolis: ACPI / PPTT: Downgrade the revision requirement in acpi_pptt_get_cpumask_from_cache_id_and_level ANBZ: #8686 Since function `acpi_pptt_get_cpumask_from_cache_id_and_level()` does not really use the cache id in PPTT, in order to maintain compatibility with the old revision, we downgrade the revision requirement from 3 to 2. Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/580 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/pptt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index fa40ed82c198..56b53d45dadd 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -1168,7 +1168,11 @@ int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, return -ENOENT; } - if (table->revision < 3) { + /* + * FIXME: Since this function does not actually use the cache id in the + * PPTT table, we downgrade the revision requirement. 
+	 */
+	if (table->revision < 2) {
 		acpi_put_table(table);
 		return -ENOENT;
 	}
--
Gitee


From 9a373fb66336e267ce332f8cc9d722ce9419f0c1 Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Tue, 23 Jan 2024 13:35:33 +0800
Subject: [PATCH 707/953] anolis: arm_mpam: Add cache msc info parsing for
 Yitian710 specific MPAM ACPI table

ANBZ: #8686

Yitian710 uses a specific MPAM ACPI table for cache msc, which may
contain the same cache id at different cache levels. This conflicts
with the code from
git://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git. So we
identify the current machine and use some specific functions to parse
the cache level and its cpumasks on the Yitian710 platform.

Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2661
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/acpi/arm64/mpam.c            | 17 +++++++++++++----
 drivers/platform/mpam/mpam_devices.c |  6 +++++-
 2 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c
index 92ecfc614d14..7a4bd70972c2 100644
--- a/drivers/acpi/arm64/mpam.c
+++ b/drivers/acpi/arm64/mpam.c
@@ -124,10 +124,19 @@ static int acpi_mpam_parse_resource(struct mpam_msc *msc,
 	switch (res->locator_type) {
 	case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE:
 		cache_id = res->locator.cache_locator.cache_reference;
-		level = find_acpi_cache_level_from_id(cache_id);
-		if (level < 0) {
-			pr_err_once("Bad level for cache with id %u\n", cache_id);
-			return level;
+		if (mpam_current_machine == MPAM_YITIAN710) {
+			/*
+			 * YITIAN710's BIOS doesn't support finding the level
+			 * from the cache id. Since it only supports the L3
+			 * cache, use a fixed value, 3.
+			 */
+			level = 3;
+		} else {
+			level = find_acpi_cache_level_from_id(cache_id);
+			if (level < 0) {
+				pr_err_once("Bad level for cache with id %u\n", cache_id);
+				return level;
+			}
 		}
 		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE,
 				       level, cache_id);
diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index 3351dc9d1b1b..b6672b7881f7 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -367,8 +367,12 @@ static int get_cpumask_from_cache_id(u32 cache_id, u32 cache_level,
 	int iter_cache_id;
 	struct device_node *iter;

-	if (!acpi_disabled)
+	if (!acpi_disabled) {
+		if (mpam_current_machine == MPAM_YITIAN710)
+			return acpi_pptt_get_cpumask_from_cache_id_and_level(
+					cache_id, cache_level, affinity);
 		return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
+	}

 	for_each_possible_cpu(cpu) {
 		iter = of_get_cpu_node(cpu, NULL);
--
Gitee


From b9e49fafce0922b67a8e6c4c130acd1ac5f26d7e Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Mon, 8 May 2023 16:05:32 +0800
Subject: [PATCH 708/953] anolis: arm_mpam: Add support for
 implementation-defined MB monitoring

ANBZ: #8686

Since Yitian710 uses an implementation-defined MB monitoring feature,
introduce a new feature named mpam_feat_impl_msmon_mbwu to
differentiate it from the standard MB monitoring feature.
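The unit conversion this enables, as used by the read path added below
(semantics inferred from the code; the 32-byte counter granularity and
the window register are Yitian710-specific):

  /*
   *   val       - monitor count, in 32-byte units, over one window
   *   cycle     - window length, in DDRC clock cycles
   *   ddrc_freq - DDRC clock, in MHz (from the ACPI memory locator)
   *
   *   bytes/sec = (val * 32) / (cycle / (ddrc_freq * 1000000))
   *             =  val * 32 * ddrc_freq * 1000000 / cycle
   */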
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2661 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/arm64/mpam.c | 4 ++ drivers/platform/mpam/mpam_devices.c | 56 ++++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 8 ++++ 3 files changed, 66 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c index 7a4bd70972c2..153ef041abf0 100644 --- a/drivers/acpi/arm64/mpam.c +++ b/drivers/acpi/arm64/mpam.c @@ -22,6 +22,8 @@ #define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER (1<<3) #define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID (1<<4) +int ddrc_freq; + /* Use OEM info in MPAM ACPI table to distinguish different machine types */ struct acpi_mpam_machine_oem_info { enum mpam_machine_type type; @@ -141,6 +143,8 @@ static int acpi_mpam_parse_resource(struct mpam_msc *msc, return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, level, cache_id); case ACPI_MPAM_LOCATION_TYPE_MEMORY: + if (mpam_current_machine == MPAM_YITIAN710) + ddrc_freq = res->locator.memory_locator.reserved; return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY, 255, res->locator.memory_locator.proximity_domain); default: diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index b6672b7881f7..0827f2c61f91 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -35,6 +35,8 @@ #include "mpam_internal.h" +extern int ddrc_freq; + /* * mpam_list_lock protects the SRCU lists when writing. Once the * mpam_enabled key is enabled these lists are read-only, @@ -692,6 +694,10 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) } } + if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr)) + if (mpam_current_machine == MPAM_YITIAN710 && class->type == MPAM_CLASS_MEMORY) + mpam_set_feature(mpam_feat_impl_msmon_mbwu, props); + /* * RIS with PARTID narrowing don't have enough storage for one * configuration per PARTID. 
If these are in a class we could use,
@@ -1015,6 +1021,45 @@ static void __ris_msmon_read(void *arg)
 	*(m->val) += now;
 }
 
+static void __ris_impl_msmon_read(void *arg)
+{
+	unsigned long flags;
+	struct mon_read *m = arg;
+	u64 mb_val = 0;
+	struct mon_cfg *ctx = m->ctx;
+	struct mpam_msc *msc = m->ris->msc;
+	u32 custom_reg_base_addr, cycle, val;
+
+	lockdep_assert_held(&msc->lock);
+	if (m->type != mpam_feat_impl_msmon_mbwu)
+		return;
+
+	/* Other machines can extend this function */
+	if (mpam_current_machine != MPAM_YITIAN710)
+		return;
+
+	spin_lock_irqsave(&msc->part_sel_lock, flags);
+
+	__mpam_write_reg(msc, MPAMCFG_PART_SEL, ctx->mon);
+
+	custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR);
+
+	cycle = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_WINDW_OFFSET);
+	val = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_MBWC_OFFSET);
+
+	spin_unlock_irqrestore(&msc->part_sel_lock, flags);
+
+	if (val & MSMON___NRDY) {
+		m->err = -EBUSY;
+		return;
+	}
+
+	mb_val = MBWU_GET(val);
+
+	mb_val = mb_val * 32 * ddrc_freq * 1000000 / cycle; /* B/s */
+	*(m->val) += mb_val;
+}
+
 static int _msmon_read(struct mpam_component *comp, struct mon_read *arg)
 {
 	int err, idx;
@@ -1027,8 +1072,15 @@ static int _msmon_read(struct mpam_component *comp, struct mon_read *arg)
 		msc = ris->msc;
 
 		mutex_lock(&msc->lock);
-		err = smp_call_function_any(&msc->accessibility,
-					    __ris_msmon_read, arg, true);
+		if (arg->type == mpam_feat_msmon_csu ||
+		    arg->type == mpam_feat_msmon_mbwu)
+			err = smp_call_function_any(&msc->accessibility,
+						    __ris_msmon_read, arg, true);
+		else if (arg->type == mpam_feat_impl_msmon_mbwu)
+			err = smp_call_function_any(&msc->accessibility,
+						    __ris_impl_msmon_read, arg, true);
+		else
+			err = -EOPNOTSUPP;
 		mutex_unlock(&msc->lock);
 		if (!err && arg->err)
 			err = arg->err;
diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h
index 014524e5fa4f..d84413e5e031 100644
--- a/drivers/platform/mpam/mpam_internal.h
+++ b/drivers/platform/mpam/mpam_internal.h
@@ -108,6 +108,7 @@ enum mpam_device_features {
 	mpam_feat_msmon_mbwu_capture,
 	mpam_feat_msmon_mbwu_rwbw,
 	mpam_feat_msmon_capt,
+	mpam_feat_impl_msmon_mbwu,
 	mpam_feat_partid_nrw,
 	MPAM_FEATURE_LAST,
 };
@@ -574,4 +575,11 @@ void mpam_resctrl_exit(void);
  */
 #define MSMON_CAPT_EVNT_NOW	BIT(0)
 
+/* Used for PTG Yitian710 specific MB monitoring feature */
+#define MBWU_MASK		GENMASK(23, 0)
+#define MBWU_WINWD_MAX		GENMASK(22, 0)
+#define MBWU_GET(v)		((v) & MBWU_MASK)
+#define MPAMF_CUST_MBWC_OFFSET	0x08
+#define MPAMF_CUST_WINDW_OFFSET	0x0C
+
 #endif /* MPAM_INTERNAL_H */
-- 
Gitee

From 09d14609221e20c665c64e90c40a8efa3e151d27 Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Mon, 8 May 2023 16:20:42 +0800
Subject: [PATCH 709/953] anolis: fs/resctrl: Add a new resctrl monitoring
 event to get MB in Bps

ANBZ: #8686

Some platforms like Yitian710 can get the memory bandwidth of a
specific PARTID in Bps directly, while the current resctrl file system
only supports mbm_{local,total}_bytes as counters in bytes. Add a new
resctrl monitoring event mbm_Bps to support this feature.

To avoid introducing a new interface, the event keeps the name
"mbm_local_bytes" instead of "mbm_Bps", as before.
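For illustration, a hypothetical userspace consumer (the mount point is
standard, but the monitoring group and domain directory names below are
made up; on these platforms the value read back is already a rate in B/s,
so no delta-over-interval computation is needed):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bps;
		/* same file name as the classic byte counter */
		FILE *f = fopen("/sys/fs/resctrl/mon_groups/g0/mon_data/"
				"mon_MBA_00/mbm_local_bytes", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &bps) == 1)
			printf("memory bandwidth: %llu B/s\n", bps);
		fclose(f);
		return 0;
	}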
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2661 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- arch/x86/include/asm/resctrl.h | 5 +++++ arch/x86/kernel/cpu/resctrl/monitor.c | 2 ++ drivers/platform/mpam/mpam_resctrl.c | 29 +++++++++++++++++++++++++++ fs/resctrl/monitor.c | 16 +++++++++++++++ fs/resctrl/rdtgroup.c | 5 +++-- include/linux/arm_mpam.h | 1 + include/linux/resctrl_types.h | 3 +++ 7 files changed, 59 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 746431c66fc4..f159bddbec51 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -99,6 +99,11 @@ static inline bool resctrl_arch_is_mbm_local_enabled(void) return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); } +static inline bool resctrl_arch_is_mbm_bps_enabled(void) +{ + return false; +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 02fb9d87479a..e0cc1b499279 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -134,6 +134,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, return &hw_dom->arch_mbm_total[rmid]; case QOS_L3_MBM_LOCAL_EVENT_ID: return &hw_dom->arch_mbm_local[rmid]; + default: + break; } /* Never expect to get here */ diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 25cf64386b64..98b3b1baa91e 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -35,6 +35,7 @@ static bool exposed_alloc_capable; static bool exposed_mon_capable; static struct mpam_class *mbm_local_class; static struct mpam_class *mbm_total_class; +static struct mpam_class *mbm_bps_class; /* * MPAM emulates CDP by setting different PARTID in the I/D fields of MPAM1_EL1. 
@@ -79,6 +80,11 @@ bool resctrl_arch_is_mbm_total_enabled(void) return mbm_total_class; } +bool resctrl_arch_is_mbm_bps_enabled(void) +{ + return mbm_bps_class; +} + bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid) { switch (rid) { @@ -272,6 +278,10 @@ static void *resctrl_arch_mon_ctx_alloc_no_wait(struct rdt_resource *r, case QOS_L3_MBM_LOCAL_EVENT_ID: case QOS_L3_MBM_TOTAL_EVENT_ID: return mon_is_rmid_idx; + case QOS_MC_MBM_BPS_EVENT_ID: + if (mpam_current_machine == MPAM_YITIAN710) + return mon_is_rmid_idx; + return ERR_PTR(-EOPNOTSUPP); } return ERR_PTR(-EOPNOTSUPP); @@ -316,6 +326,7 @@ void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, return; case QOS_L3_MBM_TOTAL_EVENT_ID: case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_MC_MBM_BPS_EVENT_ID: return; } } @@ -355,6 +366,10 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, case QOS_L3_MBM_TOTAL_EVENT_ID: type = mpam_feat_msmon_mbwu; break; + case QOS_MC_MBM_BPS_EVENT_ID: + if (mpam_current_machine == MPAM_YITIAN710) + type = mpam_feat_impl_msmon_mbwu; + break; default: return -EINVAL; } @@ -487,6 +502,16 @@ static bool class_has_usable_mbwu(struct mpam_class *class) return (mpam_partid_max > 1) || (mpam_pmg_max != 0); } +static bool class_has_usable_impl_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_impl_msmon_mbwu, cprops)) + return false; + + return true; +} + static bool mba_class_use_mbw_part(struct mpam_props *cprops) { /* TODO: Scaling is not yet supported */ @@ -832,6 +857,10 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) if (has_mbwu && class->type == MPAM_CLASS_MEMORY) { mbm_total_class = class; r->mon_capable = true; + } else if (class_has_usable_impl_mbwu(class)) { + r->mon_capable = true; + if (mpam_current_machine == MPAM_YITIAN710) + mbm_bps_class = class; } } diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c index 06f660dfd929..51baa0f71b65 100644 --- a/fs/resctrl/monitor.c +++ b/fs/resctrl/monitor.c @@ -790,6 +790,11 @@ static struct mon_evt mbm_local_event = { .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, }; +static struct mon_evt mbm_bps_event = { + .name = "mbm_local_bytes", + .evtid = QOS_MC_MBM_BPS_EVENT_ID, +}; + /* * Initialize the event list for the resource. 
* @@ -809,6 +814,14 @@ static void l3_mon_evt_init(struct rdt_resource *r) list_add_tail(&mbm_local_event.list, &r->evt_list); } +static void mc_mon_evt_init(struct rdt_resource *r) +{ + INIT_LIST_HEAD(&r->evt_list); + + if (resctrl_arch_is_mbm_bps_enabled()) + list_add_tail(&mbm_bps_event.list, &r->evt_list); +} + int resctrl_mon_resource_init(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); @@ -832,6 +845,9 @@ int resctrl_mon_resource_init(void) mbm_config_rftype_init("mbm_local_bytes_config"); } + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + mc_mon_evt_init(r); + return 0; } diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index ea969ddb1a9d..e39f22453d84 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -113,13 +113,14 @@ void rdt_staged_configs_clear(void) static bool resctrl_is_mbm_enabled(void) { return (resctrl_arch_is_mbm_total_enabled() || - resctrl_arch_is_mbm_local_enabled()); + resctrl_arch_is_mbm_local_enabled() || + resctrl_arch_is_mbm_bps_enabled()); } static bool resctrl_is_mbm_event(int e) { return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); + e <= QOS_MC_MBM_BPS_EVENT_ID); } /* diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 5423a2eff810..660776491941 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -81,6 +81,7 @@ bool resctrl_arch_mon_capable(void); bool resctrl_arch_is_llc_occupancy_enabled(void); bool resctrl_arch_is_mbm_local_enabled(void); bool resctrl_arch_is_mbm_total_enabled(void); +bool resctrl_arch_is_mbm_bps_enabled(void); /* reset cached configurations, then all devices */ void resctrl_arch_reset_resources(void); diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h index fe0b10b589c0..a0d8694be783 100644 --- a/include/linux/resctrl_types.h +++ b/include/linux/resctrl_types.h @@ -93,6 +93,9 @@ enum resctrl_event_id { QOS_L3_OCCUP_EVENT_ID = 0x01, QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, + QOS_MC_MBM_BPS_EVENT_ID = 0x04, }; +#define RESCTRL_MAX_EVENT_NUM 4 + #endif /* __LINUX_RESCTRL_TYPES_H */ -- Gitee From 8d147c00710760e2d74811e0178d47d5ee6aef4f Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 16 May 2023 17:58:10 +0800 Subject: [PATCH 710/953] anolis: arm_mpam: Maximize Yitian710's MB monitoring window width ANBZ: #8686 To improve the vendor-defined MB monitoring accuracy on Yitian710, maximize the MB monitoring window width on reprogramming. 
Signed-off-by: Shawn Wang
Reviewed-by: Xin Hao
Reviewed-by: Baolin Wang
Reviewed-by: Baolin Wang
Reviewed-by: Xin Hao
Link: https://gitee.com/anolis/cloud-kernel/pulls/1643
[ add machine type checking ]
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 drivers/platform/mpam/mpam_devices.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
index 0827f2c61f91..e1a1dee8578c 100644
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -1220,6 +1220,7 @@ static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid,
 	struct mpam_props *rprops = &ris->props;
 	u16 dspri = GENMASK(rprops->dspri_wd, 0);
 	u16 intpri = GENMASK(rprops->intpri_wd, 0);
+	u32 custom_reg_base_addr;
 
 	spin_lock(&msc->part_sel_lock);
 	__mpam_part_sel(ris->ris_idx, partid, msc);
@@ -1276,6 +1277,15 @@ static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid,
 		mpam_write_partsel_reg(msc, PRI, pri_val);
 	}
 
+	if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr)) {
+		if (mpam_current_machine == MPAM_YITIAN710) {
+			custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR);
+			__mpam_write_reg(msc, custom_reg_base_addr +
+					 MPAMF_CUST_WINDW_OFFSET,
+					 MBWU_WINWD_MAX);
+		}
+	}
+
 	spin_unlock(&msc->part_sel_lock);
 }
 
-- 
Gitee

From 834674b5e1980c9d110b4c5392f4f132c656a2ca Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Sun, 7 Apr 2024 10:42:41 +0800
Subject: [PATCH 711/953] anolis: arm_mpam: Fix kernel boot failure when the
 firmware does not support MPAM

ANBZ: #8686

Accessing MPAM registers is forbidden when the firmware disables MPAM,
even if MPAM is advertised in the ID_AA64PFR0_EL1 register. With such
firmware, whether MPAM is disabled can only be known from the MPAM ACPI
table. Since, in the current implementation, some MPAM registers are
accessed before the MPAM table is parsed, booting the kernel on
firmware that disables MPAM fails.

Fix it by removing all MPAM register accesses that happen before the
MPAM table is parsed.
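In outline, the probe order after this change becomes (a simplified
sketch of the reordering in the diff below):

	/* 1) Consult firmware tables first -- no MPAM sysreg access yet. */
	fw_num_msc = acpi_mpam_count_msc();
	if (fw_num_msc <= 0)
		return -EINVAL;		/* firmware left MPAM disabled */

	/*
	 * 2) Only now is it safe to read MPAMIDR_EL1 and register the
	 *    PARTID/PMG limits for CPU requestors.
	 */
	err = arm64_mpam_register_cpus();
	if (err)
		return err;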
Signed-off-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/780 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- arch/arm64/include/asm/cpu.h | 1 - arch/arm64/include/asm/cpufeature.h | 6 --- arch/arm64/kernel/cpufeature.c | 62 +--------------------------- arch/arm64/kernel/cpuinfo.c | 4 -- arch/arm64/kernel/mpam.c | 10 ----- drivers/platform/mpam/mpam_devices.c | 40 +++++++++++++----- 6 files changed, 31 insertions(+), 92 deletions(-) diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 1cb5bafd9238..e749838b9c5d 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -47,7 +47,6 @@ struct cpuinfo_arm64 { u64 reg_revidr; u64 reg_gmid; u64 reg_smidr; - u64 reg_mpamidr; u64 reg_id_aa64dfr0; u64 reg_id_aa64dfr1; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e873848ad9d9..24c2564268e5 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -838,12 +838,6 @@ static inline bool system_supports_tlb_range(void) cpus_have_const_cap(ARM64_HAS_TLB_RANGE); } -static inline bool cpus_support_mpam(void) -{ - return IS_ENABLED(CONFIG_ARM64_MPAM) && - cpus_have_final_cap(ARM64_MPAM); -} - int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); bool try_emulate_mrs(struct pt_regs *regs, u32 isn); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d3fbceaa6369..7beb2491ec82 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -84,7 +84,6 @@ #include #include #include -#include #include #include #include @@ -624,18 +623,6 @@ static const struct arm64_ftr_bits ftr_smcr[] = { ARM64_FTR_END, }; -static const struct arm64_ftr_bits ftr_mpamidr[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - MPAMIDR_PMG_MAX_SHIFT, MPAMIDR_PMG_MAX_LEN, 0), /* PMG_MAX */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - MPAMIDR_VPMR_MAX_SHIFT, MPAMIDR_VPMR_MAX_LEN, 0), /* VPMR_MAX */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, - MPAMIDR_HAS_HCR_SHIFT, 1, 0), /* HAS_HCR */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - MPAMIDR_PARTID_MAX_SHIFT, MPAMIDR_PARTID_MAX_LEN, 0), /* PARTID_MAX */ - ARM64_FTR_END, -}; - /* * Common ftr bits for a 32bit register with all hidden, strict * attributes, with 4bit feature fields and a default safe value of @@ -752,9 +739,6 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr), - /* Op1 = 0, CRn = 10, CRm = 4 */ - ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr), - /* Op1 = 1, CRn = 0, CRm = 0 */ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), @@ -1074,9 +1058,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) vec_init_vq_map(ARM64_VEC_SME); } - if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) - init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr); - if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); @@ -1336,11 +1317,6 @@ void update_cpu_features(int cpu, vec_update_vq_map(ARM64_VEC_SME); } - if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) { - taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu, - info->reg_mpamidr, boot->reg_mpamidr); - } - /* * The kernel uses the LDGM/STGM instructions and the number of tags * they read/write depends on the GMID_EL1.BS field. 
Check that the @@ -2263,39 +2239,6 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); } -static bool __maybe_unused -test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope) -{ - if (!has_cpuid_feature(entry, scope)) - return false; - - /* Check firmware actually enabled MPAM on this cpu. */ - return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM_SYSREG_EN); -} - -static void __maybe_unused -cpu_enable_mpam(const struct arm64_cpu_capabilities *entry) -{ - /* - * Access by the kernel (at EL1) should use the reserved PARTID - * which is configured unrestricted. This avoids priority-inversion - * where latency sensitive tasks have to wait for a task that has - * been throttled to release the lock. - */ - write_sysreg_s(0, SYS_MPAM1_EL1); -} - -static void mpam_extra_caps(void) -{ - u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); - - if (!IS_ENABLED(CONFIG_ARM64_MPAM)) - return; - - if (idr & MPAMIDR_HAS_HCR) - __enable_mpam_hcr(); -} - static const struct arm64_cpu_capabilities arm64_features[] = { { .capability = ARM64_ALWAYS_BOOT, @@ -2781,8 +2724,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .desc = "Memory Partitioning And Monitoring", .type = ARM64_CPUCAP_SYSTEM_FEATURE, .capability = ARM64_MPAM, - .matches = test_has_mpam, - .cpu_enable = cpu_enable_mpam, + .matches = has_cpuid_feature, ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1) }, #endif @@ -3433,8 +3375,6 @@ void __init setup_cpu_features(void) if (!cwg) pr_warn("No Cache Writeback Granule information, assuming %d\n", ARCH_DMA_MINALIGN); - - mpam_extra_caps(); } static int enable_mismatched_32bit_el0(unsigned int cpu) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 1b1fe0f58a86..98fda8500535 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -460,10 +460,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) __cpuinfo_store_cpu_32bit(&info->aarch32); - if (IS_ENABLED(CONFIG_ARM64_MPAM) && - id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) - info->reg_mpamidr = read_cpuid(MPAMIDR_EL1); - cpuinfo_detect_icache_policy(info); } diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c index 02f43334f078..134b44118553 100644 --- a/arch/arm64/kernel/mpam.c +++ b/arch/arm64/kernel/mpam.c @@ -11,13 +11,3 @@ DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); DEFINE_STATIC_KEY_FALSE(mpam_enabled); DEFINE_PER_CPU(u64, arm64_mpam_default); DEFINE_PER_CPU(u64, arm64_mpam_current); - -static int __init arm64_mpam_register_cpus(void) -{ - u64 mpamidr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); - u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); - u8 pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); - - return mpam_register_requestor(partid_max, pmg_max); -} -arch_initcall(arm64_mpam_register_cpus) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index e1a1dee8578c..906f8a6b6940 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2451,9 +2451,19 @@ static void mpam_dt_create_foundling_msc(void) } } +static int __init arm64_mpam_register_cpus(void) +{ + u64 mpamidr = read_sysreg_s(SYS_MPAMIDR_EL1); + u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); + u8 pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); + + return mpam_register_requestor(partid_max, pmg_max); +} + static int __init mpam_msc_driver_init(void) { bool mpam_not_available = false; + int 
err;
 
 	if (!mpam_cpus_have_feature())
 		return -EOPNOTSUPP;
@@ -2465,6 +2475,26 @@ static int __init mpam_msc_driver_init(void)
 	else
 		mpam_current_machine = mpam_dt_get_machine_type();
 
+	if (!acpi_disabled)
+		fw_num_msc = acpi_mpam_count_msc();
+	else
+		fw_num_msc = mpam_dt_count_msc();
+
+	if (fw_num_msc <= 0) {
+		pr_err("No MSC devices found in firmware\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Access MPAM system registers only after the MPAM ACPI table is
+	 * parsed, since some BIOSes disable MPAM system register accesses
+	 * but still export MPAM in ID_AA64PFR0_EL1. So we can only rely on
+	 * the MPAM ACPI table to determine whether the MPAM feature is
+	 * enabled.
+	 */
+	err = arm64_mpam_register_cpus();
+	if (err)
+		return err;
+
 	/*
 	 * If the MPAM CPU interface is not implemented, or reserved by
 	 * firmware, there is no point touching the rest of the hardware.
@@ -2477,16 +2507,6 @@ static int __init mpam_msc_driver_init(void)
 	if (mpam_not_available)
 		return 0;
 
-	if (!acpi_disabled)
-		fw_num_msc = acpi_mpam_count_msc();
-	else
-		fw_num_msc = mpam_dt_count_msc();
-
-	if (fw_num_msc <= 0) {
-		pr_err("No MSC devices found in firmware\n");
-		return -EINVAL;
-	}
-
 	if (acpi_disabled)
 		mpam_dt_create_foundling_msc();
 
-- 
Gitee

From 5df836b1e80f95b3a29c34e6baf9f5d6b4e29304 Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Wed, 15 Nov 2023 15:39:44 +0800
Subject: [PATCH 712/953] anolis: KVM: arm64: Only access MPAM registers when
 MPAM is enabled

ANBZ: #8686

The commit dda408b60b40 ("KVM: arm64: Trap guest accesses to the MPAM
registers") will access MPAM registers like MPAM2_EL2 when
mpam_cpus_have_feature() returns true. However, mpam_cpus_have_feature()
only checks the MPAM bits in ID_AA64PFR0_EL1. With some BIOSes, it is
also necessary to check for the existence of the MPAM ACPI table.
Otherwise, access to these registers will cause the system to crash.

To fix this error, add a condition on mpam_enabled. mpam_enabled is
only true if the MPAM ACPI table exists and the host kernel enables
MPAM.

Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2437
Signed-off-by: Shawn Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 arch/arm64/kernel/image-vars.h             | 1 +
 arch/arm64/kvm/hyp/include/hyp/switch.h    | 4 ++--
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 6 ++++--
 arch/arm64/kvm/hyp/nvhe/switch.c           | 3 ++-
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index d10d3fed31d9..6999668e9ecf 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -67,6 +67,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
 /* Additional static keys for cpufeatures */
 #ifdef CONFIG_ARM64_MPAM
 KVM_NVHE_ALIAS(arm64_mpam_has_hcr);
+KVM_NVHE_ALIAS(mpam_enabled);
 #endif
 
 /* Static keys which are set if a vGIC trap should be handled in hyp.
*/ diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 657320f453e6..da30acce6308 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -177,7 +177,7 @@ static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) { u64 r = MPAM_SYSREG_TRAP_MPAM0_EL1 | MPAM_SYSREG_TRAP_MPAM1_EL1; - if (!mpam_cpus_have_feature()) + if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled)) return; /* trap guest access to MPAMIDR_EL1 */ @@ -193,7 +193,7 @@ static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) static inline void __deactivate_traps_mpam(void) { - if (!mpam_cpus_have_feature()) + if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled)) return; write_sysreg_s(0, SYS_MPAM2_EL2); diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index c8767abd693e..8e99f66b377b 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -250,7 +250,8 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu) */ static inline void __mpam_guest_load(void) { - if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) write_sysreg_el1(read_sysreg_s(SYS_MPAM0_EL1), SYS_MPAM1); } @@ -264,7 +265,8 @@ static inline void __mpam_guest_put(void) u64 val, mask = MPAM_SYSREG_PMG_D | MPAM_SYSREG_PMG_I | MPAM_SYSREG_PARTID_D | MPAM_SYSREG_PARTID_I; - if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) { + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) { val = FIELD_GET(mask, read_sysreg_s(SYS_MPAM2_EL2)); write_sysreg_el1(val, SYS_MPAM1); } diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c2118f658e22..04b7f83c2ae3 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -245,7 +245,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) /* Use the host thread's partid and pmg for world switch */ static void __mpam_copy_el1_to_el2(void) { - if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) write_sysreg_s(read_sysreg_s(SYS_MPAM1_EL1), SYS_MPAM2_EL2); } -- Gitee From 20f82dbbe42570d8579d90eb0750715654f69d43 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Sun, 7 Apr 2024 14:25:42 +0800 Subject: [PATCH 713/953] anolis: configs: arm64: Enable ARM64_MPAM ANBZ: #8686 Enable ARM64_MPAM to use it for cache and memory bandwidth resource partitioning and monitoring. 
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3012
---
 arch/arm64/configs/anolis-debug_defconfig | 1 +
 arch/arm64/configs/anolis_defconfig       | 1 +
 2 files changed, 2 insertions(+)

diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig
index 97230e6e79b5..1c039289682b 100644
--- a/arch/arm64/configs/anolis-debug_defconfig
+++ b/arch/arm64/configs/anolis-debug_defconfig
@@ -500,6 +500,7 @@ CONFIG_AS_HAS_LDAPR=y
 CONFIG_ARM64_AMU_EXTN=y
 CONFIG_AS_HAS_ARMV8_4=y
 CONFIG_ARM64_TLB_RANGE=y
+CONFIG_ARM64_MPAM=y
 # end of ARMv8.4 architectural features
 
 #
diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig
index 6c0af4f2c954..4b995ee48cd6 100644
--- a/arch/arm64/configs/anolis_defconfig
+++ b/arch/arm64/configs/anolis_defconfig
@@ -498,6 +498,7 @@ CONFIG_AS_HAS_LDAPR=y
 CONFIG_ARM64_AMU_EXTN=y
 CONFIG_AS_HAS_ARMV8_4=y
 CONFIG_ARM64_TLB_RANGE=y
+CONFIG_ARM64_MPAM=y
 # end of ARMv8.4 architectural features
 
 #
-- 
Gitee

From 2a463bfe3ad2f11e061595f33685b87041df2129 Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Fri, 7 Jul 2023 10:47:37 +0800
Subject: [PATCH 714/953] anolis: sched/isolation: fix a memory leak in procfs

ANBZ: #8684

kmemleak reported:

unreferenced object 0xffff002aadb62380 (size 128):
  comm "stress-ng-procf", pid 20578, jiffies 4296227961 (age 75018.420s)
  hex dump (first 32 bytes):
    70 51 95 10 00 a0 ff ff 30 52 95 10 00 a0 ff ff  pQ......0R......
    d0 51 95 10 00 a0 ff ff a0 df 22 10 00 a0 ff ff  .Q........".....
  backtrace:
    [<00000000197d68ab>] kmem_cache_alloc_trace+0x1e0/0x460
    [<0000000030298c46>] single_open+0x58/0x1ac
    [<00000000bde8ff2a>] dyn_isolcpus_open+0x24/0x2c
    [<00000000b2d1b210>] proc_reg_open+0x2a8/0x4b0
    [<0000000070f0cdd4>] do_dentry_open+0x3bc/0xe54
    [<000000004e13fb43>] vfs_open+0x94/0xd0
    [<000000006631615d>] do_open+0x538/0x904
    [<000000007cd55e85>] path_openat+0x1b4/0x3d4
    [<00000000aee11823>] do_filp_open+0x140/0x310
    [<00000000ddf652cc>] do_sys_openat2+0x124/0x330
    [<00000000b88efbfc>] __arm64_sys_openat+0x13c/0x1c4
    [<000000008f6ca77d>] el0_svc_common+0x154/0x520
    [<0000000055a96fd0>] do_el0_svc+0xac/0xd4
    [<00000000ea0094cf>] el0_svc+0x1c/0x30
    [<0000000033b4e46c>] el0_sync_handler+0xa8/0xac
    [<00000000f389ba95>] el0_sync+0x168/0x180
unreferenced object 0xffff0041fd058db8 (size 232):
  comm "stress-ng-procf", pid 20578, jiffies 4296227961 (age 75018.420s)
  hex dump (first 32 bytes):
    00 10 3b 9e 40 00 ff ff 00 10 00 00 00 00 00 00  ..;.@...........
    01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<00000000c9b5f866>] kmem_cache_alloc+0x1d4/0x43c
    [<0000000057ea497b>] seq_open+0x54/0x150
    [<00000000c1a5ea00>] single_open+0xf0/0x1ac
    [<00000000bde8ff2a>] dyn_isolcpus_open+0x24/0x2c
    [<00000000b2d1b210>] proc_reg_open+0x2a8/0x4b0
    [<0000000070f0cdd4>] do_dentry_open+0x3bc/0xe54
    [<000000004e13fb43>] vfs_open+0x94/0xd0
    [<000000006631615d>] do_open+0x538/0x904
    [<000000007cd55e85>] path_openat+0x1b4/0x3d4
    [<00000000aee11823>] do_filp_open+0x140/0x310
    [<00000000ddf652cc>] do_sys_openat2+0x124/0x330
    [<00000000b88efbfc>] __arm64_sys_openat+0x13c/0x1c4
    [<000000008f6ca77d>] el0_svc_common+0x154/0x520
    [<0000000055a96fd0>] do_el0_svc+0xac/0xd4
    [<00000000ea0094cf>] el0_svc+0x1c/0x30
    [<0000000033b4e46c>] el0_sync_handler+0xa8/0xac

This is because proc_dyn_isolcpus_operations does not set the
proc_release handler. Fix it.
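For reference, a minimal sketch of the pairing rule involved (the show
function name here is hypothetical; the rest mirrors the proc_ops in the
diff below): everything single_open() allocates is only freed by
single_release(), so a proc_ops using the former without the latter leaks
a seq_file and its private state on every open.

	static int dyn_isolcpus_open(struct inode *inode, struct file *file)
	{
		return single_open(file, show_dyn_isolcpus, NULL);
	}

	static const struct proc_ops proc_dyn_isolcpus_operations = {
		.proc_open	= dyn_isolcpus_open,
		.proc_read	= seq_read,
		.proc_write	= write_dyn_isolcpus,
		.proc_lseek	= noop_llseek,
		/* frees what single_open() allocated */
		.proc_release	= single_release,
	};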
Fixes: 1d4479f87314 ("anolis: sched/isolation: dynamical CPU isolation support")
Signed-off-by: Tianchen Ding
Reviewed-by: Cruz
Link: https://gitee.com/anolis/cloud-kernel/pulls/3017
---
 kernel/sched/isolation.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 11f11f0cb239..6b045ffa0459 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -408,6 +408,7 @@ static const struct proc_ops proc_dyn_isolcpus_operations = {
 	.proc_read	= seq_read,
 	.proc_write	= write_dyn_isolcpus,
 	.proc_lseek	= noop_llseek,
+	.proc_release	= single_release,
 };
 
 static int __init dyn_isolcpus_init(void)
-- 
Gitee

From 21f55853bd78df81d8d2a85140d54e4946e03f6e Mon Sep 17 00:00:00 2001
From: Giovanni Cabiddu
Date: Tue, 29 Aug 2023 11:13:57 +0100
Subject: [PATCH 715/953] Documentation: ABI: debugfs-driver-qat: fix
 fw_counters path

ANBZ: #8589

commit 7ba98583448b7a0dbfa8121c7be642651e0abd61 upstream.

Intel-SIG: commit 7ba98583448b Documentation: ABI: debugfs-driver-qat: fix fw_counters path
Backport to support Intel QAT in-tree driver

The debugfs description for fw_counters reports an incorrect path
indicating a qat folder that does not exist. Fix it.

Fixes: 865b50fe6ea8 ("crypto: qat - add fw_counters debugfs file")
Signed-off-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 Documentation/ABI/testing/debugfs-driver-qat | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat
index 6731ffacc5f0..3f9b4f708051 100644
--- a/Documentation/ABI/testing/debugfs-driver-qat
+++ b/Documentation/ABI/testing/debugfs-driver-qat
@@ -1,4 +1,4 @@
-What:		/sys/kernel/debug/qat_<device>_<BDF>/qat/fw_counters
+What:		/sys/kernel/debug/qat_<device>_<BDF>/fw_counters
 Date:		November 2023
 KernelVersion:	6.6
 Contact:	qat-linux@intel.com
-- 
Gitee

From 37c7f58c0357791e370d178688f49447d69dc083 Mon Sep 17 00:00:00 2001
From: Jinjie Ruan
Date: Wed, 30 Aug 2023 15:54:51 +0800
Subject: [PATCH 716/953] crypto: qat - Use list_for_each_entry() helper

ANBZ: #8589

commit 65029eec5ceba7d847f27171cdddb046bdc3a069 upstream.

Intel-SIG: commit 65029eec5ceb crypto: qat - Use list_for_each_entry() helper
Backport to support Intel QAT in-tree driver

Convert list_for_each() to list_for_each_entry() so that the list_itr
list_head pointer and list_entry() call are no longer needed, which
can reduce a few lines of code. No functional change.
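For illustration, the shape of the conversion applied throughout the file
(a generic sketch, not the QAT types; use() is a placeholder):

	struct item {
		int val;
		struct list_head list;
	};

	/* Before: manual cursor plus an explicit container lookup. */
	struct list_head *pos;

	list_for_each(pos, &items) {
		struct item *it = list_entry(pos, struct item, list);

		use(it);
	}

	/* After: the iterator macro performs the container lookup itself. */
	struct item *it;

	list_for_each_entry(it, &items, list)
		use(it);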
Signed-off-by: Jinjie Ruan Reviewed-by: Andy Shevchenko Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../crypto/intel/qat/qat_common/adf_init.c | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 0f9e2d59ce38..b4cf605ccf3e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -61,7 +61,6 @@ int adf_service_unregister(struct service_hndl *service) static int adf_dev_init(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; int ret; @@ -140,8 +139,7 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) * This is to facilitate any ordering dependencies between services * prior to starting any of the accelerators. */ - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { dev_err(&GET_DEV(accel_dev), "Failed to initialise service %s\n", @@ -168,7 +166,6 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; int ret; set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -212,8 +209,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) adf_heartbeat_start(accel_dev); - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { dev_err(&GET_DEV(accel_dev), "Failed to start service %s\n", @@ -264,7 +260,6 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; bool wait = false; int ret; @@ -289,8 +284,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) qat_comp_algs_unregister(); clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->start_status)) continue; ret = service->event_hld(accel_dev, ADF_EVENT_STOP); @@ -327,7 +321,6 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; if (!hw_data) { dev_err(&GET_DEV(accel_dev), @@ -349,8 +342,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) &accel_dev->status); } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) @@ -387,10 +379,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, 
&service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", @@ -402,10 +392,8 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", -- Gitee From 3307f2f84142c0ab213f0bc53c11d36663c69aac Mon Sep 17 00:00:00 2001 From: Justin Stitt Date: Wed, 13 Sep 2023 00:51:05 +0000 Subject: [PATCH 717/953] crypto: qat - refactor deprecated strncpy ANBZ: #8589 commit 3102bbcdcd3c945ef0bcea498d3a0c6384536d6c upstream. Intel-SIG: commit 3102bbcdcd3c crypto: qat - refactor deprecated strncpy Backport to support Intel QAT in-tree driver `strncpy` is deprecated for use on NUL-terminated destination strings [1]. We should prefer more robust and less ambiguous string interfaces. `buf` is expected to be NUL-terminated for its eventual use in `kstrtoul()` and NUL-padding is not required. Due to the above, a suitable replacement is `strscpy` [2] due to the fact that it guarantees NUL-termination on the destination buffer. Link: https://www.kernel.org/doc/html/latest/process/deprecated.html#strncpy-on-nul-terminated-strings [1] Link: https://manpages.debian.org/testing/linux-manual-4.8/strscpy.9.en.html [2] Link: https://github.com/KSPP/linux/issues/90 Cc: linux-hardening@vger.kernel.org Signed-off-by: Justin Stitt Acked-by: Giovanni Cabiddu Reviewed-by: Kees Cook Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 4bd150d1441a..e27ea7e28c51 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -200,7 +200,7 @@ static int qat_uclo_parse_num(char *str, unsigned int *num) unsigned long ae = 0; int i; - strncpy(buf, str, 15); + strscpy(buf, str, sizeof(buf)); for (i = 0; i < 16; i++) { if (!isdigit(buf[i])) { buf[i] = '\0'; -- Gitee From 3d4cfd2c9140a7bde34aba2adb92c3aea9f02ba6 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Thu, 14 Sep 2023 10:55:46 +0100 Subject: [PATCH 718/953] crypto: qat - do not shadow error code ANBZ: #8589 commit c362a58e8da7828cf1501e1af9d43cd6c9641c5b upstream. Intel-SIG: commit c362a58e8da7 crypto: qat - do not shadow error code Backport to support Intel QAT in-tree driver Do not shadow the return code from adf_dev_down() in the error path of the DEV_DOWN command. 
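In generic form, the anti-pattern being removed (sketch only):

	ret = adf_dev_down(accel_dev, true);
	if (ret < 0)
		return -EINVAL;	/* before: the callee's error code is lost */

	ret = adf_dev_down(accel_dev, true);
	if (ret)
		return ret;	/* after: propagate what the callee reported */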
Signed-off-by: Giovanni Cabiddu Reviewed-by: Adam Guerin Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 8f04b0d3c5ac..f4a89f7ed4e9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -61,8 +61,8 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } ret = adf_dev_down(accel_dev, true); - if (ret < 0) - return -EINVAL; + if (ret) + return ret; break; case DEV_UP: -- Gitee From be3ebe42f2179ce60ccf67fe3a0390c69d503b0f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 22 Sep 2023 10:54:33 -0700 Subject: [PATCH 719/953] crypto: qat - Annotate struct adf_fw_counters with __counted_by ANBZ: #8589 commit 141f12be09ac693e2384a7999f6782c7750c30a5 upstream. Intel-SIG: commit 141f12be09ac crypto: qat - Annotate struct adf_fw_counters with __counted_by Backport to support Intel QAT in-tree driver Prepare for the coming implementation by GCC and Clang of the __counted_by attribute. Flexible array members annotated with __counted_by can have their accesses bounds-checked at run-time checking via CONFIG_UBSAN_BOUNDS (for array indexing) and CONFIG_FORTIFY_SOURCE (for strcpy/memcpy-family functions). As found with Coccinelle[1], add __counted_by for struct adf_fw_counters. [1] https://github.com/kees/kernel-tools/blob/trunk/coccinelle/examples/counted_by.cocci Cc: Giovanni Cabiddu Cc: Herbert Xu Cc: "David S. Miller" Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Tom Rix Cc: Adam Guerin Cc: Lucas Segarra Fernandez Cc: Andy Shevchenko Cc: qat-linux@intel.com Cc: linux-crypto@vger.kernel.org Cc: llvm@lists.linux.dev Signed-off-by: Kees Cook Reviewed-by: Gustavo A. R. Silva Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_fw_counters.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c index cb6e09ef5c9f..6abe4736eab8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c @@ -34,7 +34,7 @@ struct adf_ae_counters { struct adf_fw_counters { u16 ae_count; - struct adf_ae_counters ae_counters[]; + struct adf_ae_counters ae_counters[] __counted_by(ae_count); }; static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae, -- Gitee From bbc96af1eb17f9d7780412a27a88b78ab3af2110 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Aug 2023 17:55:02 +0800 Subject: [PATCH 720/953] crypto: qat - Remove zlib-deflate ANBZ: #8589 commit e9dd20e0e5f62d01d9404db2cf9824d1faebcf71 upstream. Intel-SIG: commit e9dd20e0e5f6 crypto: qat - Remove zlib-deflate Backport to support Intel QAT in-tree driver Remove the implementation of zlib-deflate because it is completely unused in the kernel. 
Signed-off-by: Herbert Xu Reviewed-by: Ard Biesheuvel [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/qat_comp_algs.c | 129 +----------------- 1 file changed, 1 insertion(+), 128 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index b533984906ec..bf8c0ee62917 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -109,69 +109,6 @@ static void qat_comp_resubmit(struct work_struct *work) acomp_request_complete(areq, ret); } -static int parse_zlib_header(u16 zlib_h) -{ - int ret = -EINVAL; - __be16 header; - u8 *header_p; - u8 cmf, flg; - - header = cpu_to_be16(zlib_h); - header_p = (u8 *)&header; - - flg = header_p[0]; - cmf = header_p[1]; - - if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K) - return ret; - - if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE) - return ret; - - if (flg & QAT_RFC_1950_DICT_MASK) - return ret; - - return 0; -} - -static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req, - void *resp) -{ - struct acomp_req *areq = qat_req->acompress_req; - enum direction dir = qat_req->dir; - __be32 qat_produced_adler; - - qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp)); - - if (dir == COMPRESSION) { - __be16 zlib_header; - - zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR); - scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1); - areq->dlen += QAT_RFC_1950_HDR_SIZE; - - scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen, - QAT_RFC_1950_FOOTER_SIZE, 1); - areq->dlen += QAT_RFC_1950_FOOTER_SIZE; - } else { - __be32 decomp_adler; - int footer_offset; - int consumed; - - consumed = qat_comp_get_consumed_ctr(resp); - footer_offset = consumed + QAT_RFC_1950_HDR_SIZE; - if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen) - return -EBADMSG; - - scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset, - QAT_RFC_1950_FOOTER_SIZE, 0); - - if (qat_produced_adler != decomp_adler) - return -EBADMSG; - } - return 0; -} - static void qat_comp_generic_callback(struct qat_compression_req *qat_req, void *resp) { @@ -293,18 +230,6 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) memset(ctx, 0, sizeof(*ctx)); } -static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm) -{ - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); - int ret; - - ret = qat_comp_alg_init_tfm(acomp_tfm); - ctx->qat_comp_callback = &qat_comp_rfc1950_callback; - - return ret; -} - static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir, unsigned int shdr, unsigned int sftr, unsigned int dhdr, unsigned int dftr) @@ -400,43 +325,6 @@ static int qat_comp_alg_decompress(struct acomp_req *req) return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0); } -static int qat_comp_alg_rfc1950_compress(struct acomp_req *req) -{ - if (!req->dst && req->dlen != 0) - return -EINVAL; - - if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) - return -EINVAL; - - return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, - QAT_RFC_1950_HDR_SIZE, - QAT_RFC_1950_FOOTER_SIZE); -} - -static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req) -{ - struct crypto_acomp *acomp_tfm = 
crypto_acomp_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
-	u16 zlib_header;
-	int ret;
-
-	if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
-		return -EBADMSG;
-
-	scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
-
-	ret = parse_zlib_header(zlib_header);
-	if (ret) {
-		dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
-		return ret;
-	}
-
-	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
-						QAT_RFC_1950_FOOTER_SIZE, 0, 0);
-}
-
 static struct acomp_alg qat_acomp[] = { {
 	.base = {
 		.cra_name = "deflate",
@@ -452,22 +340,7 @@ static struct acomp_alg qat_acomp[] = { {
 	.decompress = qat_comp_alg_decompress,
 	.dst_free = sgl_free,
 	.reqsize = sizeof(struct qat_compression_req),
-}, {
-	.base = {
-		.cra_name = "zlib-deflate",
-		.cra_driver_name = "qat_zlib_deflate",
-		.cra_priority = 4001,
-		.cra_flags = CRYPTO_ALG_ASYNC,
-		.cra_ctxsize = sizeof(struct qat_compression_ctx),
-		.cra_module = THIS_MODULE,
-	},
-	.init = qat_comp_alg_rfc1950_init_tfm,
-	.exit = qat_comp_alg_exit_tfm,
-	.compress = qat_comp_alg_rfc1950_compress,
-	.decompress = qat_comp_alg_rfc1950_decompress,
-	.dst_free = sgl_free,
-	.reqsize = sizeof(struct qat_compression_req),
-} };
+}};
 
 int qat_comp_algs_register(void)
 {
-- 
Gitee

From fc33288feb7a2f189dc04eb40d707dc6e748c84f Mon Sep 17 00:00:00 2001
From: Giovanni Cabiddu
Date: Mon, 2 Oct 2023 09:51:09 +0100
Subject: [PATCH 721/953] crypto: qat - add namespace to driver

ANBZ: #8589

commit 4999999ed7e099fcc2476c8b3a245c4c2c9026c0 upstream.

Intel-SIG: commit 4999999ed7e0 crypto: qat - add namespace to driver
Backport to support Intel QAT in-tree driver

Create CRYPTO_QAT namespace for symbols exported by the qat_common
module and import those in the QAT drivers. It will reduce the global
namespace crowdedness and potential misuse of the API.

This does not introduce any functional change.
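For reference, the mechanism being used (a sketch; with
-DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT set in the Makefile, plain
EXPORT_SYMBOL_GPL() sites behave as if written with the _NS variant, so
no per-symbol change is needed; adf_dev_up is used here as an example
symbol):

	/* Exporter side (qat_common). */
	EXPORT_SYMBOL_NS_GPL(adf_dev_up, CRYPTO_QAT);

	/* Importer side (each qat_* driver module). */
	MODULE_IMPORT_NS(CRYPTO_QAT);

Loading a module that references a namespaced symbol without the
corresponding MODULE_IMPORT_NS() fails at modpost/load time, which is
what makes accidental use of the API visible.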
Suggested-by: Andy Shevchenko Signed-off-by: Giovanni Cabiddu Reviewed-by: Lucas Segarra Fernandez Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c | 1 + 8 files changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 90f5c1ca7b8d..2ccd1223f1ef 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -467,3 +467,4 @@ MODULE_FIRMWARE(ADF_4XXX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index 468c9102093f..956a4c85609a 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C3XXX_FW); MODULE_FIRMWARE(ADF_C3XXX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c index d5a0ecca9d0b..a8de9cd09c05 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index 0186921be936..ad0ca4384998 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C62X_FW); MODULE_FIRMWARE(ADF_C62X_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c index c9ae6c0d0dca..53b8ddb63364 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 8dbf146de3fa..a3c611264caf 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o +ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT intel_qat-objs := adf_cfg.o \ adf_isr.o \ adf_ctl_drv.o \ diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c 
index 1e748e8ce12d..40b456b8035b 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_DH895XCC_FW);
 MODULE_FIRMWARE(ADF_DH895XCC_MMP);
 MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
 MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index fefb85ceaeb9..d59cb1ba2ad5 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel");
 MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
 MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
-- 
Gitee

From ccd16520a37be2185b34ea28feef50d472af50cf Mon Sep 17 00:00:00 2001
From: Lucas Segarra Fernandez
Date: Wed, 4 Oct 2023 12:09:19 +0200
Subject: [PATCH 722/953] crypto: qat - refactor included headers

ANBZ: #8589

commit 756762decc604a5ac5c041f23dd447c5e691f459 upstream.

Intel-SIG: commit 756762decc60 crypto: qat - refactor included headers
Backport to support Intel QAT in-tree driver

Include kernel.h for GENMASK(), kstrtobool() and types.
Add forward declaration for struct adf_accel_dev.
Remove unneeded include.

This change doesn't introduce any functional change.

Signed-off-by: Lucas Segarra Fernandez
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c | 2 ++
 drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h | 4 +++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
index 34c6cd8e27c0..b0e60471163c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
@@ -2,6 +2,8 @@
 /* Copyright(c) 2022 Intel Corporation */
 #include <linux/bitfield.h>
 #include <linux/iopoll.h>
+#include <linux/kernel.h>
+
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
 #include "adf_gen4_pm.h"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index c2768762cca3..39d37b352b45 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
@@ -3,7 +3,9 @@
 #ifndef ADF_GEN4_PM_H
 #define ADF_GEN4_PM_H
 
-#include "adf_accel_devices.h"
+#include <linux/bits.h>
+
+struct adf_accel_dev;
 
 /* Power management registers */
 #define ADF_GEN4_PM_HOST_MSG (0x50A01C)
-- 
Gitee

From 59e32e5167c8781181ff8859980c58274201f4e2 Mon Sep 17 00:00:00 2001
From: Lucas Segarra Fernandez
Date: Wed, 4 Oct 2023 12:09:20 +0200
Subject: [PATCH 723/953] crypto: qat - add pm_status debugfs file

ANBZ: #8589

commit e079231676e05d6c88fba4585db1ac399a790b63 upstream.

Intel-SIG: commit e079231676e0 crypto: qat - add pm_status debugfs file
Backport to support Intel QAT in-tree driver

QAT devices implement a mechanism that allows them to go autonomously
to a low power state depending on the load.

Expose power management info by providing the "pm_status" file under
debugfs. This includes PM state, PM event log, PM event counters, PM HW
CSRs, per-resource type constraint counters and per-domain power gating
status specific to the QAT device.
This information is retrieved from (1) the FW by means of the
ICP_QAT_FW_PM_INFO command, (2) CSRs and (3) counters collected by the
device driver.

In addition, add logic to keep track of and report power management
event interrupts and acks/nacks sent to FW to allow/prevent state
transitions.

Signed-off-by: Lucas Segarra Fernandez
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 Documentation/ABI/testing/debugfs-driver-qat  |   9 +
 drivers/crypto/intel/qat/qat_common/Makefile  |   2 +
 .../intel/qat/qat_common/adf_accel_devices.h  |  13 +
 .../crypto/intel/qat/qat_common/adf_admin.c   |  27 ++
 .../intel/qat/qat_common/adf_common_drv.h     |   1 +
 .../crypto/intel/qat/qat_common/adf_dbgfs.c   |   3 +
 .../crypto/intel/qat/qat_common/adf_gen4_pm.c |  24 +-
 .../crypto/intel/qat/qat_common/adf_gen4_pm.h |  46 +++
 .../qat/qat_common/adf_gen4_pm_debugfs.c      | 265 ++++++++++++++++++
 .../intel/qat/qat_common/adf_pm_dbgfs.c       |  48 ++++
 .../intel/qat/qat_common/adf_pm_dbgfs.h       |  12 +
 .../qat/qat_common/icp_qat_fw_init_admin.h    |  35 +++
 12 files changed, 480 insertions(+), 5 deletions(-)
 create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
 create mode 100644 drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c
 create mode 100644 drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h

diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat
index 3f9b4f708051..0656f27d1042 100644
--- a/Documentation/ABI/testing/debugfs-driver-qat
+++ b/Documentation/ABI/testing/debugfs-driver-qat
@@ -59,3 +59,12 @@ Description:	(RO) Read returns the device health status.
 
 	The driver does not monitor for Heartbeat. It is left for a user
 	to poll the status periodically.
+
+What:		/sys/kernel/debug/qat_<device>_<BDF>/pm_status
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Read returns power management information specific to the
+	QAT device.
+
+	This attribute is only available for qat_4xxx devices.
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index a3c611264caf..2f0330651622 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -33,8 +33,10 @@ intel_qat-objs := adf_cfg.o \ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ + adf_gen4_pm_debugfs.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ + adf_pm_dbgfs.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 79d5a1535eda..36c6a6bf4a66 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -292,6 +292,18 @@ struct adf_dc_data { dma_addr_t ovf_buff_p; }; +struct adf_pm { + struct dentry *debugfs_pm_status; + bool present; + int idle_irq_counters; + int throttle_irq_counters; + int fw_irq_counters; + int host_ack_counter; + int host_nack_counter; + ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, loff_t *pos); +}; + struct adf_accel_dev { struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; @@ -299,6 +311,7 @@ struct adf_accel_dev { struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; struct adf_dc_data *dc_data; + struct adf_pm power_management; struct list_head crypto_list; struct list_head compression_list; unsigned long status; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 194d64d4b99a..2d45167b48a0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -379,6 +379,33 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, + size_t buff_size) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + u32 ae_mask = hw_data->admin_ae_mask; + int ret; + + /* Query pm info via init/admin cmd */ + if (!accel_dev->admin) { + dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n"); + return -EFAULT; + } + + req.cmd_id = ICP_QAT_FW_PM_INFO; + req.init_cfg_sz = buff_size; + req.init_cfg_ptr = p_state_addr; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to query power-management info\n"); + + return ret; +} + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 79ff7982378d..46dd81074166 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -95,6 +95,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); int adf_init_arb(struct adf_accel_dev *accel_dev); void 
adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 04845f8d72be..395bb493f20c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -8,6 +8,7 @@ #include "adf_dbgfs.h" #include "adf_fw_counters.h" #include "adf_heartbeat_dbgfs.h" +#include "adf_pm_dbgfs.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -62,6 +63,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) if (!accel_dev->is_vf) { adf_fw_counters_dbgfs_add(accel_dev); adf_heartbeat_dbgfs_add(accel_dev); + adf_pm_dbgfs_add(accel_dev); } } @@ -75,6 +77,7 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) return; if (!accel_dev->is_vf) { + adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); adf_fw_counters_dbgfs_rm(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c index b0e60471163c..c663d3a20c5b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c @@ -12,11 +12,6 @@ #include "adf_gen4_hw_data.h" #include "adf_cfg.h" -enum qat_pm_host_msg { - PM_NO_CHANGE = 0, - PM_SET_MIN, -}; - struct adf_gen4_pm_data { struct work_struct pm_irq_work; struct adf_accel_dev *accel_dev; @@ -27,6 +22,7 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) { char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {}; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; bool pm_idle_support; u32 msg; int ret; @@ -41,6 +37,11 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) if (ret) pm_idle_support = true; + if (pm_idle_support) + pm->host_ack_counter++; + else + pm->host_nack_counter++; + /* Send HOST_MSG */ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, pm_idle_support ? 
PM_SET_MIN : PM_NO_CHANGE); @@ -61,17 +62,27 @@ static void pm_bh_handler(struct work_struct *work) container_of(work, struct adf_gen4_pm_data, pm_irq_work); struct adf_accel_dev *accel_dev = pm_data->accel_dev; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; u32 pm_int_sts = pm_data->pm_int_sts; u32 val; /* PM Idle interrupt */ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) { + pm->idle_irq_counters++; /* Issue host message to FW */ if (send_host_msg(accel_dev)) dev_warn_ratelimited(&GET_DEV(accel_dev), "Failed to send host msg to FW\n"); } + /* PM throttle interrupt */ + if (pm_int_sts & ADF_GEN4_PM_THR_STS) + pm->throttle_irq_counters++; + + /* PM fw interrupt */ + if (pm_int_sts & ADF_GEN4_PM_FW_INT_STS) + pm->fw_irq_counters++; + /* Clear interrupt status */ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts); @@ -131,6 +142,9 @@ int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev) if (ret) return ret; + /* Initialize PM internal data */ + adf_gen4_init_dev_pm_data(accel_dev); + /* Enable default PM interrupts: IDLE, THROTTLE */ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); val |= ADF_GEN4_PM_INT_EN_DEFAULT; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h index 39d37b352b45..a49352b79a7a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h @@ -7,6 +7,11 @@ struct adf_accel_dev; +enum qat_pm_host_msg { + PM_NO_CHANGE = 0, + PM_SET_MIN, +}; + /* Power management registers */ #define ADF_GEN4_PM_HOST_MSG (0x50A01C) @@ -41,7 +46,48 @@ struct adf_accel_dev; #define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7) #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1) +/* PM CSRs fields masks */ +#define ADF_GEN4_PM_DOMAIN_POWER_GATED_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_SSM_PM_ENABLE_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_IDLE_FILTER_MASK GENMASK(5, 3) +#define ADF_GEN4_PM_IDLE_ENABLE_MASK BIT(2) +#define ADF_GEN4_PM_ENABLE_PM_MASK BIT(21) +#define ADF_GEN4_PM_ENABLE_PM_IDLE_MASK BIT(22) +#define ADF_GEN4_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23) +#define ADF_GEN4_PM_CURRENT_WP_MASK GENMASK(19, 11) +#define ADF_GEN4_PM_CPM_PM_STATE_MASK GENMASK(22, 20) +#define ADF_GEN4_PM_PENDING_WP_MASK GENMASK(31, 23) +#define ADF_GEN4_PM_THR_VALUE_MASK GENMASK(6, 4) +#define ADF_GEN4_PM_MIN_PWR_ACK_MASK BIT(7) +#define ADF_GEN4_PM_MIN_PWR_ACK_PENDING_MASK BIT(17) +#define ADF_GEN4_PM_CPR_ACTIVE_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_CPR_MANAGED_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_XLT_ACTIVE_COUNT_MASK BIT(1) +#define ADF_GEN4_PM_XLT_MANAGED_COUNT_MASK BIT(1) +#define ADF_GEN4_PM_DCPR_ACTIVE_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_DCPR_MANAGED_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_PKE_ACTIVE_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_PKE_MANAGED_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_WAT_ACTIVE_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WAT_MANAGED_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WCP_ACTIVE_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_WCP_MANAGED_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_UCS_ACTIVE_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_UCS_MANAGED_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_CPH_ACTIVE_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_CPH_MANAGED_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_ATH_ACTIVE_COUNT_MASK GENMASK(28, 25) +#define ADF_GEN4_PM_ATH_MANAGED_COUNT_MASK GENMASK(28, 25) + int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev); bool 
adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev); +#else +static inline void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c new file mode 100644 index 000000000000..5114759287c6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_pm.h" +#include "icp_qat_fw_init_admin.h" + +/* + * This is needed because a variable is used to index the mask at + * pm_scnprint_table(), making it not compile time constant, so the compile + * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled. + */ +#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1)) + +#define PM_INFO_MEMBER_OFF(member) \ + (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32)) + +#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \ +{ \ + .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \ + .key = __stringify(_field_), \ + .field_mask = _mask_, \ +} + +#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0)) + +#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK) + +#define PM_INFO_MAX_KEY_LEN 21 + +struct pm_status_row { + int reg_offset; + u32 field_mask; + const char *key; +}; + +static struct pm_status_row pm_fuse_rows[] = { + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE), +}; + +static struct pm_status_row pm_info_rows[] = { + PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE), + PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP), + PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER), + PM_INFO_REGSET_ENTRY(pm.main, MIN_PWR_ACK), + PM_INFO_REGSET_ENTRY(pm.thread, MIN_PWR_ACK_PENDING), + PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE), +}; + +static struct pm_status_row pm_ssm_rows[] = { + PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE), + PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT), + PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, ATH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, PKE_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, DCPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, UCS_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, XLT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WAT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WCP_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, ATH_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPH_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, PKE_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPR_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, DCPR_MANAGED_COUNT), + 
PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, UCS_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, XLT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WAT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT), +}; + +static struct pm_status_row pm_log_rows[] = { + PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.timer, TIMER_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT), +}; + +static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = { + PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0), + PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1), + PM_INFO_REGSET_ENTRY32(event_log[2], EVENT2), + PM_INFO_REGSET_ENTRY32(event_log[3], EVENT3), + PM_INFO_REGSET_ENTRY32(event_log[4], EVENT4), + PM_INFO_REGSET_ENTRY32(event_log[5], EVENT5), + PM_INFO_REGSET_ENTRY32(event_log[6], EVENT6), + PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7), +}; + +static struct pm_status_row pm_csrs_rows[] = { + PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT), + PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS), + PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW), + PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ), +}; + +static int pm_scnprint_table(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, int table_len, + bool lowercase) +{ + char key[PM_INFO_MAX_KEY_LEN]; + int wr = 0; + int i; + + for (i = 0; i < table_len; i++) { + if (lowercase) + string_lower(key, table[i].key); + else + string_upper(key, table[i].key); + + wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key, + field_get(table[i].field_mask, + pm_info_regs[table[i].reg_offset])); + } + + return wr; +} + +static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, false); +} + +static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, true); +} + +static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE); + +static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, + loff_t *pos) +{ + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; + struct icp_qat_fw_init_admin_pm_info *pm_info; + dma_addr_t p_state_addr; + u32 *pm_info_regs; + char *pm_kv; + int len = 0; + u32 val; + int ret; + + pm_info = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_info) + return -ENOMEM; + + pm_kv = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_kv) { + ret = -ENOMEM; + goto out_free; + } + + p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE, + DMA_FROM_DEVICE); + ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr); + if (ret) + goto out_free; + + /* Query PM info from QAT FW */ + ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE); + dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + if (ret) + goto out_free; + + pm_info_regs = (u32 *)pm_info; + + /* Fusectl related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- PM Fuse info ---------\n"); + len += 
pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_fuse_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n", + pm_info->max_pwrreq); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n", + pm_info->min_pwrreq); + + /* PM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------ PM Info ------------\n"); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n", + pm_info->pwr_state == PM_SET_MIN ? "min" : "max"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_info_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n"); + + /* SSM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- SSM_PM Info ----------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_ssm_rows)); + + /* Log related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------- PM Log -------------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_log_rows)); + + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_event_rows)); + + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n", + pm->idle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "fw_irq_count: %#x\n", + pm->fw_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "throttle_irq_count: %#x\n", pm->throttle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_ack_count: %#x\n", + pm->host_ack_counter); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_nack_count: %#x\n", + pm->host_nack_counter); + + /* CSRs content */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- HW PM CSRs -----------\n"); + len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_csrs_rows)); + + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_HOST_MSG: %#x\n", val); + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_INTERRUPT: %#x\n", val); + ret = simple_read_from_buffer(buf, count, pos, pm_kv, len); + +out_free: + kfree(pm_info); + kfree(pm_kv); + return ret; +} + +void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ + accel_dev->power_management.print_pm_status = adf_gen4_print_pm_status; + accel_dev->power_management.present = true; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c new file mode 100644 index 000000000000..f0a13c190196 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_pm_dbgfs.h" + +static ssize_t pm_status_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + struct adf_accel_dev *accel_dev = file_inode(f)->i_private; + struct adf_pm pm = accel_dev->power_management; + + if (pm.print_pm_status) + return pm.print_pm_status(accel_dev, buf, count, pos); + + return count; +} + +static const struct file_operations pm_status_fops = { + .owner = THIS_MODULE, + .read = pm_status_read, +}; + +void 
adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present || !pm->print_pm_status) + return; + + pm->debugfs_pm_status = debugfs_create_file("pm_status", 0400, + accel_dev->debugfs_dir, + accel_dev, &pm_status_fops); +} + +void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present) + return; + + debugfs_remove(pm->debugfs_pm_status); + pm->debugfs_pm_status = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h new file mode 100644 index 000000000000..83632e5aa097 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_PM_DBGFS_H_ +#define ADF_PM_DBGFS_H_ + +struct adf_accel_dev; + +void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev); +void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev); + +#endif /* ADF_PM_DBGFS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 019a6443834e..2ebbec75d778 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -20,6 +20,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_PM_STATE_CONFIG = 128, + ICP_QAT_FW_PM_INFO = 129, }; enum icp_qat_fw_init_admin_resp_status { @@ -108,4 +109,38 @@ struct icp_qat_fw_init_admin_resp { #define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC +#define ICP_QAT_NUMBER_OF_PM_EVENTS 8 + +struct icp_qat_fw_init_admin_pm_info { + __u16 max_pwrreq; + __u16 min_pwrreq; + __u16 resvrd1; + __u8 pwr_state; + __u8 resvrd2; + __u32 fusectl0; + struct_group(event_counters, + __u32 sys_pm; + __u32 host_msg; + __u32 unknown; + __u32 local_ssm; + __u32 timer; + ); + __u32 event_log[ICP_QAT_NUMBER_OF_PM_EVENTS]; + struct_group(pm, + __u32 fw_init; + __u32 pwrreq; + __u32 status; + __u32 main; + __u32 thread; + ); + struct_group(ssm, + __u32 pm_enable; + __u32 pm_active_status; + __u32 pm_managed_status; + __u32 pm_domain_status; + __u32 active_constraint; + ); + __u32 resvrd3[6]; +}; + #endif -- Gitee From 47bd7489225b7f9e87c5001f8b00d092ef666483 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Wed, 4 Oct 2023 12:36:42 +0200 Subject: [PATCH 724/953] crypto: qat - add cnv_errors debugfs file ANBZ: #8589 commit d807f0240c713bdd7c81a7e212f2feb0b5cd6725 upstream. Intel-SIG: commit d807f0240c71 crypto: qat - add cnv_errors debugfs file Backport to support Intel QAT in-tree driver The Compress and Verify (CnV) feature checks and ensures data integrity in the compression operation. The implementation of CnV keeps a record of the CnV errors that have occurred since the driver was loaded. Expose CnV error stats by providing the "cnv_errors" file under debugfs. This includes the number of errors detected up to now and the type of the last error. The error count is provided on a per Acceleration Engine basis and it is reset every time the driver is loaded.
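The "type of the last error" above is packed by the firmware into a 16-bit latest_error word: the error type occupies bits 15:12 and type-specific detail bits 11:0, where length-delta errors carry a signed 12-bit value and slice errors a signed 8-bit value; this is the decoding that the CNV_*_MASK macros and get_err_info() implement later in this patch. A standalone sketch of the same decoding, with made-up helper names:

/* Sketch: decode a CnV latest_error word the way the driver does.
 * Type in bits 15:12, info in bits 11:0; types 2/5 (length deltas)
 * are signed 12-bit, types 3/4 (slice errors) signed 8-bit. */
#include <stdint.h>
#include <stdio.h>

/* Sign-extend the low 'bits' bits of 'val'. */
static int sign_extend(unsigned int val, unsigned int bits)
{
	unsigned int m = 1u << (bits - 1);

	val &= (1u << bits) - 1;
	return (int)(val ^ m) - (int)m;
}

static void cnv_decode(uint16_t latest_err)
{
	unsigned int type = (latest_err >> 12) & 0xf;
	int info;

	switch (type) {
	case 2:
	case 5:		/* produced/consumed length delta */
		info = sign_extend(latest_err, 12);
		break;
	case 3:
	case 4:		/* decompression/translation slice error */
		info = sign_extend(latest_err, 8);
		break;
	default:	/* checksum, unknown, ...: raw 12-bit info */
		info = latest_err & 0xfff;
		break;
	}
	printf("CnV error type %u, info %d\n", type, info);
}

int main(void)
{
	cnv_decode(0x2ffe);	/* type 2 (produced-length delta), info -2 */
	return 0;
}

For instance, cnv_decode(0x2ffe) reports error type 2 (a produced-length delta) with info -2, matching how the driver renders "Length Error-P [-2]".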
Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/debugfs-driver-qat | 13 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_admin.c | 21 ++ .../intel/qat/qat_common/adf_cnv_dbgfs.c | 299 ++++++++++++++++++ .../intel/qat/qat_common/adf_cnv_dbgfs.h | 11 + .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_dbgfs.c | 3 + .../qat/qat_common/icp_qat_fw_init_admin.h | 5 + 9 files changed, 355 insertions(+) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index 0656f27d1042..b2db010d851e 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -68,3 +68,16 @@ Description: (RO) Read returns power management information specific to the QAT device. This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat_<device>_<BDF>/cnv_errors +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Read returns, for each Acceleration Engine (AE), the number + of errors and the type of the last error detected by the device + when performing verified compression. + Reported counters:: + + <N>: Number of Compress and Verify (CnV) errors and type + of the last CnV error detected by Acceleration + Engine N. diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 2f0330651622..bb3b2516e6c6 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -33,6 +33,7 @@ intel_qat-objs := adf_cfg.o \ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ + adf_cnv_dbgfs.o \ adf_gen4_pm_debugfs.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 36c6a6bf4a66..3674904d0527 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -318,6 +318,7 @@ struct adf_accel_dev { atomic_t ref_count; struct dentry *debugfs_dir; struct dentry *fw_cntr_dbgfile; + struct dentry *cnv_dbgfile; struct list_head list; struct module *owner; struct adf_accel_pci accel_pci_dev; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 2d45167b48a0..3a04e743497f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -406,6 +406,27 @@ int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, return ret; } +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, + u16 *latest_err) +{ + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + int ret; + + req.cmd_id = ICP_QAT_FW_CNV_STATS_GET; + + ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp); + if (ret) + return ret; + if (resp.status) + return -EPROTONOSUPPORT; + + *err_cnt = resp.error_count; + *latest_err = resp.latest_error; + + return ret; +} + int
adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c new file mode 100644 index 000000000000..aa5b6ff1dfb4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cnv_dbgfs.h" +#include "qat_compression.h" + +#define CNV_DEBUGFS_FILENAME "cnv_errors" +#define CNV_MIN_PADDING 16 + +#define CNV_ERR_INFO_MASK GENMASK(11, 0) +#define CNV_ERR_TYPE_MASK GENMASK(15, 12) +#define CNV_SLICE_ERR_MASK GENMASK(7, 0) +#define CNV_SLICE_ERR_SIGN_BIT_INDEX 7 +#define CNV_DELTA_ERR_SIGN_BIT_INDEX 11 + +enum cnv_error_type { + CNV_ERR_TYPE_NONE, + CNV_ERR_TYPE_CHECKSUM, + CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH, + CNV_ERR_TYPE_DECOMPRESSION, + CNV_ERR_TYPE_TRANSLATION, + CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH, + CNV_ERR_TYPE_UNKNOWN, + CNV_ERR_TYPES_COUNT +}; + +#define CNV_ERROR_TYPE_GET(latest_err) \ + min_t(u16, u16_get_bits(latest_err, CNV_ERR_TYPE_MASK), CNV_ERR_TYPE_UNKNOWN) + +#define CNV_GET_DELTA_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_DELTA_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_SLICE_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_SLICE_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_DEFAULT_ERR_INFO(latest_error) \ + u16_get_bits(latest_error, CNV_ERR_INFO_MASK) + +enum cnv_fields { + CNV_ERR_COUNT, + CNV_LATEST_ERR, + CNV_FIELDS_COUNT +}; + +static const char * const cnv_field_names[CNV_FIELDS_COUNT] = { + [CNV_ERR_COUNT] = "Total Errors", + [CNV_LATEST_ERR] = "Last Error", +}; + +static const char * const cnv_error_names[CNV_ERR_TYPES_COUNT] = { + [CNV_ERR_TYPE_NONE] = "No Error", + [CNV_ERR_TYPE_CHECKSUM] = "Checksum Error", + [CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH] = "Length Error-P", + [CNV_ERR_TYPE_DECOMPRESSION] = "Decomp Error", + [CNV_ERR_TYPE_TRANSLATION] = "Xlat Error", + [CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH] = "Length Error-C", + [CNV_ERR_TYPE_UNKNOWN] = "Unknown Error", +}; + +struct ae_cnv_errors { + u16 ae; + u16 err_cnt; + u16 latest_err; + bool is_comp_ae; +}; + +struct cnv_err_stats { + u16 ae_count; + struct ae_cnv_errors ae_cnv_errors[]; +}; + +static s16 get_err_info(u8 error_type, u16 latest) +{ + switch (error_type) { + case CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH: + case CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH: + return CNV_GET_DELTA_ERR_INFO(latest); + case CNV_ERR_TYPE_DECOMPRESSION: + case CNV_ERR_TYPE_TRANSLATION: + return CNV_GET_SLICE_ERR_INFO(latest); + default: + return CNV_GET_DEFAULT_ERR_INFO(latest); + } +} + +static void *qat_cnv_errors_seq_start(struct seq_file *sfile, loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + if (*pos == 0) + return SEQ_START_TOKEN; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void *qat_cnv_errors_seq_next(struct seq_file *sfile, void *v, + loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + (*pos)++; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void qat_cnv_errors_seq_stop(struct seq_file *sfile, void *v) +{ +} + +static int qat_cnv_errors_seq_show(struct seq_file *sfile, void *v) +{ + struct ae_cnv_errors *ae_errors; + unsigned int i; + s16 err_info; + u8 err_type; + + if (v == 
SEQ_START_TOKEN) { + seq_puts(sfile, "AE "); + for (i = 0; i < CNV_FIELDS_COUNT; ++i) + seq_printf(sfile, " %*s", CNV_MIN_PADDING, + cnv_field_names[i]); + } else { + ae_errors = v; + + if (!ae_errors->is_comp_ae) + return 0; + + err_type = CNV_ERROR_TYPE_GET(ae_errors->latest_err); + err_info = get_err_info(err_type, ae_errors->latest_err); + + seq_printf(sfile, "%d:", ae_errors->ae); + seq_printf(sfile, " %*d", CNV_MIN_PADDING, ae_errors->err_cnt); + seq_printf(sfile, "%*s [%d]", CNV_MIN_PADDING, + cnv_error_names[err_type], err_info); + } + seq_putc(sfile, '\n'); + + return 0; +} + +static const struct seq_operations qat_cnv_errors_sops = { + .start = qat_cnv_errors_seq_start, + .next = qat_cnv_errors_seq_next, + .stop = qat_cnv_errors_seq_stop, + .show = qat_cnv_errors_seq_show, +}; + +/** + * cnv_err_stats_alloc() - Get CNV stats for the provided device. + * @accel_dev: Pointer to a QAT acceleration device + * + * Allocates and populates table of CNV errors statistics for each non-admin AE + * available through the supplied acceleration device. The caller becomes the + * owner of such memory and is responsible for the deallocation through a call + * to kfree(). + * + * Returns: a pointer to a dynamically allocated struct cnv_err_stats on success + * or a negative value on error. + */ +static struct cnv_err_stats *cnv_err_stats_alloc(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct cnv_err_stats *err_stats; + unsigned long ae_count; + unsigned long ae_mask; + size_t err_stats_size; + unsigned long ae; + unsigned int i; + u16 latest_err; + u16 err_cnt; + int ret; + + if (!adf_dev_started(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "QAT Device not started\n"); + return ERR_PTR(-EBUSY); + } + + /* Ignore the admin AEs */ + ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask; + ae_count = hweight_long(ae_mask); + if (unlikely(!ae_count)) + return ERR_PTR(-EINVAL); + + err_stats_size = struct_size(err_stats, ae_cnv_errors, ae_count); + err_stats = kmalloc(err_stats_size, GFP_KERNEL); + if (!err_stats) + return ERR_PTR(-ENOMEM); + + err_stats->ae_count = ae_count; + + i = 0; + for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { + ret = adf_get_cnv_stats(accel_dev, ae, &err_cnt, &latest_err); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "Failed to get CNV stats for ae %ld, [%d].\n", + ae, ret); + err_stats->ae_cnv_errors[i++].is_comp_ae = false; + continue; + } + err_stats->ae_cnv_errors[i].is_comp_ae = true; + err_stats->ae_cnv_errors[i].latest_err = latest_err; + err_stats->ae_cnv_errors[i].err_cnt = err_cnt; + err_stats->ae_cnv_errors[i].ae = ae; + i++; + } + + return err_stats; +} + +static int qat_cnv_errors_file_open(struct inode *inode, struct file *file) +{ + struct adf_accel_dev *accel_dev = inode->i_private; + struct seq_file *cnv_errors_seq_file; + struct cnv_err_stats *cnv_err_stats; + int ret; + + cnv_err_stats = cnv_err_stats_alloc(accel_dev); + if (IS_ERR(cnv_err_stats)) + return PTR_ERR(cnv_err_stats); + + ret = seq_open(file, &qat_cnv_errors_sops); + if (unlikely(ret)) { + kfree(cnv_err_stats); + return ret; + } + + cnv_errors_seq_file = file->private_data; + cnv_errors_seq_file->private = cnv_err_stats; + return ret; +} + +static int qat_cnv_errors_file_release(struct inode *inode, struct file *file) +{ + struct seq_file *cnv_errors_seq_file = file->private_data; + + kfree(cnv_errors_seq_file->private); + cnv_errors_seq_file->private = NULL; + + return seq_release(inode, file); +} + +static const 
struct file_operations qat_cnv_fops = { + .owner = THIS_MODULE, + .open = qat_cnv_errors_file_open, + .read = seq_read, + .llseek = seq_lseek, + .release = qat_cnv_errors_file_release, +}; + +static ssize_t no_comp_file_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + char *file_msg = "No engine configured for comp\n"; + + return simple_read_from_buffer(buf, count, pos, file_msg, + strlen(file_msg)); +} + +static const struct file_operations qat_cnv_no_comp_fops = { + .owner = THIS_MODULE, + .read = no_comp_file_read, +}; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + const struct file_operations *fops; + void *data; + + if (adf_hw_dev_has_compression(accel_dev)) { + fops = &qat_cnv_fops; + data = accel_dev; + } else { + fops = &qat_cnv_no_comp_fops; + data = NULL; + } + + accel_dev->cnv_dbgfile = debugfs_create_file(CNV_DEBUGFS_FILENAME, 0400, + accel_dev->debugfs_dir, + data, fops); +} + +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + debugfs_remove(accel_dev->cnv_dbgfile); + accel_dev->cnv_dbgfile = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h new file mode 100644 index 000000000000..b02b0961c433 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_CNV_DBG_H +#define ADF_CNV_DBG_H + +struct adf_accel_dev; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev); +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 46dd81074166..18a382508542 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -96,6 +96,7 @@ int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 395bb493f20c..477efcc81a16 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -5,6 +5,7 @@ #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_common_drv.h" +#include "adf_cnv_dbgfs.h" #include "adf_dbgfs.h" #include "adf_fw_counters.h" #include "adf_heartbeat_dbgfs.h" @@ -64,6 +65,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) adf_fw_counters_dbgfs_add(accel_dev); adf_heartbeat_dbgfs_add(accel_dev); adf_pm_dbgfs_add(accel_dev); + adf_cnv_dbgfs_add(accel_dev); } } @@ -77,6 +79,7 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) return; if (!accel_dev->is_vf) { + adf_cnv_dbgfs_rm(accel_dev); adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); adf_fw_counters_dbgfs_rm(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 
2ebbec75d778..9e5ce419d875 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -19,6 +19,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_TIMER_GET = 19, + ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PM_STATE_CONFIG = 128, ICP_QAT_FW_PM_INFO = 129, }; @@ -65,6 +66,10 @@ struct icp_qat_fw_init_admin_resp { __u16 version_major_num; }; __u32 extended_features; + struct { + __u16 error_count; + __u16 latest_error; + }; }; __u64 opaque_data; union { -- Gitee From fe554f701650c966bc72e44dec7ef0586c3a6d60 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:45 +0100 Subject: [PATCH 725/953] crypto: qat - add infrastructure for error reporting ANBZ: #8589 commit 93b2f7de7db598b0fe429948c739c212f8316330 upstream. Intel-SIG: commit 93b2f7de7db5 crypto: qat - add infrastructure for error reporting Backport to support Intel QAT in-tree driver Add infrastructure for enabling, disabling and reporting errors in the QAT driver. This adds a new structure, adf_ras_ops, to adf_hw_device_data that contains the following methods: - enable_ras_errors(): allows to enable RAS errors at device initialization. - disable_ras_errors(): allows to disable RAS errors at device shutdown. - handle_interrupt(): allows to detect if there is an error and report if a reset is required. This is executed immediately after the error is reported, in the context of an ISR. An initial, empty, implementation of the methods above is provided for QAT GEN4. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 ++ drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 8 ++++++ .../intel/qat/qat_common/adf_gen4_ras.c | 26 +++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 10 +++++++ .../crypto/intel/qat/qat_common/adf_init.c | 6 +++++ drivers/crypto/intel/qat/qat_common/adf_isr.c | 18 +++++++++++++ 7 files changed, 71 insertions(+) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 615af0883207..0641cb76ebf0 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -10,6 +10,7 @@ #include #include #include +#include "adf_gen4_ras.h" #include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -571,6 +572,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); + adf_gen4_init_ras_ops(&hw_data->ras_ops); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index bb3b2516e6c6..9ba2f8aa1e81 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -19,6 +19,7 @@ intel_qat-objs := adf_cfg.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ + adf_gen4_ras.o \ 
adf_gen4_timer.o \ adf_clock.o \ qat_crypto.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 3674904d0527..eb43a6cfa99e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -152,6 +152,13 @@ struct adf_accel_dev; struct adf_etr_data; struct adf_etr_ring_data; +struct adf_ras_ops { + void (*enable_ras_errors)(struct adf_accel_dev *accel_dev); + void (*disable_ras_errors)(struct adf_accel_dev *accel_dev); + bool (*handle_interrupt)(struct adf_accel_dev *accel_dev, + bool *reset_required); +}; + struct adf_pfvf_ops { int (*enable_comms)(struct adf_accel_dev *accel_dev); u32 (*get_pf2vf_offset)(u32 i); @@ -215,6 +222,7 @@ struct adf_hw_device_data { struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; + struct adf_ras_ops ras_ops; const char *fw_name; const char *fw_mmp_name; u32 fuses; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c new file mode 100644 index 000000000000..0bf243a51527 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_common_drv.h" +#include "adf_gen4_ras.h" + +static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) +{ +} + +static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) +{ +} + +static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, + bool *reset_required) +{ + return false; +} + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops) +{ + ras_ops->enable_ras_errors = adf_gen4_enable_ras; + ras_ops->disable_ras_errors = adf_gen4_disable_ras; + ras_ops->handle_interrupt = adf_gen4_handle_interrupt; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_ras_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h new file mode 100644 index 000000000000..2765d3529c0d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_GEN4_RAS_H_ +#define ADF_GEN4_RAS_H_ + +struct adf_ras_ops; + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); + +#endif /* ADF_GEN4_RAS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index b4cf605ccf3e..4cf49f52d4dd 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -119,6 +119,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); + if (hw_data->ras_ops.enable_ras_errors) + hw_data->ras_ops.enable_ras_errors(accel_dev); + hw_data->enable_ints(accel_dev); hw_data->enable_error_correction(accel_dev); @@ -353,6 +356,9 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, service->init_status); } + if (hw_data->ras_ops.disable_ras_errors) + hw_data->ras_ops.disable_ras_errors(accel_dev); + adf_heartbeat_shutdown(accel_dev); hw_data->disable_iov(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 2aba194a7c29..3557a0d6dea2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ 
b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -132,6 +132,21 @@ static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev) return false; } +static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev) +{ + struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops; + bool reset_required; + + if (ras_ops->handle_interrupt && + ras_ops->handle_interrupt(accel_dev, &reset_required)) { + if (reset_required) + dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n"); + return true; + } + + return false; +} + static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) { struct adf_accel_dev *accel_dev = dev_ptr; @@ -145,6 +160,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) if (adf_handle_pm_int(accel_dev)) return IRQ_HANDLED; + if (adf_handle_ras_int(accel_dev)) + return IRQ_HANDLED; + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); -- Gitee From b1d737f62f58311560df2cec8b265af787a8a24a Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:46 +0100 Subject: [PATCH 726/953] crypto: qat - add reporting of correctable errors for QAT GEN4 ANBZ: #8589 commit df8c184b77a9c6d52e6c7627bbcb902cdc4d2171 upstream. Intel-SIG: commit df8c184b77a9 crypto: qat - add reporting of correctable errors for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect and report correctable errors in QAT GEN4 devices. This includes (1) enabling, disabling and handling error reported through the ERRSOU0 register and (2) logic to log the errors in the system log. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 64 ++++++++++++++++++- .../intel/qat/qat_common/adf_gen4_ras.h | 11 ++++ 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 0bf243a51527..4fbaadbe480e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -1,20 +1,82 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2023 Intel Corporation */ #include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" #include "adf_gen4_ras.h" +static void enable_errsou_reporting(void __iomem *csr) +{ + /* Enable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0); +} + +static void disable_errsou_reporting(void __iomem *csr) +{ + /* Disable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); +} + +static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; + + /* Enable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask); +} + +static void disable_ae_error_reporting(void __iomem *csr) +{ + /* Disable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0); +} + static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + enable_errsou_reporting(csr); + enable_ae_error_reporting(accel_dev, csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *csr = 
adf_get_pmisc_base(accel_dev); + + disable_errsou_reporting(csr); + disable_ae_error_reporting(csr); +} + +static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 aecorrerr = ADF_CSR_RD(csr, ADF_GEN4_HIAECORERRLOG_CPP0); + + aecorrerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_warn(&GET_DEV(accel_dev), + "Correctable error detected in AE: 0x%x\n", + aecorrerr); + + /* Clear interrupt from ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); } static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { - return false; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0); + bool handled = false; + + *reset_required = false; + + if (errsou & ADF_GEN4_ERRSOU0_BIT) { + adf_gen4_process_errsou0(accel_dev, csr); + handled = true; + } + + return handled; } void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index 2765d3529c0d..e6c4dfbb2389 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -3,8 +3,19 @@ #ifndef ADF_GEN4_RAS_H_ #define ADF_GEN4_RAS_H_ +#include + struct adf_ras_ops; +/* ERRSOU0 Correctable error mask*/ +#define ADF_GEN4_ERRSOU0_BIT BIT(0) + +/* HI AE Correctable error log */ +#define ADF_GEN4_HIAECORERRLOG_CPP0 0x41A308 + +/* HI AE Correctable error log enable */ +#define ADF_GEN4_HIAECORERRLOGENABLE_CPP0 0x41A318 + void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); #endif /* ADF_GEN4_RAS_H_ */ -- Gitee From 67474e26964b45722a75ca4937adecf532aa0783 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:47 +0100 Subject: [PATCH 727/953] crypto: qat - add reporting of errors from ERRSOU1 for QAT GEN4 ANBZ: #8589 commit 4926e89d19b0631d8f5f5f292c4caf0f0de08f4f upstream. Intel-SIG: commit 4926e89d19b0 crypto: qat - add reporting of errors from ERRSOU1 for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect and report uncorrectable errors reported through the ERRSOU1 register in QAT GEN4 devices. This also introduces the adf_dev_err_mask structure as part of adf_hw_device_data which will allow to provide different error masks per device generation. 
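Together with the handle_interrupt() hook from patch 725, this keeps the ISR generic: only the per-generation implementation knows which CSRs to read and which adf_dev_err_mask fields to apply, while the caller merely acts on the reset_required verdict. A condensed sketch of the caller side (simplified from the adf_isr.c hunk in patch 725, not a drop-in):

/* Sketch of the generic ISR-side dispatch: the ops provider owns the
 * generation-specific registers and masks, the caller only acts on
 * the reset_required verdict it fills in. */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"

static bool handle_ras(struct adf_accel_dev *accel_dev)
{
	struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops;
	bool reset_required = false;

	if (!ras_ops->handle_interrupt ||
	    !ras_ops->handle_interrupt(accel_dev, &reset_required))
		return false;	/* not a RAS interrupt: let other handlers run */

	if (reset_required)
		dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");

	return true;	/* maps to IRQ_HANDLED in the real ISR */
}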
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 6 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 6 + .../intel/qat/qat_common/adf_gen4_ras.c | 289 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 190 ++++++++++++ 5 files changed, 493 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 0641cb76ebf0..5056cd2c6e9f 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -505,6 +505,11 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) return fw_config[obj_num].ae_mask; } +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK; +} + void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) { hw_data->dev_class = &adf_4xxx_class; @@ -569,6 +574,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index bb3d95a8fb21..7695b4e7277e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -28,6 +28,8 @@ #define ADF_4XXX_ACCELENGINES_MASK (0x1FF) #define ADF_4XXX_ADMIN_AE_MASK (0x100) +#define ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1F + #define ADF_4XXX_ETR_MAX_BANKS 64 /* MSIX interrupt */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index eb43a6cfa99e..c173873b3e2b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -176,6 +176,10 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct adf_dev_err_mask { + u32 cppagentcmdpar_mask; +}; + struct adf_hw_device_data { struct adf_hw_device_class *dev_class; u32 (*get_accel_mask)(struct adf_hw_device_data *self); @@ -223,6 +227,7 @@ struct adf_hw_device_data { struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; struct adf_ras_ops ras_ops; + struct adf_dev_err_mask dev_err_mask; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -271,6 +276,7 @@ struct adf_hw_device_data { #define GET_SRV_TYPE(accel_dev, idx) \ (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \ & ADF_SRV_TYPE_MASK) +#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask) #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 4fbaadbe480e..59ae5a574091 100644 --- 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -8,12 +8,18 @@ static void enable_errsou_reporting(void __iomem *csr) { /* Enable correctable error reporting in ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0); + + /* Enable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0); } static void disable_errsou_reporting(void __iomem *csr) { /* Disable correctable error reporting in ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); + + /* Disable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK); } static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, @@ -23,12 +29,73 @@ static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, /* Enable Acceleration Engine correctable error reporting */ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask); + + /* Enable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, ae_mask); } static void disable_ae_error_reporting(void __iomem *csr) { /* Disable Acceleration Engine correctable error reporting */ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0); + + /* Disable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, 0); +} + +static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, + err_mask->cppagentcmdpar_mask); +} + +static void disable_cpp_error_reporting(void __iomem *csr) +{ + /* Disable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0); +} + +static void enable_ti_ri_error_reporting(void __iomem *csr) +{ + /* Enable RI Memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK); + + /* Enable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, ADF_GEN4_RIMISCSTS_BIT); + + /* Enable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0); +} + +static void disable_ti_ri_error_reporting(void __iomem *csr) +{ + /* Disable RI Memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0); + + /* Disable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, 0); + + /* Disable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, + ADF_GEN4_TI_CI_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, + ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, + ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, + ADF_GEN4_TI_CD_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, + ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); } static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) @@ -37,6 +104,8 @@ static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) enable_errsou_reporting(csr); 
enable_ae_error_reporting(accel_dev, csr); + enable_cpp_error_reporting(accel_dev, csr); + enable_ti_ri_error_reporting(csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) @@ -45,6 +114,8 @@ static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) disable_errsou_reporting(csr); disable_ae_error_reporting(csr); + disable_cpp_error_reporting(csr); + disable_ti_ri_error_reporting(csr); } static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, @@ -62,6 +133,218 @@ static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); } +static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 aeuncorerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT)) + return false; + + aeuncorerr = ADF_CSR_RD(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0); + aeuncorerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error detected in AE: 0x%x\n", + aeuncorerr); + + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr); + + return false; +} + +static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 cmdparerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT)) + return false; + + cmdparerr = ADF_CSR_RD(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG); + cmdparerr &= err_mask->cppagentcmdpar_mask; + + dev_err(&GET_DEV(accel_dev), + "HI CPP agent command parity error: 0x%x\n", + cmdparerr); + + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr); + + return true; +} + +static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 rimem_parerr_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT)) + return false; + + rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN4_RIMEM_PARERR_STS); + rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK; + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity uncorrectable error: 0x%x\n", + rimem_parerr_sts); + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity fatal error: 0x%x\n", + rimem_parerr_sts); + reset_required = true; + } + + ADF_CSR_WR(csr, ADF_GEN4_RIMEM_PARERR_STS, rimem_parerr_sts); + + return reset_required; +} + +static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_ci_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CI_PAR_STS); + ti_ci_par_sts &= ADF_GEN4_TI_CI_PAR_STS_BITMASK; + + if (ti_ci_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts); + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts); + } + + return false; +} + +static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_pullfub_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS); + ti_pullfub_par_sts &= ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK; + + if (ti_pullfub_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Pull Parity Error: 0x%x\n", ti_pullfub_par_sts); + + 
ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS, + ti_pullfub_par_sts); + } + + return false; +} + +static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_pushfub_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS); + ti_pushfub_par_sts &= ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK; + + if (ti_pushfub_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS, + ti_pushfub_par_sts); + } + + return false; +} + +static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_cd_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CD_PAR_STS); + ti_cd_par_sts &= ADF_GEN4_TI_CD_PAR_STS_BITMASK; + + if (ti_cd_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI CD Parity Error: 0x%x\n", ti_cd_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts); + } + + return false; +} + +static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_trnsb_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_TRNSB_PAR_STS); + ti_trnsb_par_sts &= ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK; + + if (ti_trnsb_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); + } + + return false; +} + +static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 rimiscsts; + + if (!(errsou & ADF_GEN4_ERRSOU1_RIMISCSTS_BIT)) + return false; + + rimiscsts = ADF_CSR_RD(csr, ADF_GEN4_RIMISCSTS); + rimiscsts &= ADF_GEN4_RIMISCSTS_BIT; + + dev_err(&GET_DEV(accel_dev), + "Command Parity error detected on IOSFP: 0x%x\n", + rimiscsts); + + ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts); + + return true; +} + +static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou, + bool *reset_required) +{ + *reset_required |= adf_handle_cpp_aeunc(accel_dev, csr, errsou); + *reset_required |= adf_handle_cppcmdparerr(accel_dev, csr, errsou); + *reset_required |= adf_handle_ri_mem_par_err(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_ci_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_pullfub_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_pushfub_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_cd_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_trnsb_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou); +} + static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { @@ -76,6 +359,12 @@ static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, handled = true; } + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU1); + if (errsou & ADF_GEN4_ERRSOU1_BITMASK) { + adf_gen4_process_errsou1(accel_dev, csr, errsou, reset_required); + handled = true; + } + return handled; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index e6c4dfbb2389..67a85cc74a44 100644 ---
a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -15,6 +15,196 @@ struct adf_ras_ops; /* HI AE Correctable error log enable */ #define ADF_GEN4_HIAECORERRLOGENABLE_CPP0 0x41A318 +#define ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT BIT(0) +#define ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT BIT(1) +#define ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN4_ERRSOU1_RIMISCSTS_BIT BIT(4) + +#define ADF_GEN4_ERRSOU1_BITMASK ( \ + (ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT) | \ + (ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMISCSTS_BIT)) + +/* HI AE Uncorrectable error log */ +#define ADF_GEN4_HIAEUNCERRLOG_CPP0 0x41A300 + +/* HI AE Uncorrectable error log enable */ +#define ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0 0x41A320 + +/* HI CPP Agent Command parity error log */ +#define ADF_GEN4_HICPPAGENTCMDPARERRLOG 0x41A310 + +/* HI CPP Agent Command parity error logging enable */ +#define ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE 0x41A314 + +/* RI Memory parity error status register */ +#define ADF_GEN4_RIMEM_PARERR_STS 0x41B128 + +/* RI Memory parity error reporting enable */ +#define ADF_GEN4_RI_MEM_PAR_ERR_EN0 0x41B12C + +/* + * RI Memory parity error mask + * BIT(0) - BIT(3) - ri_iosf_pdata_rxq[0:3] parity error + * BIT(4) - ri_tlq_phdr parity error + * BIT(5) - ri_tlq_pdata parity error + * BIT(6) - ri_tlq_nphdr parity error + * BIT(7) - ri_tlq_npdata parity error + * BIT(8) - BIT(9) - ri_tlq_cplhdr[0:1] parity error + * BIT(10) - BIT(17) - ri_tlq_cpldata[0:7] parity error + * BIT(18) - set this bit to 1 to enable logging status to ri_mem_par_err_sts0 + * BIT(19) - ri_cds_cmd_fifo parity error + * BIT(20) - ri_obc_ricpl_fifo parity error + * BIT(21) - ri_obc_tiricpl_fifo parity error + * BIT(22) - ri_obc_cppcpl_fifo parity error + * BIT(23) - ri_obc_pendcpl_fifo parity error + * BIT(24) - ri_cpp_cmd_fifo parity error + * BIT(25) - ri_cds_ticmd_fifo parity error + * BIT(26) - riti_cmd_fifo parity error + * BIT(27) - ri_int_msixtbl parity error + * BIT(28) - ri_int_imstbl parity error + * BIT(30) - ri_kpt_fuses parity error + */ +#define ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | \ + BIT(7) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | \ + BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | \ + BIT(26) | BIT(27) | BIT(28) | BIT(30)) + +#define ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK \ + (BIT(4) | BIT(6) | BIT(8) | BIT(9)) + +/* TI CI parity status */ +#define ADF_GEN4_TI_CI_PAR_STS 0x50060C + +/* TI CI parity reporting mask */ +#define ADF_GEN4_TI_CI_PAR_ERR_MASK 0x500608 + +/* + * TI CI parity status mask + * BIT(0) - CdCmdQ_sts parity error status + * BIT(1) - CdDataQ_sts parity error status + * BIT(3) - CPP_SkidQ_sts parity error status + * BIT(7) - CPP_SkidQ_sc_sts parity error status + */ +#define ADF_GEN4_TI_CI_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(3) | BIT(7)) + +/* TI PULLFUB parity status */ +#define ADF_GEN4_TI_PULL0FUB_PAR_STS 0x500618 + +/* TI PULLFUB parity error reporting mask */ +#define ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK 0x500614 + +/* + * TI PULLFUB parity status mask + * BIT(0) - TrnPullReqQ_sts parity status + * BIT(1) - TrnSharedDataQ_sts parity status + * BIT(2) - TrnPullReqDataQ_sts parity status + * BIT(4) - CPP_CiPullReqQ_sts
parity status + * BIT(5) - CPP_TrnPullReqQ_sts parity status + * BIT(6) - CPP_PullidQ_sts parity status + * BIT(7) - CPP_WaitDataQ_sts parity status + * BIT(8) - CPP_CdDataQ_sts parity status + * BIT(9) - CPP_TrnDataQP0_sts parity status + * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status + * BIT(12) - CPP_TrnDataQP1_sts parity status + * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status + */ +#define ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14)) + +/* TI PUSHFUB parity status */ +#define ADF_GEN4_TI_PUSHFUB_PAR_STS 0x500630 + +/* TI PUSHFUB parity error reporting mask */ +#define ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK 0x50062C + +/* + * TI PUSHFUB parity status mask + * BIT(0) - SbPushReqQ_sts parity status + * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status + * BIT(4) - CPP_CdPushReqQ_sts parity status + * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status + * BIT(7) - CPP_SbPushReqQ_sts parity status + * BIT(8) - CPP_SbPushDataQP_sts parity status + * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status + */ +#define ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \ + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10)) + +/* TI CD parity status */ +#define ADF_GEN4_TI_CD_PAR_STS 0x50063C + +/* TI CD parity error mask */ +#define ADF_GEN4_TI_CD_PAR_ERR_MASK 0x500638 + +/* + * TI CD parity status mask + * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status + * BIT(16) - Leaf2ClusterRam_sts parity status + * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status + * BIT(19) - VirtualQ_sts parity status + * BIT(20) - DtRdQ_sts parity status + * BIT(21) - DtWrQ_sts parity status + * BIT(22) - RiCmdQ_sts parity status + * BIT(23) - BypassQ_sts parity status + * BIT(24) - DtRdQ_sc_sts parity status + * BIT(25) - DtWrQ_sc_sts parity status + */ +#define ADF_GEN4_TI_CD_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25)) + +/* TI TRNSB parity status */ +#define ADF_GEN4_TI_TRNSB_PAR_STS 0x500648 + +/* TI TRNSB Parity error reporting mask */ +#define ADF_GEN4_TI_TRNSB_PAR_ERR_MASK 0x500644 + +/* + * TI TRNSB parity status mask + * BIT(0) - TrnPHdrQP_sts parity status + * BIT(1) - TrnPHdrQRF_sts parity status + * BIT(2) - TrnPDataQP_sts parity status + * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status + * BIT(7) - TrnNpHdrQP_sts parity status + * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status + * BIT(10) - TrnCplHdrQ_sts parity status + * BIT(11) - TrnPutObsReqQ_sts parity status + * BIT(12) - TrnPushReqQ_sts parity status + * BIT(13) - SbSplitIdRam_sts parity status + * BIT(14) - SbReqCountQ_sts parity status + * BIT(15) - SbCplTrkRam_sts parity status + * BIT(16) - SbGetObsReqQ_sts parity status + * BIT(17) - SbEpochIdQ_sts parity status + * BIT(18) - SbAtCplHdrQ_sts parity status + * BIT(19) - SbAtCplDataQ_sts parity status + * BIT(20) - SbReqCountRam_sts parity status + * BIT(21) - SbAtCplHdrQ_sc_sts parity status + */ +#define ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \ + BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) |
BIT(20) | BIT(21)) + +/* Status register to log misc error on RI */ +#define ADF_GEN4_RIMISCSTS 0x41B1B8 + +/* Status control register to log misc RI error */ +#define ADF_GEN4_RIMISCCTL 0x41B1BC + +/* Command Parity error detected on IOSFP Command to QAT */ +#define ADF_GEN4_RIMISCSTS_BIT BIT(0) void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); -- Gitee From d2f6e3030f36f886b93957b2e8b527413e8324b8 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:48 +0100 Subject: [PATCH 728/953] crypto: qat - add handling of errors from ERRSOU2 for QAT GEN4 ANBZ: #8589 commit 895f7d532c843f49e0b6dc8341bb911b26da4731 upstream. Intel-SIG: commit 895f7d532c84 crypto: qat - add handling of errors from ERRSOU2 for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect, report and handle uncorrectable errors reported through the ERRSOU2 register in QAT GEN4 devices. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 5 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 15 + .../intel/qat/qat_common/adf_accel_devices.h | 6 + .../intel/qat/qat_common/adf_gen4_ras.c | 709 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 320 ++++++++ 5 files changed, 1055 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 5056cd2c6e9f..695a1f149362 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -508,6 +508,11 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) { dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = ADF_4XXX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_4XXX_SSMFEATREN_MASK; } void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index 7695b4e7277e..efd5dadc19ed 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -29,6 +29,21 @@ #define ADF_4XXX_ADMIN_AE_MASK (0x100) #define ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1F +#define ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK 0xF000F +#define ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK 0x10001 +#define ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK 0x30007 +#define ADF_4XXX_PARITYERRORMASK_PKE_MASK 0x3F + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(23) - enable parity detection on SPPs + */ +#define ADF_4XXX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23)) #define ADF_4XXX_ETR_MAX_BANKS 64 diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 
c173873b3e2b..c8492d792c0e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -178,6 +178,12 @@ struct adf_dc_ops { struct adf_dev_err_mask { u32 cppagentcmdpar_mask; + u32 parerr_ath_cph_mask; + u32 parerr_cpr_xlt_mask; + u32 parerr_dcpr_ucs_mask; + u32 parerr_pke_mask; + u32 parerr_wat_wcp_mask; + u32 ssmfeatren_mask; }; struct adf_hw_device_data { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 59ae5a574091..877abed683d8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -11,15 +11,30 @@ static void enable_errsou_reporting(void __iomem *csr) /* Enable uncorrectable error reporting in ERRSOU1 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0); + + /* + * Enable uncorrectable error reporting in ERRSOU2 + * but disable PM interrupt and CFC attention interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, + ADF_GEN4_ERRSOU2_PM_INT_BIT | + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK); } static void disable_errsou_reporting(void __iomem *csr) { + u32 val = 0; + /* Disable correctable error reporting in ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); /* Disable uncorrectable error reporting in ERRSOU1 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK); + + /* Disable uncorrectable error reporting in ERRSOU2 */ + val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2); + val |= ADF_GEN4_ERRSOU2_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val); } static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, @@ -51,12 +66,18 @@ static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, /* Enable HI CPP Agents Command Parity Error Reporting */ ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, err_mask->cppagentcmdpar_mask); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK); } static void disable_cpp_error_reporting(void __iomem *csr) { /* Disable HI CPP Agents Command Parity Error Reporting */ ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK); } static void enable_ti_ri_error_reporting(void __iomem *csr) @@ -98,6 +119,138 @@ static void disable_ti_ri_error_reporting(void __iomem *csr) ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); } +static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable RF parity error in Shared RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 0); +} + +static void disable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Disable RF Parity Error reporting in Shared RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, + ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, + 
err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + +static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Enable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 0); + + /* Enable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val |= err_mask->ssmfeatren_mask; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Enable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, + ADF_GEN4_SER_EN_SSMSH_BITMASK); + + /* Enable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 0); + + /* Enable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 0); +} + +static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Disable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, + ADF_GEN4_INTMASKSSM_BITMASK); + + /* Disable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val &= ADF_GEN4_SSMFEATREN_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Disable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 0); + + /* Disable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); + + /* Disable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) { void __iomem *csr = adf_get_pmisc_base(accel_dev); @@ -106,6 +259,8 @@ static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) enable_ae_error_reporting(accel_dev, csr); enable_cpp_error_reporting(accel_dev, csr); enable_ti_ri_error_reporting(csr); + 
enable_rf_error_reporting(accel_dev, csr); + enable_ssm_error_reporting(accel_dev, csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) @@ -116,6 +271,8 @@ static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) disable_ae_error_reporting(csr); disable_cpp_error_reporting(csr); disable_ti_ri_error_reporting(csr); + disable_rf_error_reporting(accel_dev, csr); + disable_ssm_error_reporting(accel_dev, csr); } static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, @@ -345,6 +502,552 @@ static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev, *reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou); } +static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_UERRSSMSH); + reg &= ADF_GEN4_UERRSSMSH_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error on ssm shared memory: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg); + + return false; +} + +static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_CERRSSMSH); + reg &= ADF_GEN4_CERRSSMSH_ERROR_BIT; + + dev_warn(&GET_DEV(accel_dev), + "Correctable error on ssm shared memory: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg); + + return false; +} + +static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_PPERR_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_PPERR); + reg &= ADF_GEN4_PPERR_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error CPP transaction on memory target: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg); + + return false; +} + +static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 slice_hang_offset, + char *slice_name) +{ + u32 slice_hang_reg = ADF_CSR_RD(csr, slice_hang_offset); + + if (!slice_hang_reg) + return; + + dev_err(&GET_DEV(accel_dev), + "Slice %s hang error encountered\n", slice_name); +} + +static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT)) + return false; + + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_ATH_CPH, "ath_cph"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_CPR_XLT, "cpr_xlt"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS, "dcpr_ucs"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_PKE, "pke"); + + if (err_mask->parerr_wat_wcp_mask) + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, + "wat_wcp"); + + return false; +} + +static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + bool reset_required = false; + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg); + +
reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error PKE: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err PKE: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg); + } + } + + return false; +} + +static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + bool reset_required = false; + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg); + + 
reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error PKE: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err PKE: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP, + reg); + } + } + + return false; +} + +static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT)) + return false; + + reset_required = adf_handle_spp_pullcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pulldata_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushdata_err(accel_dev, csr); + + return reset_required; +} + +static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required = false; + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); + reg &= ADF_GEN4_SSMCPPERR_FATAL_BITMASK | ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + if (reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SSM CPP parity error: 0x%x\n", reg); + + reset_required = true; + } + + if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) + 
dev_err(&GET_DEV(accel_dev), + "non-Fatal SSM CPP parity error: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg); + + return reset_required; +} + +static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); + reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, + reg); + } + + dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); + + return false; +} + +static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required = false; + u32 reg; + + if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT))) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); + reg &= ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + if (reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SER_SSMSH_ERR: 0x%x\n", reg); + + reset_required = true; + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "non-fatal SER_SSMSH_ERR: 0x%x\n", reg); + + if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) + dev_warn(&GET_DEV(accel_dev), + "Correctable SER_SSMSH_ERR: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg); + + return reset_required; +} + +static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN4_IAINTSTATSSM); + bool reset_required; + + iastatssm &= ADF_GEN4_IAINTSTATSSM_BITMASK; + if (!iastatssm) + return false; + + reset_required = adf_handle_uerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_cerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_pperr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); + reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + + ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); + + return reset_required; +} + +static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void 
__iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT)) + return false; + + return adf_handle_iaintstatssm(accel_dev, csr); +} + +static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 reg; + + if (!(errsou & ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_CPP_CFC_ERR_STATUS); + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: data parity: 0x%x", reg); + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: command parity: 0x%x", reg); + + reset_required = true; + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: multiple errors: 0x%x", reg); + + reset_required = true; + } + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_STATUS_CLR, + ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK); + + return reset_required; +} + +static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou, + bool *reset_required) +{ + *reset_required |= adf_handle_ssm(accel_dev, csr, errsou); + *reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou); +} + static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { @@ -365,6 +1068,12 @@ static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, handled = true; } + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU2); + if (errsou & ADF_GEN4_ERRSOU2_BITMASK) { + adf_gen4_process_errsou2(accel_dev, csr, errsou, reset_required); + handled = true; + } + return handled; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index 67a85cc74a44..65c1b7925444 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -203,6 +203,326 @@ struct adf_ras_ops; /* Status control register to log misc RI error */ #define ADF_GEN4_RIMISCCTL 0x41B1BC +/* + * ERRSOU2 bit mask + * BIT(0) - SSM Interrupt Mask + * BIT(1) - CFC on CPP. 
ORed of CFC Push error and Pull error + * BIT(2) - BIT(4) - CPP attention interrupts, deprecated on gen4 devices + * BIT(18) - PM interrupt + */ +#define ADF_GEN4_ERRSOU2_SSM_ERR_BIT BIT(0) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK \ + (BIT(2) | BIT(3) | BIT(4)) + +#define ADF_GEN4_ERRSOU2_PM_INT_BIT BIT(18) + +#define ADF_GEN4_ERRSOU2_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT) + +#define ADF_GEN4_ERRSOU2_DIS_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK) + +#define ADF_GEN4_IAINTSTATSSM 0x28 + +/* IAINTSTATSSM error bit mask definitions */ +#define ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT BIT(0) +#define ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT BIT(1) +#define ADF_GEN4_IAINTSTATSSM_PPERR_BIT BIT(2) +#define ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT BIT(3) +#define ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT BIT(4) +#define ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT BIT(5) +#define ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT BIT(6) +#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT BIT(7) +#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT BIT(8) + +#define ADF_GEN4_IAINTSTATSSM_BITMASK \ + (ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_PPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT) + +#define ADF_GEN4_UERRSSMSH 0x18 + +/* + * UERRSSMSH error bit masks definitions + * + * BIT(0) - Indicates one uncorrectable error + * BIT(15) - Indicates multiple uncorrectable errors + * in device shared memory + */ +#define ADF_GEN4_UERRSSMSH_BITMASK (BIT(0) | BIT(15)) + +#define ADF_GEN4_UERRSSMSHAD 0x1C + +#define ADF_GEN4_CERRSSMSH 0x10 + +/* + * CERRSSMSH error bit + * BIT(0) - Indicates one correctable error + */ +#define ADF_GEN4_CERRSSMSH_ERROR_BIT BIT(0) + +#define ADF_GEN4_CERRSSMSHAD 0x14 + +/* SSM error handling features enable register */ +#define ADF_GEN4_SSMFEATREN 0x198 + +/* + * Disable SSM error detection and reporting features + * enabled by device driver on RAS initialization + * + * following bits should be cleared : + * BIT(4) - Disable parity for CPP parity + * BIT(12) - Disable logging push/pull data error in pperr register. + * BIT(16) - BIT(23) - Disable parity for SPPs + * BIT(24) - BIT(27) - Disable parity for SPPs, if it's supported on the device. 
+ */ +#define ADF_GEN4_SSMFEATREN_DIS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(13) | BIT(14) | BIT(15)) + +#define ADF_GEN4_INTMASKSSM 0x0 + +/* + * Error reporting mask in INTMASKSSM + * BIT(0) - Shared memory uncorrectable interrupt mask + * BIT(1) - Shared memory correctable interrupt mask + * BIT(2) - PPERR interrupt mask + * BIT(3) - CPP parity error Interrupt mask + * BIT(4) - SSM interrupt generated by SER correctable error mask + * BIT(5) - SSM interrupt generated by SER uncorrectable error + * - not stop and scream - mask + */ +#define ADF_GEN4_INTMASKSSM_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5)) + +/* CPP push or pull error */ +#define ADF_GEN4_PPERR 0x8 + +#define ADF_GEN4_PPERR_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_PPERRID 0xC + +/* Slice hang handling related registers */ +#define ADF_GEN4_SLICEHANGSTATUS_ATH_CPH 0x84 +#define ADF_GEN4_SLICEHANGSTATUS_CPR_XLT 0x88 +#define ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS 0x90 +#define ADF_GEN4_SLICEHANGSTATUS_WAT_WCP 0x8C +#define ADF_GEN4_SLICEHANGSTATUS_PKE 0x94 + +#define ADF_GEN4_SHINTMASKSSM_ATH_CPH 0xF0 +#define ADF_GEN4_SHINTMASKSSM_CPR_XLT 0xF4 +#define ADF_GEN4_SHINTMASKSSM_DCPR_UCS 0xFC +#define ADF_GEN4_SHINTMASKSSM_WAT_WCP 0xF8 +#define ADF_GEN4_SHINTMASKSSM_PKE 0x100 + +/* SPP pull cmd parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH 0x1A4 +#define ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT 0x1A8 +#define ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS 0x1B0 +#define ADF_GEN4_SPPPULLCMDPARERR_PKE 0x1B4 +#define ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP 0x1AC + +/* SPP pull data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH 0x1BC +#define ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT 0x1C0 +#define ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS 0x1C8 +#define ADF_GEN4_SPPPULLDATAPARERR_PKE 0x1CC +#define ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP 0x1C4 + +/* SPP push cmd parity err_*slice* CSR */ +#define ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH 0x1D4 +#define ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT 0x1D8 +#define ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS 0x1E0 +#define ADF_GEN4_SPPPUSHCMDPARERR_PKE 0x1E4 +#define ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP 0x1DC + +/* SPP push data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH 0x1EC +#define ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT 0x1F0 +#define ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS 0x1F8 +#define ADF_GEN4_SPPPUSHDATAPARERR_PKE 0x1FC +#define ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP 0x1F4 + +/* Accelerator SPP parity error mask registers */ +#define ADF_GEN4_SPPPARERRMSK_ATH_CPH 0x204 +#define ADF_GEN4_SPPPARERRMSK_CPR_XLT 0x208 +#define ADF_GEN4_SPPPARERRMSK_DCPR_UCS 0x210 +#define ADF_GEN4_SPPPARERRMSK_PKE 0x214 +#define ADF_GEN4_SPPPARERRMSK_WAT_WCP 0x20C + +#define ADF_GEN4_SSMCPPERR 0x224 + +/* + * Uncorrectable error mask in SSMCPPERR + * BIT(0) - indicates CPP command parity error + * BIT(1) - indicates CPP Main Push PPID parity error + * BIT(2) - indicates CPP Main ePPID parity error + * BIT(3) - indicates CPP Main push data parity error + * BIT(4) - indicates CPP Main Pull PPID parity error + * BIT(5) - indicates CPP target pull data parity error + */ +#define ADF_GEN4_SSMCPPERR_FATAL_BITMASK \ + (BIT(0) | BIT(1) | BIT(4)) + +#define ADF_GEN4_SSMCPPERR_UNCERR_BITMASK \ + (BIT(2) | BIT(3) | BIT(5)) + +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC 0x9C +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC 0xB8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH 0xA0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH 
0xBC + +#define ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT 0xA4 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT 0xC0 + +#define ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS 0xAC +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS 0xC8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_PKE 0xB0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE 0xCC + +#define ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP 0xA8 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP 0xC4 + +/* RF parity error detected in SharedRAM */ +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT BIT(0) + +#define ADF_GEN4_SER_ERR_SSMSH 0x44C + +/* + * Fatal error mask in SER_ERR_SSMSH + * BIT(0) - Indicates an uncorrectable error has occurred in the + * accelerator controller command RFs + * BIT(2) - Parity error occurred in the bank SPP fifos + * BIT(3) - Indicates Parity error occurred in the following fifos in + * the design + * BIT(4) - Parity error occurred in flops in the design + * BIT(5) - Uncorrectable error has occurred in the + * target push and pull data register flop + * BIT(7) - Indicates Parity error occurred in the Resource Manager + * pending lock request fifos + * BIT(8) - Indicates Parity error occurred in the Resource Manager + * MECTX command queues logic + * BIT(9) - Indicates Parity error occurred in the Resource Manager + * MECTX sigdone fifo flops + * BIT(10) - Indicates an uncorrectable error has occurred in the + * Resource Manager MECTX command RFs + * BIT(14) - Parity error occurred in Buffer Manager sigdone FIFO + */ + #define ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK \ + (BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(14)) + +/* + * Uncorrectable error mask in SER_ERR_SSMSH + * BIT(12) Parity error occurred in Buffer Manager pool 0 + * BIT(13) Parity error occurred in Buffer Manager pool 1 + */ +#define ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK \ + (BIT(12) | BIT(13)) + +/* + * Correctable error mask in SER_ERR_SSMSH + * BIT(1) - Indicates a correctable Error has occurred + * in the slice controller command RFs + * BIT(6) - Indicates a correctable Error has occurred in + * the target push and pull data RFs + * BIT(11) - Indicates a correctable Error has occurred in + * the Resource Manager MECTX command RFs + */ +#define ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK \ + (BIT(1) | BIT(6) | BIT(11)) + +/* SSM shared memory SER error reporting mask */ +#define ADF_GEN4_SER_EN_SSMSH 0x450 + +/* + * SSM SER error reporting mask in SER_en_err_ssmsh + * BIT(0) - Enables uncorrectable Error detection in : + * 1) slice controller command RFs. + * 2) target push/pull data registers + * BIT(1) - Enables correctable Error detection in : + * 1) slice controller command RFs + * 2) target push/pull data registers + * BIT(2) - Enables Parity error detection in + * 1) bank SPP fifos + * 2) gen4_pull_id_queue + * 3) gen4_push_id_queue + * 4) AE_pull_sigdn_fifo + * 5) DT_push_sigdn_fifo + * 6) slx_push_sigdn_fifo + * 7) secure_push_cmd_fifo + * 8) secure_pull_cmd_fifo + * 9) Head register in FIFO wrapper + * 10) current_cmd in individual push queue + * 11) current_cmd in individual pull queue + * 12) push_command_rxp arbitrated in ssm_push_cmd_queues + * 13) pull_command_rxp arbitrated in ssm_pull_cmd_queues + * BIT(3) - Enables uncorrectable Error detection in + * the resource manager mectx cmd RFs.
+ * BIT(4) - Enables correctable error detection in the Resource Manager + * mectx command RFs + * BIT(5) - Enables Parity error detection in + * 1) resource manager lock request fifo + * 2) mectx cmdqueues logic + * 3) mectx sigdone fifo + * BIT(6) - Enables Parity error detection in Buffer Manager pools + * and sigdone fifo + */ +#define ADF_GEN4_SER_EN_SSMSH_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6)) + +#define ADF_GEN4_CPP_CFC_ERR_STATUS 0x640C04 + +/* + * BIT(1) - Indicates multiple CPP CFC errors + * BIT(7) - Indicates CPP CFC command parity error type + * BIT(8) - Indicates CPP CFC data parity error type + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8) + +/* + * BIT(0) - Enables CFC to detect and log push/pull data error + * BIT(1) - Enables CFC to generate interrupt to PCIEP for CPP error + * BIT(4) - When 1 Parity detection is disabled + * BIT(5) - When 1 Parity detection is disabled on CPP command bus + * BIT(6) - When 1 Parity detection is disabled on CPP push/pull bus + * BIT(9) - When 1 RF parity error detection is disabled + */ +#define ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK \ + (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL 0x640C00 + +/* + * BIT(0) - Clears bit(0) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when an error is reported on CPP + * BIT(1) - Clears bit(1) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when multiple errors are reported on CPP + * BIT(2) - Clears bit(2) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when attention interrupt is reported + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR 0x640C08 + +#define ADF_GEN4_CPP_CFC_ERR_PPID_LO 0x640C0C +#define ADF_GEN4_CPP_CFC_ERR_PPID_HI 0x640C10 + /* Command Parity error detected on IOSFP Command to QAT */ #define ADF_GEN4_RIMISCSTS_BIT BIT(0) -- Gitee From 28c690880fc51a580ed12599a5b3e1fde9de2439 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:49 +0100 Subject: [PATCH 729/953] crypto: qat - add handling of compression related errors for QAT GEN4 ANBZ: #8589 commit b67bf7babe36c6c15623ec22ed13ec9069a6cf37 upstream. Intel-SIG: commit b67bf7babe36 crypto: qat - add handling of compression related errors for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect, report and handle correctable and uncorrectable errors related to the compression hardware. These are detected through the EXPRPSSMXLT, EXPRPSSMCPR and EXPRPSSMDCPR registers.
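
The handlers added here follow the same flow as the other GEN4 RAS sources: read the status CSR, mask it down to the bits the handler owns, log at a severity matching the error class, then write the value back to clear it, accumulating a reset verdict along the way. A rough, self-contained sketch of that flow as a userspace model; the register name, offset and bit masks below are invented for illustration and are not the QAT CSR map:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for a status CSR and its masks (illustration only). */
#define FAKE_ERR_STS		0x0
#define FAKE_ERR_FATAL_BITMASK	(1u << 0)
#define FAKE_ERR_UNCERR_BITMASK	(1u << 1)

static uint32_t fake_csr[16];	/* stands in for the mapped MMIO region */

static uint32_t csr_rd(uint32_t off) { return fake_csr[off / 4]; }
static void csr_wr(uint32_t off, uint32_t val) { fake_csr[off / 4] = val; }

/* Returns true when the logged error requires a device reset. */
static bool handle_fake_err(void)
{
	uint32_t reg = csr_rd(FAKE_ERR_STS);
	bool reset_required = false;

	/* Mask to the bits this handler owns; bail out if none are set. */
	reg &= FAKE_ERR_FATAL_BITMASK | FAKE_ERR_UNCERR_BITMASK;
	if (!reg)
		return false;

	if (reg & FAKE_ERR_FATAL_BITMASK) {
		fprintf(stderr, "fatal error: 0x%x\n", (unsigned int)reg);
		reset_required = true;
	}

	if (reg & FAKE_ERR_UNCERR_BITMASK)
		fprintf(stderr, "non-fatal error: 0x%x\n", (unsigned int)reg);

	/* Write the handled bits back: these CSRs are write-1-to-clear. */
	csr_wr(FAKE_ERR_STS, reg);

	return reset_required;
}

int main(void)
{
	bool reset_required = false;

	fake_csr[0] = FAKE_ERR_FATAL_BITMASK;	/* inject a fatal error */

	/* OR the per-source verdicts together, as the errsou handlers do. */
	reset_required |= handle_fake_err();
	printf("reset required: %d\n", reset_required);

	return 0;
}

Only fatal bits set the reset verdict; correctable hits are logged at a lower severity and never force a reset, mirroring how the per-source handlers are ORed together into *reset_required.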
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 76 ++++++++++++++++++- .../intel/qat/qat_common/adf_gen4_ras.h | 76 +++++++++++++++++++ 2 files changed, 151 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 877abed683d8..285b755e13be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -996,13 +996,87 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, return reset_required; } +static bool adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMCPR); + + reg &= ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK; + if (!reg) + return false; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM CMP: 0x%x", reg); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg); + + return false; +} + +static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT); + + reg &= ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMXLT_CERR_BIT; + if (!reg) + return false; + + if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM XLT: 0x%x", reg); + + if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) + dev_warn(&GET_DEV(accel_dev), + "Correctable error exception in SSM XLT: 0x%x", reg); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg); + + return false; +} + +static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg; + int i; + + for (i = 0; i < ADF_GEN4_DCPR_SLICES_NUM; i++) { + reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMDCPR(i)); + reg &= ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK; + if (!reg) + continue; + + if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM DCMP: 0x%x", reg); + + if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) + dev_warn(&GET_DEV(accel_dev), + "Correctable error exception in SSM DCMP: 0x%x", reg); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg); + } + + return false; +} + static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou) { + bool reset_required; + if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT)) return false; - return adf_handle_iaintstatssm(accel_dev, csr); + reset_required = adf_handle_iaintstatssm(accel_dev, csr); + reset_required |= adf_handle_exprpssmcmpr(accel_dev, csr); + reset_required |= adf_handle_exprpssmxlt(accel_dev, csr); + reset_required |= adf_handle_exprpssmdcpr(accel_dev, csr); + + return reset_required; } static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index 65c1b7925444..e3583c3ed827 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -523,6 +523,82 @@ struct adf_ras_ops; #define ADF_GEN4_CPP_CFC_ERR_PPID_LO 0x640C0C #define ADF_GEN4_CPP_CFC_ERR_PPID_HI 0x640C10 +/* Exception reporting in QAT SSM CMP */ +#define ADF_GEN4_EXPRPSSMCPR 0x2000 + +/* + * 
Uncorrectable error mask in EXPRPSSMCPR
+ * BIT(2) - Hard fatal error
+ * BIT(16) - Parity error detected in CPR Push FIFO
+ * BIT(17) - Parity error detected in CPR Pull FIFO
+ * BIT(18) - Parity error detected in CPR Hash Table
+ * BIT(19) - Parity error detected in CPR History Buffer Copy 0
+ * BIT(20) - Parity error detected in CPR History Buffer Copy 1
+ * BIT(21) - Parity error detected in CPR History Buffer Copy 2
+ * BIT(22) - Parity error detected in CPR History Buffer Copy 3
+ * BIT(23) - Parity error detected in CPR History Buffer Copy 4
+ * BIT(24) - Parity error detected in CPR History Buffer Copy 5
+ * BIT(25) - Parity error detected in CPR History Buffer Copy 6
+ * BIT(26) - Parity error detected in CPR History Buffer Copy 7
+ */
+#define ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK \
+	(BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+	 BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26))
+
+/* Exception reporting in QAT SSM XLT */
+#define ADF_GEN4_EXPRPSSMXLT 0xA000
+
+/*
+ * Uncorrectable error mask in EXPRPSSMXLT
+ * BIT(2) - If set, an Uncorrectable Error event occurred
+ * BIT(16) - Parity error detected in XLT Push FIFO
+ * BIT(17) - Parity error detected in XLT Pull FIFO
+ * BIT(18) - Parity error detected in XLT HCTB0
+ * BIT(19) - Parity error detected in XLT HCTB1
+ * BIT(20) - Parity error detected in XLT HCTB2
+ * BIT(21) - Parity error detected in XLT HCTB3
+ * BIT(22) - Parity error detected in XLT CBCL
+ * BIT(23) - Parity error detected in XLT LITPTR
+ */
+#define ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK \
+	(BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
+	 BIT(22) | BIT(23))
+
+/*
+ * Correctable error mask in EXPRPSSMXLT
+ * BIT(3) - Correctable error event occurred.
+ */
+#define ADF_GEN4_EXPRPSSMXLT_CERR_BIT BIT(3)
+
+/* Exception reporting in QAT SSM DCMP */
+#define ADF_GEN4_EXPRPSSMDCPR(_n_) (0x12000 + (_n_) * 0x80)
+
+/*
+ * Uncorrectable error mask in EXPRPSSMDCPR
+ * BIT(2) - Even hard fatal error
+ * BIT(4) - Odd hard fatal error
+ * BIT(6) - decode soft error
+ * BIT(16) - Parity error detected in CPR Push FIFO
+ * BIT(17) - Parity error detected in CPR Pull FIFO
+ * BIT(18) - Parity error detected in the Input Buffer
+ * BIT(19) - symbuf0parerr
+ *	     Parity error detected in CPR Push FIFO
+ * BIT(20) - symbuf1parerr
+ *	     Parity error detected in CPR Push FIFO
+ */
+#define ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK \
+	(BIT(2) | BIT(4) | BIT(6) | BIT(16) | BIT(17) | \
+	 BIT(18) | BIT(19) | BIT(20))
+
+/*
+ * Correctable error mask in EXPRPSSMDCPR
+ * BIT(3) - Even ecc correctable error
+ * BIT(5) - Odd ecc correctable error
+ */
+#define ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK (BIT(3) | BIT(5))
+
+#define ADF_GEN4_DCPR_SLICES_NUM 3
+
 /* Command Parity error detected on IOSFP Command to QAT */
 #define ADF_GEN4_RIMISCSTS_BIT BIT(0)
-- 
Gitee

From 7895745f1d9c20a3317e71a460796b19c512f0a8 Mon Sep 17 00:00:00 2001
From: Shashank Gupta
Date: Fri, 20 Oct 2023 11:32:50 +0100
Subject: [PATCH 730/953] crypto: qat - add adf_get_aram_base() helper function

ANBZ: #8589

commit 86df79c3a40a0085555aaa475b4b16c8728ef952 upstream.

Intel-SIG: commit 86df79c3a40a crypto: qat - add adf_get_aram_base() helper function
Backport to support Intel QAT in-tree driver

Add the function adf_get_aram_base(), which returns the base address of
the ARAM BAR.
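A minimal usage sketch (illustrative only; ADF_GEN4_REG_ARAMCERR is an
ARAM CSR offset introduced by a later patch in this series):

    void __iomem *aram_csr = adf_get_aram_base(accel_dev);
    u32 reg = ADF_CSR_RD(aram_csr, ADF_GEN4_REG_ARAMCERR);

The helper mirrors the existing adf_get_pmisc_base(), but resolves the
BAR index through hw_data->get_sram_bar_id() instead of the misc BAR id.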
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 18a382508542..d9342634f9c1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -248,4 +248,14 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) return pmisc->virt_addr; } +static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *param; + + param = &GET_BARS(accel_dev)[hw_data->get_sram_bar_id(hw_data)]; + + return param->virt_addr; +} + #endif -- Gitee From 7dc571f23677041390add8cc6e64c7412e452a7d Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:51 +0100 Subject: [PATCH 731/953] crypto: qat - add handling of errors from ERRSOU3 for QAT GEN4 ANBZ: #8589 commit 22289dc95833c6584aea1f4e8ab9f4f1641bb076 upstream. Intel-SIG: commit 22289dc95833 crypto: qat - add handling of errors from ERRSOU3 for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect, report and handle uncorrectable errors reported through the ERRSOU3 register in QAT GEN4 devices. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 256 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 218 +++++++++++++++ 2 files changed, 474 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 285b755e13be..8ba9c9bdb89b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -19,6 +19,14 @@ static void enable_errsou_reporting(void __iomem *csr) ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, ADF_GEN4_ERRSOU2_PM_INT_BIT | ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK); + + /* + * Enable uncorrectable error reporting in ERRSOU3 + * but disable RLT error interrupt and VFLR notify interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, + ADF_GEN4_ERRSOU3_RLTERROR_BIT | + ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT); } static void disable_errsou_reporting(void __iomem *csr) @@ -35,6 +43,9 @@ static void disable_errsou_reporting(void __iomem *csr) val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2); val |= ADF_GEN4_ERRSOU2_DIS_BITMASK; ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val); + + /* Disable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_ERRSOU3_BITMASK); } static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, @@ -82,6 +93,8 @@ static void disable_cpp_error_reporting(void __iomem *csr) static void enable_ti_ri_error_reporting(void __iomem *csr) { + u32 reg; + /* Enable RI Memory error reporting */ ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK | @@ -96,10 +109,26 @@ static void enable_ti_ri_error_reporting(void __iomem *csr) ADF_CSR_WR(csr, 
ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0); ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0); ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0); + + /* Enable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, ADF_GEN4_RICPPINTCTL_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, ADF_GEN4_TICPPINTCTL_BITMASK); + + /* + * Enable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + reg |= ADF_GEN4_TIMISCCTL_BIT; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); } static void disable_ti_ri_error_reporting(void __iomem *csr) { + u32 reg; + /* Disable RI Memory error reporting */ ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0); @@ -117,6 +146,19 @@ static void disable_ti_ri_error_reporting(void __iomem *csr) ADF_GEN4_TI_CD_PAR_STS_BITMASK); ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); + + /* Disable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, 0); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, 0); + + /* + * Disable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); } static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev, @@ -251,8 +293,32 @@ static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev, err_mask->parerr_wat_wcp_mask); } +static void enable_aram_error_reporting(void __iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, + ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, + ADF_GEN4_REG_ARAMCERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, + ADF_GEN4_REG_ARAMUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, + ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK); +} + +static void disable_aram_error_reporting(void __iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 0); +} + static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *aram_csr = adf_get_aram_base(accel_dev); void __iomem *csr = adf_get_pmisc_base(accel_dev); enable_errsou_reporting(csr); @@ -261,10 +327,12 @@ static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) enable_ti_ri_error_reporting(csr); enable_rf_error_reporting(accel_dev, csr); enable_ssm_error_reporting(accel_dev, csr); + enable_aram_error_reporting(aram_csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *aram_csr = adf_get_aram_base(accel_dev); void __iomem *csr = adf_get_pmisc_base(accel_dev); disable_errsou_reporting(csr); @@ -273,6 +341,7 @@ static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) disable_ti_ri_error_reporting(csr); disable_rf_error_reporting(accel_dev, csr); disable_ssm_error_reporting(accel_dev, csr); + disable_aram_error_reporting(aram_csr); } static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, @@ -1122,9 +1191,190 @@ static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev, *reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou); } +static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 timiscsts; + + if (!(errsou 
& ADF_GEN4_ERRSOU3_TIMISCSTS_BIT)) + return false; + + timiscsts = ADF_CSR_RD(csr, ADF_GEN4_TIMISCSTS); + + dev_err(&GET_DEV(accel_dev), + "Fatal error in Transmit Interface: 0x%x\n", timiscsts); + + return true; +} + +static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ricppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK)) + return false; + + ricppintsts = ADF_CSR_RD(csr, ADF_GEN4_RICPPINTSTS); + ricppintsts &= ADF_GEN4_RICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "RI CPP Uncorrectable Error: 0x%x\n", ricppintsts); + + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts); + + return false; +} + +static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ticppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK)) + return false; + + ticppintsts = ADF_CSR_RD(csr, ADF_GEN4_TICPPINTSTS); + ticppintsts &= ADF_GEN4_TICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "TI CPP Uncorrectable Error: 0x%x\n", ticppintsts); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts); + + return false; +} + +static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 aram_cerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT)) + return false; + + aram_cerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMCERR); + aram_cerr &= ADF_GEN4_REG_ARAMCERR_BIT; + + dev_warn(&GET_DEV(accel_dev), + "ARAM correctable error : 0x%x\n", aram_cerr); + + aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr); + + return false; +} + +static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 aramuerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + aramuerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMUERR); + aramuerr &= ADF_GEN4_REG_ARAMUERR_ERROR_BIT | + ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT; + + if (!aramuerr) + return false; + + if (aramuerr & ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "ARAM multiple uncorrectable errors: 0x%x\n", aramuerr); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "ARAM uncorrectable error: 0x%x\n", aramuerr); + } + + aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, aramuerr); + + return reset_required; +} + +static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 cppmemtgterr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + cppmemtgterr = ADF_CSR_RD(csr, ADF_GEN4_REG_CPPMEMTGTERR); + cppmemtgterr &= ADF_GEN4_REG_CPPMEMTGTERR_BITMASK | + ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT; + if (!cppmemtgterr) + return false; + + if (cppmemtgterr & ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "Misc memory target multiple uncorrectable errors: 0x%x\n", + cppmemtgterr); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr); + } + + cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, cppmemtgterr); + + return reset_required; +} + +static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 i; + u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks; + + if (!(errsou 
& ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT)) + return false; + + for (i = 0; i < max_rp_num; i++) { + u32 atufaultstatus = ADF_CSR_RD(csr, ADF_GEN4_ATUFAULTSTATUS(i)); + + atufaultstatus &= ADF_GEN4_ATUFAULTSTATUS_BIT; + + if (atufaultstatus) { + dev_err(&GET_DEV(accel_dev), + "Ring Pair (%u) ATU detected fault: 0x%x\n", i, + atufaultstatus); + + ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus); + } + } + + return false; +} + +static void adf_gen4_process_errsou3(struct adf_accel_dev *accel_dev, + void __iomem *csr, void __iomem *aram_csr, + u32 errsou, bool *reset_required) +{ + *reset_required |= adf_handle_timiscsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ricppintsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ticppintsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_aramcerr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_aramuerr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_reg_cppmemtgterr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_atufaultstatus(accel_dev, csr, errsou); +} + static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { + void __iomem *aram_csr = adf_get_aram_base(accel_dev); void __iomem *csr = adf_get_pmisc_base(accel_dev); u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0); bool handled = false; @@ -1148,6 +1398,12 @@ static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, handled = true; } + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU3); + if (errsou & ADF_GEN4_ERRSOU3_BITMASK) { + adf_gen4_process_errsou3(accel_dev, csr, aram_csr, errsou, reset_required); + handled = true; + } + return handled; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index e3583c3ed827..53352083cd12 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -599,6 +599,224 @@ struct adf_ras_ops; #define ADF_GEN4_DCPR_SLICES_NUM 3 +/* + * ERRSOU3 bit masks + * BIT(0) - indicates error Response Order Overflow and/or BME error + * BIT(1) - indicates RI push/pull error + * BIT(2) - indicates TI push/pull error + * BIT(3) - indicates ARAM correctable error + * BIT(4) - indicates ARAM uncorrectable error + * BIT(5) - indicates TI pull parity error + * BIT(6) - indicates RI push parity error + * BIT(7) - indicates VFLR interrupt + * BIT(8) - indicates ring pair interrupts for ATU detected fault + * BIT(9) - indicates error when accessing RLT block + */ +#define ADF_GEN4_ERRSOU3_TIMISCSTS_BIT BIT(0) +#define ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK (BIT(1) | BIT(6)) +#define ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK (BIT(2) | BIT(5)) +#define ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT BIT(3) +#define ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT BIT(4) +#define ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT BIT(7) +#define ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8) +#define ADF_GEN4_ERRSOU3_RLTERROR_BIT BIT(9) + +#define ADF_GEN4_ERRSOU3_BITMASK ( \ + (ADF_GEN4_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT) | \ + (ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN4_ERRSOU3_RLTERROR_BIT)) + +/* TI Misc status register */ +#define ADF_GEN4_TIMISCSTS 0x50054C + +/* TI Misc error reporting mask */ +#define ADF_GEN4_TIMISCCTL 0x500548 + +/* + * TI Misc error reporting control mask + * 
BIT(0) - Enables error detection and logging in TIMISCSTS register
+ * BIT(1) - It has effect only when SRIOV enabled, this bit is 0 by default
+ * BIT(2) - Enables the D-F-x counter within the dispatch arbiter
+ *	    to start based on the command triggered from
+ * BIT(30) - Disables VFLR functionality
+ *	     Setting this bit reverts to CPM1.x functionality
+ * The values of bits 1, 2 and 30 should be preserved and are not meant to
+ * be changed within RAS.
+ */
+#define ADF_GEN4_TIMISCCTL_BIT BIT(0)
+#define ADF_GEN4_TIMSCCTL_RELAY_BITMASK (BIT(1) | BIT(2) | BIT(30))
+
+/* RI CPP interface status register */
+#define ADF_GEN4_RICPPINTSTS 0x41A330
+
+/*
+ * Uncorrectable error mask in RICPPINTSTS register
+ * BIT(0) - RI asserted the CPP error signal during a push
+ * BIT(1) - RI detected the CPP error signal asserted during a pull
+ * BIT(2) - RI detected a push data parity error
+ * BIT(3) - RI detected a push valid parity error
+ */
+#define ADF_GEN4_RICPPINTSTS_BITMASK \
+	(BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RI CPP interface status register control */
+#define ADF_GEN4_RICPPINTCTL 0x41A32C
+
+/*
+ * Control bit mask for RICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting
+ *	    on the RI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting
+ *	    on the RI CPP Pull interface
+ * BIT(2) - value of 1 enables error detection and reporting
+ *	    on the RI Parity
+ * BIT(3) - value of 1 enables checking parity on CPP
+ * BIT(4) - value of 1 enables the stop feature of the stop and stream
+ *	    for all RI CPP Command RFs
+ */
+#define ADF_GEN4_RICPPINTCTL_BITMASK \
+	(BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* Push ID of the command which triggered the transaction error on RI */
+#define ADF_GEN4_RIERRPUSHID 0x41A334
+
+/* Pull ID of the command which triggered the transaction error on RI */
+#define ADF_GEN4_RIERRPULLID 0x41A338
+
+/* TI CPP interface status register */
+#define ADF_GEN4_TICPPINTSTS 0x50053C
+
+/*
+ * Uncorrectable error mask in TICPPINTSTS register
+ * BIT(0) - value of 1 indicates that the TI asserted
+ *	    the CPP error signal during a push
+ * BIT(1) - value of 1 indicates that the TI detected
+ *	    the CPP error signal asserted during a pull
+ * BIT(2) - value of 1 indicates that the TI detected
+ *	    a pull data parity error
+ */
+#define ADF_GEN4_TICPPINTSTS_BITMASK \
+	(BIT(0) | BIT(1) | BIT(2))
+
+/* TI CPP interface status register control */
+#define ADF_GEN4_TICPPINTCTL 0x500538
+
+/*
+ * Control bit mask for TICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting on
+ *	    the TI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting on
+ *	    the TI CPP Pull interface
+ * BIT(2) - value of 1 enables parity error detection and logging on
+ *	    the TI CPP Pull interface
+ * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking
+ * BIT(4) - value of 1 enables TI stop part of stop and scream mode on
+ *	    CPP/RF Parity error
+ */
+#define ADF_GEN4_TICPPINTCTL_BITMASK \
+	(BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* Push ID of the command which triggered the transaction error on TI */
+#define ADF_GEN4_TIERRPUSHID 0x500540
+
+/* Pull ID of the command which triggered the transaction error on TI */
+#define ADF_GEN4_TIERRPULLID 0x500544
+
+/* Correctable error in ARAM agent register */
+#define ADF_GEN4_REG_ARAMCERR 0x1700
+
+#define ADF_GEN4_REG_ARAMCERR_BIT BIT(0)
+
+/*
+ * Correctable error enablement in ARAM bit mask
+ * BIT(3) - enable ARAM RAM to fix and log correctable error
+ * BIT(26) - enables ARAM agent to generate interrupt for correctable error
+ */
+#define ADF_GEN4_REG_ARAMCERR_EN_BITMASK (BIT(3) | BIT(26))
+
+/* Correctable error address in ARAM agent register */
+#define ADF_GEN4_REG_ARAMCERRAD 0x1708
+
+/* Uncorrectable error in ARAM agent register */
+#define ADF_GEN4_REG_ARAMUERR 0x1704
+
+/*
+ * ARAM error bit mask
+ * BIT(0) - indicates error logged in ARAMCERR or ARAMUCERR
+ * BIT(18) - indicates uncorrectable multiple errors in ARAM agent
+ */
+#define ADF_GEN4_REG_ARAMUERR_ERROR_BIT BIT(0)
+#define ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT BIT(18)
+
+/*
+ * Uncorrectable error enablement in ARAM bit mask
+ * BIT(3) - enable ARAM RAM to fix and log uncorrectable error
+ * BIT(19) - enables ARAM agent to generate interrupt for uncorrectable error
+ */
+#define ADF_GEN4_REG_ARAMUERR_EN_BITMASK (BIT(3) | BIT(19))
+
+/* Uncorrectable error address in ARAM agent register */
+#define ADF_GEN4_REG_ARAMUERRAD 0x170C
+
+/* Uncorrectable error transaction push/pull ID registers */
+#define ADF_GEN4_REG_ERRPPID_LO 0x1714
+#define ADF_GEN4_REG_ERRPPID_HI 0x1718
+
+/* ARAM ECC block error enablement */
+#define ADF_GEN4_REG_ARAMCERRUERR_EN 0x1808
+
+/*
+ * ARAM ECC block error control bit masks
+ * BIT(0) - enable ARAM CD ECC block error detecting
+ * BIT(1) - enable ARAM pull request ECC error detecting
+ * BIT(2) - enable ARAM command dispatch ECC error detecting
+ * BIT(3) - enable ARAM read datapath push ECC error detecting
+ * BIT(4) - enable ARAM read datapath pull ECC error detecting
+ * BIT(5) - enable ARAM RMW ECC error detecting
+ * BIT(6) - enable ARAM write datapath RMW ECC error detecting
+ * BIT(7) - enable ARAM write datapath ECC error detecting
+ */
+#define ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK \
+	(BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | \
+	 BIT(5) | BIT(6) | BIT(7))
+
+/* ARAM misc memory target error registers */
+#define ADF_GEN4_REG_CPPMEMTGTERR 0x1710
+
+/*
+ * ARAM misc memory target error bit masks
+ * BIT(0) - indicates an error in ARAM target memory
+ * BIT(1) - indicates multiple errors in ARAM target memory
+ * BIT(4) - indicates pull error in ARAM target memory
+ * BIT(5) - indicates parity pull error in ARAM target memory
+ * BIT(6) - indicates push error in ARAM target memory
+ */
+#define ADF_GEN4_REG_CPPMEMTGTERR_BITMASK \
+	(BIT(0) | BIT(4) | BIT(5) | BIT(6))
+
+#define ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT BIT(1)
+
+/*
+ * ARAM misc memory target error enablement mask
+ * BIT(2) - enables CPP memory to detect and log push/pull data error
+ * BIT(7) - enables push/pull error to generate interrupts to RI
+ * BIT(8) - enables ARAM to check parity on pull data and CPP command buses
+ * BIT(9) - enables ARAM to autopush to AE when push/parity error is detected
+ *	    on lookaside DT
+ */
+#define ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK \
+	(BIT(2) | BIT(7) | BIT(8) | BIT(9))
+
+/* ATU fault status register */
+#define ADF_GEN4_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4))
+
+#define ADF_GEN4_ATUFAULTSTATUS_BIT BIT(0)
+
 /* Command Parity error detected on IOSFP Command to QAT */
 #define ADF_GEN4_RIMISCSTS_BIT BIT(0)
-- 
Gitee

From 3212e48f1a8ac1e9173beb9ee4ed06c55c47ea0c Mon Sep 17 00:00:00 2001
From: Shashank Gupta
Date: Fri, 20 Oct 2023 11:32:52 +0100
Subject: [PATCH 732/953] crypto: qat - add error counters

ANBZ: #8589

commit 532d7f6bc458042571752168bcb5e1fdc576b8c4 upstream.

Intel-SIG: commit 532d7f6bc458 crypto: qat - add error counters
Backport to support Intel QAT in-tree driver

Introduce a RAS counters interface for counting QAT-specific device
errors and expose the counters through the newly created qat_ras sysfs
attribute group.

This adds the following attributes:
- errors_correctable: number of correctable errors
- errors_nonfatal: number of uncorrectable non-fatal errors
- errors_fatal: number of uncorrectable fatal errors
- reset_error_counters: resets all counters

These counters are initialized during device bring up and cleared
during device shutdown and are applicable only to QAT GEN4 devices.

Signed-off-by: Shashank Gupta
Reviewed-by: Giovanni Cabiddu
Reviewed-by: Tero Kristo
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 .../ABI/testing/sysfs-driver-qat_ras          |  41 +++++++
 drivers/crypto/intel/qat/qat_4xxx/adf_drv.c   |   1 +
 drivers/crypto/intel/qat/qat_common/Makefile  |   1 +
 .../intel/qat/qat_common/adf_accel_devices.h  |  14 +++
 .../crypto/intel/qat/qat_common/adf_init.c    |   3 +
 .../qat/qat_common/adf_sysfs_ras_counters.c   | 112 ++++++++++++++++++
 .../qat/qat_common/adf_sysfs_ras_counters.h   |  27 +++++
 7 files changed, 199 insertions(+)
 create mode 100644 Documentation/ABI/testing/sysfs-driver-qat_ras
 create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
 create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h

diff --git a/Documentation/ABI/testing/sysfs-driver-qat_ras b/Documentation/ABI/testing/sysfs-driver-qat_ras
new file mode 100644
index 000000000000..176dea1e9c0a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-qat_ras
@@ -0,0 +1,41 @@
+What:		/sys/bus/pci/devices//qat_ras/errors_correctable
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Reports the number of correctable errors detected by the device.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices//qat_ras/errors_nonfatal
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Reports the number of non-fatal errors detected by the device.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices//qat_ras/errors_fatal
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(RO) Reports the number of fatal errors detected by the device.
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices//qat_ras/reset_error_counters
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:	(WO) Writing to this attribute resets all error counters of a device.
+
+		The following example shows how to reset the counters::
+
+		  # echo 1 > /sys/bus/pci/devices//qat_ras/reset_error_counters
+		  # cat /sys/bus/pci/devices//qat_ras/errors_correctable
+		  0
+		  # cat /sys/bus/pci/devices//qat_ras/errors_nonfatal
+		  0
+		  # cat /sys/bus/pci/devices//qat_ras/errors_fatal
+		  0
+
+		This attribute is only available for qat_4xxx devices.
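On the driver side, the counters exposed above are plain atomics behind
helper macros. As a hedged sketch of how an error handler bumps one of
them (the macros are those added in adf_sysfs_ras_counters.h later in
this patch; the actual wiring of the GEN4 handlers happens in the
follow-up patch "crypto: qat - count QAT GEN4 errors"):

    /* in an error handler, after reporting the error via dev_err() */
    ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);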
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 2ccd1223f1ef..8f483d1197dd 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -418,6 +418,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } + accel_dev->ras_errors.enabled = true; adf_dbgfs_init(accel_dev); ret = adf_dev_up(accel_dev, true); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 9ba2f8aa1e81..fcf74c0dc534 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -13,6 +13,7 @@ intel_qat-objs := adf_cfg.o \ adf_admin.o \ adf_hw_arbiter.o \ adf_sysfs.o \ + adf_sysfs_ras_counters.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ adf_gen4_hw_data.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index c8492d792c0e..1c11d90bd9f3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -7,6 +7,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #include "adf_pfvf_msg.h" @@ -81,6 +82,18 @@ enum dev_sku_info { DEV_SKU_UNKNOWN, }; +enum ras_errors { + ADF_RAS_CORR, + ADF_RAS_UNCORR, + ADF_RAS_FATAL, + ADF_RAS_ERRORS, +}; + +struct adf_error_counters { + atomic_t counter[ADF_RAS_ERRORS]; + bool enabled; +}; + static inline const char *get_sku_info(enum dev_sku_info info) { switch (info) { @@ -361,6 +374,7 @@ struct adf_accel_dev { u8 pf_compat_ver; } vf; }; + struct adf_error_counters ras_errors; struct mutex state_lock; /* protect state of the device */ bool is_vf; u32 accel_id; diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 4cf49f52d4dd..ef51c4d028d2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -9,6 +9,7 @@ #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_heartbeat.h" +#include "adf_sysfs_ras_counters.h" static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); @@ -245,6 +246,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); adf_dbgfs_add(accel_dev); + adf_sysfs_start_ras(accel_dev); return 0; } @@ -271,6 +273,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) return; adf_dbgfs_rm(accel_dev); + adf_sysfs_stop_ras(accel_dev); clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c new file mode 100644 index 000000000000..cffe2d722995 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_common_drv.h" +#include "adf_sysfs_ras_counters.h" + +static ssize_t errors_correctable_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR); + return 
scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_nonfatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_fatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t reset_error_counters_store(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + + if (buf[0] != '1' || count != 2) + return -EINVAL; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + return count; +} + +static DEVICE_ATTR_RO(errors_correctable); +static DEVICE_ATTR_RO(errors_nonfatal); +static DEVICE_ATTR_RO(errors_fatal); +static DEVICE_ATTR_WO(reset_error_counters); + +static struct attribute *qat_ras_attrs[] = { + &dev_attr_errors_correctable.attr, + &dev_attr_errors_nonfatal.attr, + &dev_attr_errors_fatal.attr, + &dev_attr_reset_error_counters.attr, + NULL, +}; + +static struct attribute_group qat_ras_group = { + .attrs = qat_ras_attrs, + .name = "qat_ras", +}; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group)) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_ras attribute group.\n"); +} + +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h new file mode 100644 index 000000000000..99e9d9cf57f8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RAS_H +#define ADF_RAS_H + +#include +#include + +struct adf_accel_dev; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev); +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev); + +#define ADF_RAS_ERR_CTR_READ(ras_errors, ERR) \ + atomic_read(&(ras_errors).counter[ERR]) + +#define ADF_RAS_ERR_CTR_CLEAR(ras_errors) \ + do { \ + for (int err = 0; err < ADF_RAS_ERRORS; ++err) \ + atomic_set(&(ras_errors).counter[err], 0); \ + } while (0) + +#define ADF_RAS_ERR_CTR_INC(ras_errors, ERR) \ + atomic_inc(&(ras_errors).counter[ERR]) + +#endif /* ADF_RAS_H */ -- Gitee From 4c35107a646d5f93060974431728f5a46aad85ce Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:53 +0100 Subject: [PATCH 733/953] crypto: qat - count QAT GEN4 errors ANBZ: #8589 commit 99b1c9826e481c3ebe6e7d905b7a0edf853639fd upstream. 
Intel-SIG: commit 99b1c9826e48 crypto: qat - count QAT GEN4 errors Backport to support Intel QAT in-tree driver Add logic to count correctable, non fatal and fatal error for QAT GEN4 devices. These counters are reported through sysfs attributes in the group qat_ras. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 182 ++++++++++++++++-- 1 file changed, 166 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 8ba9c9bdb89b..048c24607939 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -3,6 +3,9 @@ #include "adf_common_drv.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_ras.h" +#include "adf_sysfs_ras_counters.h" + +#define BITS_PER_REG(_n_) (sizeof(_n_) * BITS_PER_BYTE) static void enable_errsou_reporting(void __iomem *csr) { @@ -355,6 +358,8 @@ static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, "Correctable error detected in AE: 0x%x\n", aecorrerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + /* Clear interrupt from ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); } @@ -374,6 +379,8 @@ static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev, "Uncorrectable error detected in AE: 0x%x\n", aeuncorerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr); return false; @@ -395,6 +402,8 @@ static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev, "HI CPP agent command parity error: 0x%x\n", cmdparerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr); return true; @@ -413,15 +422,18 @@ static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK | ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK; - if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "RI Memory Parity uncorrectable error: 0x%x\n", rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) { dev_err(&GET_DEV(accel_dev), "RI Memory Parity fatal error: 0x%x\n", rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); reset_required = true; } @@ -445,6 +457,7 @@ static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts); ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } return false; @@ -467,6 +480,8 @@ static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } return false; @@ -487,6 +502,8 @@ static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + 
ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts); } @@ -509,6 +526,8 @@ static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI CD Parity Error: 0x%x\n", ti_cd_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts); } @@ -530,6 +549,8 @@ static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); } @@ -551,6 +572,8 @@ static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, "Command Parity error detected on IOSFP: 0x%x\n", rimiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts); return true; @@ -586,6 +609,8 @@ static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, "Uncorrectable error on ssm shared memory: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg); return false; @@ -606,6 +631,8 @@ static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, "Correctable error on ssm shared memory: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg); return false; @@ -626,6 +653,8 @@ static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev, "Uncorrectable error CPP transaction on memory target: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg); return false; @@ -642,6 +671,8 @@ static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Slice %s hang error encountered\n", slice_name); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, @@ -682,6 +713,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg); reset_required = true; @@ -693,6 +726,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error CPR_XLT: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg); reset_required = true; @@ -704,6 +739,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg); reset_required = true; @@ -715,6 +752,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error PKE: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg); reset_required = true; @@ -727,6 +766,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + 
ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg); reset_required = true; @@ -748,6 +789,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg); } @@ -757,6 +800,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err CPR_XLT: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg); } @@ -766,6 +811,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg); } @@ -775,6 +822,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err PKE: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg); } @@ -785,6 +834,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg); } } @@ -805,6 +856,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg); reset_required = true; @@ -816,6 +869,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error CPR_XLT: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg); reset_required = true; @@ -827,6 +882,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg); reset_required = true; @@ -839,6 +896,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, "SPP push command fatal error PKE: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg); reset_required = true; @@ -851,6 +910,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg); reset_required = true; @@ -872,6 +933,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg); } @@ -881,6 +944,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err CPR_XLT: 0x%x\n", reg); + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg); } @@ -890,6 +955,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg); } @@ -899,6 +966,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err PKE: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg); } @@ -909,6 +978,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP, reg); } @@ -936,8 +1007,11 @@ static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); + u32 bits_num = BITS_PER_REG(reg); bool reset_required = false; - u32 reg; + unsigned long errs_bits; + u32 bit_iterator; if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT)) return false; @@ -948,12 +1022,22 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Fatal SSM CPP parity error: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } reset_required = true; } - if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) + if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "non-Fatal SSM CPP parity error: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg); @@ -971,35 +1055,47 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); reg &= err_mask->parerr_ath_cph_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); reg &= err_mask->parerr_cpr_xlt_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); reg &= err_mask->parerr_dcpr_ucs_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); reg &= err_mask->parerr_pke_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); + } if (err_mask->parerr_wat_wcp_mask) { reg = 
ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); reg &= err_mask->parerr_wat_wcp_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, reg); + } } dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); @@ -1010,8 +1106,11 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); + u32 bits_num = BITS_PER_REG(reg); bool reset_required = false; - u32 reg; + unsigned long errs_bits; + u32 bit_iterator; if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT))) @@ -1025,17 +1124,34 @@ static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Fatal SER_SSMSH_ERR: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + reset_required = true; } - if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) + if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "non-fatal SER_SSMSH_ERR: 0x%x\n", reg); - if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) { dev_warn(&GET_DEV(accel_dev), "Correctable SER_SSMSH_ERR: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + } + ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg); return reset_required; @@ -1077,6 +1193,8 @@ static bool adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Uncorrectable error exception in SSM CMP: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg); return false; @@ -1092,14 +1210,20 @@ static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev, if (!reg) return false; - if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) + if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "Uncorrectable error exception in SSM XLT: 0x%x", reg); - if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) { dev_warn(&GET_DEV(accel_dev), "Correctable error exception in SSM XLT: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg); return false; @@ -1118,14 +1242,20 @@ static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev, if (!reg) continue; - if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) + if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "Uncorrectable error exception in SSM DCMP: 0x%x", reg); - if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) { dev_warn(&GET_DEV(accel_dev), "Correctable error exception in SSM DCMP: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + ADF_CSR_WR(csr, 
ADF_GEN4_EXPRPSSMDCPR(i), reg); } @@ -1161,11 +1291,13 @@ static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); reset_required = true; } @@ -1173,6 +1305,7 @@ static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) { dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: multiple errors: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); reset_required = true; } @@ -1204,6 +1337,8 @@ static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Fatal error in Transmit Interface: 0x%x\n", timiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + return true; } @@ -1221,6 +1356,8 @@ static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "RI CPP Uncorrectable Error: 0x%x\n", ricppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts); return false; @@ -1240,6 +1377,8 @@ static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI CPP Uncorrectable Error: 0x%x\n", ticppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts); return false; @@ -1259,6 +1398,8 @@ static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev, dev_warn(&GET_DEV(accel_dev), "ARAM correctable error : 0x%x\n", aram_cerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK; ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr); @@ -1286,10 +1427,14 @@ static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "ARAM multiple uncorrectable errors: 0x%x\n", aramuerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + reset_required = true; } else { dev_err(&GET_DEV(accel_dev), "ARAM uncorrectable error: 0x%x\n", aramuerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK; @@ -1319,10 +1464,13 @@ static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev, "Misc memory target multiple uncorrectable errors: 0x%x\n", cppmemtgterr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + reset_required = true; } else { dev_err(&GET_DEV(accel_dev), "Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK; @@ -1351,6 +1499,8 @@ static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, "Ring Pair (%u) ATU detected fault: 0x%x\n", i, atufaultstatus); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus); } } -- Gitee From 28af9068cdd78469a10ad95b1e2ac2d418781c3c Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Fri, 20 Oct 2023 15:49:24 +0200 Subject: [PATCH 734/953] crypto: qat - move admin api ANBZ: #8589 commit 8e6857f76dafba874593107f9e5c20030c5956ed upstream. 
Intel-SIG: commit 8e6857f76daf crypto: qat - move admin api Backport to support Intel QAT in-tree driver The admin API is growing and deserves its own include. Move it from adf_common_drv.h to adf_admin.h. Signed-off-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1 + .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 1 + .../crypto/intel/qat/qat_common/adf_admin.c | 1 + .../crypto/intel/qat/qat_common/adf_admin.h | 19 +++++++++++++++++++ .../crypto/intel/qat/qat_common/adf_clock.c | 1 + .../intel/qat/qat_common/adf_cnv_dbgfs.c | 1 + .../intel/qat/qat_common/adf_common_drv.h | 10 ---------- .../intel/qat/qat_common/adf_fw_counters.c | 1 + .../crypto/intel/qat/qat_common/adf_gen4_pm.c | 1 + .../qat/qat_common/adf_gen4_pm_debugfs.c | 1 + .../intel/qat/qat_common/adf_gen4_timer.c | 1 + .../intel/qat/qat_common/adf_heartbeat.c | 1 + .../qat/qat_common/adf_heartbeat_dbgfs.c | 1 + .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1 + 15 files changed, 32 insertions(+), 10 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_admin.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 695a1f149362..2dd1339bbda7 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -2,6 +2,7 @@ /* Copyright(c) 2020 - 2021 Intel Corporation */ #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 9c00c441b602..a882e0ea2279 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 355a781693eb..48cf3eb7c734 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 3a04e743497f..15ffda582334 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -7,6 +7,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_heartbeat.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h new file mode 100644 index 000000000000..03507ec3a51d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_ADMIN +#define ADF_ADMIN + +struct adf_accel_dev; + +int adf_init_admin_comms(struct adf_accel_dev *accel_dev); +void 
adf_exit_admin_comms(struct adf_accel_dev *accel_dev); +int adf_send_admin_init(struct adf_accel_dev *accel_dev); +int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); +int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); +int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); +int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); +int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c index eae44969dc84..cf89f57de2a7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_clock.c +++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c @@ -10,6 +10,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_clock.h" #include "adf_common_drv.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c index aa5b6ff1dfb4..07119c487da0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -6,6 +6,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_cnv_dbgfs.h" #include "qat_compression.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index d9342634f9c1..f06188033a93 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -87,16 +87,6 @@ void adf_reset_flr(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); -int adf_init_admin_comms(struct adf_accel_dev *accel_dev); -void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); -int adf_send_admin_init(struct adf_accel_dev *accel_dev); -int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); -int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); -int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); -int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); -int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); -int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); -int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c index 6abe4736eab8..98fb7ccfed9f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c @@ -9,6 +9,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_fw_counters.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c index c663d3a20c5b..5dafd9a270db 100644 --- 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c @@ -5,6 +5,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_gen4_pm.h" #include "adf_cfg_strings.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c index 5114759287c6..ee0b5079de3e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c @@ -6,6 +6,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_gen4_pm.h" #include "icp_qat_fw_init_admin.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c index 646c57922fcd..35ccb91d6ec1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c @@ -9,6 +9,7 @@ #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_timer.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index beef9a5f6c75..13f48d2f6da8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -12,6 +12,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_clock.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 803cbfd838f0..2661af6a2ef6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -8,6 +8,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_heartbeat.h" diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 09551f949126..af14090cc4be 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include -- Gitee From 5cc6a8967347503840d52345b31e7c248b72a90d Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:25 +0200 Subject: [PATCH 735/953] units: Add BYTES_PER_*BIT ANBZ: #8589 commit e8eed5f7366f1f5decb694168bd06fb59ef6b12c upstream. Intel-SIG: commit e8eed5f7366f units: Add BYTES_PER_*BIT Backport to support Intel QAT in-tree driver There is going to be a new user of the BYTES_PER_[K/M/G]BIT definition besides possibly existing ones. Add them to the header. 
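For reference, the new macros are plain integer arithmetic. A minimal
user-space sketch of the same conversion (KILO, MEGA, GIGA and
BITS_PER_BYTE are spelled out here only for illustration; in the kernel
they come from the units and bits headers):

    #include <stdio.h>

    #define BITS_PER_BYTE 8
    #define KILO 1000UL
    #define MEGA 1000000UL
    #define GIGA 1000000000UL

    /* Same arithmetic as the macros added below. */
    #define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) /* 125 */
    #define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) /* 125000 */
    #define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) /* 125000000 */

    int main(void)
    {
        unsigned long rate_mbit = 400; /* example: a 400 Mbit/s budget */

        /* 400 Mbit/s * 125000 bytes per Mbit = 50000000 bytes/s */
        printf("%lu Mbit/s = %lu bytes/s\n",
               rate_mbit, rate_mbit * BYTES_PER_MBIT);
        return 0;
    }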
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- include/linux/units.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/units.h b/include/linux/units.h index 2793a41e73a2..ff1bd6b5f5b3 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -31,6 +31,10 @@ #define MICROWATT_PER_MILLIWATT 1000UL #define MICROWATT_PER_WATT 1000000UL +#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) +#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) +#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) -- Gitee From b9e2ebf75e80541064505fb3a8006904f2c365d7 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:26 +0200 Subject: [PATCH 736/953] crypto: qat - add bits.h to icp_qat_hw.h ANBZ: #8589 commit 02e7f67c47269135f41650ac1b693034e3e8f507 upstream. Intel-SIG: commit 02e7f67c4726 crypto: qat - add bits.h to icp_qat_hw.h Backport to support Intel QAT in-tree driver Some enums use the macro BIT. Include bits.h as it is missing. Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index 0c8883e2ccc6..eb2ef225bcee 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -3,6 +3,8 @@ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ +#include + enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_0 = 0, ICP_QAT_HW_AE_1 = 1, -- Gitee From 078b9c59b8023a056cfa48cd105c126bc732ce9b Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:27 +0200 Subject: [PATCH 737/953] crypto: qat - add retrieval of fw capabilities ANBZ: #8589 commit c7fd53796dbd09c3ef55032925bc7f8f238f9405 upstream. Intel-SIG: commit c7fd53796dbd crypto: qat - add retrieval of fw capabilities Backport to support Intel QAT in-tree driver The QAT firmware provides a mechanism to retrieve its capabilities through the init admin interface. Add logic to retrieve the firmware capability mask from the firmware through the init/admin channel. This mask reports if the power management, telemetry and rate limiting features are supported. The fw capabilities are stored in the accel_dev structure and are used to detect if a certain feature is supported by the firmware loaded in the device. This is supported only by devices which have an admin AE. 
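A consumer of this mask only needs a bitwise test against
hw_data->fw_capabilities; the rate limiting patch later in this series
does exactly that with a GENMASK(6, 4) field of this mask. A minimal
sketch of the consumer pattern, where ADF_FW_CAP_EXAMPLE and its bit
position are hypothetical placeholders rather than real firmware bits:

    /* Hypothetical capability bit, for illustration only. */
    #define ADF_FW_CAP_EXAMPLE BIT(0)

    static bool adf_example_cap_supported(struct adf_accel_dev *accel_dev)
    {
        u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;

        /* Devices without an admin AE never set any capability bits. */
        return fw_caps & ADF_FW_CAP_EXAMPLE;
    }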
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_admin.c | 23 +++++++++++++++++++ .../qat/qat_common/icp_qat_fw_init_admin.h | 3 +++ 3 files changed, 27 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 1c11d90bd9f3..908959288ce5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -253,6 +253,7 @@ struct adf_hw_device_data { u32 straps; u32 accel_capabilities_mask; u32 extended_dc_capabilities; + u16 fw_capabilities; u32 clock_frequency; u32 instance_id; u16 accel_mask; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 15ffda582334..50e054ba2c33 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -310,6 +310,26 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) return !strcmp(services, "dcc"); } +static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + if (!ae_mask) + return 0; + + req.cmd_id = ICP_QAT_FW_CAPABILITIES_GET; + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + *caps = resp.fw_capabilities; + + return 0; +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. 
@@ -320,6 +340,7 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
  */
 int adf_send_admin_init(struct adf_accel_dev *accel_dev)
 {
+	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
 	u32 dc_capabilities = 0;
 	int ret;
 
@@ -340,6 +361,8 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
 	}
 	accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
 
+	adf_get_fw_capabilities(accel_dev, &hw_data->fw_capabilities);
+
 	return adf_init_ae(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_send_admin_init);
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 9e5ce419d875..e4de9a30e0bd 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id {
 	ICP_QAT_FW_HEARTBEAT_SYNC = 7,
 	ICP_QAT_FW_HEARTBEAT_GET = 8,
 	ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+	ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10,
 	ICP_QAT_FW_DC_CHAIN_INIT = 11,
 	ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
 	ICP_QAT_FW_TIMER_GET = 19,
@@ -109,10 +110,12 @@ struct icp_qat_fw_init_admin_resp {
 			__u32 unsuccessful_count;
 			__u64 resrvd8;
 		};
+		__u16 fw_capabilities;
 	};
 } __packed;
 
 #define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
+#define ICP_QAT_FW_CAPABILITIES_GET ICP_QAT_FW_CRYPTO_CAPABILITY_GET
 
 #define ICP_QAT_NUMBER_OF_PM_EVENTS 8
 
-- 
Gitee

From 77b83dfccb91e6797279dc979f68b42ed55584b9 Mon Sep 17 00:00:00 2001
From: Damian Muszynski
Date: Fri, 20 Oct 2023 15:49:28 +0200
Subject: [PATCH 738/953] crypto: qat - add rate limiting feature to qat_4xxx

ANBZ: #8589

commit d9fb8408376e70a903d06ac86e42e0d0f44a5785 upstream.

Intel-SIG: commit d9fb8408376e crypto: qat - add rate limiting feature to qat_4xxx
Backport to support Intel QAT in-tree driver

The Rate Limiting (RL) feature allows controlling the rate of requests
that can be submitted on a ring pair (RP). This allows sharing a QAT
device among multiple users while ensuring a guaranteed throughput.

The driver provides a mechanism that allows users to set policies that
are programmed into the device. The device then enforces those policies.

Configuration of RL is accomplished through entities called SLAs
(Service Level Agreement). Each SLA object gets a unique identifier
and defines the limitations for a single service across up to four
ring pairs (the number of RPs allocated to a single VF).

The rate is determined using two fields:
  * CIR (Committed Information Rate), i.e., the guaranteed rate.
  * PIR (Peak Information Rate), i.e., the maximum rate achievable
    when the device has available resources.
The rate values are expressed in permille scale, i.e. 0-1000.

Ring pair selection is achieved by providing a 64-bit mask, where
each bit corresponds to one of the ring pairs.

This adds an interface and logic that allow adding, updating,
retrieving and removing an SLA.
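A minimal sketch of how a caller could exercise the new interface. The
structure, constants and function below are the ones added by this patch
in adf_rl.h; the mask and rate values are arbitrary examples and assume
ring pairs 0 and 1 are configured for the sym service:

    /* Guarantee 30% and cap at 50% of the device's sym throughput. */
    static int example_add_sym_sla(struct adf_accel_dev *accel_dev)
    {
        struct adf_rl_sla_input_data sla_in = {
            .rp_mask = BIT(0) | BIT(1),        /* ring pairs 0 and 1 */
            .parent_id = RL_PARENT_DEFAULT_ID, /* default cluster */
            .cir = 300,                        /* guaranteed rate, permille */
            .pir = 500,                        /* peak rate, permille */
            .type = RL_LEAF,
            .srv = ADF_SVC_SYM,
        };
        int ret;

        ret = adf_rl_add_sla(accel_dev, &sla_in);
        if (ret)
            return ret;

        /* On success, the new SLA's ID is written back to sla_in.sla_id. */
        return sla_in.sla_id;
    }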
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 20 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 13 +- drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 3 + .../crypto/intel/qat/qat_common/adf_admin.c | 47 + .../crypto/intel/qat/qat_common/adf_admin.h | 8 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 7 + .../crypto/intel/qat/qat_common/adf_init.c | 10 + drivers/crypto/intel/qat/qat_common/adf_rl.c | 1159 +++++++++++++++++ drivers/crypto/intel/qat/qat_common/adf_rl.h | 169 +++ .../intel/qat/qat_common/adf_rl_admin.c | 97 ++ .../intel/qat/qat_common/adf_rl_admin.h | 18 + .../qat/qat_common/icp_qat_fw_init_admin.h | 38 + 13 files changed, 1590 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_rl.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_rl.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_rl_admin.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_rl_admin.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 2dd1339bbda7..b64aaecdd98b 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -320,6 +320,24 @@ static u32 get_heartbeat_clock(struct adf_hw_device_data *self) return ADF_4XXX_KPT_COUNTER_FREQ; } +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) +{ + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC; + rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF; +} + static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; @@ -579,12 +597,14 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->stop_timer = adf_gen4_timer_stop; hw_data->get_hb_clock = get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_4XXX_AE_FREQ; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_init_rl_data(&hw_data->rl_data); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index efd5dadc19ed..33423295e90f 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -82,8 +82,19 @@ #define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin" 
#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin" +/* RL constants */ +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_4XXX_RL_DCPR_CORRECTION 1 +#define ADF_4XXX_RL_SCANS_PER_SEC 954 +#define ADF_4XXX_RL_MAX_TP_ASYM 173750UL +#define ADF_4XXX_RL_MAX_TP_SYM 95000UL +#define ADF_4XXX_RL_MAX_TP_DC 45000UL +#define ADF_4XXX_RL_SLICE_REF 1000UL + /* Clocks frequency */ -#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) +#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) +#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ) /* qat_4xxx fuse bits are different from old GENs, redefine them */ enum icp_qat_4xxx_slice_mask { diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index fcf74c0dc534..5f09dfd4798b 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -29,6 +29,8 @@ intel_qat-objs := adf_cfg.o \ qat_algs.o \ qat_asym_algs.o \ qat_algs_send.o \ + adf_rl.o \ + adf_rl_admin.o \ qat_uclo.o \ qat_hal.o \ qat_bl.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 908959288ce5..30c2b15ff801 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -9,6 +9,7 @@ #include #include #include "adf_cfg_common.h" +#include "adf_rl.h" #include "adf_pfvf_msg.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" @@ -247,6 +248,7 @@ struct adf_hw_device_data { struct adf_dc_ops dc_ops; struct adf_ras_ops ras_ops; struct adf_dev_err_mask dev_err_mask; + struct adf_rl_hw_data rl_data; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -358,6 +360,7 @@ struct adf_accel_dev { struct adf_accel_pci accel_pci_dev; struct adf_timer *timer; struct adf_heartbeat *heartbeat; + struct adf_rl *rate_limiting; union { struct { /* protects VF2PF interrupts access */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 50e054ba2c33..54b673ec2362 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -330,6 +330,53 @@ static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps) return 0; } +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_RL_INIT; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + memcpy(slices, &resp.slices, sizeof(*slices)); + + return 0; +} + +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + + /* + * req struct filled in rl implementation. 
Used commands + * ICP_QAT_FW_RL_ADD for a new SLA + * ICP_QAT_FW_RL_UPDATE for update SLA + */ + return adf_send_admin(accel_dev, req, &resp, ae_mask); +} + +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + + req.cmd_id = ICP_QAT_FW_RL_REMOVE; + req.node_id = node_id; + req.node_type = node_type; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h index 03507ec3a51d..55cbcbc66c9f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.h +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -3,6 +3,8 @@ #ifndef ADF_ADMIN #define ADF_ADMIN +#include "icp_qat_fw_init_admin.h" + struct adf_accel_dev; int adf_init_admin_comms(struct adf_accel_dev *accel_dev); @@ -12,6 +14,12 @@ int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices); +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req); +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 02d7a019ebf8..1813fe1d5a06 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -139,6 +139,13 @@ do { \ /* Number of heartbeat counter pairs */ #define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE +/* Rate Limiting */ +#define ADF_GEN4_RL_R2L_OFFSET 0x508000 +#define ADF_GEN4_RL_L2C_OFFSET 0x509000 +#define ADF_GEN4_RL_C2S_OFFSET 0x508818 +#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 +#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index ef51c4d028d2..81c39f3d07e1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -9,6 +9,7 @@ #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_heartbeat.h" +#include "adf_rl.h" #include "adf_sysfs_ras_counters.h" static LIST_HEAD(service_table); @@ -137,6 +138,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } adf_heartbeat_init(accel_dev); + ret = adf_rl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; /* * Subservice initialisation is divided into two 
stages: init and start. @@ -212,6 +216,9 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) } adf_heartbeat_start(accel_dev); + ret = adf_rl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { @@ -272,6 +279,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) return; + adf_rl_stop(accel_dev); adf_dbgfs_rm(accel_dev); adf_sysfs_stop_ras(accel_dev); @@ -359,6 +367,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, service->init_status); } + adf_rl_exit(accel_dev); + if (hw_data->ras_ops.disable_ras_errors) hw_data->ras_ops.disable_ras_errors(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c new file mode 100644 index 000000000000..88a03105b52a --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include +#include + +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_rl_admin.h" +#include "adf_rl.h" + +#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U +#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U +#define RL_TOKEN_PCIE_SIZE 64 +#define RL_TOKEN_ASYM_SIZE 1024 +#define RL_CSR_SIZE 4U +#define RL_CAPABILITY_MASK GENMASK(6, 4) +#define RL_CAPABILITY_VALUE 0x70 +#define RL_VALIDATE_NON_ZERO(input) ((input) == 0) +#define ROOT_MASK GENMASK(1, 0) +#define CLUSTER_MASK GENMASK(3, 0) +#define LEAF_MASK GENMASK(5, 0) + +static int validate_user_input(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + bool is_update) +{ + const unsigned long rp_mask = sla_in->rp_mask; + size_t rp_mask_size; + int i, cnt; + + if (sla_in->pir < sla_in->cir) { + dev_notice(&GET_DEV(accel_dev), + "PIR must be >= CIR, setting PIR to CIR\n"); + sla_in->pir = sla_in->cir; + } + + if (!is_update) { + cnt = 0; + rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE; + for_each_set_bit(i, &rp_mask, rp_mask_size) { + if (++cnt > RL_RP_CNT_PER_LEAF_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Too many ring pairs selected for this SLA\n"); + return -EINVAL; + } + } + + if (sla_in->srv >= ADF_SVC_NONE) { + dev_notice(&GET_DEV(accel_dev), + "Wrong service type\n"); + return -EINVAL; + } + + if (sla_in->type > RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), + "Wrong node type\n"); + return -EINVAL; + } + + if (sla_in->parent_id < RL_PARENT_DEFAULT_ID || + sla_in->parent_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Wrong parent ID\n"); + return -EINVAL; + } + } + + return 0; +} + +static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id) +{ + struct rl_sla *sla; + + if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n"); + return -EINVAL; + } + + sla = accel_dev->rate_limiting->sla[sla_id]; + + if (!sla) { + dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n"); + return -EINVAL; + } + + if (sla->type != RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n"); + return -EINVAL; + } + + return 0; +} + +/** + * find_parent() - Find the parent for a new SLA + * @rl_data: pointer to ratelimiting data + * @sla_in: pointer to user 
input data for a new SLA + * + * Function returns a pointer to the parent SLA. If the parent ID is provided + * as input in the user data, then such ID is validated and the parent SLA + * is returned. + * Otherwise, it returns the default parent SLA (root or cluster) for + * the new object. + * + * Return: + * * Pointer to the parent SLA object + * * NULL - when parent cannot be found + */ +static struct rl_sla *find_parent(struct adf_rl *rl_data, + struct adf_rl_sla_input_data *sla_in) +{ + int input_parent_id = sla_in->parent_id; + struct rl_sla *root = NULL; + struct rl_sla *parent_sla; + int i; + + if (sla_in->type == RL_ROOT) + return NULL; + + if (input_parent_id > RL_PARENT_DEFAULT_ID) { + parent_sla = rl_data->sla[input_parent_id]; + /* + * SLA can be a parent if it has the same service as the child + * and its type is higher in the hierarchy, + * for example the parent type of a LEAF must be a CLUSTER. + */ + if (parent_sla && parent_sla->srv == sla_in->srv && + parent_sla->type == sla_in->type - 1) + return parent_sla; + + return NULL; + } + + /* If input_parent_id is not valid, get root for this service type. */ + for (i = 0; i < RL_ROOT_MAX; i++) { + if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) { + root = rl_data->root[i]; + break; + } + } + + if (!root) + return NULL; + + /* + * If the type of this SLA is cluster, then return the root. + * Otherwise, find the default (i.e. first) cluster for this service. + */ + if (sla_in->type == RL_CLUSTER) + return root; + + for (i = 0; i < RL_CLUSTER_MAX; i++) { + if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root) + return rl_data->cluster[i]; + } + + return NULL; +} + +static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv) +{ + switch (rl_srv) { + case ADF_SVC_ASYM: + return ASYM; + case ADF_SVC_SYM: + return SYM; + case ADF_SVC_DC: + return COMP; + default: + return UNUSED; + } +} + +/** + * get_sla_arr_of_type() - Returns a pointer to SLA type specific array + * @rl_data: pointer to ratelimiting data + * @type: SLA type + * @sla_arr: pointer to variable where requested pointer will be stored + * + * Return: Max number of elements allowed for the returned array + */ +static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr) +{ + switch (type) { + case RL_LEAF: + *sla_arr = rl_data->leaf; + return RL_LEAF_MAX; + case RL_CLUSTER: + *sla_arr = rl_data->cluster; + return RL_CLUSTER_MAX; + case RL_ROOT: + *sla_arr = rl_data->root; + return RL_ROOT_MAX; + default: + *sla_arr = NULL; + return 0; + } +} + +static bool is_service_enabled(struct adf_accel_dev *accel_dev, + enum adf_base_services rl_srv) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv); + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u8 rps_per_bundle = hw_data->num_banks_per_vf; + int i; + + for (i = 0; i < rps_per_bundle; i++) { + if (GET_SRV_TYPE(accel_dev, i) == arb_srv) + return true; + } + + return false; +} + +/** + * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask + * @accel_dev: pointer to acceleration device structure + * @sla: SLA object data where result will be written + * @rp_mask: bitmask of ring pair IDs + * + * Function tries to convert provided bitmap to an array of IDs. It checks if + * RPs aren't in use, are assigned to SLA service or if a number of provided + * IDs is not too big. If successful, writes the result into the field + * sla->ring_pairs_cnt. 
+ * + * Return: + * * 0 - ok + * * -EINVAL - ring pairs array cannot be created from provided mask + */ +static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + const unsigned long rp_mask) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv); + u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf; + bool *rp_in_use = accel_dev->rate_limiting->rp_in_use; + size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids); + u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks; + u16 cnt = 0; + u16 rp_id; + + for_each_set_bit(rp_id, &rp_mask, rp_id_max) { + if (cnt >= rp_cnt_max) { + dev_notice(&GET_DEV(accel_dev), + "Assigned more ring pairs than supported"); + return -EINVAL; + } + + if (rp_in_use[rp_id]) { + dev_notice(&GET_DEV(accel_dev), + "RP %u already assigned to other SLA", rp_id); + return -EINVAL; + } + + if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) { + dev_notice(&GET_DEV(accel_dev), + "RP %u does not support SLA service", rp_id); + return -EINVAL; + } + + sla->ring_pairs_ids[cnt++] = rp_id; + } + + sla->ring_pairs_cnt = cnt; + + return 0; +} + +static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used) +{ + u16 rp_id; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + rp_id = sla->ring_pairs_ids[i]; + rp_in_use[rp_id] = used; + } +} + +static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.r2l_offset; + u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK); + u32 offset; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]); + ADF_CSR_WR(pmisc_addr, offset, node_id); + } +} + +static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.l2c_offset; + u32 node_id = sla->node_id & LEAF_MASK; + u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK); + u32 offset; + + offset = base_offset + (RL_CSR_SIZE * node_id); + ADF_CSR_WR(pmisc_addr, offset, parent_id); +} + +static void assign_cluster_to_root(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.c2s_offset; + u32 node_id = sla->node_id & CLUSTER_MASK; + u32 parent_id = clear ? 
0U : (sla->parent->node_id & ROOT_MASK);
+	u32 offset;
+
+	offset = base_offset + (RL_CSR_SIZE * node_id);
+	ADF_CSR_WR(pmisc_addr, offset, parent_id);
+}
+
+static void assign_node_to_parent(struct adf_accel_dev *accel_dev,
+				  struct rl_sla *sla, bool clear_assignment)
+{
+	switch (sla->type) {
+	case RL_LEAF:
+		assign_rps_to_leaf(accel_dev, sla, clear_assignment);
+		assign_leaf_to_cluster(accel_dev, sla, clear_assignment);
+		break;
+	case RL_CLUSTER:
+		assign_cluster_to_root(accel_dev, sla, clear_assignment);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * can_parent_afford_sla() - Verifies if the parent allows creating an SLA
+ * @sla_in: pointer to user input data for a new SLA
+ * @sla_parent: pointer to parent SLA object
+ * @sla_cir: current child CIR value (only for update)
+ * @is_update: request is an update
+ *
+ * Algorithm verifies if the parent has enough remaining budget to take
+ * assignment of a child with the provided parameters. In the update case,
+ * the current CIR value must be returned to the budget first.
+ * The PIR value cannot exceed the PIR assigned to the parent.
+ *
+ * Return:
+ * * true - SLA can be created
+ * * false - SLA cannot be created
+ */
+static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in,
+				  struct rl_sla *sla_parent, u32 sla_cir,
+				  bool is_update)
+{
+	u32 rem_cir = sla_parent->rem_cir;
+
+	if (is_update)
+		rem_cir += sla_cir;
+
+	if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir)
+		return false;
+
+	return true;
+}
+
+/**
+ * can_node_afford_update() - Verifies if an SLA can be updated with input data
+ * @sla_in: pointer to user input data for a new SLA
+ * @sla: pointer to SLA object selected for update
+ *
+ * Algorithm verifies if the new CIR value is big enough to satisfy currently
+ * assigned child SLAs and if the PIR can be updated.
+ *
+ * Return:
+ * * true - SLA can be updated
+ * * false - SLA cannot be updated
+ */
+static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in,
+				   struct rl_sla *sla)
+{
+	u32 cir_in_use = sla->cir - sla->rem_cir;
+
+	/* new CIR cannot be smaller than the currently consumed value */
+	if (cir_in_use > sla_in->cir)
+		return false;
+
+	/* PIR of root/cluster cannot be reduced in a node with assigned children */
+	if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0)
+		return false;
+
+	return true;
+}
+
+static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla,
+			     struct adf_rl_sla_input_data *sla_in,
+			     bool is_update)
+{
+	u32 max_val = rl_data->device_data->scale_ref;
+	struct rl_sla *parent = sla->parent;
+	bool ret = true;
+
+	if (sla_in->cir > max_val || sla_in->pir > max_val)
+		ret = false;
+
+	switch (sla->type) {
+	case RL_LEAF:
+		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
+					     is_update);
+		break;
+	case RL_CLUSTER:
+		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
+					     is_update);
+
+		if (is_update)
+			ret &= can_node_afford_update(sla_in, sla);
+
+		break;
+	case RL_ROOT:
+		if (is_update)
+			ret &= can_node_afford_update(sla_in, sla);
+
+		break;
+	default:
+		ret = false;
+		break;
+	}
+
+	return ret;
+}
+
+static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update)
+{
+	switch (sla->type) {
+	case RL_LEAF:
+		if (is_update)
+			sla->parent->rem_cir += old_cir;
+
+		sla->parent->rem_cir -= sla->cir;
+		sla->rem_cir = 0;
+		break;
+	case RL_CLUSTER:
+		if (is_update) {
+			sla->parent->rem_cir += old_cir;
+			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
+		} else {
+			sla->rem_cir = sla->cir;
+		}
+
+		sla->parent->rem_cir -= sla->cir;
+		break;
+	case RL_ROOT:
+ if (is_update) + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); + else + sla->rem_cir = sla->cir; + break; + default: + break; + } +} + +/** + * get_next_free_sla_id() - finds next free ID in the SLA array + * @rl_data: Pointer to ratelimiting data structure + * + * Return: + * * 0 : RL_NODES_CNT_MAX - correct ID + * * -ENOSPC - all SLA slots are in use + */ +static int get_next_free_sla_id(struct adf_rl *rl_data) +{ + int i = 0; + + while (i < RL_NODES_CNT_MAX && rl_data->sla[i++]) + ; + + if (i == RL_NODES_CNT_MAX) + return -ENOSPC; + + return i - 1; +} + +/** + * get_next_free_node_id() - finds next free ID in the array of that node type + * @rl_data: Pointer to ratelimiting data structure + * @sla: Pointer to SLA object for which the ID is searched + * + * Return: + * * 0 : RL_[NODE_TYPE]_MAX - correct ID + * * -ENOSPC - all slots of that type are in use + */ +static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla) +{ + struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev); + int max_id, i, step, rp_per_leaf; + struct rl_sla **sla_list; + + rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf; + + /* + * Static nodes mapping: + * root0 - cluster[0,4,8,12] - leaf[0-15] + * root1 - cluster[1,5,9,13] - leaf[16-31] + * root2 - cluster[2,6,10,14] - leaf[32-47] + */ + switch (sla->type) { + case RL_LEAF: + i = sla->srv * rp_per_leaf; + step = 1; + max_id = i + rp_per_leaf; + sla_list = rl_data->leaf; + break; + case RL_CLUSTER: + i = sla->srv; + step = 4; + max_id = RL_CLUSTER_MAX; + sla_list = rl_data->cluster; + break; + case RL_ROOT: + return sla->srv; + default: + return -EINVAL; + } + + while (i < max_id && sla_list[i]) + i += step; + + if (i >= max_id) + return -ENOSPC; + + return i; +} + +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 avail_slice_cycles, allocated_tokens; + + if (!sla_val) + return 0; + + avail_slice_cycles = hw_data->clock_frequency; + + switch (svc_type) { + case ADF_SVC_ASYM: + avail_slice_cycles *= device_data->slices.pke_cnt; + break; + case ADF_SVC_SYM: + avail_slice_cycles *= device_data->slices.cph_cnt; + break; + case ADF_SVC_DC: + avail_slice_cycles *= device_data->slices.dcpr_cnt; + break; + default: + break; + } + + do_div(avail_slice_cycles, device_data->scan_interval); + allocated_tokens = avail_slice_cycles * sla_val; + do_div(allocated_tokens, device_data->scale_ref); + + return allocated_tokens; +} + +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 allocated_ae_cycles, avail_ae_cycles; + + if (!sla_val) + return 0; + + avail_ae_cycles = hw_data->clock_frequency; + avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1; + do_div(avail_ae_cycles, device_data->scan_interval); + + sla_val *= device_data->max_tp[svc_type]; + sla_val /= device_data->scale_ref; + + allocated_ae_cycles = (sla_val * avail_ae_cycles); + do_div(allocated_ae_cycles, device_data->max_tp[svc_type]); + + return allocated_ae_cycles; +} + +u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out) +{ + struct adf_rl_hw_data *device_data = 
&accel_dev->hw_device->rl_data; + u64 sla_to_bytes, allocated_bw, sla_scaled; + + if (!sla_val) + return 0; + + sla_to_bytes = sla_val; + sla_to_bytes *= device_data->max_tp[svc_type]; + do_div(sla_to_bytes, device_data->scale_ref); + + sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : + BYTES_PER_MBIT; + if (svc_type == ADF_SVC_DC && is_bw_out) + sla_to_bytes *= device_data->slices.dcpr_cnt - + device_data->dcpr_correction; + + sla_scaled = sla_to_bytes * device_data->pcie_scale_mul; + do_div(sla_scaled, device_data->pcie_scale_div); + allocated_bw = sla_scaled; + do_div(allocated_bw, RL_TOKEN_PCIE_SIZE); + do_div(allocated_bw, device_data->scan_interval); + + return allocated_bw; +} + +/** + * add_new_sla_entry() - creates a new SLA object and fills it with user data + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new SLA + * @sla_out: Pointer to variable that will contain the address of a new + * SLA object if the operation succeeds + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +static int add_new_sla_entry(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + struct rl_sla **sla_out) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla; + int ret = 0; + + sla = kzalloc(sizeof(*sla), GFP_KERNEL); + if (!sla) { + ret = -ENOMEM; + goto ret_err; + } + *sla_out = sla; + + if (!is_service_enabled(accel_dev, sla_in->srv)) { + dev_notice(&GET_DEV(accel_dev), + "Provided service is not enabled\n"); + ret = -EINVAL; + goto ret_err; + } + + sla->srv = sla_in->srv; + sla->type = sla_in->type; + ret = get_next_free_node_id(rl_data, sla); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Exceeded number of available nodes for that service\n"); + goto ret_err; + } + sla->node_id = ret; + + ret = get_next_free_sla_id(rl_data); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Allocated maximum SLAs number\n"); + goto ret_err; + } + sla->sla_id = ret; + + sla->parent = find_parent(rl_data, sla_in); + if (!sla->parent && sla->type != RL_ROOT) { + if (sla_in->parent_id != RL_PARENT_DEFAULT_ID) + dev_notice(&GET_DEV(accel_dev), + "Provided parent ID does not exist or cannot be parent for this SLA."); + else + dev_notice(&GET_DEV(accel_dev), + "Unable to find parent node for this service. 
Is service enabled?"); + ret = -EINVAL; + goto ret_err; + } + + if (sla->type == RL_LEAF) { + ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask); + if (!sla->ring_pairs_cnt || ret) { + dev_notice(&GET_DEV(accel_dev), + "Unable to find ring pairs to assign to the leaf"); + if (!ret) + ret = -EINVAL; + + goto ret_err; + } + } + + return 0; + +ret_err: + kfree(sla); + *sla_out = NULL; + + return ret; +} + +static int initialize_default_nodes(struct adf_accel_dev *accel_dev) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct adf_rl_hw_data *device_data = rl_data->device_data; + struct adf_rl_sla_input_data sla_in = { }; + int ret = 0; + int i; + + /* Init root for each enabled service */ + sla_in.type = RL_ROOT; + sla_in.parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!is_service_enabled(accel_dev, i)) + continue; + + sla_in.cir = device_data->scale_ref; + sla_in.pir = sla_in.cir; + sla_in.srv = i; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + /* Init default cluster for each root */ + sla_in.type = RL_CLUSTER; + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!rl_data->root[i]) + continue; + + sla_in.cir = rl_data->root[i]->cir; + sla_in.pir = sla_in.cir; + sla_in.srv = rl_data->root[i]->srv; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + return 0; +} + +static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) +{ + bool *rp_in_use = rl_data->rp_in_use; + struct rl_sla **sla_type_arr = NULL; + int i, sla_id, node_id; + u32 old_cir; + + sla_id = sla->sla_id; + node_id = sla->node_id; + old_cir = sla->cir; + sla->cir = 0; + sla->pir = 0; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + rp_in_use[sla->ring_pairs_ids[i]] = false; + + update_budget(sla, old_cir, true); + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + assign_node_to_parent(rl_data->accel_dev, sla, true); + adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); + mark_rps_usage(sla, rl_data->rp_in_use, false); + + kfree(sla); + rl_data->sla[sla_id] = NULL; + sla_type_arr[node_id] = NULL; +} + +/** + * add_update_sla() - handles the creation and the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new/updated SLA + * @is_update: flag to indicate if this is an update or an add operation + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - user input data cannot be used to create SLA + * * -ENOSPC - all available SLAs are in use + */ +static int add_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, bool is_update) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + struct rl_sla *sla = NULL; + u32 old_cir = 0; + int ret; + + if (!sla_in) { + dev_warn(&GET_DEV(accel_dev), + "SLA input data pointer is missing\n"); + ret = -EFAULT; + goto ret_err; + } + + /* Input validation */ + ret = validate_user_input(accel_dev, sla_in, is_update); + if (ret) + goto ret_err; + + mutex_lock(&rl_data->rl_lock); + + if (is_update) { + ret = validate_sla_id(accel_dev, sla_in->sla_id); + if (ret) + goto ret_err; + + sla = rl_data->sla[sla_in->sla_id]; + old_cir = sla->cir; + } else { + ret = add_new_sla_entry(accel_dev, sla_in, &sla); + if (ret) + goto ret_err; + } + + if (!is_enough_budget(rl_data, sla, sla_in, is_update)) { + dev_notice(&GET_DEV(accel_dev), + "Input value exceeds the remaining budget%s\n", + is_update ? 
" or more budget is already in use" : ""); + ret = -EINVAL; + goto ret_err; + } + sla->cir = sla_in->cir; + sla->pir = sla_in->pir; + + /* Apply SLA */ + assign_node_to_parent(accel_dev, sla, false); + ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "Failed to apply an SLA\n"); + goto ret_err; + } + update_budget(sla, old_cir, is_update); + + if (!is_update) { + mark_rps_usage(sla, rl_data->rp_in_use, true); + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + sla_type_arr[sla->node_id] = sla; + rl_data->sla[sla->sla_id] = sla; + } + + sla_in->sla_id = sla->sla_id; + goto ret_ok; + +ret_err: + if (!is_update) { + sla_in->sla_id = -1; + kfree(sla); + } +ret_ok: + mutex_unlock(&rl_data->rl_lock); + return ret; +} + +/** + * adf_rl_add_sla() - handles the creation of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to add an SLA + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, false); +} + +/** + * adf_rl_update_sla() - handles the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to update an SLA + * + * Return: + * * 0 - ok + * * -EINVAL - user input data cannot be used to update SLA + */ +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, true); +} + +/** + * adf_rl_get_sla() - returns an existing SLA data + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user data where SLA info will be stored + * + * The sla_id for which data are requested should be set in sla_id structure + * + * Return: + * * 0 - ok + * * -EINVAL - provided sla_id does not exist + */ +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + struct rl_sla *sla; + int ret, i; + + ret = validate_sla_id(accel_dev, sla_in->sla_id); + if (ret) + return ret; + + sla = accel_dev->rate_limiting->sla[sla_in->sla_id]; + sla_in->type = sla->type; + sla_in->srv = sla->srv; + sla_in->cir = sla->cir; + sla_in->pir = sla->pir; + sla_in->rp_mask = 0U; + if (sla->parent) + sla_in->parent_id = sla->parent->sla_id; + else + sla_in->parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]); + + return 0; +} + +/** + * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for + * selected service or provided sla_id + * @accel_dev: pointer to acceleration device structure + * @srv: service ID for which capability is requested + * @sla_id: ID of the cluster or root to which we want assign a new SLA + * + * Check if the provided SLA id is valid. If it is and the service matches + * the requested service and the type is cluster or root, return the remaining + * capability. + * If the provided ID does not match the service or type, return the remaining + * capacity of the default cluster for that service. 
+ *
+ * Return:
+ * * Positive value - correct remaining value
+ * * -EINVAL - algorithm cannot find a remaining value for provided data
+ */
+int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
+				    enum adf_base_services srv, int sla_id)
+{
+	struct adf_rl *rl_data = accel_dev->rate_limiting;
+	struct rl_sla *sla = NULL;
+	int i;
+
+	if (srv >= ADF_SVC_NONE)
+		return -EINVAL;
+
+	if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
+		sla = rl_data->sla[sla_id];
+
+		if (sla->srv == srv && sla->type <= RL_CLUSTER)
+			goto ret_ok;
+	}
+
+	for (i = 0; i < RL_CLUSTER_MAX; i++) {
+		if (!rl_data->cluster[i])
+			continue;
+
+		if (rl_data->cluster[i]->srv == srv) {
+			sla = rl_data->cluster[i];
+			goto ret_ok;
+		}
+	}
+
+	return -EINVAL;
+ret_ok:
+	return sla->rem_cir;
+}
+
+/**
+ * adf_rl_remove_sla() - removes the SLA with the provided sla_id
+ * @accel_dev: pointer to acceleration device structure
+ * @sla_id: ID of the SLA to be removed
+ *
+ * Return:
+ * * 0 - ok
+ * * -EINVAL - wrong sla_id or the SLA still has assigned children
+ */
+int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id)
+{
+	struct adf_rl *rl_data = accel_dev->rate_limiting;
+	struct rl_sla *sla;
+	int ret = 0;
+
+	mutex_lock(&rl_data->rl_lock);
+	ret = validate_sla_id(accel_dev, sla_id);
+	if (ret)
+		goto err_ret;
+
+	sla = rl_data->sla[sla_id];
+
+	if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) {
+		dev_notice(&GET_DEV(accel_dev),
+			   "To remove parent SLA all its children must be removed first");
+		ret = -EINVAL;
+		goto err_ret;
+	}
+
+	clear_sla(rl_data, sla);
+
+err_ret:
+	mutex_unlock(&rl_data->rl_lock);
+	return ret;
+}
+
+/**
+ * adf_rl_remove_sla_all() - removes all SLAs from the device
+ * @accel_dev: pointer to acceleration device structure
+ * @incl_default: set to true if the default SLAs should also be removed
+ */
+void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
+{
+	struct adf_rl *rl_data = accel_dev->rate_limiting;
+	int end_type = incl_default ?
RL_ROOT : RL_LEAF; + struct rl_sla **sla_type_arr = NULL; + u32 max_id; + int i, j; + + mutex_lock(&rl_data->rl_lock); + + /* Unregister and remove all SLAs */ + for (j = RL_LEAF; j >= end_type; j--) { + max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr); + + for (i = 0; i < max_id; i++) { + if (!sla_type_arr[i]) + continue; + + clear_sla(rl_data, sla_type_arr[i]); + } + } + + mutex_unlock(&rl_data->rl_lock); +} + +int adf_rl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data; + struct adf_rl *rl; + int ret = 0; + + /* Validate device parameters */ + if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) { + ret = -EOPNOTSUPP; + goto err_ret; + } + + rl = kzalloc(sizeof(*rl), GFP_KERNEL); + if (!rl) { + ret = -ENOMEM; + goto err_ret; + } + + mutex_init(&rl->rl_lock); + rl->device_data = &accel_dev->hw_device->rl_data; + rl->accel_dev = accel_dev; + accel_dev->rate_limiting = rl; + +err_ret: + return ret; +} + +int adf_rl_start(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data; + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + int ret; + + if (!accel_dev->rate_limiting) { + ret = -EOPNOTSUPP; + goto ret_err; + } + + if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { + dev_info(&GET_DEV(accel_dev), "not supported\n"); + ret = -EOPNOTSUPP; + goto ret_free; + } + + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset, + RL_TOKEN_GRANULARITY_PCIEIN_BUCKET); + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset, + RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET); + + ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices); + if (ret) { + dev_err(&GET_DEV(accel_dev), "initialization failed\n"); + goto ret_free; + } + + ret = initialize_default_nodes(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to initialize default SLAs\n"); + goto ret_sla_rm; + } + + return 0; + +ret_sla_rm: + adf_rl_remove_sla_all(accel_dev, true); +ret_free: + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +ret_err: + return ret; +} + +void adf_rl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + adf_rl_remove_sla_all(accel_dev, true); +} + +void adf_rl_exit(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h new file mode 100644 index 000000000000..1ccb6613c92e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_H_ +#define ADF_RL_H_ + +#include +#include + +struct adf_accel_dev; + +#define RL_ROOT_MAX 4 +#define RL_CLUSTER_MAX 16 +#define RL_LEAF_MAX 64 +#define RL_NODES_CNT_MAX (RL_ROOT_MAX + RL_CLUSTER_MAX + RL_LEAF_MAX) +#define RL_RP_CNT_PER_LEAF_MAX 4U +#define RL_RP_CNT_MAX 64 +#define RL_SLA_EMPTY_ID -1 +#define RL_PARENT_DEFAULT_ID -1 
+ +enum rl_node_type { + RL_ROOT, + RL_CLUSTER, + RL_LEAF, +}; + +enum adf_base_services { + ADF_SVC_ASYM = 0, + ADF_SVC_SYM, + ADF_SVC_DC, + ADF_SVC_NONE, +}; + +/** + * struct adf_rl_sla_input_data - ratelimiting user input data structure + * @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA. + * Eg. 0x5 -> RP0 and RP2 assigned; 0xA005 -> RP0,2,13,15 assigned. + * @sla_id: ID of current SLA for operations update, rm, get. For the add + * operation, this field will be updated with the ID of the newly + * added SLA + * @parent_id: ID of the SLA to which the current one should be assigned. + * Set to -1 to refer to the default parent. + * @cir: Committed information rate. Rate guaranteed to be achieved. Input value + * is expressed in permille scale, i.e. 1000 refers to the maximum + * device throughput for a selected service. + * @pir: Peak information rate. Maximum rate available that the SLA can achieve. + * Input value is expressed in permille scale, i.e. 1000 refers to + * the maximum device throughput for a selected service. + * @type: SLA type: root, cluster, node + * @srv: Service associated to the SLA: asym, sym dc. + * + * This structure is used to perform operations on an SLA. + * Depending on the operation, some of the parameters are ignored. + * The following list reports which parameters should be set for each operation. + * - add: all except sla_id + * - update: cir, pir, sla_id + * - rm: sla_id + * - rm_all: - + * - get: sla_id + * - get_capability_rem: srv, sla_id + */ +struct adf_rl_sla_input_data { + u64 rp_mask; + int sla_id; + int parent_id; + unsigned int cir; + unsigned int pir; + enum rl_node_type type; + enum adf_base_services srv; +}; + +struct rl_slice_cnt { + u8 dcpr_cnt; + u8 pke_cnt; + u8 cph_cnt; +}; + +struct adf_rl_hw_data { + u32 scale_ref; + u32 scan_interval; + u32 r2l_offset; + u32 l2c_offset; + u32 c2s_offset; + u32 pciin_tb_offset; + u32 pciout_tb_offset; + u32 pcie_scale_mul; + u32 pcie_scale_div; + u32 dcpr_correction; + u32 max_tp[RL_ROOT_MAX]; + struct rl_slice_cnt slices; +}; + +/** + * struct adf_rl - ratelimiting data structure + * @accel_dev: pointer to acceleration device data + * @device_data: pointer to rate limiting data specific to a device type (or revision) + * @sla: array of pointers to SLA objects + * @root: array of pointers to root type SLAs, element number reflects node_id + * @cluster: array of pointers to cluster type SLAs, element number reflects node_id + * @leaf: array of pointers to leaf type SLAs, element number reflects node_id + * @rp_in_use: array of ring pair IDs already used in one of SLAs + * @rl_lock: mutex object which is protecting data in this structure + * @input: structure which is used for holding the data received from user + */ +struct adf_rl { + struct adf_accel_dev *accel_dev; + struct adf_rl_hw_data *device_data; + /* mapping sla_id to SLA objects */ + struct rl_sla *sla[RL_NODES_CNT_MAX]; + struct rl_sla *root[RL_ROOT_MAX]; + struct rl_sla *cluster[RL_CLUSTER_MAX]; + struct rl_sla *leaf[RL_LEAF_MAX]; + bool rp_in_use[RL_RP_CNT_MAX]; + /* Mutex protecting writing to SLAs lists */ + struct mutex rl_lock; +}; + +/** + * struct rl_sla - SLA object data structure + * @parent: pointer to the parent SLA (root/cluster) + * @type: SLA type + * @srv: service associated with this SLA + * @sla_id: ID of the SLA, used as element number in SLA array and as identifier + * shared with the user + * @node_id: ID of node, each of SLA type have a separate ID list + * @cir: committed information rate + * 
@pir: peak information rate (PIR >= CIR) + * @rem_cir: if this SLA is a parent then this field represents a remaining + * value to be used by child SLAs. + * @ring_pairs_ids: array with numeric ring pairs IDs assigned to this SLA + * @ring_pairs_cnt: number of assigned ring pairs listed in the array above + */ +struct rl_sla { + struct rl_sla *parent; + enum rl_node_type type; + enum adf_base_services srv; + u32 sla_id; + u32 node_id; + u32 cir; + u32 pir; + u32 rem_cir; + u16 ring_pairs_ids[RL_RP_CNT_PER_LEAF_MAX]; + u16 ring_pairs_cnt; +}; + +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, + enum adf_base_services srv, int sla_id); +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id); +void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default); + +int adf_rl_init(struct adf_accel_dev *accel_dev); +int adf_rl_start(struct adf_accel_dev *accel_dev); +void adf_rl_stop(struct adf_accel_dev *accel_dev); +void adf_rl_exit(struct adf_accel_dev *accel_dev); + +u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out); +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); + +#endif /* ADF_RL_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c new file mode 100644 index 000000000000..698a14f4ce66 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_rl_admin.h" + +static void +prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr, + struct icp_qat_fw_init_admin_sla_config_params *fw_params, + struct icp_qat_fw_init_admin_req *req, bool is_update) +{ + req->cmd_id = is_update ? 
ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD; + req->init_cfg_ptr = dma_addr; + req->init_cfg_sz = sizeof(*fw_params); + req->node_id = sla->node_id; + req->node_type = sla->type; + req->rp_count = sla->ring_pairs_cnt; + req->svc_type = sla->srv; +} + +static void +prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + struct icp_qat_fw_init_admin_sla_config_params *fw_params) +{ + fw_params->pcie_in_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false); + fw_params->pcie_in_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false); + fw_params->pcie_out_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true); + fw_params->pcie_out_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true); + + fw_params->slice_util_cir = + adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv); + fw_params->slice_util_pir = + adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv); + + fw_params->ae_util_cir = + adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv); + fw_params->ae_util_pir = + adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv); + + memcpy(fw_params->rp_ids, sla->ring_pairs_ids, + sizeof(sla->ring_pairs_ids)); +} + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int) +{ + struct icp_qat_fw_init_admin_slice_cnt slices_resp = { }; + int ret; + + ret = adf_send_admin_rl_init(accel_dev, &slices_resp); + if (ret) + return ret; + + slices_int->dcpr_cnt = slices_resp.dcpr_cnt; + slices_int->pke_cnt = slices_resp.pke_cnt; + /* For symmetric crypto, slice tokens are relative to the UCS slice */ + slices_int->cph_cnt = slices_resp.ucs_cnt; + + return 0; +} + +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update) +{ + struct icp_qat_fw_init_admin_sla_config_params *fw_params; + struct icp_qat_fw_init_admin_req req = { }; + dma_addr_t dma_addr; + int ret; + + fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), + &dma_addr, GFP_KERNEL); + if (!fw_params) + return -ENOMEM; + + prep_admin_req_params(accel_dev, sla, fw_params); + prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update); + ret = adf_send_admin_rl_add_update(accel_dev, &req); + + dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params, + dma_addr); + + return ret; +} + +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + return adf_send_admin_rl_delete(accel_dev, node_id, node_type); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h new file mode 100644 index 000000000000..dd5419b7e896 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_ADMIN_H_ +#define ADF_RL_ADMIN_H_ + +#include + +#include "adf_rl.h" + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int); +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update); +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); + +#endif /* ADF_RL_ADMIN_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index e4de9a30e0bd..cd418b51d9f3 100644 --- 
a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -5,6 +5,8 @@ #include "icp_qat_fw.h" +#define RL_MAX_RP_IDS 16 + enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_INIT_AE = 0, ICP_QAT_FW_TRNG_ENABLE = 1, @@ -19,10 +21,14 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, + ICP_QAT_FW_RL_INIT = 15, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PM_STATE_CONFIG = 128, ICP_QAT_FW_PM_INFO = 129, + ICP_QAT_FW_RL_ADD = 134, + ICP_QAT_FW_RL_UPDATE = 135, + ICP_QAT_FW_RL_REMOVE = 136, }; enum icp_qat_fw_init_admin_resp_status { @@ -30,6 +36,30 @@ enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_FAIL }; +struct icp_qat_fw_init_admin_slice_cnt { + __u8 cpr_cnt; + __u8 xlt_cnt; + __u8 dcpr_cnt; + __u8 pke_cnt; + __u8 wat_cnt; + __u8 wcp_cnt; + __u8 ucs_cnt; + __u8 cph_cnt; + __u8 ath_cnt; +}; + +struct icp_qat_fw_init_admin_sla_config_params { + __u32 pcie_in_cir; + __u32 pcie_in_pir; + __u32 pcie_out_cir; + __u32 pcie_out_pir; + __u32 slice_util_cir; + __u32 slice_util_pir; + __u32 ae_util_cir; + __u32 ae_util_pir; + __u16 rp_ids[RL_MAX_RP_IDS]; +}; + struct icp_qat_fw_init_admin_req { __u16 init_cfg_sz; __u8 resrvd1; @@ -49,6 +79,13 @@ struct icp_qat_fw_init_admin_req { struct { __u32 heartbeat_ticks; }; + struct { + __u16 node_id; + __u8 node_type; + __u8 svc_type; + __u8 resrvd5[3]; + __u8 rp_count; + }; __u32 idle_filter; }; @@ -110,6 +147,7 @@ struct icp_qat_fw_init_admin_resp { __u32 unsuccessful_count; __u64 resrvd8; }; + struct icp_qat_fw_init_admin_slice_cnt slices; __u16 fw_capabilities; }; } __packed; -- Gitee From 059fdb0e2947e2e353b51a68f3c4e87cccbe8eb6 Mon Sep 17 00:00:00 2001 From: Ciunas Bennett Date: Fri, 20 Oct 2023 15:49:29 +0200 Subject: [PATCH 739/953] crypto: qat - add rate limiting sysfs interface ANBZ: #8589 commit db74e16258198094701f18ab4da3410c44ffdb2e upstream. Intel-SIG: commit db74e1625819 crypto: qat - add rate limiting sysfs interface Backport to support Intel QAT in-tree driver Add an interface for the rate limiting feature which allows to add, remove and modify a QAT SLA (Service Level Agreement). This adds a new sysfs attribute group, `qat_rl`, which can be accessed from /sys/bus/pci/devices/ with the following hierarchy: |-+ qat_rl |---- id (RW) # SLA identifier |---- cir (RW) # Committed Information Rate |---- pir (RW) # Peak Information Rate |---- srv (RW) # Service to be rate limited |---- rp (RW) (HEX) # Ring pairs to be rate limited |---- cap_rem (RW) # Remaining capability for a service |---- sla_op (WO) # Allows to perform an operation on an SLA The API works by setting the appropriate RW attributes and then issuing a command through the `sla_op`. For example, to create an SLA, a user needs to input the necessary data into the attributes cir, pir, srv and rp and then write into `sla_op` the command `add` to execute the operation. The API also provides `cap_rem` attribute to get information about the remaining device capability within a certain service which is required when setting an SLA. 
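For illustration, the add flow described above can be driven from user
space as in the minimal C sketch below; the device address 0000:6b:00.0
and the helper wr_attr() are assumptions made for this example, not part
of the driver:

	/* sla_add.c - create one SLA through the qat_rl attribute group */
	#include <stdio.h>

	#define QAT_RL "/sys/bus/pci/devices/0000:6b:00.0/qat_rl/"

	/* Write a string value to one attribute; returns 0 on success */
	static int wr_attr(const char *attr, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), QAT_RL "%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%s\n", val);
		return fclose(f);
	}

	int main(void)
	{
		/* Fill the input attributes, then trigger the operation */
		if (wr_attr("srv", "sym") || wr_attr("rp", "0x5") ||
		    wr_attr("cir", "500") || wr_attr("pir", "750") ||
		    wr_attr("sla_op", "add"))
			return 1;

		return 0;	/* on success, read "id" for the new SLA ID */
	}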
Signed-off-by: Ciunas Bennett
Reviewed-by: Giovanni Cabiddu
Reviewed-by: Damian Muszynski
Reviewed-by: Tero Kristo
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 Documentation/ABI/testing/sysfs-driver-qat_rl | 226 +++++++++
 drivers/crypto/intel/qat/qat_common/Makefile  |   1 +
 drivers/crypto/intel/qat/qat_common/adf_rl.c  |  10 +
 drivers/crypto/intel/qat/qat_common/adf_rl.h  |   7 +
 .../intel/qat/qat_common/adf_sysfs_rl.c       | 451 ++++++++++++++++++
 .../intel/qat/qat_common/adf_sysfs_rl.h       |  11 +
 6 files changed, 706 insertions(+)
 create mode 100644 Documentation/ABI/testing/sysfs-driver-qat_rl
 create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
 create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h

diff --git a/Documentation/ABI/testing/sysfs-driver-qat_rl b/Documentation/ABI/testing/sysfs-driver-qat_rl
new file mode 100644
index 000000000000..8c282ae3155d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-qat_rl
@@ -0,0 +1,226 @@
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(WO) This attribute is used to perform an operation on an SLA.
+		The supported operations are: add, update, rm, rm_all, and get.
+
+		Input values must be filled through the associated attribute in
+		this group before a write to this file.
+		If the operation completes successfully, the associated
+		attributes will be updated.
+		The associated attributes are: cir, pir, srv, rp, and id.
+
+		Supported operations:
+
+		* add: Creates a new SLA with the provided inputs from user.
+			* Inputs: cir, pir, srv, and rp
+			* Output: id
+
+		* get: Returns the configuration of the specified SLA in id attribute
+			* Inputs: id
+			* Outputs: cir, pir, srv, and rp
+
+		* update: Updates the SLA with new values set in the following attributes
+			* Inputs: id, cir, and pir
+
+		* rm: Removes the specified SLA in the id attribute.
+			* Inputs: id
+
+		* rm_all: Removes all the configured SLAs.
+			* Inputs: None
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/rp
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) When read, reports the current assigned ring pairs for the
+		queried SLA.
+		When written to, configures the ring pairs associated to a new
+		SLA.
+
+		The value is a 64-bit bit mask and is written/displayed in hex.
+		Each bit of this mask represents a single ring pair i.e.,
+		bit 1 == ring pair id 0; bit 3 == ring pair id 2.
+
+		Selected ring pairs must be assigned to a single service,
+		i.e. the one provided with the srv attribute. The service
+		assigned to a certain ring pair can be checked by querying
+		the attribute qat/rp2srv.
+
+		The maximum number of ring pairs is 4 per SLA.
+
+		Applicability in sla_op:
+
+		* WRITE: add operation
+		* READ: get operation
+
+		Example usage::
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/rp
+			0x5
+
+			## Write
+			# echo 0x5 > /sys/bus/pci/devices/<BDF>/qat_rl/rp
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/id
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) If written to, the value is used to retrieve a particular
+		SLA and operate on it.
+		This is valid only for the following operations: update, rm,
+		and get.
+		A read of this attribute is only guaranteed to have correct data
+		after creation of an SLA.
+
+		Applicability in sla_op:
+
+		* WRITE: rm and update operations
+		* READ: add and get operations
+
+		Example usage::
+
+			## Read
+			## Set attributes e.g. cir, pir, srv, etc
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/id
+			4
+
+			## Write
+			# echo 7 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/rp
+			0x5  ## ring pair ID 0 and ring pair ID 2
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/cir
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) Committed information rate (CIR). Rate guaranteed to be
+		achieved by a particular SLA. The value is expressed in
+		permille scale, i.e. 1000 refers to the maximum device
+		throughput for a selected service.
+
+		After sending a "get" to sla_op, this will be populated with the
+		CIR for that queried SLA.
+		Write to this file before sending an "add/update" sla_op, to set
+		the SLA to the specified value.
+
+		Applicability in sla_op:
+
+		* WRITE: add and update operations
+		* READ: get operation
+
+		Example usage::
+
+			## Write
+			# echo 500 > /sys/bus/pci/devices/<BDF>/qat_rl/cir
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/cir
+			500
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/pir
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) Peak information rate (PIR). The maximum rate that can be
+		achieved by that particular SLA. An SLA can reach a value
+		between CIR and PIR when the device is not fully utilized by
+		requests from other users (assigned to different SLAs).
+
+		After sending a "get" to sla_op, this will be populated with the
+		PIR for that queried SLA.
+		Write to this file before sending an "add/update" sla_op, to set
+		the SLA to the specified value.
+
+		Applicability in sla_op:
+
+		* WRITE: add and update operations
+		* READ: get operation
+
+		Example usage::
+
+			## Write
+			# echo 750 > /sys/bus/pci/devices/<BDF>/qat_rl/pir
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/pir
+			750
+
+		This attribute is only available for qat_4xxx devices.
+
+What:		/sys/bus/pci/devices/<BDF>/qat_rl/srv
+Date:		January 2024
+KernelVersion:	6.7
+Contact:	qat-linux@intel.com
+Description:
+		(RW) Service (SRV). Represents the service (sym, asym, dc)
+		associated to an SLA.
+		Can be written to or queried to set/show the SRV type for an SLA.
+		The SRV attribute is used to specify the SRV type before adding
+		an SLA. After an SLA is configured, reports the service
+		associated to that SLA.
+
+		Applicability in sla_op:
+
+		* WRITE: add and update operations
+		* READ: get operation
+
+		Example usage::
+
+			## Write
+			# echo "dc" > /sys/bus/pci/devices/<BDF>/qat_rl/srv
+			# echo "add" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/id
+			4
+
+			## Read
+			# echo 4 > /sys/bus/pci/devices/<BDF>/qat_rl/id
+			# echo "get" > /sys/bus/pci/devices/<BDF>/qat_rl/sla_op
+			# cat /sys/bus/pci/devices/<BDF>/qat_rl/srv
+			dc
+
+		This attribute is only available for qat_4xxx devices.
+ +What: /sys/bus/pci/devices//qat_rl/cap_rem +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) This file will return the remaining capability for a + particular service/sla. This is the remaining value that a new + SLA can be set to or a current SLA can be increased with. + + Example usage:: + + # echo "asym" > /sys/bus/pci/devices//qat_rl/cap_rem + # cat /sys/bus/pci/devices//qat_rl/cap_rem + 250 + # echo 250 > /sys/bus/pci/devices//qat_rl/cir + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/cap_rem + 0 + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 5f09dfd4798b..779a8aa0b8d2 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -31,6 +31,7 @@ intel_qat-objs := adf_cfg.o \ qat_algs_send.o \ adf_rl.o \ adf_rl_admin.o \ + adf_sysfs_rl.o \ qat_uclo.o \ qat_hal.o \ qat_bl.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index 88a03105b52a..86e3e2152b1b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -16,6 +16,7 @@ #include "adf_common_drv.h" #include "adf_rl_admin.h" #include "adf_rl.h" +#include "adf_sysfs_rl.h" #define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U #define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U @@ -1130,8 +1131,16 @@ int adf_rl_start(struct adf_accel_dev *accel_dev) goto ret_sla_rm; } + ret = adf_sysfs_rl_add(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n"); + goto ret_sysfs_rm; + } + return 0; +ret_sysfs_rm: + adf_sysfs_rl_rm(accel_dev); ret_sla_rm: adf_rl_remove_sla_all(accel_dev, true); ret_free: @@ -1146,6 +1155,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev) if (!accel_dev->rate_limiting) return; + adf_sysfs_rl_rm(accel_dev); adf_rl_remove_sla_all(accel_dev, true); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h index 1ccb6613c92e..eb5a330f8543 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -75,6 +75,12 @@ struct rl_slice_cnt { u8 cph_cnt; }; +struct adf_rl_interface_data { + struct adf_rl_sla_input_data input; + enum adf_base_services cap_rem_srv; + struct rw_semaphore lock; +}; + struct adf_rl_hw_data { u32 scale_ref; u32 scan_interval; @@ -113,6 +119,7 @@ struct adf_rl { bool rp_in_use[RL_RP_CNT_MAX]; /* Mutex protecting writing to SLAs lists */ struct mutex rl_lock; + struct adf_rl_interface_data user_input; }; /** diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c new file mode 100644 index 000000000000..abf9c52474ec --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include +#include +#include +#include + +#include "adf_common_drv.h" +#include "adf_rl.h" +#include "adf_sysfs_rl.h" + +#define GET_RL_STRUCT(accel_dev) ((accel_dev)->rate_limiting->user_input) + +enum rl_ops { + ADD, + UPDATE, + RM, + RM_ALL, + GET, +}; + +enum rl_params { + RP_MASK, + ID, + CIR, + PIR, + SRV, + CAP_REM_SRV, +}; + +static const char *const rl_services[] = { + [ADF_SVC_ASYM] = "asym", + [ADF_SVC_SYM] = "sym", + 
[ADF_SVC_DC] = "dc", +}; + +static const char *const rl_operations[] = { + [ADD] = "add", + [UPDATE] = "update", + [RM] = "rm", + [RM_ALL] = "rm_all", + [GET] = "get", +}; + +static int set_param_u(struct device *dev, enum rl_params param, u64 set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + switch (param) { + case RP_MASK: + data->input.rp_mask = set; + break; + case CIR: + data->input.cir = set; + break; + case PIR: + data->input.pir = set; + break; + case SRV: + data->input.srv = set; + break; + case CAP_REM_SRV: + data->cap_rem_srv = set; + break; + default: + ret = -EINVAL; + break; + } + up_write(&data->lock); + + return ret; +} + +static int set_param_s(struct device *dev, enum rl_params param, int set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev || param != ID) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + data->input.sla_id = set; + up_write(&data->lock); + + return 0; +} + +static int get_param_u(struct device *dev, enum rl_params param, u64 *get) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + switch (param) { + case RP_MASK: + *get = data->input.rp_mask; + break; + case CIR: + *get = data->input.cir; + break; + case PIR: + *get = data->input.pir; + break; + case SRV: + *get = data->input.srv; + break; + default: + ret = -EINVAL; + } + up_read(&data->lock); + + return ret; +} + +static int get_param_s(struct device *dev, enum rl_params param) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + if (param == ID) + ret = data->input.sla_id; + up_read(&data->lock); + + return ret; +} + +static ssize_t rp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, RP_MASK, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%#llx\n", get); +} + +static ssize_t rp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err; + u64 val; + + err = kstrtou64(buf, 16, &val); + if (err) + return err; + + err = set_param_u(dev, RP_MASK, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(rp); + +static ssize_t id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", get_param_s(dev, ID)); +} + +static ssize_t id_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err; + int val; + + err = kstrtoint(buf, 10, &val); + if (err) + return err; + + err = set_param_s(dev, ID, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(id); + +static ssize_t cir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, CIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t 
cir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, CIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(cir); + +static ssize_t pir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, PIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t pir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, PIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(pir); + +static ssize_t srv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, SRV, &get); + if (ret) + return ret; + + if (get == ADF_SVC_NONE) + return -EINVAL; + + return sysfs_emit(buf, "%s\n", rl_services[get]); +} + +static ssize_t srv_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(srv); + +static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret, rem_cap; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + rem_cap = adf_rl_get_capability_remaining(accel_dev, data->cap_rem_srv, + RL_SLA_EMPTY_ID); + up_read(&data->lock); + if (rem_cap < 0) + return rem_cap; + + ret = sysfs_emit(buf, "%u\n", rem_cap); + + return ret; +} + +static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, CAP_REM_SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(cap_rem); + +static ssize_t sla_op_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + ret = sysfs_match_string(rl_operations, buf); + if (ret < 0) + return ret; + + down_write(&data->lock); + switch (ret) { + case ADD: + data->input.parent_id = RL_PARENT_DEFAULT_ID; + data->input.type = RL_LEAF; + data->input.sla_id = 0; + ret = adf_rl_add_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + case UPDATE: + ret = adf_rl_update_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + case RM: + ret = adf_rl_remove_sla(accel_dev, data->input.sla_id); + if (ret) + goto err_free_lock; + break; + case RM_ALL: + adf_rl_remove_sla_all(accel_dev, false); + break; + case GET: + ret = adf_rl_get_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + default: + ret = -EINVAL; + goto err_free_lock; + } + 
up_write(&data->lock); + + return count; + +err_free_lock: + up_write(&data->lock); + + return ret; +} +static DEVICE_ATTR_WO(sla_op); + +static struct attribute *qat_rl_attrs[] = { + &dev_attr_rp.attr, + &dev_attr_id.attr, + &dev_attr_cir.attr, + &dev_attr_pir.attr, + &dev_attr_srv.attr, + &dev_attr_cap_rem.attr, + &dev_attr_sla_op.attr, + NULL, +}; + +static struct attribute_group qat_rl_group = { + .attrs = qat_rl_attrs, + .name = "qat_rl", +}; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_interface_data *data; + int ret; + + data = &GET_RL_STRUCT(accel_dev); + + ret = device_add_group(&GET_DEV(accel_dev), &qat_rl_group); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_rl attribute group\n"); + + data->cap_rem_srv = ADF_SVC_NONE; + data->input.srv = ADF_SVC_NONE; + + return ret; +} + +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev) +{ + device_remove_group(&GET_DEV(accel_dev), &qat_rl_group); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h new file mode 100644 index 000000000000..22d36aa8a757 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_SYSFS_RL_H_ +#define ADF_SYSFS_RL_H_ + +struct adf_accel_dev; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev); +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_SYSFS_RL_H_ */ -- Gitee From 3a408a8ee4ec82c5d5b71ff7a119fb5a832b208c Mon Sep 17 00:00:00 2001 From: Ciunas Bennett Date: Fri, 20 Oct 2023 15:49:30 +0200 Subject: [PATCH 740/953] crypto: qat - add rp2svc sysfs attribute ANBZ: #8589 commit dbc8876dd873a6ac5e3191b419d2de5ca613165f upstream. Intel-SIG: commit dbc8876dd873 crypto: qat - add rp2svc sysfs attribute Backport to support Intel QAT in-tree driver Add the attribute `rp2svc` to the `qat` attribute group. This provides a way for a user to query a specific ring pair for the type of service that is currently configured for. When read, the service will be returned for the defined ring pair. When written to this value will be stored as the ring pair to return the service of. Signed-off-by: Ciunas Bennett Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat | 32 +++++++++ .../intel/qat/qat_common/adf_accel_devices.h | 6 ++ .../crypto/intel/qat/qat_common/adf_sysfs.c | 66 +++++++++++++++++++ 3 files changed, 104 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index 96834d103a09..f24a5ddca94b 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -95,3 +95,35 @@ Description: (RW) This configuration option provides a way to force the device i 0 This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/rp2srv +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) This attribute provides a way for a user to query a + specific ring pair for the type of service that it is currently + configured for. + + When written to, the value is cached and used to perform the + read operation. 
Allowed values are in the range 0 to N-1, where + N is the max number of ring pairs supported by a device. This + can be queried using the attribute qat/num_rps. + + A read returns the service associated to the ring pair queried. + + The values are: + + * dc: the ring pair is configured for running compression services + * sym: the ring pair is configured for running symmetric crypto + services + * asym: the ring pair is configured for running asymmetric crypto + services + + Example usage:: + + # echo 1 > /sys/bus/pci/devices//qat/rp2srv + # cat /sys/bus/pci/devices//qat/rp2srv + sym + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 30c2b15ff801..4ff5729a3496 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -340,6 +340,11 @@ struct adf_pm { char __user *buf, size_t count, loff_t *pos); }; +struct adf_sysfs { + int ring_num; + struct rw_semaphore lock; /* protects access to the fields in this struct */ +}; + struct adf_accel_dev { struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; @@ -361,6 +366,7 @@ struct adf_accel_dev { struct adf_timer *timer; struct adf_heartbeat *heartbeat; struct adf_rl *rate_limiting; + struct adf_sysfs sysfs; union { struct { /* protects VF2PF interrupts access */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index f4a89f7ed4e9..9317127128a9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -8,6 +8,8 @@ #include "adf_cfg_services.h" #include "adf_common_drv.h" +#define UNSET_RING_NUM -1 + static const char * const state_operations[] = { [DEV_DOWN] = "down", [DEV_UP] = "up", @@ -205,10 +207,72 @@ static DEVICE_ATTR_RW(pm_idle_enabled); static DEVICE_ATTR_RW(state); static DEVICE_ATTR_RW(cfg_services); +static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_hw_device_data *hw_data; + struct adf_accel_dev *accel_dev; + enum adf_cfg_service_type svc; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + hw_data = GET_HW_DATA(accel_dev); + + if (accel_dev->sysfs.ring_num == UNSET_RING_NUM) + return -EINVAL; + + down_read(&accel_dev->sysfs.lock); + svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num % + hw_data->num_banks_per_vf); + up_read(&accel_dev->sysfs.lock); + + switch (svc) { + case COMP: + return sysfs_emit(buf, "%s\n", ADF_CFG_DC); + case SYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_SYM); + case ASYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM); + default: + break; + } + return -EINVAL; +} + +static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + int ring, num_rings, ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ret = kstrtouint(buf, 10, &ring); + if (ret) + return ret; + + num_rings = GET_MAX_BANKS(accel_dev); + if (ring >= num_rings) { + dev_err(&GET_DEV(accel_dev), + "Device does not support more than %u ring pairs\n", + num_rings); + return -EINVAL; + } + + down_write(&accel_dev->sysfs.lock); + accel_dev->sysfs.ring_num = ring; + up_write(&accel_dev->sysfs.lock); + + return count; +} +static DEVICE_ATTR_RW(rp2srv); + static struct attribute *qat_attrs[] = { 
&dev_attr_state.attr, &dev_attr_cfg_services.attr, &dev_attr_pm_idle_enabled.attr, + &dev_attr_rp2srv.attr, NULL, }; @@ -227,6 +291,8 @@ int adf_sysfs_init(struct adf_accel_dev *accel_dev) "Failed to create qat attribute group: %d\n", ret); } + accel_dev->sysfs.ring_num = UNSET_RING_NUM; + return ret; } EXPORT_SYMBOL_GPL(adf_sysfs_init); -- Gitee From 2be6621895de95c8e98d2f43c8af88a36ceffd30 Mon Sep 17 00:00:00 2001 From: Ciunas Bennett Date: Fri, 20 Oct 2023 15:49:31 +0200 Subject: [PATCH 741/953] crypto: qat - add num_rps sysfs attribute ANBZ: #8589 commit 71fed09b49c168435fc28d57870007495475d946 upstream. Intel-SIG: commit 71fed09b49c1 crypto: qat - add num_rps sysfs attribute Backport to support Intel QAT in-tree driver Add the attribute `num_rps` to the `qat` attribute group. This returns the number of ring pairs that a single device has. This allows to know the maximum value that can be set to the attribute `rp2svc`. Signed-off-by: Ciunas Bennett Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat | 14 ++++++++++++++ drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 14 ++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index f24a5ddca94b..bbf329cf0d67 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -127,3 +127,17 @@ Description: sym This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/num_rps +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RO) Returns the number of ring pairs that a single device has. + + Example usage:: + + # cat /sys/bus/pci/devices//qat/num_rps + 64 + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 9317127128a9..ddffc98119c6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -268,11 +268,25 @@ static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(rp2srv); +static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev)); +} +static DEVICE_ATTR_RO(num_rps); + static struct attribute *qat_attrs[] = { &dev_attr_state.attr, &dev_attr_cfg_services.attr, &dev_attr_pm_idle_enabled.attr, &dev_attr_rp2srv.attr, + &dev_attr_num_rps.attr, NULL, }; -- Gitee From b380e9ff2cc9371499dbb51ee5cfdbd4d93e3adc Mon Sep 17 00:00:00 2001 From: Xingui Yang Date: Tue, 5 Sep 2023 02:48:33 +0000 Subject: [PATCH 742/953] seq_file: add helper macro to define attribute for rw file ANBZ: #8589 commit 9cba82bba500e3ce875381350f289cfb3aa633ba upstream. Intel-SIG: commit 9cba82bba500 seq_file: add helper macro to define attribute for rw file Backport to support Intel QAT in-tree driver Patch series "Add helper macro DEFINE_SHOW_STORE_ATTRIBUTE() at seq_file.c", v6. 
We already own DEFINE_SHOW_ATTRIBUTE() helper macro for defining attribute for read-only file, but we found many of drivers also want a helper macro for read-write file too. So we add this helper macro to reduce duplicated code. This patch (of 3): We already own DEFINE_SHOW_ATTRIBUTE() helper macro for defining attribute for read-only file, but many of drivers want a helper macro for read-write file too. So we add DEFINE_SHOW_STORE_ATTRIBUTE() helper to reduce duplicated code. Link: https://lkml.kernel.org/r/20230905024835.43219-1-yangxingui@huawei.com Link: https://lkml.kernel.org/r/20230905024835.43219-2-yangxingui@huawei.com Signed-off-by: Luo Jiaxing Co-developed-by: Xingui Yang Signed-off-by: Xingui Yang Reviewed-by: Andy Shevchenko Cc: Al Viro Cc: Animesh Manna Cc: Anshuman Gupta Cc: Damien Le Moal Cc: Felipe Balbi Cc: Greg Kroah-Hartman Cc: Himanshu Madhani Cc: James Bottomley Cc: John Garry Cc: Martin K. Petersen Cc: Uma Shankar Cc: Xiang Chen Cc: Zeng Tao Signed-off-by: Andrew Morton [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- include/linux/seq_file.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 386ab580b839..234bcdb1fba4 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } +#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .write = __name ## _write, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ -- Gitee From 27b2a471bd72aaf6367eacac8d219904f5425790 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 28 Nov 2023 19:44:03 +0200 Subject: [PATCH 743/953] units: add missing header ANBZ: #8589 commit 8e92157d7f6190c86bfd6144a409001469827100 upstream. Intel-SIG: commit 8e92157d7f61 units: add missing header Backport to support Intel QAT in-tree driver BITS_PER_BYTE is defined in bits.h. Link: https://lkml.kernel.org/r/20231128174404.393393-1-andriy.shevchenko@linux.intel.com Fixes: e8eed5f7366f ("units: Add BYTES_PER_*BIT") Signed-off-by: Andy Shevchenko Reviewed-by: Randy Dunlap Cc: Damian Muszynski Cc: Rasmus Villemoes Cc: Herbert Xu Signed-off-by: Andrew Morton [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- include/linux/units.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/units.h b/include/linux/units.h index ff1bd6b5f5b3..45110daaf8d3 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -2,6 +2,7 @@ #ifndef _LINUX_UNITS_H #define _LINUX_UNITS_H +#include #include /* Metric prefixes in accordance with Système international (d'unités) */ -- Gitee From 77b6b83afe7063c1062b94fd093d82b5046373bd Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 31 Oct 2023 11:58:32 +0300 Subject: [PATCH 744/953] crypto: qat - prevent underflow in rp2srv_store() ANBZ: #8589 commit e53c741303a59ee1682e11f61b7772863e02526d upstream. 
Intel-SIG: commit e53c741303a5 crypto: qat - prevent underflow in rp2srv_store() Backport to support Intel QAT in-tree driver The "ring" variable has an upper bounds check but nothing checks for negatives. This code uses kstrtouint() already and it was obviously intended to be declared as unsigned int. Make it so. Fixes: dbc8876dd873 ("crypto: qat - add rp2svc sysfs attribute") Signed-off-by: Dan Carpenter Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index ddffc98119c6..6f0b3629da13 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -242,7 +242,8 @@ static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adf_accel_dev *accel_dev; - int ring, num_rings, ret; + int num_rings, ret; + unsigned int ring; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) -- Gitee From d935becf2c315cdf42cce2ad37ce01ccb4ec1b23 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 21 Nov 2023 17:59:45 +0100 Subject: [PATCH 745/953] crypto: qat - add sysfs_added flag for ras ANBZ: #8589 commit 65089000ba8c2ae713ccac6603319143f3e1c08b upstream. Intel-SIG: commit 65089000ba8c crypto: qat - add sysfs_added flag for ras Backport to support Intel QAT in-tree driver The qat_ras sysfs attribute group is registered within the adf_dev_start() function, alongside other driver components. If any of the functions preceding the group registration fails, the adf_dev_start() function returns, and the caller, to undo the operation, invokes adf_dev_stop() followed by adf_dev_shutdown(). However, the current flow lacks information about whether the registration of the qat_ras attribute group was successful or not. In cases where this condition is encountered, an error similar to the following might be reported: 4xxx 0000:6b:00.0: Starting device qat_dev0 4xxx 0000:6b:00.0: qat_dev0 started 9 acceleration engines 4xxx 0000:6b:00.0: Failed to send init message 4xxx 0000:6b:00.0: Failed to start device qat_dev0 sysfs group 'qat_ras' not found for kobject '0000:6b:00.0' ... sysfs_remove_groups+0x29/0x50 adf_sysfs_stop_ras+0x4b/0x80 [intel_qat] adf_dev_stop+0x43/0x1d0 [intel_qat] adf_dev_down+0x4b/0x150 [intel_qat] ... 4xxx 0000:6b:00.0: qat_dev0 stopped 9 acceleration engines 4xxx 0000:6b:00.0: Resetting device qat_dev0 To prevent attempting to remove attributes from a group that has not been added yet, a flag named 'sysfs_added' is introduced. This flag is set to true upon the successful registration of the attribute group. 
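In outline, the removal-side guard looks as follows; this is only a
sketch (the function name stop() is illustrative), the actual changes
to adf_sysfs_ras_counters.c are in the diff below:

	/* Teardown must not remove a group that was never registered */
	static void stop(struct adf_accel_dev *accel_dev)
	{
		if (!accel_dev->ras_errors.sysfs_added)
			return;

		device_remove_group(&GET_DEV(accel_dev), &qat_ras_group);
		accel_dev->ras_errors.sysfs_added = false;
	}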
Fixes: 532d7f6bc458 ("crypto: qat - add error counters") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 4ff5729a3496..9d5fdd529a2e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -92,6 +92,7 @@ enum ras_errors { struct adf_error_counters { atomic_t counter[ADF_RAS_ERRORS]; + bool sysfs_added; bool enabled; }; diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c index cffe2d722995..e97c67c87b3c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -99,6 +99,8 @@ void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev) if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group)) dev_err(&GET_DEV(accel_dev), "Failed to create qat_ras attribute group.\n"); + + accel_dev->ras_errors.sysfs_added = true; } void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) @@ -106,7 +108,10 @@ void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) if (!accel_dev->ras_errors.enabled) return; - device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + if (accel_dev->ras_errors.sysfs_added) { + device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + accel_dev->ras_errors.sysfs_added = false; + } ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); } -- Gitee From e56cf1bad5c664e861f982ad775ece232b5b0789 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 21 Nov 2023 18:02:23 +0100 Subject: [PATCH 746/953] crypto: qat - add sysfs_added flag for rate limiting ANBZ: #8589 commit d71fdd0f3c278c7f132c3a522645ebf9157edd6d upstream. Intel-SIG: commit d71fdd0f3c27 crypto: qat - add sysfs_added flag for rate limiting Backport to support Intel QAT in-tree driver The qat_rl sysfs attribute group is registered within the adf_dev_start() function, alongside other driver components. If any of the functions preceding the group registration fails, the adf_dev_start() function returns, and the caller, to undo the operation, invokes adf_dev_stop() followed by adf_dev_shutdown(). However, the current flow lacks information about whether the registration of the qat_rl attribute group was successful or not. In cases where this condition is encountered, an error similar to the following might be reported: 4xxx 0000:6b:00.0: Starting device qat_dev0 4xxx 0000:6b:00.0: qat_dev0 started 9 acceleration engines 4xxx 0000:6b:00.0: Failed to send init message 4xxx 0000:6b:00.0: Failed to start device qat_dev0 sysfs group 'qat_rl' not found for kobject '0000:6b:00.0' ... sysfs_remove_groups+0x2d/0x50 adf_sysfs_rl_rm+0x44/0x70 [intel_qat] adf_rl_stop+0x2d/0xb0 [intel_qat] adf_dev_stop+0x33/0x1d0 [intel_qat] adf_dev_down+0xf1/0x150 [intel_qat] ... 
4xxx 0000:6b:00.0: qat_dev0 stopped 9 acceleration engines 4xxx 0000:6b:00.0: Resetting device qat_dev0 To prevent attempting to remove attributes from a group that has not been added yet, a flag named 'sysfs_added' is introduced. This flag is set to true upon the successful registration of the attribute group. Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_rl.h | 1 + drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h index eb5a330f8543..269c6656fb90 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -79,6 +79,7 @@ struct adf_rl_interface_data { struct adf_rl_sla_input_data input; enum adf_base_services cap_rem_srv; struct rw_semaphore lock; + bool sysfs_added; }; struct adf_rl_hw_data { diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c index abf9c52474ec..bedb514d4e30 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c @@ -441,11 +441,19 @@ int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev) data->cap_rem_srv = ADF_SVC_NONE; data->input.srv = ADF_SVC_NONE; + data->sysfs_added = true; return ret; } void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev) { + struct adf_rl_interface_data *data; + + data = &GET_RL_STRUCT(accel_dev); + if (!data->sysfs_added) + return; + device_remove_group(&GET_DEV(accel_dev), &qat_rl_group); + data->sysfs_added = false; } -- Gitee From 75bd5a2d0a7939b26cb99d2b0c61dcaf62649682 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 28 Nov 2023 18:37:32 +0100 Subject: [PATCH 747/953] crypto: qat - fix error path in add_update_sla() ANBZ: #8589 commit 6627f03c21cb7001ae4dbbfb7a8514516d02331c upstream. Intel-SIG: commit 6627f03c21cb crypto: qat - fix error path in add_update_sla() Backport to support Intel QAT in-tree driver The input argument `sla_in` is a pointer to a structure that contains the parameters of the SLA which is being added or updated. If this pointer is NULL, the function should return an error as the data required for the algorithm is not available. By mistake, the logic jumps to the error path which dereferences the pointer. This results in a warnings reported by the static analyzer Smatch when executed without a database: drivers/crypto/intel/qat/qat_common/adf_rl.c:871 add_update_sla() error: we previously assumed 'sla_in' could be null (see line 812) This issue was not found in internal testing as the pointer cannot be NULL. The function add_update_sla() is only called (indirectly) by the rate limiting sysfs interface implementation in adf_sysfs_rl.c which ensures that the data structure is allocated and valid. This is also proven by the fact that Smatch executed with a database does not report such error. Fix it by returning with error if the pointer `sla_in` is NULL. 
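The shape of the fix, as a minimal sketch (the actual hunk follows in
the diff below):

	if (!sla_in) {
		dev_warn(&GET_DEV(accel_dev),
			 "SLA input data pointer is missing\n");
		return -EFAULT;	/* was: ret = -EFAULT; goto ret_err; */
	}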
Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Reported-by: Dan Carpenter Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index 86e3e2152b1b..f2de3cd7d05d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -812,8 +812,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev, if (!sla_in) { dev_warn(&GET_DEV(accel_dev), "SLA input data pointer is missing\n"); - ret = -EFAULT; - goto ret_err; + return -EFAULT; } /* Input validation */ -- Gitee From a8acd149cdf60d79b688c47e6f561149ad56baff Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 28 Nov 2023 18:39:30 +0100 Subject: [PATCH 748/953] crypto: qat - fix mutex ordering in adf_rl ANBZ: #8589 commit 487caa8d5ef9a9a27b092c5790d529a7a0c24f8b upstream. Intel-SIG: commit 487caa8d5ef9 crypto: qat - fix mutex ordering in adf_rl Backport to support Intel QAT in-tree driver If the function validate_user_input() returns an error, the error path attempts to unlock an unacquired mutex. Acquire the mutex before calling validate_user_input(). This is not strictly necessary but simplifies the code. Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index f2de3cd7d05d..de1b214dba1f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -815,13 +815,13 @@ static int add_update_sla(struct adf_accel_dev *accel_dev, return -EFAULT; } + mutex_lock(&rl_data->rl_lock); + /* Input validation */ ret = validate_user_input(accel_dev, sla_in, is_update); if (ret) goto ret_err; - mutex_lock(&rl_data->rl_lock); - if (is_update) { ret = validate_sla_id(accel_dev, sla_in->sla_id); if (ret) -- Gitee From 53e1cc6e91e809dd4b4b53b61d7fc0bb3d121a7c Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 28 Nov 2023 19:17:25 +0000 Subject: [PATCH 749/953] crypto: qat - add NULL pointer check ANBZ: #8589 commit a643212c9f28d09225c3792c316bc4aaf6be4a68 upstream. Intel-SIG: commit a643212c9f28 crypto: qat - add NULL pointer check Backport to support Intel QAT in-tree driver There is a possibility that the function adf_devmgr_pci_to_accel_dev() might return a NULL pointer. Add a NULL pointer check in the function rp2srv_show(). 
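The resulting defensive pattern, common to these sysfs handlers, as a
sketch (the actual hunk follows in the diff below):

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;	/* the lookup can fail; never dereference NULL */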
Fixes: dbc8876dd873 ("crypto: qat - add rp2svc sysfs attribute") Signed-off-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Reviewed-by: David Guckian Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 6f0b3629da13..d450dad32c9e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -215,6 +215,9 @@ static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr, enum adf_cfg_service_type svc; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + hw_data = GET_HW_DATA(accel_dev); if (accel_dev->sysfs.ring_num == UNSET_RING_NUM) -- Gitee From 37c6c0c12dba0c3fedcde1168b55dda5164b1178 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:45 -0500 Subject: [PATCH 750/953] crypto: qat - change signature of uof_get_num_objs() ANBZ: #8589 commit b34bd0fd563df763ccca998b3d5fc824c536c28a upstream. Intel-SIG: commit b34bd0fd563d crypto: qat - change signature of uof_get_num_objs() Backport to support Intel QAT in-tree driver Add accel_dev as parameter of the function uof_get_num_objs(). This is in preparation for the introduction of the QAT 420xx driver as it will allow to reconfigure the ae_mask when a configuration that does not require all AEs is loaded on the device. This does not introduce any functional change. Signed-off-by: Jie Wang Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 +- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 2 +- drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index b64aaecdd98b..a6147ed1603a 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -390,7 +390,7 @@ static int adf_init_device(struct adf_accel_dev *accel_dev) return ret; } -static u32 uof_get_num_objs(void) +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { return ARRAY_SIZE(adf_fw_cy_config); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 9d5fdd529a2e..33de8855fd66 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -241,7 +241,7 @@ struct adf_hw_device_data { void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); - u32 (*uof_get_num_objs)(void); + u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c index 
6be064dc64c8..4b5d0350fc2e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c @@ -19,7 +19,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr, int i; loader = loader_data->fw_loader; - num_objs = hw_device->uof_get_num_objs(); + num_objs = hw_device->uof_get_num_objs(accel_dev); for (i = 0; i < num_objs; i++) { obj_name = hw_device->uof_get_name(accel_dev, i); -- Gitee From 14a40ffb49c6cfc4550c718d484483d551edb522 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:46 -0500 Subject: [PATCH 751/953] crypto: qat - relocate portions of qat_4xxx code ANBZ: #8589 commit de51d22364921dcdb28ef34cd6276c38e126b899 upstream. Intel-SIG: commit de51d2236492 crypto: qat - relocate portions of qat_4xxx code Backport to support Intel QAT in-tree driver Move logic that is common between QAT GEN4 accelerators to the qat_common folder. This includes addresses of CSRs, setters and configuration logic. When moved, functions and defines have been renamed from 4XXX to GEN4. Code specific to the device is moved to the file adf_gen4_hw_data.c. Code related to configuration is moved to the newly created adf_gen4_config.c. This does not introduce any functional change. Signed-off-by: Jie Wang Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 188 ++---------- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 52 ---- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 277 +---------------- drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_gen4_config.c | 287 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_config.h | 11 + .../intel/qat/qat_common/adf_gen4_hw_data.c | 148 +++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 72 +++++ 8 files changed, 552 insertions(+), 484 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_config.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_config.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index a6147ed1603a..4348bc76e9d2 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -120,11 +121,6 @@ static struct adf_hw_device_class adf_4xxx_class = { .instances = 0, }; -static u32 get_accel_mask(struct adf_hw_device_data *self) -{ - return ADF_4XXX_ACCELERATORS_MASK; -} - static u32 get_ae_mask(struct adf_hw_device_data *self) { u32 me_disable = self->fuses; @@ -132,55 +128,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; } -static u32 get_num_accels(struct adf_hw_device_data *self) -{ - return ADF_4XXX_MAX_ACCELERATORS; -} - -static u32 get_num_aes(struct adf_hw_device_data *self) -{ - if (!self || !self->ae_mask) - return 0; - - return hweight32(self->ae_mask); -} - -static u32 get_misc_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_PMISC_BAR; -} - -static u32 get_etr_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_ETR_BAR; -} - -static u32 get_sram_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_SRAM_BAR; -} - -/* - * The vector routing table is used to select the MSI-X 
entry to use for each - * interrupt source. - * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts. - * The final entry corresponds to VF2PF or error interrupts. - * This vector table could be used to configure one MSI-X entry to be shared - * between multiple interrupt sources. - * - * The default routing is set to have a one to one correspondence between the - * interrupt source and the MSI-X entry used. - */ -static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) -{ - void __iomem *csr; - int i; - - csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) - ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); -} - static u32 get_accel_cap(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; @@ -189,7 +136,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) u32 fusectl1; /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | @@ -204,27 +151,27 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_AES_V2; /* A set bit in fusectl1 means the feature is OFF in this SKU */ - if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; } @@ -234,7 +181,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; @@ -245,7 +192,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; @@ -281,11 +228,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } } -static enum dev_sku_info get_sku(struct adf_hw_device_data *self) -{ - return DEV_SKU_1; -} - static const u32 
*adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { @@ -298,28 +240,6 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) } } -static void get_arb_info(struct arb_info *arb_info) -{ - arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG; - arb_info->arb_offset = ADF_4XXX_ARB_OFFSET; - arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; -} - -static void get_admin_info(struct admin_info *admin_csrs_info) -{ - admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; - admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; - admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; -} - -static u32 get_heartbeat_clock(struct adf_hw_device_data *self) -{ - /* - * 4XXX uses KPT counter for HB - */ - return ADF_4XXX_KPT_COUNTER_FREQ; -} - static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) { rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; @@ -338,58 +258,6 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF; } -static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) -{ - struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; - void __iomem *csr = misc_bar->virt_addr; - - /* Enable all in errsou3 except VFLR notification on host */ - ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); -} - -static void adf_enable_ints(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Enable bundle interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); - - /* Enable misc interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); -} - -static int adf_init_device(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - u32 status; - u32 csr; - int ret; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Temporarily mask PM interrupt */ - csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); - csr |= ADF_GEN4_PM_SOU; - ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); - - /* Set DRV_ACTIVE bit to power up the device */ - ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); - - /* Poll status register to make sure the device is powered up */ - ret = read_poll_timeout(ADF_CSR_RD, status, - status & ADF_GEN4_PM_INIT_STATE, - ADF_GEN4_PM_POLL_DELAY_US, - ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, - ADF_GEN4_PM_STATUS); - if (ret) - dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); - - return ret; -} - static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { return ARRAY_SIZE(adf_fw_cy_config); @@ -538,37 +406,37 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) { hw_data->dev_class = &adf_4xxx_class; hw_data->instance_id = adf_4xxx_class.instances++; - hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; - hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF; - hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; - hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; hw_data->num_logical_accel = 1; - hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; - hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; + hw_data->tx_rx_gap = 
ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; - hw_data->enable_error_correction = adf_enable_error_correction; - hw_data->get_accel_mask = get_accel_mask; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; hw_data->get_ae_mask = get_ae_mask; - hw_data->get_num_accels = get_num_accels; - hw_data->get_num_aes = get_num_aes; - hw_data->get_sram_bar_id = get_sram_bar_id; - hw_data->get_etr_bar_id = get_etr_bar_id; - hw_data->get_misc_bar_id = get_misc_bar_id; - hw_data->get_arb_info = get_arb_info; - hw_data->get_admin_info = get_admin_info; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; hw_data->get_accel_cap = get_accel_cap; - hw_data->get_sku = get_sku; + hw_data->get_sku = adf_gen4_get_sku; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; - hw_data->init_device = adf_init_device; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; switch (dev_id) { @@ -585,7 +453,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) } hw_data->uof_get_num_objs = uof_get_num_objs; hw_data->uof_get_ae_mask = uof_get_ae_mask; - hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; @@ -595,7 +463,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->dev_config = adf_gen4_dev_config; hw_data->start_timer = adf_gen4_timer_start; hw_data->stop_timer = adf_gen4_timer_stop; - hw_data->get_hb_clock = get_heartbeat_clock; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index 33423295e90f..76388363ea87 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -6,25 +6,8 @@ #include #include -/* PCIe configuration space */ -#define ADF_4XXX_SRAM_BAR 0 -#define ADF_4XXX_PMISC_BAR 1 -#define ADF_4XXX_ETR_BAR 2 -#define ADF_4XXX_RX_RINGS_OFFSET 1 -#define ADF_4XXX_TX_RINGS_MASK 0x1 -#define ADF_4XXX_MAX_ACCELERATORS 1 #define ADF_4XXX_MAX_ACCELENGINES 9 -#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) -/* Physical function fuses */ -#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8) -#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC) -#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0) -#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4) -#define 
ADF_4XXX_FUSECTL4_OFFSET (0x2D8) -#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC) - -#define ADF_4XXX_ACCELERATORS_MASK (0x1) #define ADF_4XXX_ACCELENGINES_MASK (0x1FF) #define ADF_4XXX_ADMIN_AE_MASK (0x100) @@ -45,28 +28,6 @@ (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \ BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23)) -#define ADF_4XXX_ETR_MAX_BANKS 64 - -/* MSIX interrupt */ -#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040) -#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044) -#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084) -#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) - -/* Bank and ring configuration */ -#define ADF_4XXX_NUM_RINGS_PER_BANK 2 -#define ADF_4XXX_NUM_BANKS_PER_VF 4 - -/* Arbiter configuration */ -#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) -#define ADF_4XXX_ARB_OFFSET (0x0) -#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400) - -/* Admin Interface Reg Offset */ -#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574) -#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) -#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) - /* Firmware Binaries */ #define ADF_4XXX_FW "qat_4xxx.bin" #define ADF_4XXX_MMP "qat_4xxx_mmp.bin" @@ -93,22 +54,9 @@ #define ADF_4XXX_RL_SLICE_REF 1000UL /* Clocks frequency */ -#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) #define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ) -/* qat_4xxx fuse bits are different from old GENs, redefine them */ -enum icp_qat_4xxx_slice_mask { - ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0), - ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1), - ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2), - ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3), - ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4), - ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5), - ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7), -}; - void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id); void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data); -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 8f483d1197dd..9762f2bf7727 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -8,13 +8,10 @@ #include #include #include -#include +#include +#include #include "adf_4xxx_hw_data.h" -#include "adf_cfg_services.h" -#include "qat_compression.h" -#include "qat_crypto.h" -#include "adf_transport_access_macros.h" static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), }, @@ -35,270 +32,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, NULL); } -static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev) -{ - const char *config; - int ret; - - config = accel_dev->accel_id % 2 ? 
ADF_CFG_DC : ADF_CFG_CY; - - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - /* Default configuration is crypto only for even devices - * and compression for odd devices - */ - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, config, - ADF_STR); - if (ret) - return ret; - - adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); - - return 0; -} - -static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long bank, val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_crypto(accel_dev)) - instances = min(cpus, banks / 2); - else - instances = 0; - - for (i = 0; i < instances; i++) { - val = i; - bank = i * 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - bank += 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, - i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); - val = 128; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); - return ret; -} - -static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_compression(accel_dev)) - instances = min(cpus, banks); - else - instances = 0; - - for (i = 0; i < instances; i++) { - val = i; - snprintf(key, sizeof(key), ADF_DC "%d" 
ADF_RING_DC_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); - return ret; -} - -static int adf_no_dev_config(struct adf_accel_dev *accel_dev) -{ - unsigned long val; - int ret; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - return ret; - - return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); -} - -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); - if (ret) - goto err; - - ret = adf_cfg_section_add(accel_dev, "Accelerator0"); - if (ret) - goto err; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) - goto err; - - ret = sysfs_match_string(adf_cfg_services, services); - if (ret < 0) - goto err; - - switch (ret) { - case SVC_CY: - case SVC_CY2: - ret = adf_crypto_dev_config(accel_dev); - break; - case SVC_DC: - case SVC_DCC: - ret = adf_comp_dev_config(accel_dev); - break; - default: - ret = adf_no_dev_config(accel_dev); - break; - } - - if (ret) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - - return ret; - -err: - dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); - return ret; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -348,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); @@ -381,7 +114,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } - ret = adf_cfg_dev_init(accel_dev); + ret = adf_gen4_cfg_dev_init(accel_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize configuration.\n"); goto out_err; @@ -396,7 +129,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Find and map all the device's BARS */ - 
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); if (ret) { diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 779a8aa0b8d2..928de6997155 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \ adf_sysfs_ras_counters.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ + adf_gen4_config.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c new file mode 100644 index 000000000000..fe1f3d727dc5 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_cfg_services.h" +#include "adf_cfg_strings.h" +#include "adf_common_drv.h" +#include "adf_gen4_config.h" +#include "adf_heartbeat.h" +#include "adf_transport_access_macros.h" +#include "qat_compression.h" +#include "qat_crypto.h" + +static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long bank, val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_crypto(accel_dev)) + instances = min(cpus, banks / 2); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + bank = i * 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + bank += 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, 
"Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); + return ret; +} + +static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_compression(accel_dev)) + instances = min(cpus, banks); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); + return ret; +} + +static int adf_no_dev_config(struct adf_accel_dev *accel_dev) +{ + unsigned long val; + int ret; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; + + return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); +} + +/** + * adf_gen4_dev_config() - create dev config required to create instances + * + * @accel_dev: Pointer to acceleration device. + * + * Function creates device configuration required to create instances + * + * Return: 0 on success, error code otherwise. 
+ */ +int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + goto err; + + ret = adf_cfg_section_add(accel_dev, "Accelerator0"); + if (ret) + goto err; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) + goto err; + + ret = sysfs_match_string(adf_cfg_services, services); + if (ret < 0) + goto err; + + switch (ret) { + case SVC_CY: + case SVC_CY2: + ret = adf_crypto_dev_config(accel_dev); + break; + case SVC_DC: + case SVC_DCC: + ret = adf_comp_dev_config(accel_dev); + break; + default: + ret = adf_no_dev_config(accel_dev); + break; + } + + if (ret) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; + +err: + dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_dev_config); + +int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev) +{ + const char *config; + int ret; + + config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY; + + ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); + if (ret) + return ret; + + /* Default configuration is crypto only for even devices + * and compression for odd devices + */ + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, config, + ADF_STR); + if (ret) + return ret; + + adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_cfg_dev_init); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h new file mode 100644 index 000000000000..bb87655f69a8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_GEN4_CONFIG_H_ +#define ADF_GEN4_CONFIG_H_ + +#include "adf_accel_devices.h" + +int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); +int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 3148a62938fd..ee08b34876dd 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -4,6 +4,7 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_hw_data.h" +#include "adf_gen4_pm.h" static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) { @@ -102,6 +103,131 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) } EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) +{ + return ADF_GEN4_ACCELERATORS_MASK; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask); + +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_GEN4_MAX_ACCELERATORS; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels); + +u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self) +{ + if (!self || !self->ae_mask) + return 0; + + return hweight32(self->ae_mask); +} +EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes); + +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_PMISC_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id); + +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_ETR_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id); + 
+u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_SRAM_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id); + +enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_1; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_sku); + +void adf_gen4_get_arb_info(struct arb_info *arb_info) +{ + arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG; + arb_info->arb_offset = ADF_GEN4_ARB_OFFSET; + arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info); + +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info) +{ + admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET; + admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET; + admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info); + +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self) +{ + /* + * GEN4 uses KPT counter for HB + */ + return ADF_GEN4_KPT_COUNTER_FREQ; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock); + +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + + /* Enable all in errsou3 except VFLR notification on host */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); +} +EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction); + +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + + addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Enable bundle interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0); + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0); + + /* Enable misc interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0); +} +EXPORT_SYMBOL_GPL(adf_gen4_enable_ints); + +int adf_gen4_init_device(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + u32 status; + u32 csr; + int ret; + + addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); + csr |= ADF_GEN4_PM_SOU; + ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_GEN4_PM_INIT_STATE, + ADF_GEN4_PM_POLL_DELAY_US, + ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, + ADF_GEN4_PM_STATUS); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_device); + static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower) { @@ -135,6 +261,28 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); +/* + * The vector routing table is used to select the MSI-X entry to use for each + * interrupt source. + * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts. + * The final entry corresponds to VF2PF or error interrupts. + * This vector table could be used to configure one MSI-X entry to be shared + * between multiple interrupt sources. + * + * The default routing is set to have a one to one correspondence between the + * interrupt source and the MSI-X entry used. 
+ */ +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr; + int i; + + csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++) + ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i); +} +EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable); + int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev) { return 0; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 1813fe1d5a06..b42fb8048c04 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -3,9 +3,55 @@ #ifndef ADF_GEN4_HW_CSR_DATA_H_ #define ADF_GEN4_HW_CSR_DATA_H_ +#include + #include "adf_accel_devices.h" #include "adf_cfg_common.h" +/* PCIe configuration space */ +#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) +#define ADF_GEN4_SRAM_BAR 0 +#define ADF_GEN4_PMISC_BAR 1 +#define ADF_GEN4_ETR_BAR 2 + +/* Clocks frequency */ +#define ADF_GEN4_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) + +/* Physical function fuses */ +#define ADF_GEN4_FUSECTL0_OFFSET 0x2C8 +#define ADF_GEN4_FUSECTL1_OFFSET 0x2CC +#define ADF_GEN4_FUSECTL2_OFFSET 0x2D0 +#define ADF_GEN4_FUSECTL3_OFFSET 0x2D4 +#define ADF_GEN4_FUSECTL4_OFFSET 0x2D8 +#define ADF_GEN4_FUSECTL5_OFFSET 0x2DC + +/* Accelerators */ +#define ADF_GEN4_ACCELERATORS_MASK 0x1 +#define ADF_GEN4_MAX_ACCELERATORS 1 + +/* MSIX interrupt */ +#define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 +#define ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET 0x41A044 +#define ADF_GEN4_SMIAPF_MASK_OFFSET 0x41A084 +#define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) + +/* Bank and ring configuration */ +#define ADF_GEN4_NUM_RINGS_PER_BANK 2 +#define ADF_GEN4_NUM_BANKS_PER_VF 4 +#define ADF_GEN4_ETR_MAX_BANKS 64 +#define ADF_GEN4_RX_RINGS_OFFSET 1 +#define ADF_GEN4_TX_RINGS_MASK 0x1 + +/* Arbiter configuration */ +#define ADF_GEN4_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) +#define ADF_GEN4_ARB_OFFSET 0x0 +#define ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET 0x400 + +/* Admin Interface Reg Offset */ +#define ADF_GEN4_ADMINMSGUR_OFFSET 0x500574 +#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578 +#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970 + /* Transport access */ #define ADF_BANK_INT_SRC_SEL_MASK 0x44UL #define ADF_RING_CSR_RING_CONFIG 0x1000 @@ -147,6 +193,32 @@ do { \ #define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); + +enum icp_qat_gen4_slice_mask { + ICP_ACCEL_GEN4_MASK_CIPHER_SLICE = BIT(0), + ICP_ACCEL_GEN4_MASK_AUTH_SLICE = BIT(1), + ICP_ACCEL_GEN4_MASK_PKE_SLICE = BIT(2), + ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3), + ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4), + ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5), + ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), +}; + +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev); +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self); +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info); +void adf_gen4_get_arb_info(struct arb_info *arb_info); +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self); +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self); +enum dev_sku_info 
adf_gen4_get_sku(struct adf_hw_device_data *self); +u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self); +int adf_gen4_init_device(struct adf_accel_dev *accel_dev); void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); +void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); #endif -- Gitee From c4b79314be7a300c7b44bb57ad33dff62068dcae Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:47 -0500 Subject: [PATCH 752/953] crypto: qat - move fw config related structures ANBZ: #8589 commit 98a4f29fba0ffc1f1b026d9cb717fbe7edd66ffe upstream. Intel-SIG: commit 98a4f29fba0f crypto: qat - move fw config related structures Backport to support Intel QAT in-tree driver Relocate the structures adf_fw_objs and adf_fw_config from the file adf_4xxx_hw_data.c to the newly created adf_fw_config.h. These structures will be used by new device drivers. This does not introduce any functional change. Signed-off-by: Jie Wang Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 13 +------------ .../intel/qat/qat_common/adf_fw_config.h | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_fw_config.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 4348bc76e9d2..9826ce514d44 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -21,13 +22,6 @@ #define ADF_AE_GROUP_1 GENMASK(7, 4) #define ADF_AE_GROUP_2 BIT(8) -enum adf_fw_objs { - ADF_FW_SYM_OBJ, - ADF_FW_ASYM_OBJ, - ADF_FW_DC_OBJ, - ADF_FW_ADMIN_OBJ, -}; - static const char * const adf_4xxx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ, @@ -42,11 +36,6 @@ static const char * const adf_402xx_fw_objs[] = { [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ, }; -struct adf_fw_config { - u32 ae_mask; - enum adf_fw_objs obj; -}; - static const struct adf_fw_config adf_fw_cy_config[] = { {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h new file mode 100644 index 000000000000..4f86696800c9 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_FW_CONFIG_H_ +#define ADF_FW_CONFIG_H_ + +enum adf_fw_objs { + ADF_FW_SYM_OBJ, + ADF_FW_ASYM_OBJ, + ADF_FW_DC_OBJ, + ADF_FW_ADMIN_OBJ, +}; + +struct adf_fw_config { + u32 ae_mask; + enum adf_fw_objs obj; +}; + +#endif -- Gitee From 7aa1fa045d2afc2a6351da51b8ab9c0643ec27b5 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:48 -0500 Subject: [PATCH 753/953] crypto: qat - add support for 420xx devices ANBZ: #8589 commit fcf60f4bcf54952cc14d14178c358be222dbeb43 upstream. 
Intel-SIG: commit fcf60f4bcf54 crypto: qat - add support for 420xx devices Backport to support Intel QAT in-tree driver Add support for 420xx devices by including a new device driver that supports such devices, updates to the firmware loader and capabilities. Compared to 4xxx devices, 420xx devices have more acceleration engines (16 service engines and 1 admin) and support the wireless cipher algorithms ZUC and Snow 3G. Signed-off-by: Jie Wang Co-developed-by: Dong Xie Signed-off-by: Dong Xie Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/Kconfig | 11 + drivers/crypto/intel/qat/Makefile | 1 + drivers/crypto/intel/qat/qat_420xx/Makefile | 4 + .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 552 ++++++++++++++++++ .../intel/qat/qat_420xx/adf_420xx_hw_data.h | 55 ++ drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 202 +++++++ .../intel/qat/qat_common/adf_accel_devices.h | 3 + .../intel/qat/qat_common/adf_cfg_common.h | 1 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 2 + .../crypto/intel/qat/qat_common/icp_qat_hw.h | 14 +- .../intel/qat/qat_common/icp_qat_uclo.h | 2 +- drivers/crypto/intel/qat/qat_common/qat_hal.c | 6 +- .../crypto/intel/qat/qat_common/qat_uclo.c | 1 + 13 files changed, 849 insertions(+), 5 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_420xx/Makefile create mode 100644 drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c create mode 100644 drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h create mode 100644 drivers/crypto/intel/qat/qat_420xx/adf_drv.c diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 1220cc86f910..c120f6715a09 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -59,6 +59,17 @@ config CRYPTO_DEV_QAT_4XXX To compile this as a module, choose M here: the module will be called qat_4xxx. +config CRYPTO_DEV_QAT_420XX + tristate "Support for Intel(R) QAT_420XX" + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) + select CRYPTO_DEV_QAT + help + Support for Intel(R) QuickAssist Technology QAT_420xx + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_420xx. 
+ config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile index 258c8a626ce0..235b69f4f3f7 100644 --- a/drivers/crypto/intel/qat/Makefile +++ b/drivers/crypto/intel/qat/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile new file mode 100644 index 000000000000..a90fbe00b3c8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y := -I $(srctree)/$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o +qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c new file mode 100644 index 000000000000..d296eb18db3c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_420xx_hw_data.h" +#include "icp_qat_hw.h" + +#define ADF_AE_GROUP_0 GENMASK(3, 0) +#define ADF_AE_GROUP_1 GENMASK(7, 4) +#define ADF_AE_GROUP_2 GENMASK(11, 8) +#define ADF_AE_GROUP_3 GENMASK(15, 12) +#define ADF_AE_GROUP_4 BIT(16) + +static const char * const adf_420xx_fw_objs[] = { + [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, + [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, + [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ, + [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ, +}; + +static const struct adf_fw_config adf_fw_cy_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_dc_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_dc_config[] = { + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, 
ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dcc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +/* Worker thread to service arbiter mappings */ +static const u32 default_thrd_to_arb_map[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000055, 0x00000055, 0x00000055, 0x00000055, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x00000055, 0x00000055, 0x00000055, 0x00000055, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x0 +}; + +static const u32 thrd_to_arb_map_asym[ADF_420XX_MAX_ACCELENGINES] = { + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x0 +}; + +static const u32 thrd_to_arb_map_sym[ADF_420XX_MAX_ACCELENGINES] = { + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0 +}; + +static const u32 thrd_to_arb_map_asym_dc[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000055, 0x00000055, 0x00000055, 0x00000055, + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, + 0x0 +}; + +static const u32 thrd_to_arb_map_sym_dc[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000055, 0x00000055, 0x00000055, 0x00000055, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u32 thrd_to_arb_map_dc[ADF_420XX_MAX_ACCELENGINES] = { + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u32 thrd_to_arb_map_dcc[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static struct adf_hw_device_class adf_420xx_class = { + .name = ADF_420XX_DEVICE_NAME, + .type = DEV_420XX, + .instances = 0, +}; + +static u32 get_ae_mask(struct adf_hw_device_data *self) +{ + u32 me_disable = self->fuses; + + return ~me_disable & ADF_420XX_ACCELENGINES_MASK; +} + +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return ARRAY_SIZE(adf_fw_cy_config); + case SVC_DC: + return ARRAY_SIZE(adf_fw_dc_config); + case SVC_DCC: + return ARRAY_SIZE(adf_fw_dcc_config); + case SVC_SYM: + return ARRAY_SIZE(adf_fw_sym_config); + case SVC_ASYM: + return ARRAY_SIZE(adf_fw_asym_config); + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return ARRAY_SIZE(adf_fw_asym_dc_config); + case SVC_SYM_DC: + case SVC_DC_SYM: + return ARRAY_SIZE(adf_fw_sym_dc_config); + default: + return 0; + } +} + +static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return adf_fw_cy_config; + case SVC_DC: + return adf_fw_dc_config; + case SVC_DCC: + return adf_fw_dcc_config; + case SVC_SYM: + return adf_fw_sym_config; + case SVC_ASYM: + return adf_fw_asym_config; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return 
adf_fw_asym_dc_config; + case SVC_SYM_DC: + case SVC_DC_SYM: + return adf_fw_sym_dc_config; + default: + return NULL; + } +} + +static void update_ae_mask(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + const struct adf_fw_config *fw_config; + u32 config_ae_mask = 0; + u32 ae_mask, num_objs; + int i; + + ae_mask = get_ae_mask(hw_data); + + /* Modify the AE mask based on the firmware configuration loaded */ + fw_config = get_fw_config(accel_dev); + num_objs = uof_get_num_objs(accel_dev); + + config_ae_mask |= ADF_420XX_ADMIN_AE_MASK; + for (i = 0; i < num_objs; i++) + config_ae_mask |= fw_config[i].ae_mask; + + hw_data->ae_mask = ae_mask & config_ae_mask; +} + +static u32 get_accel_cap(struct adf_accel_dev *accel_dev) +{ + u32 capabilities_sym, capabilities_asym, capabilities_dc; + struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; + u32 capabilities_dcc; + u32 fusectl1; + + /* As a side effect, update ae_mask based on configuration */ + update_ae_mask(accel_dev); + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); + + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_SHA3 | + ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_HKDF | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_SM3 | + ICP_ACCEL_CAPABILITIES_SM4 | + ICP_ACCEL_CAPABILITIES_AES_V2 | + ICP_ACCEL_CAPABILITIES_ZUC | + ICP_ACCEL_CAPABILITIES_ZUC_256 | + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT | + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; + + /* A set bit in fusectl1 means the feature is OFF in this SKU */ + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE) + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + + capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | + ICP_ACCEL_CAPABILITIES_SM2 | + ICP_ACCEL_CAPABILITIES_ECEDMONT; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; + 
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; + } + + capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + } + + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return capabilities_sym | capabilities_asym; + case SVC_DC: + return capabilities_dc; + case SVC_DCC: + /* + * Sym capabilities are available for chaining operations, + * but sym crypto instances cannot be supported + */ + capabilities_dcc = capabilities_dc | capabilities_sym; + capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + return capabilities_dcc; + case SVC_SYM: + return capabilities_sym; + case SVC_ASYM: + return capabilities_asym; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return capabilities_asym | capabilities_dc; + case SVC_SYM_DC: + case SVC_DC_SYM: + return capabilities_sym | capabilities_dc; + default: + return 0; + } +} + +static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_ASYM: + return thrd_to_arb_map_asym; + case SVC_SYM: + return thrd_to_arb_map_sym; + case SVC_DC: + return thrd_to_arb_map_dc; + case SVC_DCC: + return thrd_to_arb_map_dcc; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return thrd_to_arb_map_asym_dc; + case SVC_DC_SYM: + case SVC_SYM_DC: + return thrd_to_arb_map_sym_dc; + default: + return default_thrd_to_arb_map; + } +} + +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) +{ + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC; + rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; +} + +enum adf_rp_groups { + RP_GROUP_0 = 0, + RP_GROUP_1, + RP_GROUP_COUNT +}; + +static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; + const struct adf_fw_config *fw_config; + u16 ring_to_svc_map; + int i, j; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return 0; + + for (i = 0; i < RP_GROUP_COUNT; i++) { + switch (fw_config[i].ae_mask) { + case ADF_AE_GROUP_0: + j = RP_GROUP_0; + break; + case ADF_AE_GROUP_1: + j = RP_GROUP_1; + break; + default: + return 0; + } + + switch (fw_config[i].obj) { + case ADF_FW_SYM_OBJ: + rps[j] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[j] = ASYM; + break; + case ADF_FW_DC_OBJ: + rps[j] = COMP; + break; + default: + rps[j] = 0; + break; + } + } + + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << 
ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} + +static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, + const char * const fw_objs[], int num_objs) +{ + const struct adf_fw_config *fw_config; + int id; + + fw_config = get_fw_config(accel_dev); + if (fw_config) + id = fw_config[obj_num].obj; + else + id = -EINVAL; + + if (id < 0 || id > num_objs) + return NULL; + + return fw_objs[id]; +} + +static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs); + + return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); +} + +static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return 0; + + return fw_config[obj_num].ae_mask; +} + +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; +} + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) +{ + hw_data->dev_class = &adf_420xx_class; + hw_data->instance_id = adf_420xx_class.instances++; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; + hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES; + hw_data->num_logical_accel = 1; + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; + hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; + hw_data->get_accel_cap = get_accel_cap; + hw_data->get_sku = adf_gen4_get_sku; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; + hw_data->reset_device = adf_reset_flr; + hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK; + hw_data->fw_name = ADF_420XX_FW; + hw_data->fw_mmp_name = ADF_420XX_MMP; + hw_data->uof_get_name = uof_get_name_420xx; + hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->set_msix_rttable = 
adf_gen4_set_msix_default_rttable; + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; + hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->disable_iov = adf_disable_sriov; + hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->enable_pm = adf_gen4_enable_pm; + hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; + hw_data->dev_config = adf_gen4_dev_config; + hw_data->start_timer = adf_gen4_timer_start; + hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_420XX_AE_FREQ; + + adf_gen4_set_err_mask(&hw_data->dev_err_mask); + adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); + adf_gen4_init_dc_ops(&hw_data->dc_ops); + adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_init_rl_data(&hw_data->rl_data); +} + +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h new file mode 100644 index 000000000000..99abbfc14820 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_420XX_HW_DATA_H_ +#define ADF_420XX_HW_DATA_H_ + +#include + +#define ADF_420XX_MAX_ACCELENGINES 17 + +#define ADF_420XX_ACCELENGINES_MASK 0x1FFFF +#define ADF_420XX_ADMIN_AE_MASK 0x10000 + +#define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF) +#define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF) +#define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001) +#define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007) +#define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF) +#define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF) + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(27) - enable parity detection on SPPs + */ +#define ADF_420XX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27)) + +/* Firmware Binaries */ +#define ADF_420XX_FW "qat_420xx.bin" +#define ADF_420XX_MMP "qat_420xx_mmp.bin" +#define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin" +#define ADF_420XX_DC_OBJ "qat_420xx_dc.bin" +#define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin" +#define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin" + +/* RL constants */ +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_420XX_RL_DCPR_CORRECTION 1 +#define ADF_420XX_RL_SCANS_PER_SEC 954 +#define ADF_420XX_RL_MAX_TP_ASYM 173750UL +#define ADF_420XX_RL_MAX_TP_SYM 95000UL +#define ADF_420XX_RL_MAX_TP_DC 40000UL +#define ADF_420XX_RL_SLICE_REF 1000UL + +/* Clocks frequency */ +#define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ) + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id); +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data); + +#endif diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c new file mode 100644 index 000000000000..2a3598409eeb --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include 
+#include +#include + +#include +#include +#include +#include +#include +#include + +#include "adf_420xx_hw_data.h" + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->hw_device) { + adf_clean_hw_data_420xx(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_dbgfs_exit(accel_dev); + adf_cfg_dev_remove(accel_dev); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + unsigned int i, bar_nr; + unsigned long bar_mask; + struct adf_bar *bar; + int ret; + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* + * If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. + */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* + * Add accel device to accel table + * This should be called before adf_cleanup_accel is called + */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and initialise device hardware meta-data structure */ + hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); + + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + (~hw_data->ae_mask & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found.\n"); + ret = -EFAULT; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* Enable PCI device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "Can't enable PCI device.\n"); + goto out_err; + } + + /* Set DMA identifier */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "No usable DMA configuration.\n"); + goto out_err; + } + + ret = adf_gen4_cfg_dev_init(accel_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize configuration.\n"); + goto out_err; + } + + /* Get accelerator capabilities mask */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) { + dev_err(&pdev->dev, "Failed to get capabilities mask.\n"); + ret = -EINVAL; + goto out_err; + } + + /* Find and map all the device's BARS */ + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; + + ret = 
pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "Failed to map pci regions.\n"); + goto out_err; + } + + i = 0; + for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { + bar = &accel_pci_dev->pci_bars[i++]; + bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + } + + pci_set_master(pdev); + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state.\n"); + ret = -ENOMEM; + goto out_err; + } + + accel_dev->ras_errors.enabled = true; + adf_dbgfs_init(accel_dev); + + ret = adf_dev_up(accel_dev, true); + if (ret) + goto out_err_dev_stop; + + ret = adf_sysfs_init(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_down(accel_dev, false); +out_err: + adf_cleanup_accel(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + adf_dev_down(accel_dev, false); + adf_cleanup_accel(accel_dev); +} + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_420XX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + +module_pci_driver(adf_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_420XX_FW); +MODULE_FIRMWARE(ADF_420XX_MMP); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); +MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 33de8855fd66..7df6336ddd62 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -19,12 +19,15 @@ #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" +#define ADF_420XX_DEVICE_NAME "420xx" #define ADF_4XXX_PCI_DEVICE_ID 0x4940 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 #define ADF_401XX_PCI_DEVICE_ID 0x4942 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 #define ADF_402XX_PCI_DEVICE_ID 0x4944 #define ADF_402XXIOV_PCI_DEVICE_ID 0x4945 +#define ADF_420XX_PCI_DEVICE_ID 0x4946 +#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947 #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h index 6e5de1dab97b..89df3888d7ea 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h @@ -47,6 +47,7 @@ enum adf_device_type { DEV_C3XXX, DEV_C3XXXVF, DEV_4XXX, + DEV_420XX, }; struct adf_dev_status_info { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index b42fb8048c04..051ad20581a6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -202,6 +202,8 @@ enum icp_qat_gen4_slice_mask { ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4), ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5), ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), + ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8), + ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), }; void adf_gen4_enable_error_correction(struct adf_accel_dev 
*accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index eb2ef225bcee..b8f1c4ffb8b5 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -18,7 +18,12 @@ enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_9 = 9, ICP_QAT_HW_AE_10 = 10, ICP_QAT_HW_AE_11 = 11, - ICP_QAT_HW_AE_DELIMITER = 12 + ICP_QAT_HW_AE_12 = 12, + ICP_QAT_HW_AE_13 = 13, + ICP_QAT_HW_AE_14 = 14, + ICP_QAT_HW_AE_15 = 15, + ICP_QAT_HW_AE_16 = 16, + ICP_QAT_HW_AE_DELIMITER = 17 }; enum icp_qat_hw_qat_id { @@ -95,7 +100,7 @@ enum icp_qat_capabilities_mask { /* Bits 10-11 are currently reserved */ ICP_ACCEL_CAPABILITIES_HKDF = BIT(12), ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13), - /* Bit 14 is currently reserved */ + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14), ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15), ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16), ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17), @@ -107,7 +112,10 @@ enum icp_qat_capabilities_mask { ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23), ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24), ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25), - ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26) + ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26), + /* Bits 27-28 are currently reserved */ + ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29), + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30), }; #define QAT_AUTH_MODE_BITPOS 4 diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index 69482abdb8b9..e28241bdd0f4 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -7,7 +7,7 @@ #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 -#define ICP_QAT_UCLO_MAX_AE 12 +#define ICP_QAT_UCLO_MAX_AE 17 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) #define ICP_QAT_UCLO_MAX_USTORE 0x4000 diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index cbb946a80076..317cafa9d11f 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -697,12 +697,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: case ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: handle->chip_info->mmp_sram_size = 0; handle->chip_info->nn = false; handle->chip_info->lm2lm3 = true; handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X; handle->chip_info->icp_rst_csr = ICP_RESET_CPP0; - handle->chip_info->icp_rst_mask = 0x100015; + if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID) + handle->chip_info->icp_rst_mask = 0x100155; + else + handle->chip_info->icp_rst_mask = 0x100015; handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0; handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX; handle->chip_info->wakeup_event_val = 0x80000000; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index e27ea7e28c51..ad2c64af7427 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -733,6 +733,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: case 
ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: return ICP_QAT_AC_4XXX_A_DEV_TYPE; default: pr_err("QAT: unsupported device 0x%x\n", -- Gitee From ae9938b34a844eb39c8398c4e3eb0f868a397a7c Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:05 +0100 Subject: [PATCH 754/953] crypto: qat - include pci.h for GET_DEV() ANBZ: #8589 commit b6e4b6eb1e6393580482581470a3a08c15ab977b upstream. Intel-SIG: commit b6e4b6eb1e63 crypto: qat - include pci.h for GET_DEV() Backport to support Intel QAT in-tree driver GET_DEV() macro expansion relies on struct pci_dev being defined. Include <linux/pci.h> at adf_accel_devices.h. Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 7df6336ddd62..fc7786d71e96 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -6,6 +6,7 @@ #include #include #include +#include <linux/pci.h> #include #include #include "adf_cfg_common.h" -- Gitee From 900588ab10b843cf256e3d12dfdac8e055632861 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:06 +0100 Subject: [PATCH 755/953] crypto: qat - add admin msgs for telemetry ANBZ: #8589 commit 7f06679dd54a331d750e5d6f6f04a9df2eba72ff upstream. Intel-SIG: commit 7f06679dd54a crypto: qat - add admin msgs for telemetry Backport to support Intel QAT in-tree driver Extend the admin interface with two new public APIs to enable and disable the telemetry feature: adf_send_admin_tl_start() and adf_send_admin_tl_stop(). The first sends to the firmware, through the ICP_QAT_FW_TL_START message, the IO address where the firmware will write telemetry metrics and a list of ring pairs (maximum 4) to be monitored. It returns the number of accelerators of each type supported by this hardware. After this message is sent, the firmware starts periodically reporting telemetry data by writing into the DMA buffer specified as input. The second sends the admin message ICP_QAT_FW_TL_STOP, which stops the reporting of telemetry data. This patch is based on earlier work done by Wojciech Ziemba.
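For illustration, a minimal caller of the two APIs might look like the
sketch below. It is not part of this patch: tl_demo() is a hypothetical
helper, error paths are trimmed, and the layout size is assumed to come
from the caller; only the two adf_send_admin_tl_*() calls are real.

	/* Hypothetical sketch: start telemetry, let the firmware report, stop. */
	static int tl_demo(struct adf_accel_dev *accel_dev, size_t layout_sz)
	{
		struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
		struct device *dev = &GET_DEV(accel_dev);
		dma_addr_t tl_dma_addr;
		void *tl_buf;
		int ret;

		/* DMA region the firmware periodically writes metrics into */
		tl_buf = dma_alloc_coherent(dev, layout_sz, &tl_dma_addr,
					    GFP_KERNEL);
		if (!tl_buf)
			return -ENOMEM;

		/* NULL ring-pair list: no ring pairs selected for monitoring */
		ret = adf_send_admin_tl_start(accel_dev, tl_dma_addr, layout_sz,
					      NULL, &slice_cnt);
		if (ret)
			goto out_free;

		/* ... consume the metrics written into tl_buf every second ... */

		ret = adf_send_admin_tl_stop(accel_dev);
	out_free:
		dma_free_coherent(dev, layout_sz, tl_buf, tl_dma_addr);
		return ret;
	}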
Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../crypto/intel/qat/qat_common/adf_admin.c | 37 +++++++++++++++++++ .../crypto/intel/qat/qat_common/adf_admin.h | 4 ++ .../qat/qat_common/icp_qat_fw_init_admin.h | 10 +++++ 3 files changed, 51 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 54b673ec2362..acad526eb741 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -498,6 +498,43 @@ int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, return ret; } +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_TL_START; + req.init_cfg_ptr = tl_dma_addr; + req.init_cfg_sz = layout_sz; + + if (rp_indexes) + memcpy(&req.rp_indexes, rp_indexes, sizeof(req.rp_indexes)); + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + memcpy(slice_count, &resp.slices, sizeof(*slice_count)); + + return 0; +} + +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + u32 ae_mask = hw_data->admin_ae_mask; + + req.cmd_id = ICP_QAT_FW_TL_STOP; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h index 55cbcbc66c9f..647c8e196752 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.h +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -23,5 +23,9 @@ int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count); +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index cd418b51d9f3..63cf18e2a4e5 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -29,6 +29,8 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_RL_ADD = 134, ICP_QAT_FW_RL_UPDATE = 135, ICP_QAT_FW_RL_REMOVE = 136, + ICP_QAT_FW_TL_START = 137, + ICP_QAT_FW_TL_STOP = 138, }; enum icp_qat_fw_init_admin_resp_status { @@ -36,6 +38,13 @@ enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_FAIL }; +struct icp_qat_fw_init_admin_tl_rp_indexes { + __u8 
rp_num_index_0; + __u8 rp_num_index_1; + __u8 rp_num_index_2; + __u8 rp_num_index_3; +}; + struct icp_qat_fw_init_admin_slice_cnt { __u8 cpr_cnt; __u8 xlt_cnt; @@ -87,6 +96,7 @@ struct icp_qat_fw_init_admin_req { __u8 rp_count; }; __u32 idle_filter; + struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes; }; __u32 resrvd4; -- Gitee From a148027b7f05a4177d9772ea93a8a493aa6a076c Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:07 +0100 Subject: [PATCH 756/953] crypto: qat - add support for device telemetry ANBZ: #8589 commit 69e7649f7cc2aaa7889174456d39319a623c1a18 upstream. Intel-SIG: commit 69e7649f7cc2 crypto: qat - add support for device telemetry Backport to support Intel QAT in-tree driver Expose device telemetry data through debugfs for QAT GEN4 devices. This allows gathering metrics about the performance and the utilization of a device. In particular, statistics on (1) the utilization of the PCIe channel, (2) address translation, when SVA is enabled, and (3) the internal engines for crypto and data compression. If telemetry is supported by the firmware, the driver allocates a DMA region and a circular buffer. When telemetry is enabled, through the `control` attribute in debugfs, the driver sends to the firmware, via the admin interface, the `TL_START` command. This triggers the device to periodically gather telemetry data from hardware registers and write it into the DMA memory region. The device writes into the shared region every second. The driver, every 500ms, snapshots the DMA shared region into the circular buffer. This is then used to compute basic metrics (min/max/average) for each counter, every time the `device_data` attribute is queried. Telemetry counters are exposed through debugfs in the folder /sys/kernel/debug/qat_<device>_<BDF>/telemetry. For details, refer to debugfs-driver-qat_telemetry in Documentation/ABI. This patch is based on earlier work done by Wojciech Ziemba.
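Since the firmware keeps writing while the driver copies, the snapshot
is made consistent with a counter re-check rather than a lock shared
with the device. Simplified, the logic in tl_work_handler() amounts to
the following sketch (names shortened for illustration):

	/* Sketch of the snapshot consistency check (simplified). */
	u32 before = regs_data[msg_cnt_idx];	/* fw-updated message counter */

	memcpy(hist_buf, regs_data, layout_sz);	/* copy DMA region to history */

	/* If the firmware wrote again while copying, take the copy once more */
	if (regs_data[msg_cnt_idx] != before)
		memcpy(hist_buf, regs_data, layout_sz);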
Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../ABI/testing/debugfs-driver-qat_telemetry | 103 ++++ .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 2 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 + drivers/crypto/intel/qat/qat_common/Makefile | 3 + .../intel/qat/qat_common/adf_accel_devices.h | 4 + .../crypto/intel/qat/qat_common/adf_dbgfs.c | 3 + .../crypto/intel/qat/qat_common/adf_gen4_tl.c | 118 ++++ .../crypto/intel/qat/qat_common/adf_gen4_tl.h | 121 +++++ .../crypto/intel/qat/qat_common/adf_init.c | 12 + .../intel/qat/qat_common/adf_telemetry.c | 271 ++++++++++ .../intel/qat/qat_common/adf_telemetry.h | 92 ++++ .../intel/qat/qat_common/adf_tl_debugfs.c | 502 ++++++++++++++++++ .../intel/qat/qat_common/adf_tl_debugfs.h | 106 ++++ 13 files changed, 1339 insertions(+) create mode 100644 Documentation/ABI/testing/debugfs-driver-qat_telemetry create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_telemetry.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_telemetry.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h diff --git a/Documentation/ABI/testing/debugfs-driver-qat_telemetry b/Documentation/ABI/testing/debugfs-driver-qat_telemetry new file mode 100644 index 000000000000..24532365387c --- /dev/null +++ b/Documentation/ABI/testing/debugfs-driver-qat_telemetry @@ -0,0 +1,103 @@ +What: /sys/kernel/debug/qat_<device>_<BDF>/telemetry/control +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Enables/disables the reporting of telemetry metrics. + + Allowed values to write: + ======================== + * 0: disable telemetry + * 1: enable telemetry + * 2, 3, 4: enable telemetry and calculate minimum, maximum + and average for each counter over 2, 3 or 4 samples + + Returned values: + ================ + * 1-4: telemetry is enabled and running + * 0: telemetry is disabled + + Example. + + Writing '3' to this file starts the collection of + telemetry metrics. Samples are collected every second and + stored in a circular buffer of size 3. These values are then + used to calculate the minimum, maximum and average for each + counter. After enabling, counters can be retrieved through + the ``device_data`` file:: + + echo 3 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control + + Writing '0' to this file stops the collection of telemetry + metrics:: + + echo 0 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control + + This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat_<device>_<BDF>/telemetry/device_data +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RO) Reports device telemetry counters. + Reads report metrics about performance and utilization of + a QAT device: + + ======================= ======================================== + Field Description + ======================= ======================================== + sample_cnt number of acquisitions of telemetry data + from the device. Reads are performed + every 1000 ms.
+ pci_trans_cnt number of PCIe partial transactions + max_rd_lat maximum logged read latency [ns] (could + be any read operation) + rd_lat_acc_avg average read latency [ns] + max_gp_lat max get to put latency [ns] (only takes + samples for AE0) + gp_lat_acc_avg average get to put latency [ns] + bw_in PCIe, write bandwidth [Mbps] + bw_out PCIe, read bandwidth [Mbps] + at_page_req_lat_avg Address Translator (AT), average page + request latency [ns] + at_trans_lat_avg AT, average page translation latency [ns] + at_max_tlb_used AT, maximum uTLB used + util_cpr utilization of Compression slice N [%] + exec_cpr execution count of Compression slice N + util_xlt utilization of Translator slice N [%] + exec_xlt execution count of Translator slice N + util_dcpr utilization of Decompression slice N [%] + exec_dcpr execution count of Decompression slice N + util_pke utilization of PKE N [%] + exec_pke execution count of PKE N + util_ucs utilization of UCS slice N [%] + exec_ucs execution count of UCS slice N + util_wat utilization of Wireless Authentication + slice N [%] + exec_wat execution count of Wireless Authentication + slice N + util_wcp utilization of Wireless Cipher slice N [%] + exec_wcp execution count of Wireless Cipher slice N + util_cph utilization of Cipher slice N [%] + exec_cph execution count of Cipher slice N + util_ath utilization of Authentication slice N [%] + exec_ath execution count of Authentication slice N + ======================= ======================================== + + The telemetry report file can be read with the following command:: + + cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/device_data + + If ``control`` is set to 1, only the current values of the + counters are displayed:: + + <counter_name> <current> + + If ``control`` is 2, 3 or 4, counters are displayed in the + following format:: + + <counter_name> <current> <min> <max> <avg> + + If a device lacks a specific accelerator, the corresponding + attribute is not reported. + + This attribute is only available for qat_4xxx devices.
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index d296eb18db3c..a7730d8057d6 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -15,6 +15,7 @@ #include #include #include +#include <adf_gen4_tl.h> #include "adf_420xx_hw_data.h" #include "icp_qat_hw.h" @@ -543,6 +544,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 9826ce514d44..e988a04a34ef 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -15,6 +15,7 @@ #include #include "adf_gen4_ras.h" #include +#include <adf_gen4_tl.h> #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -461,6 +462,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 928de6997155..6908727bff3b 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -41,9 +41,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ adf_cnv_dbgfs.o \ adf_gen4_pm_debugfs.o \ + adf_gen4_tl.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ adf_pm_dbgfs.o \ + adf_telemetry.o \ + adf_tl_debugfs.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index fc7786d71e96..b274ebc799c9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -11,6 +11,7 @@ #include #include "adf_cfg_common.h" #include "adf_rl.h" +#include "adf_telemetry.h" #include "adf_pfvf_msg.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" @@ -254,6 +255,7 @@ struct adf_hw_device_data { struct adf_ras_ops ras_ops; struct adf_dev_err_mask dev_err_mask; struct adf_rl_hw_data rl_data; + struct adf_tl_hw_data tl_data; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -308,6 +310,7 @@ struct adf_hw_device_data { #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev struct adf_admin_comms; @@ -356,6 +359,7 @@ struct adf_accel_dev { struct adf_cfg_device_data *cfg; struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; + struct adf_telemetry *telemetry; struct adf_dc_data *dc_data; struct adf_pm power_management; struct list_head crypto_list; diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 477efcc81a16..c42f5c25aabd 100644 ---
a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -10,6 +10,7 @@ #include "adf_fw_counters.h" #include "adf_heartbeat_dbgfs.h" #include "adf_pm_dbgfs.h" +#include "adf_tl_debugfs.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -66,6 +67,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) adf_heartbeat_dbgfs_add(accel_dev); adf_pm_dbgfs_add(accel_dev); adf_cnv_dbgfs_add(accel_dev); + adf_tl_dbgfs_add(accel_dev); } } @@ -79,6 +81,7 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) return; if (!accel_dev->is_vf) { + adf_tl_dbgfs_rm(accel_dev); adf_cnv_dbgfs_rm(accel_dev); adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c new file mode 100644 index 000000000000..4efbe6bc651c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. */ +#include +#include + +#include "adf_gen4_tl.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4) + +#define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \ + ADF_TL_COUNTER("util_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen4)) + +#define ADF_GEN4_TL_SL_EXEC_COUNTER(_name) \ + ADF_TL_COUNTER("exec_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen4)) + +/* Device level counters. */ +static const struct adf_tl_dbg_counter dev_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt)), + /* Max read latency[ns]. */ + ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_max)), + /* Read latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)), + /* Max get to put latency[ns]. */ + ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_max)), + /* Get to put latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_out)), + /* Page request latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)), + /* Page translation latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)), + /* Maximum uTLB used. */ + ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_max_tlb_used)), +}; + +/* Slice utilization counters. */ +static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cpr), + /* Translator slice utilization. 
*/ + ADF_GEN4_TL_SL_UTIL_COUNTER(xlt), + /* Decompression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(dcpr), + /* PKE utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(pke), + /* Wireless Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wat), + /* Wireless Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wcp), + /* UCS slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ucs), + /* Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cph), + /* Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ath), +}; + +/* Slice execution counters. */ +static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(cpr), + /* Translator slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(xlt), + /* Decompression slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(dcpr), + /* PKE execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(pke), + /* Wireless Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wat), + /* Wireless Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wcp), + /* UCS slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ucs), + /* Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(cph), + /* Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ath), +}; + +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ + tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ; + tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ; + tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS; + tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF; + tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE; + tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES; + + tl_data->dev_counters = dev_counters; + tl_data->num_dev_counters = ARRAY_SIZE(dev_counters); + tl_data->sl_util_counters = sl_util_counters; + tl_data->sl_exec_counters = sl_exec_counters; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h new file mode 100644 index 000000000000..feb2eecf24cf --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_GEN4_TL_H +#define ADF_GEN4_TL_H + +#include +#include + +struct adf_tl_hw_data; + +/* Computation constants. */ +#define ADF_GEN4_CPP_NS_PER_CYCLE 2 +#define ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES 64 + +/* Maximum aggregation time. Value in milliseconds. */ +#define ADF_GEN4_TL_MAX_AGGR_TIME_MS 4000 +/* Num of buffers to store historic values. */ +#define ADF_GEN4_TL_NUM_HIST_BUFFS \ + (ADF_GEN4_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS) + +/* Max number of HW resources of one type. */ +#define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24 + +/** + * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW. + * @reg_tm_slice_exec_cnt: Slice execution count. + * @reg_tm_slice_util: Slice utilization. + */ +struct adf_gen4_tl_slice_data_regs { + __u32 reg_tm_slice_exec_cnt; + __u32 reg_tm_slice_util; +}; + +#define ADF_GEN4_TL_SLICE_REG_SZ sizeof(struct adf_gen4_tl_slice_data_regs) + +/** + * struct adf_gen4_tl_device_data_regs - This structure stores device telemetry + * counter values as are being populated periodically by device. 
+ * @reg_tl_rd_lat_acc: read latency accumulator + * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator + * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator + * @reg_tl_re_acc: accumulated ring empty time + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_rd_lat_max: maximum logged read latency + * @reg_tl_rd_cmpl_cnt: read requests completed count + * @reg_tl_gp_lat_max: maximum logged get to put latency + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_page_req_cnt: DevTLB page requests count + * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count + * @reg_tl_at_max_tlb_used: maximum uTLB used + * @reg_tl_re_cnt: ring empty time samples count + * @reserved: reserved + * @ath_slices: array of Authentication slices utilization registers + * @cph_slices: array of Cipher slices utilization registers + * @cpr_slices: array of Compression slices utilization registers + * @xlt_slices: array of Translator slices utilization registers + * @dcpr_slices: array of Decompression slices utilization registers + * @pke_slices: array of PKE slices utilization registers + * @ucs_slices: array of UCS slices utilization registers + * @wat_slices: array of Wireless Authentication slices utilization registers + * @wcp_slices: array of Wireless Cipher slices utilization registers + */ +struct adf_gen4_tl_device_data_regs { + __u64 reg_tl_rd_lat_acc; + __u64 reg_tl_gp_lat_acc; + __u64 reg_tl_at_page_req_lat_acc; + __u64 reg_tl_at_trans_lat_acc; + __u64 reg_tl_re_acc; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_rd_lat_max; + __u32 reg_tl_rd_cmpl_cnt; + __u32 reg_tl_gp_lat_max; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_page_req_cnt; + __u32 reg_tl_at_trans_lat_cnt; + __u32 reg_tl_at_max_tlb_used; + __u32 reg_tl_re_cnt; + __u32 reserved; + struct adf_gen4_tl_slice_data_regs ath_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cph_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs xlt_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs dcpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs pke_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs ucs_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs wat_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; +}; + +/** + * struct adf_gen4_tl_layout - This structure represents entire telemetry + * counters data: Device + 4 Ring Pairs as are being populated periodically + * by device. 
+ * @tl_device_data_regs: structure of device telemetry registers + * @reserved1: reserved + * @reg_tl_msg_cnt: telemetry messages counter + * @reserved: reserved + */ +struct adf_gen4_tl_layout { + struct adf_gen4_tl_device_data_regs tl_device_data_regs; + __u32 reserved1[14]; + __u32 reg_tl_msg_cnt; + __u32 reserved; +}; + +#define ADF_GEN4_TL_LAYOUT_SZ sizeof(struct adf_gen4_tl_layout) +#define ADF_GEN4_TL_MSG_CNT_OFF offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt) + +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data); +#else +static inline void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_GEN4_TL_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 81c39f3d07e1..f43ae9111553 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -11,6 +11,7 @@ #include "adf_heartbeat.h" #include "adf_rl.h" #include "adf_sysfs_ras_counters.h" +#include "adf_telemetry.h" static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); @@ -142,6 +143,10 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) if (ret && ret != -EOPNOTSUPP) return ret; + ret = adf_tl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + /* * Subservice initialisation is divided into two stages: init and start. * This is to facilitate any ordering dependencies between services @@ -220,6 +225,10 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) if (ret && ret != -EOPNOTSUPP) return ret; + ret = adf_tl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { dev_err(&GET_DEV(accel_dev), @@ -279,6 +288,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) return; + adf_tl_stop(accel_dev); adf_rl_stop(accel_dev); adf_dbgfs_rm(accel_dev); adf_sysfs_stop_ras(accel_dev); @@ -374,6 +384,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) adf_heartbeat_shutdown(accel_dev); + adf_tl_shutdown(accel_dev); + hw_data->disable_iov(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c new file mode 100644 index 000000000000..05c476d58895 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. 
*/ +#define dev_fmt(fmt) "Telemetry: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_telemetry.h" + +#define TL_IS_ZERO(input) ((input) == 0) + +static bool is_tl_supported(struct adf_accel_dev *accel_dev) +{ + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + + return fw_caps & TL_CAPABILITY_BIT; +} + +static int validate_tl_data(struct adf_tl_hw_data *tl_data) +{ + if (!tl_data->dev_counters || + TL_IS_ZERO(tl_data->num_dev_counters) || + !tl_data->sl_util_counters || + !tl_data->sl_exec_counters) + return -EOPNOTSUPP; + + return 0; +} + +static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + struct adf_telemetry *telemetry; + int node = dev_to_node(dev); + void *tl_data_regs; + unsigned int i; + + telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node); + if (!telemetry) + return -ENOMEM; + + telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff, + sizeof(*telemetry->regs_hist_buff), + GFP_KERNEL); + if (!telemetry->regs_hist_buff) + goto err_free_tl; + + telemetry->regs_data = dma_alloc_coherent(dev, regs_sz, + &telemetry->regs_data_p, + GFP_KERNEL); + if (!telemetry->regs_data) + goto err_free_regs_hist_buff; + + for (i = 0; i < tl_data->num_hbuff; i++) { + tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node); + if (!tl_data_regs) + goto err_free_dma; + + telemetry->regs_hist_buff[i] = tl_data_regs; + } + + accel_dev->telemetry = telemetry; + + return 0; + +err_free_dma: + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + while (i--) + kfree(telemetry->regs_hist_buff[i]); + +err_free_regs_hist_buff: + kfree(telemetry->regs_hist_buff); +err_free_tl: + kfree(telemetry); + + return -ENOMEM; +} + +static void adf_tl_free_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + unsigned int i; + + for (i = 0; i < tl_data->num_hbuff; i++) + kfree(telemetry->regs_hist_buff[i]); + + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + kfree(telemetry->regs_hist_buff); + kfree(telemetry); + accel_dev->telemetry = NULL; +} + +static unsigned long get_next_timeout(void) +{ + return msecs_to_jiffies(ADF_TL_TIMER_INT_MS); +} + +static void snapshot_regs(struct adf_telemetry *telemetry, size_t size) +{ + void *dst = telemetry->regs_hist_buff[telemetry->hb_num]; + void *src = telemetry->regs_data; + + memcpy(dst, src, size); +} + +static void tl_work_handler(struct work_struct *work) +{ + struct delayed_work *delayed_work; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + u32 msg_cnt, old_msg_cnt; + size_t layout_sz; + u32 *regs_data; + size_t id; + + delayed_work = to_delayed_work(work); + telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx); + tl_data = &GET_TL_DATA(telemetry->accel_dev); + regs_data = telemetry->regs_data; + + id = tl_data->msg_cnt_off / sizeof(*regs_data); + layout_sz = tl_data->layout_sz; + + if (!atomic_read(&telemetry->state)) { + cancel_delayed_work_sync(&telemetry->work_ctx); + return; + } + + msg_cnt = regs_data[id]; + old_msg_cnt = msg_cnt; + 
if (msg_cnt == telemetry->msg_cnt) + goto out; + + mutex_lock(&telemetry->regs_hist_lock); + + snapshot_regs(telemetry, layout_sz); + + /* Check if data changed while updating it */ + msg_cnt = regs_data[id]; + if (old_msg_cnt != msg_cnt) + snapshot_regs(telemetry, layout_sz); + + telemetry->msg_cnt = msg_cnt; + telemetry->hb_num++; + telemetry->hb_num %= telemetry->hbuffs; + + mutex_unlock(&telemetry->regs_hist_lock); + +out: + adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); +} + +int adf_tl_halt(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + int ret; + + cancel_delayed_work_sync(&telemetry->work_ctx); + atomic_set(&telemetry->state, 0); + + ret = adf_send_admin_tl_stop(accel_dev); + if (ret) + dev_err(dev, "failed to stop telemetry\n"); + + return ret; +} + +int adf_tl_run(struct adf_accel_dev *accel_dev, int state) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t layout_sz = tl_data->layout_sz; + int ret; + + ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p, + layout_sz, NULL, &telemetry->slice_cnt); + if (ret) { + dev_err(dev, "failed to start telemetry\n"); + return ret; + } + + telemetry->hbuffs = state; + atomic_set(&telemetry->state, state); + + adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); + + return 0; +} + +int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct device *dev = &GET_DEV(accel_dev); + struct adf_telemetry *telemetry; + int ret; + + ret = validate_tl_data(tl_data); + if (ret) + return ret; + + ret = adf_tl_alloc_mem(accel_dev); + if (ret) { + dev_err(dev, "failed to initialize: %d\n", ret); + return ret; + } + + telemetry = accel_dev->telemetry; + telemetry->accel_dev = accel_dev; + + mutex_init(&telemetry->wr_lock); + mutex_init(&telemetry->regs_hist_lock); + INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler); + + return 0; +} + +int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + struct device *dev = &GET_DEV(accel_dev); + + if (!accel_dev->telemetry) + return -EOPNOTSUPP; + + if (!is_tl_supported(accel_dev)) { + dev_info(dev, "feature not supported by FW\n"); + adf_tl_free_mem(accel_dev); + return -EOPNOTSUPP; + } + + return 0; +} + +void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + if (atomic_read(&accel_dev->telemetry->state)) + adf_tl_halt(accel_dev); +} + +void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + adf_tl_free_mem(accel_dev); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h new file mode 100644 index 000000000000..08de17621467 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_TELEMETRY_H +#define ADF_TELEMETRY_H + +#include +#include +#include +#include + +#include "icp_qat_fw_init_admin.h" + +struct adf_accel_dev; +struct adf_tl_dbg_counter; +struct dentry; + +#define ADF_TL_SL_CNT_COUNT \ + (sizeof(struct icp_qat_fw_init_admin_slice_cnt) / sizeof(__u8)) + +#define TL_CAPABILITY_BIT BIT(1) +/* Interval within device writes data to DMA region. Value in milliseconds. 
*/ +#define ADF_TL_DATA_WR_INTERVAL_MS 1000 +/* Interval within timer interrupt should be handled. Value in milliseconds. */ +#define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2) + +struct adf_tl_hw_data { + size_t layout_sz; + size_t slice_reg_sz; + size_t msg_cnt_off; + const struct adf_tl_dbg_counter *dev_counters; + const struct adf_tl_dbg_counter *sl_util_counters; + const struct adf_tl_dbg_counter *sl_exec_counters; + u8 num_hbuff; + u8 cpp_ns_per_cycle; + u8 bw_units_to_bytes; + u8 num_dev_counters; +}; + +struct adf_telemetry { + struct adf_accel_dev *accel_dev; + atomic_t state; + u32 hbuffs; + int hb_num; + u32 msg_cnt; + dma_addr_t regs_data_p; /* bus address for DMA mapping */ + void *regs_data; /* virtual address for DMA mapping */ + /** + * @regs_hist_buff: array of pointers to copies of the last @hbuffs + * values of @regs_data + */ + void **regs_hist_buff; + struct dentry *dbg_dir; + /** + * @regs_hist_lock: protects from race conditions between write and read + * to the copies referenced by @regs_hist_buff + */ + struct mutex regs_hist_lock; + /** + * @wr_lock: protects from concurrent writes to debugfs telemetry files + */ + struct mutex wr_lock; + struct delayed_work work_ctx; + struct icp_qat_fw_init_admin_slice_cnt slice_cnt; +}; + +#ifdef CONFIG_DEBUG_FS +int adf_tl_init(struct adf_accel_dev *accel_dev); +int adf_tl_start(struct adf_accel_dev *accel_dev); +void adf_tl_stop(struct adf_accel_dev *accel_dev); +void adf_tl_shutdown(struct adf_accel_dev *accel_dev); +int adf_tl_run(struct adf_accel_dev *accel_dev, int state); +int adf_tl_halt(struct adf_accel_dev *accel_dev); +#else +static inline int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_TELEMETRY_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c new file mode 100644 index 000000000000..accb46d6ea3c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. 
*/ +#define dev_fmt(fmt) "Telemetry debugfs: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define TL_VALUE_MIN_PADDING 20 +#define TL_KEY_MIN_PADDING 23 + +static int tl_collect_values_u32(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u32 *regs_hist_buff; + u32 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +static int tl_collect_values_u64(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u64 *regs_hist_buff; + u64 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +/** + * avg_array() - Return average of values within an array. + * @array: Array of values. + * @len: Number of elements. + * + * This algorithm computes average of an array without running into overflow. + * + * Return: average of values. + */ +#define avg_array(array, len) ( \ +{ \ + typeof(&(array)[0]) _array = (array); \ + __unqual_scalar_typeof(_array[0]) _x = 0; \ + __unqual_scalar_typeof(_array[0]) _y = 0; \ + __unqual_scalar_typeof(_array[0]) _a, _b; \ + typeof(len) _len = (len); \ + size_t _i; \ + \ + for (_i = 0; _i < _len; _i++) { \ + _a = _array[_i]; \ + _b = do_div(_a, _len); \ + _x += _a; \ + if (_y >= _len - _b) { \ + _x++; \ + _y -= _len - _b; \ + } else { \ + _y += _b; \ + } \ + } \ + do_div(_y, _len); \ + (_x + _y); \ +}) + +/* Calculation function for simple counter. */ +static int tl_calc_count(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert CPP bus cycles to ns. 
*/ +static int tl_cycles_to_ns(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + int ret; + + ret = tl_calc_count(telemetry, ctr, vals); + if (ret) + return ret; + + vals->curr *= cpp_ns_per_cycle; + vals->min *= cpp_ns_per_cycle; + vals->max *= cpp_ns_per_cycle; + vals->avg *= cpp_ns_per_cycle; + + return 0; +} + +/* + * Compute latency cumulative average with division of accumulated value + * by sample count. Returned value is in ns. + */ +static int tl_lat_acc_avg(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + u8 num_hbuff = tl_data->num_hbuff; + int sample_cnt, i; + u64 *hist_vals; + u64 *hist_cnt; + int ret = 0; + + hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL); + if (!hist_cnt) { + ret = -ENOMEM; + goto out_free_hist_vals; + } + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_cnt; + + tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt); + + for (i = 0; i < sample_cnt; i++) { + /* Avoid division by 0 if count is 0. */ + if (hist_cnt[i]) + hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle, + hist_cnt[i]); + else + hist_vals[i] = 0; + } + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_cnt: + kfree(hist_cnt); +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert HW raw bandwidth units to Mbps. 
*/ +static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE; + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA); + vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +static void tl_seq_printf_counter(struct adf_telemetry *telemetry, + struct seq_file *s, const char *name, + struct adf_tl_dbg_aggr_values *vals) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr); + if (atomic_read(&telemetry->state) > 1) { + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg); + } + seq_puts(s, "\n"); +} + +static int tl_calc_and_print_counter(struct adf_telemetry *telemetry, + struct seq_file *s, + const struct adf_tl_dbg_counter *ctr, + const char *name) +{ + const char *counter_name = name ? name : ctr->name; + enum adf_tl_counter_type type = ctr->type; + struct adf_tl_dbg_aggr_values vals; + int ret; + + switch (type) { + case ADF_TL_SIMPLE_COUNT: + ret = tl_calc_count(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS: + ret = tl_cycles_to_ns(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS_AVG: + ret = tl_lat_acc_avg(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_MBPS: + ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals); + break; + default: + return -EINVAL; + } + + if (ret) + return ret; + + tl_seq_printf_counter(telemetry, s, counter_name, &vals); + + return 0; +} + +static int tl_print_sl_counter(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct seq_file *s, u8 cnt_id) +{ + size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz; + struct adf_tl_dbg_counter slice_ctr; + size_t offset_inc = cnt_id * sl_regs_sz; + char cnt_name[MAX_COUNT_NAME_SIZE]; + + snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id); + slice_ctr = *ctr; + slice_ctr.offset1 += offset_inc; + + return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name); +} + +static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev, + struct seq_file *s, u8 cnt_type, u8 cnt_id) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *sl_tl_util_counters; + const struct adf_tl_dbg_counter *sl_tl_exec_counters; + const struct adf_tl_dbg_counter *ctr; + int ret; + + sl_tl_util_counters = tl_data->sl_util_counters; + sl_tl_exec_counters = tl_data->sl_exec_counters; + + ctr = &sl_tl_util_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid slice utilization counter type\n"); + 
return ret; + } + + ctr = &sl_tl_exec_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid slice execution counter type\n"); + return ret; + } + + return 0; +} + +static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG); + seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt); +} + +static int tl_print_dev_data(struct adf_accel_dev *accel_dev, + struct seq_file *s) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *dev_tl_counters; + u8 num_dev_counters = tl_data->num_dev_counters; + u8 *sl_cnt = (u8 *)&telemetry->slice_cnt; + const struct adf_tl_dbg_counter *ctr; + unsigned int i; + int ret; + u8 j; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + dev_tl_counters = tl_data->dev_counters; + + tl_print_msg_cnt(s, telemetry->msg_cnt); + + /* Print device level telemetry. */ + for (i = 0; i < num_dev_counters; i++) { + ctr = &dev_tl_counters[i]; + ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid counter type\n"); + return ret; + } + } + + /* Print per slice telemetry. */ + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + for (j = 0; j < sl_cnt[i]; j++) { + ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j); + if (ret) + return ret; + } + } + + return 0; +} + +static int tl_dev_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + return tl_print_dev_data(accel_dev, s); +} +DEFINE_SHOW_ATTRIBUTE(tl_dev_data); + +static int tl_control_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state)); + + return 0; +} + +static ssize_t tl_control_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + struct device *dev; + u32 input; + int ret; + + accel_dev = seq_f->private; + if (!accel_dev) + return -EINVAL; + + tl_data = &GET_TL_DATA(accel_dev); + telemetry = accel_dev->telemetry; + dev = &GET_DEV(accel_dev); + + mutex_lock(&telemetry->wr_lock); + + ret = kstrtou32_from_user(userbuf, count, 10, &input); + if (ret) + goto unlock_and_exit; + + if (input > tl_data->num_hbuff) { + dev_info(dev, "invalid control input\n"); + ret = -EINVAL; + goto unlock_and_exit; + } + + /* If input is 0, just stop telemetry. */ + if (!input) { + ret = adf_tl_halt(accel_dev); + if (!ret) + ret = count; + + goto unlock_and_exit; + } + + /* If TL is already enabled, stop it. 
*/ + if (atomic_read(&telemetry->state)) { + dev_info(dev, "already enabled, restarting.\n"); + ret = adf_tl_halt(accel_dev); + if (ret) + goto unlock_and_exit; + } + + ret = adf_tl_run(accel_dev, input); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_control); + +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *parent = accel_dev->debugfs_dir; + struct dentry *dir; + + if (!telemetry) + return; + + dir = debugfs_create_dir("telemetry", parent); + accel_dev->telemetry->dbg_dir = dir; + debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops); + debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops); +} + +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *dbg_dir; + + if (!telemetry) + return; + + dbg_dir = telemetry->dbg_dir; + + debugfs_remove_recursive(dbg_dir); + + if (atomic_read(&telemetry->state)) + adf_tl_halt(accel_dev); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h new file mode 100644 index 000000000000..b2e8f1912c16 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_TL_DEBUGFS_H +#define ADF_TL_DEBUGFS_H + +#include + +struct adf_accel_dev; + +#define MAX_COUNT_NAME_SIZE 32 +#define SNAPSHOT_CNT_MSG "sample_cnt" +#define RP_NUM_INDEX "rp_num" +#define PCI_TRANS_CNT_NAME "pci_trans_cnt" +#define MAX_RD_LAT_NAME "max_rd_lat" +#define RD_LAT_ACC_NAME "rd_lat_acc_avg" +#define MAX_LAT_NAME "max_gp_lat" +#define LAT_ACC_NAME "gp_lat_acc_avg" +#define BW_IN_NAME "bw_in" +#define BW_OUT_NAME "bw_out" +#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg" +#define AT_TRANS_LAT_NAME "at_trans_lat_avg" +#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used" +#define AT_GLOB_DTLB_HIT_NAME "at_glob_devtlb_hit" +#define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss" +#define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit" +#define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss" + +#define ADF_TL_DATA_REG_OFF(reg, qat_gen) \ + offsetof(struct adf_##qat_gen##_tl_layout, reg) + +#define ADF_TL_DEV_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_device_data_regs, qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_device_data_regs, reg)) + +#define ADF_TL_SLICE_REG_OFF(slice, reg, qat_gen) \ + (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg)) + +/** + * enum adf_tl_counter_type - telemetry counter types + * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter + * @ADF_TL_SIMPLE_COUNT: simple counter + * @ADF_TL_COUNTER_NS: latency counter, value in ns + * @ADF_TL_COUNTER_NS_AVG: accumulated average latency counter, value in ns + * @ADF_TL_COUNTER_MBPS: bandwidth, value in Mbps + */ +enum adf_tl_counter_type { + ADF_TL_COUNTER_UNSUPPORTED, + ADF_TL_SIMPLE_COUNT, + ADF_TL_COUNTER_NS, + ADF_TL_COUNTER_NS_AVG, + ADF_TL_COUNTER_MBPS, +}; + +/** + * struct adf_tl_dbg_counter - telemetry counter definition + * @name: name of the counter as printed in the report + * @type: type of the counter + * @offset1: offset of 1st register + * @offset2: offset of 2nd optional register + */ +struct adf_tl_dbg_counter { + const char
*name; + enum adf_tl_counter_type type; + size_t offset1; + size_t offset2; +}; + +#define ADF_TL_COUNTER(_name, _type, _offset) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset \ +} + +#define ADF_TL_COUNTER_LATENCY(_name, _type, _offset1, _offset2) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset1, \ + .offset2 = _offset2 \ +} + +/* Telemetry counter aggregated values. */ +struct adf_tl_dbg_aggr_values { + u64 curr; + u64 min; + u64 max; + u64 avg; +}; + +/** + * adf_tl_dbgfs_add() - Add telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Creates telemetry's debug fs folder and attributes in QAT debug fs root. + */ +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev); + +/** + * adf_tl_dbgfs_rm() - Remove telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Removes telemetry's debug fs folder and attributes from QAT debug fs root. + */ +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_TL_DEBUGFS_H */ -- Gitee From 4d33fa0b24a6e70942341a06b688cbbbb05d006b Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:08 +0100 Subject: [PATCH 757/953] crypto: qat - add support for ring pair level telemetry ANBZ: #8589 commit eb52707716e3f2cdf16f4e95e3a800cca190504f upstream. Intel-SIG: commit eb52707716e3 crypto: qat - add support for ring pair level telemetry Backport to support Intel QAT in-tree driver Expose through debugfs ring pair telemetry data for QAT GEN4 devices. This allows gathering metrics about the PCIe channel and device TLB for a selected ring pair. It is possible to monitor a maximum of 4 ring pairs at a time per device. For details, refer to debugfs-driver-qat_telemetry in Documentation/ABI. This patch is based on earlier work done by Wojciech Ziemba. Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../ABI/testing/debugfs-driver-qat_telemetry | 125 +++++++++++ .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 1 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 1 + .../crypto/intel/qat/qat_common/adf_gen4_tl.c | 35 +++ .../crypto/intel/qat/qat_common/adf_gen4_tl.h | 41 +++- .../intel/qat/qat_common/adf_telemetry.c | 23 +- .../intel/qat/qat_common/adf_telemetry.h | 7 + .../intel/qat/qat_common/adf_tl_debugfs.c | 208 ++++++++++++++++++ .../intel/qat/qat_common/adf_tl_debugfs.h | 11 + 11 files changed, 449 insertions(+), 5 deletions(-) diff --git a/Documentation/ABI/testing/debugfs-driver-qat_telemetry b/Documentation/ABI/testing/debugfs-driver-qat_telemetry index 24532365387c..eacee2072088 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat_telemetry +++ b/Documentation/ABI/testing/debugfs-driver-qat_telemetry @@ -101,3 +101,128 @@ Description: (RO) Reports device telemetry counters. attribute is not reported. This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat__/telemetry/rp__data +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Selects up to 4 Ring Pairs (RP) to monitor, one per file, + and reports telemetry counters related to each. + + Allowed values to write: + ======================== + * 0 to ````: + Ring pair to be monitored.
The value of ``num_rps`` can be + retrieved through ``/sys/bus/pci/devices//qat/num_rps``. + See Documentation/ABI/testing/sysfs-driver-qat. + + Reads report metrics about performance and utilization of + the selected RP: + + ======================= ======================================== + Field Description + ======================= ======================================== + sample_cnt number of acquisitions of telemetry data + from the device. Reads are performed + every 1000 ms + rp_num RP number associated with slot + service_type service associated to the RP + pci_trans_cnt number of PCIe partial transactions + gp_lat_acc_avg average get to put latency [ns] + bw_in PCIe, write bandwidth [Mbps] + bw_out PCIe, read bandwidth [Mbps] + at_glob_devtlb_hit Message descriptor DevTLB hit rate + at_glob_devtlb_miss Message descriptor DevTLB miss rate + tl_at_payld_devtlb_hit Payload DevTLB hit rate + tl_at_payld_devtlb_miss Payload DevTLB miss rate + ======================= ======================================== + + Example. + + Writing the value '32' to the file ``rp_C_data`` starts the + collection of telemetry metrics for ring pair 32:: + + echo 32 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data + + Once a ring pair is selected, statistics can be read accessing + the file:: + + cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data + + If ``control`` is set to 1, only the current values of the + counters are displayed:: + + + + If ``control`` is 2, 3 or 4, counters are displayed in the + following format:: + + + + + On QAT GEN4 devices there are 64 RPs on a PF, so the allowed + values are 0..63. This number is absolute to the device. + If Virtual Functions (VF) are used, the ring pair number can + be derived from the Bus, Device, Function of the VF: + + ============ ====== ====== ====== ====== + PCI BDF/VF RP0 RP1 RP2 RP3 + ============ ====== ====== ====== ====== + 0000:6b:0.1 RP 0 RP 1 RP 2 RP 3 + 0000:6b:0.2 RP 4 RP 5 RP 6 RP 7 + 0000:6b:0.3 RP 8 RP 9 RP 10 RP 11 + 0000:6b:0.4 RP 12 RP 13 RP 14 RP 15 + 0000:6b:0.5 RP 16 RP 17 RP 18 RP 19 + 0000:6b:0.6 RP 20 RP 21 RP 22 RP 23 + 0000:6b:0.7 RP 24 RP 25 RP 26 RP 27 + 0000:6b:1.0 RP 28 RP 29 RP 30 RP 31 + 0000:6b:1.1 RP 32 RP 33 RP 34 RP 35 + 0000:6b:1.2 RP 36 RP 37 RP 38 RP 39 + 0000:6b:1.3 RP 40 RP 41 RP 42 RP 43 + 0000:6b:1.4 RP 44 RP 45 RP 46 RP 47 + 0000:6b:1.5 RP 48 RP 49 RP 50 RP 51 + 0000:6b:1.6 RP 52 RP 53 RP 54 RP 55 + 0000:6b:1.7 RP 56 RP 57 RP 58 RP 59 + 0000:6b:2.0 RP 60 RP 61 RP 62 RP 63 + ============ ====== ====== ====== ====== + + The mapping is only valid for the BDFs of VFs on the host. + + + The service provided on a ring-pair varies depending on the + configuration. The configuration for a given device can be + queried and set using ``cfg_services``. + See Documentation/ABI/testing/sysfs-driver-qat for details. + + The following table reports how ring pairs are mapped to VFs + on the PF 0000:6b:0.0 configured for `sym;asym` or `asym;sym`: + + =========== ============ =========== ============ =========== + PCI BDF/VF RP0/service RP1/service RP2/service RP3/service + =========== ============ =========== ============ =========== + 0000:6b:0.1 RP 0 asym RP 1 sym RP 2 asym RP 3 sym + 0000:6b:0.2 RP 4 asym RP 5 sym RP 6 asym RP 7 sym + 0000:6b:0.3 RP 8 asym RP 9 sym RP10 asym RP11 sym + ... ... ... ... ... + =========== ============ =========== ============ =========== + + All VFs follow the same pattern. 
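+
+		An illustrative note (not part of the ABI): with the VFs of
+		the PF enumerated from 0 in BDF order, the absolute ring
+		pair numbers in the table above follow::
+
+		    rp = (vf_index * 4) + bank
+
+		where ``bank`` is the bank number (0 to 3) within the VF.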
+ + + The following table reports how ring pairs are mapped to VFs on + the PF 0000:6b:0.0 configured for `dc`: + + =========== ============ =========== ============ =========== + PCI BDF/VF RP0/service RP1/service RP2/service RP3/service + =========== ============ =========== ============ =========== + 0000:6b:0.1 RP 0 dc RP 1 dc RP 2 dc RP 3 dc + 0000:6b:0.2 RP 4 dc RP 5 dc RP 6 dc RP 7 dc + 0000:6b:0.3 RP 8 dc RP 9 dc RP10 dc RP11 dc + ... ... ... ... ... + =========== ============ =========== ============ =========== + + The mapping of a RP to a service can be retrieved using + ``rp2srv`` from sysfs. + See Documentation/ABI/testing/sysfs-driver-qat for details. + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index a7730d8057d6..5edce27db864 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -520,6 +520,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; hw_data->fw_name = ADF_420XX_FW; hw_data->fw_mmp_name = ADF_420XX_MMP; hw_data->uof_get_name = uof_get_name_420xx; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index e988a04a34ef..efd1c964182f 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -429,6 +429,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; switch (dev_id) { case ADF_402XX_PCI_DEVICE_ID: hw_data->fw_name = ADF_402XX_FW; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index b274ebc799c9..db671879b1f8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -278,6 +278,7 @@ struct adf_hw_device_data { u8 num_logical_accel; u8 num_engines; u32 num_hb_ctrs; + u8 num_rps; }; /* CSR write macro */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 051ad20581a6..46a782ba456f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -36,6 +36,7 @@ #define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) /* Bank and ring configuration */ +#define ADF_GEN4_MAX_RPS 64 #define ADF_GEN4_NUM_RINGS_PER_BANK 2 #define ADF_GEN4_NUM_BANKS_PER_VF 4 #define ADF_GEN4_ETR_MAX_BANKS 64 diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c index 4efbe6bc651c..7fc7a77f6aed 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -9,6 +9,8 @@ #define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4) +#define ADF_GEN4_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen4) + #define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \ ADF_TL_COUNTER("util_" #_name, \ ADF_TL_SIMPLE_COUNT, \ @@ -101,11 +103,42 @@ static 
const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = { ADF_GEN4_TL_SL_EXEC_COUNTER(ath), }; +/* Ring pair counters. */ +static const struct adf_tl_dbg_counter rp_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_pci_trans_cnt)), + /* Get to put latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_RP_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_out)), + /* Message descriptor DevTLB hit rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)), + /* Message descriptor DevTLB miss rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)), + /* Payload DevTLB hit rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)), + /* Payload DevTLB miss rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)), +}; + void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) { tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ; tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ; + tl_data->rp_reg_sz = ADF_GEN4_TL_RP_REG_SZ; tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS; + tl_data->max_rp = ADF_GEN4_TL_MAX_RP_NUM; tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF; tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE; tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES; @@ -114,5 +147,7 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) tl_data->num_dev_counters = ARRAY_SIZE(dev_counters); tl_data->sl_util_counters = sl_util_counters; tl_data->sl_exec_counters = sl_exec_counters; + tl_data->rp_counters = rp_counters; + tl_data->num_rp_counters = ARRAY_SIZE(rp_counters); } EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h index feb2eecf24cf..32df4163beb9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h @@ -21,6 +21,9 @@ struct adf_tl_hw_data; /* Max number of HW resources of one type. */ #define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24 +/* Max number of simultaneously monitored ring pairs. */ +#define ADF_GEN4_TL_MAX_RP_NUM 4 + /** * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW. * @reg_tm_slice_exec_cnt: Slice execution count. @@ -92,18 +95,52 @@ struct adf_gen4_tl_device_data_regs { struct adf_gen4_tl_slice_data_regs wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; }; +/** + * struct adf_gen4_tl_ring_pair_data_regs - This structure stores Ring Pair + * telemetry counter values as are being populated periodically by device. 
+ * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reserved: reserved + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate + * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate + * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate + * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate + * @reg_tl_re_cnt: ring empty time samples count + * @reserved1: reserved + */ +struct adf_gen4_tl_ring_pair_data_regs { + __u64 reg_tl_gp_lat_acc; + __u64 reserved; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_glob_devtlb_hit; + __u32 reg_tl_at_glob_devtlb_miss; + __u32 reg_tl_at_payld_devtlb_hit; + __u32 reg_tl_at_payld_devtlb_miss; + __u32 reg_tl_re_cnt; + __u32 reserved1; +}; + +#define ADF_GEN4_TL_RP_REG_SZ sizeof(struct adf_gen4_tl_ring_pair_data_regs) + /** * struct adf_gen4_tl_layout - This structure represents entire telemetry * counters data: Device + 4 Ring Pairs as are being populated periodically * by device. * @tl_device_data_regs: structure of device telemetry registers - * @reserved1: reserved + * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers * @reg_tl_msg_cnt: telemetry messages counter * @reserved: reserved */ struct adf_gen4_tl_layout { struct adf_gen4_tl_device_data_regs tl_device_data_regs; - __u32 reserved1[14]; + struct adf_gen4_tl_ring_pair_data_regs + tl_ring_pairs_data_regs[ADF_GEN4_TL_MAX_RP_NUM]; __u32 reg_tl_msg_cnt; __u32 reserved; }; diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c index 05c476d58895..2ff714d11bd2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -33,7 +33,9 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data) if (!tl_data->dev_counters || TL_IS_ZERO(tl_data->num_dev_counters) || !tl_data->sl_util_counters || - !tl_data->sl_exec_counters) + !tl_data->sl_exec_counters || + !tl_data->rp_counters || + TL_IS_ZERO(tl_data->num_rp_counters)) return -EOPNOTSUPP; return 0; @@ -53,11 +55,17 @@ static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) if (!telemetry) return -ENOMEM; + telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp, + sizeof(*telemetry->rp_num_indexes), + GFP_KERNEL); + if (!telemetry->rp_num_indexes) + goto err_free_tl; + telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff, sizeof(*telemetry->regs_hist_buff), GFP_KERNEL); if (!telemetry->regs_hist_buff) - goto err_free_tl; + goto err_free_rp_indexes; telemetry->regs_data = dma_alloc_coherent(dev, regs_sz, &telemetry->regs_data_p, @@ -86,6 +94,8 @@ static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) err_free_regs_hist_buff: kfree(telemetry->regs_hist_buff); +err_free_rp_indexes: + kfree(telemetry->rp_num_indexes); err_free_tl: kfree(telemetry); @@ -107,6 +117,7 @@ static void adf_tl_free_mem(struct adf_accel_dev *accel_dev) telemetry->regs_data_p); kfree(telemetry->regs_hist_buff); + kfree(telemetry->rp_num_indexes); kfree(telemetry); accel_dev->telemetry = NULL; } @@ -196,7 +207,8 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state) int ret; ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p, - layout_sz, NULL, &telemetry->slice_cnt); + layout_sz, 
telemetry->rp_num_indexes, + &telemetry->slice_cnt); if (ret) { dev_err(dev, "failed to start telemetry\n"); return ret; @@ -213,8 +225,10 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state) int adf_tl_init(struct adf_accel_dev *accel_dev) { struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; struct device *dev = &GET_DEV(accel_dev); struct adf_telemetry *telemetry; + unsigned int i; int ret; ret = validate_tl_data(tl_data); @@ -234,6 +248,9 @@ int adf_tl_init(struct adf_accel_dev *accel_dev) mutex_init(&telemetry->regs_hist_lock); INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler); + for (i = 0; i < max_rp; i++) + telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED; + return 0; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h index 08de17621467..9be81cd3b886 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -23,17 +23,23 @@ struct dentry; /* Interval within timer interrupt should be handled. Value in milliseconds. */ #define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2) +#define ADF_TL_RP_REGS_DISABLED (0xff) + struct adf_tl_hw_data { size_t layout_sz; size_t slice_reg_sz; + size_t rp_reg_sz; size_t msg_cnt_off; const struct adf_tl_dbg_counter *dev_counters; const struct adf_tl_dbg_counter *sl_util_counters; const struct adf_tl_dbg_counter *sl_exec_counters; + const struct adf_tl_dbg_counter *rp_counters; u8 num_hbuff; u8 cpp_ns_per_cycle; u8 bw_units_to_bytes; u8 num_dev_counters; + u8 num_rp_counters; + u8 max_rp; }; struct adf_telemetry { @@ -50,6 +56,7 @@ struct adf_telemetry { */ void **regs_hist_buff; struct dentry *dbg_dir; + u8 *rp_num_indexes; /** * @regs_hist_lock: protects from race conditions between write and read * to the copies referenced by @regs_hist_buff diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c index accb46d6ea3c..c8241f5a0a26 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -14,11 +15,13 @@ #include #include "adf_accel_devices.h" +#include "adf_cfg_strings.h" #include "adf_telemetry.h" #include "adf_tl_debugfs.h" #define TL_VALUE_MIN_PADDING 20 #define TL_KEY_MIN_PADDING 23 +#define TL_RP_SRV_UNKNOWN "Unknown" static int tl_collect_values_u32(struct adf_telemetry *telemetry, size_t counter_offset, u64 *arr) @@ -470,11 +473,210 @@ static ssize_t tl_control_write(struct file *file, const char __user *userbuf, } DEFINE_SHOW_STORE_ATTRIBUTE(tl_control); +static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num) +{ + char alpha; + u8 index; + int ret; + + ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha); + if (ret != 1) + return -EINVAL; + + index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha); + *rp_id = index; + + return 0; +} + +static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev, + unsigned int new_rp_num, + unsigned int rp_regs_index) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + unsigned int i; + u8 curr_state; + int ret; + + if (new_rp_num >= hw_data->num_rps) { + dev_info(dev, "invalid Ring Pair number selected\n"); + return -EINVAL; + } + 
+ for (i = 0; i < hw_data->tl_data.max_rp; i++) { + if (telemetry->rp_num_indexes[i] == new_rp_num) { + dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i)); + return 0; + } + } + + dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + + curr_state = atomic_read(&telemetry->state); + + if (curr_state) { + ret = adf_tl_halt(accel_dev); + if (ret) + return ret; + + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + + ret = adf_tl_run(accel_dev, curr_state); + if (ret) + return ret; + } else { + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + } + + return 0; +} + +static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_idx) +{ + u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf; + enum adf_cfg_service_type svc; + + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE); + + svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf); + switch (svc) { + case COMP: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC); + break; + case SYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM); + break; + case ASYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM); + break; + default: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN); + break; + } +} + +static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_regs_index) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *rp_tl_counters; + u8 num_rp_counters = tl_data->num_rp_counters; + size_t rp_regs_sz = tl_data->rp_reg_sz; + struct adf_tl_dbg_counter ctr; + unsigned int i; + u8 rp_idx; + int ret; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + rp_tl_counters = tl_data->rp_counters; + rp_idx = telemetry->rp_num_indexes[rp_regs_index]; + + if (rp_idx == ADF_TL_RP_REGS_DISABLED) { + dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n", + ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + return -EPERM; + } + + tl_print_msg_cnt(s, telemetry->msg_cnt); + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX); + seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx); + tl_print_rp_srv(accel_dev, s, rp_idx); + + for (i = 0; i < num_rp_counters; i++) { + ctr = rp_tl_counters[i]; + ctr.offset1 += rp_regs_sz * rp_regs_index; + ctr.offset2 += rp_regs_sz * rp_regs_index; + ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "invalid RP counter type\n"); + return ret; + } + } + + return 0; +} + +static int tl_rp_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + u8 rp_regs_index; + u8 max_rp; + int ret; + + if (!accel_dev) + return -EINVAL; + + max_rp = GET_TL_DATA(accel_dev).max_rp; + ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + return ret; + } + + return tl_print_rp_data(accel_dev, s, rp_regs_index); +} + +static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + unsigned int new_rp_num; + u8 rp_regs_index; + u8 max_rp; + int ret; + + accel_dev = seq_f->private; + if 
(!accel_dev) + return -EINVAL; + + telemetry = accel_dev->telemetry; + max_rp = GET_TL_DATA(accel_dev).max_rp; + + mutex_lock(&telemetry->wr_lock); + + ret = get_rp_index_from_file(file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + goto unlock_and_exit; + } + + ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num); + if (ret) + goto unlock_and_exit; + + ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data); + void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) { struct adf_telemetry *telemetry = accel_dev->telemetry; struct dentry *parent = accel_dev->debugfs_dir; + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; + char name[ADF_TL_RP_REGS_FNAME_SIZE]; struct dentry *dir; + unsigned int i; if (!telemetry) return; @@ -483,6 +685,12 @@ void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) accel_dev->telemetry->dbg_dir = dir; debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops); debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops); + + for (i = 0; i < max_rp; i++) { + snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME, + ADF_TL_DBG_RP_ALPHA_INDEX(i)); + debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops); + } } void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev) diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h index b2e8f1912c16..11cc9eae19b3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h @@ -24,6 +24,13 @@ struct adf_accel_dev; #define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss" #define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit" #define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss" +#define RP_SERVICE_TYPE "service_type" + +#define ADF_TL_DBG_RP_ALPHA_INDEX(index) ((index) + 'A') +#define ADF_TL_DBG_RP_INDEX_ALPHA(alpha) ((alpha) - 'A') + +#define ADF_TL_RP_REGS_FNAME "rp_%c_data" +#define ADF_TL_RP_REGS_FNAME_SIZE 16 #define ADF_TL_DATA_REG_OFF(reg, qat_gen) \ offsetof(struct adf_##qat_gen##_tl_layout, reg) @@ -36,6 +43,10 @@ struct adf_accel_dev; (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \ offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg)) +#define ADF_TL_RP_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg)) + /** * enum adf_tl_counter_type - telemetry counter types * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter -- Gitee From 884ad92a428ca7b5f19b268ffd05c832f52c8fef Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 22 Dec 2023 14:15:35 +0100 Subject: [PATCH 758/953] crypto: qat - generate dynamically arbiter mappings ANBZ: #8589 commit 5da6a2d5353e0e234f12ccacaf6f50656cc33278 upstream. Intel-SIG: commit 5da6a2d5353e crypto: qat - generate dynamically arbiter mappings Backport to support Intel QAT in-tree driver The thread-to-arbiter mapping describes which arbiter can assign jobs to an acceleration engine thread. The existing mappings are functionally correct, but hardcoded and not optimized. Replace the static mappings with an algorithm that generates optimal mappings, based on the loaded configuration. The logic has been made common so that it can be shared between all QAT GEN4 devices. 
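A minimal sketch of the packing scheme the generator uses (illustrative
only; the identifiers come from the hunks below): each enabled AE thread
owns a 4-bit field of the per-engine 32-bit word, so an engine whose
threads 0 and 1 are served by arbiter mask 0x5 ends up with 0x55, the
same value as the static 420xx asym entries it replaces:

	/* Illustrative sketch, mirroring adf_gen4_init_thd2arb_map(). */
	unsigned long thds_mask = 0x3;	/* ENA_THD_MASK_ASYM: threads 0 and 1 */
	u16 arb_mask = 0x5;		/* rp_group_to_arb_mask[RP_GROUP_0] */
	u32 thd2arb_map_base = 0;
	unsigned int j;

	for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
		thd2arb_map_base |= arb_mask << (j * 4);	/* yields 0x55 */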
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 131 +++++++----------- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 110 ++++++++++----- .../intel/qat/qat_common/adf_accel_devices.h | 4 + .../intel/qat/qat_common/adf_gen4_hw_data.c | 90 ++++++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 12 ++ 5 files changed, 235 insertions(+), 112 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 5edce27db864..a87d29ae724f 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -25,6 +25,10 @@ #define ADF_AE_GROUP_3 GENMASK(15, 12) #define ADF_AE_GROUP_4 BIT(16) +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_SYM GENMASK(3, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) + static const char * const adf_420xx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, @@ -83,62 +87,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = { {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, }; -/* Worker thread to service arbiter mappings */ -static const u32 default_thrd_to_arb_map[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x0 -}; - -static const u32 thrd_to_arb_map_asym[ADF_420XX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x0 -}; - -static const u32 thrd_to_arb_map_sym[ADF_420XX_MAX_ACCELENGINES] = { - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0 -}; - -static const u32 thrd_to_arb_map_asym_dc[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, - 0x0 -}; - -static const u32 thrd_to_arb_map_sym_dc[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0 -}; - -static const u32 thrd_to_arb_map_dc[ADF_420XX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0 -}; - -static const u32 thrd_to_arb_map_dcc[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0 -}; static struct adf_hw_device_class adf_420xx_class = { .name = ADF_420XX_DEVICE_NAME, @@ -346,24 +294,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static const u32 
*adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { - switch (adf_get_service_enabled(accel_dev)) { - case SVC_ASYM: - return thrd_to_arb_map_asym; - case SVC_SYM: - return thrd_to_arb_map_sym; - case SVC_DC: - return thrd_to_arb_map_dc; - case SVC_DCC: - return thrd_to_arb_map_dcc; - case SVC_ASYM_DC: - case SVC_DC_ASYM: - return thrd_to_arb_map_asym_dc; - case SVC_DC_SYM: - case SVC_SYM_DC: - return thrd_to_arb_map_sym_dc; - default: - return default_thrd_to_arb_map; - } + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Generate of the thread to arbiter map failed"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; } static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) @@ -384,11 +319,47 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; } -enum adf_rp_groups { - RP_GROUP_0 = 0, - RP_GROUP_1, - RP_GROUP_COUNT -}; +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + case ADF_AE_GROUP_3: + return RP_GROUP_1; + case ADF_AE_GROUP_2: + if (get_fw_config(accel_dev) == adf_fw_cy_config) + return RP_GROUP_0; + else + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} + +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) { @@ -526,6 +497,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->uof_get_name = uof_get_name_420xx; hw_data->uof_get_num_objs = uof_get_num_objs; hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->get_rp_group = get_rp_group; + hw_data->get_ena_thd_mask = get_ena_thd_mask; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index efd1c964182f..af67fd7e8caf 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -23,6 +23,11 @@ #define ADF_AE_GROUP_1 GENMASK(7, 4) #define ADF_AE_GROUP_2 BIT(8) +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0) +#define ENA_THD_MASK_SYM GENMASK(6, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) + static const char * const adf_4xxx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ, @@ -86,25 +91,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config)) static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config)); static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); -/* Worker thread to service arbiter mappings */ -static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { - 0x5555555, 0x5555555, 0x5555555, 0x5555555, - 
0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, - 0x0 -}; - -static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x0 -}; - -static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0 -}; - static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, @@ -220,14 +206,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { - switch (adf_get_service_enabled(accel_dev)) { - case SVC_DC: - return thrd_to_arb_map_dc; - case SVC_DCC: - return thrd_to_arb_map_dcc; - default: - return default_thrd_to_arb_map; - } + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Generate of the thread to arbiter map failed"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; } static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) @@ -278,11 +261,64 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev } } -enum adf_rp_groups { - RP_GROUP_0 = 0, - RP_GROUP_1, - RP_GROUP_COUNT -}; +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} + +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} + +static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM_401XX; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) { @@ -436,14 +472,22 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; break; - + case ADF_401XX_PCI_DEVICE_ID: + hw_data->fw_name = ADF_4XXX_FW; + hw_data->fw_mmp_name = ADF_4XXX_MMP; + hw_data->uof_get_name = uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx; + break; default: hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->uof_get_name = uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + break; } hw_data->uof_get_num_objs = uof_get_num_objs; hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->get_rp_group = get_rp_group; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = 
adf_gen4_set_ssm_wdtimer; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index db671879b1f8..a16c7e6edc65 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -13,6 +13,7 @@ #include "adf_rl.h" #include "adf_telemetry.h" #include "adf_pfvf_msg.h" +#include "icp_qat_hw.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" @@ -248,6 +249,8 @@ struct adf_hw_device_data { const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); + int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); + u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; @@ -270,6 +273,7 @@ struct adf_hw_device_data { u32 admin_ae_mask; u16 tx_rings_mask; u16 ring_to_svc_map; + u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER]; u8 tx_rx_gap; u8 num_banks; u16 num_banks_per_vf; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index ee08b34876dd..9985683056d5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -2,6 +2,7 @@ /* Copyright(c) 2020 Intel Corporation */ #include #include "adf_accel_devices.h" +#include "adf_cfg_services.h" #include "adf_common_drv.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" @@ -340,3 +341,92 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) return ret; } EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset); + +static const u32 thrd_to_arb_map_dcc[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u16 rp_group_to_arb_mask[] = { + [RP_GROUP_0] = 0x5, + [RP_GROUP_1] = 0xA, +}; + +static bool is_single_service(int service_id) +{ + switch (service_id) { + case SVC_DC: + case SVC_SYM: + case SVC_ASYM: + return true; + case SVC_CY: + case SVC_CY2: + case SVC_DCC: + case SVC_ASYM_DC: + case SVC_DC_ASYM: + case SVC_SYM_DC: + case SVC_DC_SYM: + default: + return false; + } +} + +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u32 *thd2arb_map = hw_data->thd_to_arb_map; + unsigned int ae_cnt, worker_obj_cnt, i, j; + unsigned long ae_mask, thds_mask; + int srv_id, rp_group; + u32 thd2arb_map_base; + u16 arb_mask; + + if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask || + !hw_data->get_num_aes || !hw_data->uof_get_num_objs || + !hw_data->uof_get_ae_mask) + return -EFAULT; + + srv_id = adf_get_service_enabled(accel_dev); + if (srv_id < 0) + return srv_id; + + ae_cnt = hw_data->get_num_aes(hw_data); + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + + if (srv_id == SVC_DCC) { + memcpy(thd2arb_map, thrd_to_arb_map_dcc, + array_size(sizeof(*thd2arb_map), ae_cnt)); + return 0; + } + + for (i = 0; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = 
hw_data->get_rp_group(accel_dev, ae_mask); + thds_mask = hw_data->get_ena_thd_mask(accel_dev, i); + thd2arb_map_base = 0; + + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return -EINVAL; + + if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR) + return -EINVAL; + + if (is_single_service(srv_id)) + arb_mask = rp_group_to_arb_mask[RP_GROUP_0] | + rp_group_to_arb_mask[RP_GROUP_1]; + else + arb_mask = rp_group_to_arb_mask[rp_group]; + + for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE) + thd2arb_map_base |= arb_mask << (j * 4); + + for_each_set_bit(j, &ae_mask, ae_cnt) + thd2arb_map[j] = thd2arb_map_base; + } + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 46a782ba456f..7d8a774cadc8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -28,6 +28,7 @@ /* Accelerators */ #define ADF_GEN4_ACCELERATORS_MASK 0x1 #define ADF_GEN4_MAX_ACCELERATORS 1 +#define ADF_GEN4_ADMIN_ACCELENGINES 1 /* MSIX interrupt */ #define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 @@ -193,6 +194,9 @@ do { \ #define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 #define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 +/* Arbiter threads mask with error value */ +#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); enum icp_qat_gen4_slice_mask { @@ -207,6 +211,12 @@ enum icp_qat_gen4_slice_mask { ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), }; +enum adf_gen4_rp_groups { + RP_GROUP_0, + RP_GROUP_1, + RP_GROUP_COUNT +}; + void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev); u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self); @@ -224,4 +234,6 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); + #endif -- Gitee From 728c0c84fc0c1f9ce974eb7a54296ccfc1bbd43d Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 19 Jan 2024 17:12:38 +0100 Subject: [PATCH 759/953] crypto: qat - fix arbiter mapping generation algorithm for QAT 402xx ANBZ: #8589 commit e1d54d153fc3e697b841999df7cbad51492def8e upstream. Intel-SIG: commit e1d54d153fc3 crypto: qat - fix arbiter mapping generation algorithm for QAT 402xx Backport to support Intel QAT in-tree driver The commit "crypto: qat - generate dynamically arbiter mappings" introduced a regression on qat_402xx devices. This is reported when the driver probes the device, as indicated by the following error messages: 4xxx 0000:0b:00.0: enabling device (0140 -> 0142) 4xxx 0000:0b:00.0: Generate of the thread to arbiter map failed 4xxx 0000:0b:00.0: Direct firmware load for qat_402xx_mmp.bin failed with error -2 The root cause of this issue was the omission of a necessary function pointer required by the mapping algorithm during the implementation. Fix it by adding the missing function pointer. 
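
For reference, the per-AE arbiter word that this map generation produces can be
reproduced in a minimal user-space sketch. The 4-bit group masks (0x5, 0xA, or
their OR for single-service configurations) follow the code above; the thread
mask and thread count used here are hypothetical example inputs, not values
read from real hardware::

    /* Standalone sketch of the GEN4 thread-to-arbiter word computation.
     * It mirrors the inner loop of adf_gen4_init_thd2arb_map(); inputs
     * are invented for illustration.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_THREADS_PER_AE 8 /* assumed stand-in for ADF_NUM_THREADS_PER_AE */

    static uint32_t build_thd2arb_word(uint16_t arb_mask, unsigned long thds_mask)
    {
        uint32_t word = 0;
        unsigned int j;

        /* Each enabled thread gets a 4-bit arbiter mask at bits [4j+3:4j]. */
        for (j = 0; j < NUM_THREADS_PER_AE; j++)
            if (thds_mask & (1UL << j))
                word |= (uint32_t)arb_mask << (j * 4);

        return word;
    }

    int main(void)
    {
        /* RP_GROUP_0 -> 0x5, RP_GROUP_1 -> 0xA, both for single service. */
        printf("group0 : 0x%08" PRIx32 "\n", build_thd2arb_word(0x5, 0x3F));
        printf("single : 0x%08" PRIx32 "\n", build_thd2arb_word(0x5 | 0xA, 0x3F));
        return 0;
    }

With a hypothetical thread mask of 0x3F this prints 0x00555555 for a group-0
service and 0x00FFFFFF when both groups are combined.
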
Fixes: 5da6a2d5353e ("crypto: qat - generate dynamically arbiter mappings") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index af67fd7e8caf..e171cddf6f02 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -471,6 +471,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_name = ADF_402XX_FW; hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; break; case ADF_401XX_PCI_DEVICE_ID: hw_data->fw_name = ADF_4XXX_FW; -- Gitee From c9b2bb7b0e9dd81f86980cacb3d878190ac31c8a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Jan 2024 17:26:02 +0100 Subject: [PATCH 760/953] crypto: qat - avoid memcpy() overflow warning ANBZ: #8589 commit 23a22e831ed4e6aa0831312e8cc8b7c60a657f60 upstream. Intel-SIG: commit 23a22e831ed4 crypto: qat - avoid memcpy() overflow warning Backport to support Intel QAT in-tree driver The use of array_size() leads gcc to assume the memcpy() can have a larger limit than actually possible, which triggers a string fortification warning: In file included from include/linux/string.h:296, from include/linux/bitmap.h:12, from include/linux/cpumask.h:12, from include/linux/sched.h:16, from include/linux/delay.h:23, from include/linux/iopoll.h:12, from drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c:3: In function 'fortify_memcpy_chk', inlined from 'adf_gen4_init_thd2arb_map' at drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c:401:3: include/linux/fortify-string.h:579:4: error: call to '__write_overflow_field' declared with attribute warning: detected write beyond size of field (1st parameter); maybe use struct_group()? [-Werror=attribute-warning] 579 | __write_overflow_field(p_size_field, size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/fortify-string.h:588:4: error: call to '__read_overflow2_field' declared with attribute warning: detected read beyond size of field (2nd parameter); maybe use struct_group()? [-Werror=attribute-warning] 588 | __read_overflow2_field(q_size_field, size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add an explicit range check to avoid this. 
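
The shape of the fix, reduced to a self-contained user-space sketch (the array
size and names below are illustrative stand-ins, not the driver's real
definitions)::

    /* Sketch of the pattern: bound the element count against the
     * fixed-size destination before memcpy(), so the fortified copy
     * can never be reached with an out-of-range length.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
    #define MAP_SLOTS 17 /* stands in for ICP_QAT_HW_AE_DELIMITER */

    static uint32_t thd2arb_map[MAP_SLOTS];

    static int copy_dcc_map(const uint32_t *src, size_t ae_cnt)
    {
        if (ae_cnt > ARRAY_SIZE(thd2arb_map))
            return -EINVAL; /* reject before the copy, as the patch does */

        memcpy(thd2arb_map, src, ae_cnt * sizeof(*src));
        return 0;
    }

    int main(void)
    {
        static const uint32_t dcc[MAP_SLOTS] = { 0 };

        return copy_dcc_map(dcc, MAP_SLOTS) ? 1 : 0;
    }
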
Fixes: 5da6a2d5353e ("crypto: qat - generate dynamically arbiter mappings") Signed-off-by: Arnd Bergmann Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 9985683056d5..f752653ccb47 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -398,6 +398,9 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) ADF_GEN4_ADMIN_ACCELENGINES; if (srv_id == SVC_DCC) { + if (ae_cnt > ICP_QAT_HW_AE_DELIMITER) + return -EINVAL; + memcpy(thd2arb_map, thrd_to_arb_map_dcc, array_size(sizeof(*thd2arb_map), ae_cnt)); return 0; -- Gitee From 9f9cb5188ea275b30eae6bca3bba713b188dee61 Mon Sep 17 00:00:00 2001 From: Erick Archer Date: Sun, 21 Jan 2024 17:40:43 +0100 Subject: [PATCH 761/953] crypto: qat - use kcalloc_node() instead of kzalloc_node() ANBZ: #8589 commit 4da3bc65d218605557696109e42cfeee666d601f upstream. Intel-SIG: commit 4da3bc65d218 crypto: qat - use kcalloc_node() instead of kzalloc_node() Backport to support Intel QAT in-tree driver As noted in the "Deprecated Interfaces, Language Features, Attributes, and Conventions" documentation [1], size calculations (especially multiplication) should not be performed in memory allocator (or similar) function arguments due to the risk of them overflowing. This could lead to values wrapping around and a smaller allocation being made than the caller was expecting. Using those allocations could lead to linear overflows of heap memory and other misbehaviors. So, use the purpose specific kcalloc_node() function instead of the argument count * size in the kzalloc_node() function. Link: https://www.kernel.org/doc/html/next/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments [1] Link: https://github.com/KSPP/linux/issues/162 Signed-off-by: Erick Archer Reviewed-by: Gustavo A. R. Silva Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_isr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 3557a0d6dea2..a13d9885d60f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -272,7 +272,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) msix_num_entries += hw_data->num_banks; - irqs = kzalloc_node(msix_num_entries * sizeof(*irqs), + irqs = kcalloc_node(msix_num_entries, sizeof(*irqs), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!irqs) return -ENOMEM; -- Gitee From d73de88838ddff6ae182c2c1632c37010b74f61c Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 2 Feb 2024 18:53:16 +0800 Subject: [PATCH 762/953] crypto: qat - add heartbeat error simulator ANBZ: #8589 commit e2b67859ab6efd4458bda1baaee20331a367d995 upstream. 
Intel-SIG: commit e2b67859ab6e crypto: qat - add heartbeat error simulator Backport to support Intel QAT in-tree driver Add a mechanism that allows to inject a heartbeat error for testing purposes. A new attribute `inject_error` is added to debugfs for each QAT device. Upon a write on this attribute, the driver will inject an error on the device which can then be detected by the heartbeat feature. Errors are breaking the device functionality thus they require a device reset in order to be recovered. This functionality is not compiled by default, to enable it CRYPTO_DEV_QAT_ERROR_INJECTION must be set. Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Lucas Segarra Fernandez Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/debugfs-driver-qat | 26 +++++++ drivers/crypto/intel/qat/Kconfig | 14 ++++ drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_common_drv.h | 1 + .../intel/qat/qat_common/adf_heartbeat.c | 6 -- .../intel/qat/qat_common/adf_heartbeat.h | 18 +++++ .../qat/qat_common/adf_heartbeat_dbgfs.c | 52 +++++++++++++ .../qat/qat_common/adf_heartbeat_inject.c | 76 +++++++++++++++++++ .../intel/qat/qat_common/adf_hw_arbiter.c | 25 ++++++ 9 files changed, 214 insertions(+), 6 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index b2db010d851e..bd6793760f29 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -81,3 +81,29 @@ Description: (RO) Read returns, for each Acceleration Engine (AE), the number : Number of Compress and Verify (CnV) errors and type of the last CnV error detected by Acceleration Engine N. + +What: /sys/kernel/debug/qat__/heartbeat/inject_error +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (WO) Write to inject an error that simulates an heartbeat + failure. This is to be used for testing purposes. + + After writing this file, the driver stops arbitration on a + random engine and disables the fetching of heartbeat counters. + If a workload is running on the device, a job submitted to the + accelerator might not get a response and a read of the + `heartbeat/status` attribute might report -1, i.e. device + unresponsive. + The error is unrecoverable thus the device must be restarted to + restore its functionality. + + This attribute is available only when the kernel is built with + CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION=y. + + A write of 1 enables error injection. + + The following example shows how to enable error injection:: + + # cd /sys/kernel/debug/qat__ + # echo 1 > heartbeat/inject_error diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index c120f6715a09..02fb8abe4e6e 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -106,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF To compile this as a module, choose M here: the module will be called qat_c62xvf. 
+ +config CRYPTO_DEV_QAT_ERROR_INJECTION + bool "Support for Intel(R) QAT Devices Heartbeat Error Injection" + depends on CRYPTO_DEV_QAT + depends on DEBUG_FS + help + Enables a mechanism that allows to inject a heartbeat error on + Intel(R) QuickAssist devices for testing purposes. + + This is intended for developer use only. + If unsure, say N. + + This functionality is available via debugfs entry of the Intel(R) + QuickAssist device diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 6908727bff3b..5915cde8a7aa 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -53,3 +53,5 @@ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ adf_gen2_pfvf.o adf_gen4_pfvf.o + +intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index f06188033a93..0baae42deb3a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -90,6 +90,7 @@ void adf_exit_aer(void); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr); int adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index 13f48d2f6da8..f88b1bc6857e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -23,12 +23,6 @@ #define ADF_HB_EMPTY_SIG 0xA5A5A5A5 -/* Heartbeat counter pair */ -struct hb_cnt_pair { - __u16 resp_heartbeat_cnt; - __u16 req_heartbeat_cnt; -}; - static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev) { u64 curr_time = adf_clock_get_current_time(); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index b22e3cb29798..24c3f4f24c86 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -19,6 +19,12 @@ enum adf_device_heartbeat_status { HB_DEV_UNSUPPORTED, }; +/* Heartbeat counter pair */ +struct hb_cnt_pair { + __u16 resp_heartbeat_cnt; + __u16 req_heartbeat_cnt; +}; + struct adf_heartbeat { unsigned int hb_sent_counter; unsigned int hb_failed_counter; @@ -35,6 +41,9 @@ struct adf_heartbeat { struct dentry *cfg; struct dentry *sent; struct dentry *failed; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + struct dentry *inject_error; +#endif } dbgfs; }; @@ -51,6 +60,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status); void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev); +#else +static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + return -EPERM; +} +#endif + #else static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c 
b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 2661af6a2ef6..5cd6c2d6f90a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -155,6 +155,43 @@ static const struct file_operations adf_hb_cfg_fops = { .write = adf_hb_cfg_write, }; +static ssize_t adf_hb_error_inject_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct adf_accel_dev *accel_dev = file->private_data; + size_t written_chars; + char buf[3]; + int ret; + + /* last byte left as string termination */ + if (count != 2) + return -EINVAL; + + written_chars = simple_write_to_buffer(buf, sizeof(buf) - 1, + ppos, user_buf, count); + if (buf[0] != '1') + return -EINVAL; + + ret = adf_heartbeat_inject_error(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Heartbeat error injection failed with status %d\n", + ret); + return ret; + } + + dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n"); + + return written_chars; +} + +static const struct file_operations adf_hb_error_inject_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = adf_hb_error_inject_write, +}; + void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) { struct adf_heartbeat *hb = accel_dev->heartbeat; @@ -171,6 +208,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) &hb->hb_failed_counter, &adf_hb_stats_fops); hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir, accel_dev, &adf_hb_cfg_fops); + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) { + struct dentry *inject_error __maybe_unused; + + inject_error = debugfs_create_file("inject_error", 0200, + hb->dbgfs.base_dir, accel_dev, + &adf_hb_error_inject_fops); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + hb->dbgfs.inject_error = inject_error; +#endif + } } EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add); @@ -189,6 +237,10 @@ void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev) hb->dbgfs.failed = NULL; debugfs_remove(hb->dbgfs.cfg); hb->dbgfs.cfg = NULL; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + debugfs_remove(hb->dbgfs.inject_error); + hb->dbgfs.inject_error = NULL; +#endif debugfs_remove(hb->dbgfs.base_dir); hb->dbgfs.base_dir = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c new file mode 100644 index 000000000000..a3b474bdef6c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include + +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_heartbeat.h" + +#define MAX_HB_TICKS 0xFFFFFFFF + +static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + + accel_dev->heartbeat->hb_timer = 0; + + if (hw_data->stop_timer) + hw_data->stop_timer(accel_dev); + + return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS); +} + +static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae, + u32 thr) +{ + struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + size_t thr_id = ae * hb_ctrs + thr; + u16 num_rsp = stats[thr_id].resp_heartbeat_cnt; + + /* + * Inject live.req != live.rsp and 
live.rsp == last.rsp + * to trigger the heartbeat error detection + */ + stats[thr_id].req_heartbeat_cnt++; + stats += (max_aes * hb_ctrs); + stats[thr_id].resp_heartbeat_cnt = num_rsp; +} + +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + u32 rand, rand_ae, rand_thr; + unsigned long ae_mask; + int ret; + + ae_mask = hw_device->ae_mask; + + do { + /* Ensure we have a valid ae */ + get_random_bytes(&rand, sizeof(rand)); + rand_ae = rand % max_aes; + } while (!test_bit(rand_ae, &ae_mask)); + + get_random_bytes(&rand, sizeof(rand)); + rand_thr = rand % hb_ctrs; + + /* Increase the heartbeat timer to prevent FW updating HB counters */ + ret = adf_hb_set_timer_to_max(accel_dev); + if (ret) + return ret; + + /* Configure worker threads to stop processing any packet */ + ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr); + if (ret) + return ret; + + /* Change HB counters memory to simulate a hang */ + adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr); + + return 0; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c index da6956699246..65bd26b25abc 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c @@ -103,3 +103,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev) csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } EXPORT_SYMBOL_GPL(adf_exit_arb); + +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr) +{ + void __iomem *csr = accel_dev->transport->banks[0].csr_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + const u32 *thd_2_arb_cfg; + struct arb_info info; + u32 ae_thr_map; + + if (ADF_AE_STRAND0_THREAD == thr || ADF_AE_STRAND1_THREAD == thr) + thr = ADF_AE_ADMIN_THREAD; + + hw_data->get_arb_info(&info); + thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev); + if (!thd_2_arb_cfg) + return -EFAULT; + + /* Disable scheduling for this particular AE and thread */ + ae_thr_map = *(thd_2_arb_cfg + ae); + ae_thr_map &= ~(GENMASK(3, 0) << (thr * BIT(2))); + + WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae, + ae_thr_map); + return 0; +} -- Gitee From 0fd98d00be47bae9530c3f4b4c91376230ed1992 Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:17 +0800 Subject: [PATCH 763/953] crypto: qat - add fatal error notify method ANBZ: #8589 commit ae508d7afb753f7576c435226e32b9535b7f8b10 upstream. Intel-SIG: commit ae508d7afb75 crypto: qat - add fatal error notify method Backport to support Intel QAT in-tree driver Add error notify method to report a fatal error event to all the subsystems registered. In addition expose an API, adf_notify_fatal_error(), that allows to trigger a fatal error notification asynchronously in the context of a workqueue. This will be invoked when a fatal error is detected by the ISR or through Heartbeat. 
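
A registered subsystem consumes the new event through its existing service
handler. The following self-contained mock shows the idea; the enum mirrors a
subset of adf_event and the handler body is invented for illustration, not
taken from the driver::

    #include <stdio.h>

    enum adf_event {
        ADF_EVENT_RESTARTING,
        ADF_EVENT_RESTARTED,
        ADF_EVENT_FATAL_ERROR,
    };

    /* Plays the role of a service_hndl's event_hld() callback. */
    static int example_event_hld(void *accel_dev, enum adf_event event)
    {
        switch (event) {
        case ADF_EVENT_FATAL_ERROR:
            /* Quiesce: stop submitting requests until recovery. */
            fprintf(stderr, "qat service: fatal error notified\n");
            return 0;
        default:
            return 0;
        }
    }

    int main(void)
    {
        return example_event_hld(NULL, ADF_EVENT_FATAL_ERROR);
    }
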
Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 30 +++++++++++++++++++ .../intel/qat/qat_common/adf_common_drv.h | 3 ++ .../crypto/intel/qat/qat_common/adf_init.c | 12 ++++++++ 3 files changed, 45 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 621d14ea3b81..a0df89e3d355 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -8,6 +8,11 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" +struct adf_fatal_error_data { + struct adf_accel_dev *accel_dev; + struct work_struct work; +}; + static struct workqueue_struct *device_reset_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, @@ -181,6 +186,31 @@ const struct pci_error_handlers adf_err_handler = { }; EXPORT_SYMBOL_GPL(adf_err_handler); +static void adf_notify_fatal_error_worker(struct work_struct *work) +{ + struct adf_fatal_error_data *wq_data = + container_of(work, struct adf_fatal_error_data, work); + struct adf_accel_dev *accel_dev = wq_data->accel_dev; + + adf_error_notifier(accel_dev); + kfree(wq_data); +} + +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct adf_fatal_error_data *wq_data; + + wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); + if (!wq_data) + return -ENOMEM; + + wq_data->accel_dev = accel_dev; + INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker); + adf_misc_wq_queue_work(&wq_data->work); + + return 0; +} + int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 0baae42deb3a..8c062d5a8db2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -40,6 +40,7 @@ enum adf_event { ADF_EVENT_SHUTDOWN, ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED, + ADF_EVENT_FATAL_ERROR, }; struct service_hndl { @@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev); +void adf_error_notifier(struct adf_accel_dev *accel_dev); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index f43ae9111553..74f0818c0703 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -433,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) return 0; } +void adf_error_notifier(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + + list_for_each_entry(service, &service_table, list) { + if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR)) + dev_err(&GET_DEV(accel_dev), + "Failed to send error event to %s.\n", + service->name); + } +} + static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; -- Gitee From 
dfac6e35300b040a0664e78cbab8e2b6b22a1c63 Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:18 +0800 Subject: [PATCH 764/953] crypto: qat - disable arbitration before reset ANBZ: #8589 commit 758a0087db98fa23a3597289dbf3643ba9db2700 upstream. Intel-SIG: commit 758a0087db98 crypto: qat - disable arbitration before reset Backport to support Intel QAT in-tree driver Disable arbitration to avoid new requests to be processed before resetting a device. This is needed so that new requests are not fetched when an error is detected. Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index a0df89e3d355..ec4701a32665 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -191,8 +191,16 @@ static void adf_notify_fatal_error_worker(struct work_struct *work) struct adf_fatal_error_data *wq_data = container_of(work, struct adf_fatal_error_data, work); struct adf_accel_dev *accel_dev = wq_data->accel_dev; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; adf_error_notifier(accel_dev); + + if (!accel_dev->is_vf) { + /* Disable arbitration to stop processing of new requests */ + if (hw_device->exit_arb) + hw_device->exit_arb(accel_dev); + } + kfree(wq_data); } -- Gitee From dc37ea16d28d701faf4fdac9e98dbe46e3475b70 Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:19 +0800 Subject: [PATCH 765/953] crypto: qat - update PFVF protocol for recovery ANBZ: #8589 commit ec26f8e6c784ae391e69b19f4738d7196ed7794d upstream. Intel-SIG: commit ec26f8e6c784 crypto: qat - update PFVF protocol for recovery Backport to support Intel QAT in-tree driver Update the PFVF logic to handle restart and recovery. This adds the following functions: * adf_pf2vf_notify_fatal_error(): allows the PF to notify VFs that the device detected a fatal error and requires a reset. This sends to VF the event `ADF_PF2VF_MSGTYPE_FATAL_ERROR`. * adf_pf2vf_wait_for_restarting_complete(): allows the PF to wait for `ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE` events from active VFs before proceeding with a reset. * adf_pf2vf_notify_restarted(): enables the PF to notify VFs with an `ADF_PF2VF_MSGTYPE_RESTARTED` event after recovery, indicating that the device is back to normal. This prompts VF drivers switch back to use the accelerator for workload processing. These changes improve the communication and synchronization between PF and VF drivers during system restart and recovery processes. 
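
The PF-side wait is a bounded poll: re-check the VFs' restarting flags at a
fixed interval until they all clear or the retry budget runs out. A
self-contained sketch of that idiom, using the same 100 x 100 ms budget as the
patch, is shown below (the busy predicate is a stand-in)::

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define POLL_DELAY_MS 100 /* ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY */
    #define POLL_RETRIES  100 /* ADF_VF_SHUTDOWN_RETRY */

    static bool wait_until_clear(bool (*still_busy)(void))
    {
        int retries = POLL_RETRIES;

        do {
            if (!still_busy())
                return true;
            usleep(POLL_DELAY_MS * 1000);
        } while (--retries);

        return false; /* caller warns that some VFs are still running */
    }

    /* Fake predicate that reports busy three times, then clears. */
    static bool busy_once(void) { static int n = 3; return n-- > 0; }

    int main(void)
    {
        printf("completed: %d\n", wait_until_clear(busy_once));
        return 0;
    }
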
Signed-off-by: Mun Chun Yep Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_accel_devices.h | 1 + drivers/crypto/intel/qat/qat_common/adf_aer.c | 3 + .../intel/qat/qat_common/adf_pfvf_msg.h | 7 +- .../intel/qat/qat_common/adf_pfvf_pf_msg.c | 64 ++++++++++++++++++- .../intel/qat/qat_common/adf_pfvf_pf_msg.h | 21 ++++++ .../intel/qat/qat_common/adf_pfvf_pf_proto.c | 8 +++ .../intel/qat/qat_common/adf_pfvf_vf_proto.c | 6 ++ .../crypto/intel/qat/qat_common/adf_sriov.c | 1 + 8 files changed, 109 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index a16c7e6edc65..4a3c36aaa7ca 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -332,6 +332,7 @@ struct adf_accel_vf_info { struct ratelimit_state vf2pf_ratelimit; u32 vf_nr; bool init; + bool restarting; u8 vf_compat_ver; }; diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index ec4701a32665..8555aeae34be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -7,6 +7,7 @@ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_pfvf_pf_msg.h" struct adf_fatal_error_data { struct adf_accel_dev *accel_dev; @@ -199,6 +200,8 @@ static void adf_notify_fatal_error_worker(struct work_struct *work) /* Disable arbitration to stop processing of new requests */ if (hw_device->exit_arb) hw_device->exit_arb(accel_dev); + if (accel_dev->pf.vf_info) + adf_pf2vf_notify_fatal_error(accel_dev); } kfree(wq_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h index 204a42438992..d1b3ef9cadac 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h @@ -99,6 +99,8 @@ enum pf2vf_msgtype { ADF_PF2VF_MSGTYPE_RESTARTING = 0x01, ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02, ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03, + ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04, + ADF_PF2VF_MSGTYPE_RESTARTED = 0x05, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10, }; @@ -112,6 +114,7 @@ enum vf2pf_msgtype { ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07, ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08, ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09, + ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. 
*/ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10, }; @@ -124,8 +127,10 @@ enum pfvf_compatibility_version { ADF_PFVF_COMPAT_FAST_ACK = 0x03, /* Ring to service mapping support for non-standard mappings */ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04, + /* Fallback compat */ + ADF_PFVF_COMPAT_FALLBACK = 0x05, /* Reference to the latest version */ - ADF_PFVF_COMPAT_THIS_VERSION = 0x04, + ADF_PFVF_COMPAT_THIS_VERSION = 0x05, }; /* PF->VF Version Response */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c index 14c069f0d71a..0e31f4b41844 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c @@ -1,21 +1,83 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2015 - 2021 Intel Corporation */ +#include #include #include "adf_accel_devices.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_pf_msg.h" #include "adf_pfvf_pf_proto.h" +#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100 +#define ADF_VF_SHUTDOWN_RETRY 100 + void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) { struct adf_accel_vf_info *vf; struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING }; int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n"); for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { - if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg)) + vf->restarting = false; + if (!vf->init) + continue; + if (adf_send_pf2vf_msg(accel_dev, i, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send restarting msg to VF%d\n", i); + else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) + vf->restarting = true; + } +} + +void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ + int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + int i, retries = ADF_VF_SHUTDOWN_RETRY; + struct adf_accel_vf_info *vf; + bool vf_running; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n"); + do { + vf_running = false; + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) + if (vf->restarting) + vf_running = true; + if (!vf_running) + break; + msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY); + } while (--retries); + + if (vf_running) + dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n"); +} + +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send restarted msg to VF%d\n", i); + } +} + +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send fatal error msg to VF%d\n", i); } } diff --git 
a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h index e8982d1ac896..f203d88c919c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h @@ -5,7 +5,28 @@ #include "adf_accel_devices.h" +#if defined(CONFIG_PCI_IOV) void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); +void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev); +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev); +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev); +#else +static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ +} +#endif typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev, u8 *buffer, u8 compat); diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c index 388e58bcbcaf..9ab93fbfefde 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c @@ -291,6 +291,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, vf_info->init = false; } break; + case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE: + { + dev_dbg(&GET_DEV(accel_dev), + "Restarting Complete received from VF%d\n", vf_nr); + vf_info->restarting = false; + vf_info->init = false; + } + break; case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ: diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c index 1015155b6374..dc284a089c88 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c @@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, adf_pf2vf_handle_pf_restarting(accel_dev); return false; + case ADF_PF2VF_MSGTYPE_RESTARTED: + dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n"); + return true; + case ADF_PF2VF_MSGTYPE_FATAL_ERROR: + dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n"); + return true; case ADF_PF2VF_MSGTYPE_VERSION_RESP: case ADF_PF2VF_MSGTYPE_BLKMSG_RESP: case ADF_PF2VF_MSGTYPE_RP_RESET_RESP: diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index f44025bb6f99..cb2a9830f192 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -103,6 +103,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) return; adf_pf2vf_notify_restarting(accel_dev); + adf_pf2vf_wait_for_restarting_complete(accel_dev); pci_disable_sriov(accel_to_pci_dev(accel_dev)); /* Disable VF to PF interrupts */ -- Gitee From 4b692387baebfeb635fb1cfb96ede3cb8de93774 Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:20 +0800 Subject: [PATCH 766/953] crypto: qat - re-enable sriov after pf reset ANBZ: #8589 commit 4469f9b2346834085fe4478ee1a851ee1de8ccb2 upstream. 
Intel-SIG: commit 4469f9b23468 crypto: qat - re-enable sriov after pf reset Backport to support Intel QAT in-tree driver When a Physical Function (PF) is reset, SR-IOV gets disabled, making the associated Virtual Functions (VFs) unavailable. Even after reset and using pci_restore_state, VFs remain uncreated because the numvfs still at 0. Therefore, it's necessary to reconfigure SR-IOV to re-enable VFs. This commit introduces the ADF_SRIOV_ENABLED configuration flag to cache the SR-IOV enablement state. SR-IOV is only re-enabled if it was previously configured. This commit also introduces a dedicated workqueue without `WQ_MEM_RECLAIM` flag for enabling SR-IOV during Heartbeat and CPM error resets, preventing workqueue flushing warning. This patch is based on earlier work done by Shashank Gupta. Signed-off-by: Mun Chun Yep Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 40 ++++++++++++++++++- .../intel/qat/qat_common/adf_cfg_strings.h | 1 + .../intel/qat/qat_common/adf_common_drv.h | 5 +++ .../crypto/intel/qat/qat_common/adf_sriov.c | 37 +++++++++++++++-- 4 files changed, 79 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 8555aeae34be..daa5180b4e25 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -15,6 +15,7 @@ struct adf_fatal_error_data { }; static struct workqueue_struct *device_reset_wq; +static struct workqueue_struct *device_sriov_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, pci_channel_state_t state) @@ -43,6 +44,13 @@ struct adf_reset_dev_data { struct work_struct reset_work; }; +/* sriov dev data */ +struct adf_sriov_dev_data { + struct adf_accel_dev *accel_dev; + struct completion compl; + struct work_struct sriov_work; +}; + void adf_reset_sbr(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); @@ -88,11 +96,22 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev) } } +static void adf_device_sriov_worker(struct work_struct *work) +{ + struct adf_sriov_dev_data *sriov_data = + container_of(work, struct adf_sriov_dev_data, sriov_work); + + adf_reenable_sriov(sriov_data->accel_dev); + complete(&sriov_data->compl); +} + static void adf_device_reset_worker(struct work_struct *work) { struct adf_reset_dev_data *reset_data = container_of(work, struct adf_reset_dev_data, reset_work); struct adf_accel_dev *accel_dev = reset_data->accel_dev; + unsigned long wait_jiffies = msecs_to_jiffies(10000); + struct adf_sriov_dev_data sriov_data; adf_dev_restarting_notify(accel_dev); if (adf_dev_restart(accel_dev)) { @@ -104,6 +123,14 @@ static void adf_device_reset_worker(struct work_struct *work) WARN(1, "QAT: device restart failed. 
Device is unusable\n"); return; } + + sriov_data.accel_dev = accel_dev; + init_completion(&sriov_data.compl); + INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker); + queue_work(device_sriov_wq, &sriov_data.sriov_work); + if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies)) + adf_pf2vf_notify_restarted(accel_dev); + adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); @@ -226,7 +253,14 @@ int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", WQ_MEM_RECLAIM, 0); - return !device_reset_wq ? -EFAULT : 0; + if (!device_reset_wq) + return -EFAULT; + + device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0); + if (!device_sriov_wq) + return -EFAULT; + + return 0; } void adf_exit_aer(void) @@ -234,4 +268,8 @@ void adf_exit_aer(void) if (device_reset_wq) destroy_workqueue(device_reset_wq); device_reset_wq = NULL; + + if (device_sriov_wq) + destroy_workqueue(device_sriov_wq); + device_sriov_wq = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index 322b76903a73..e015ad6cace2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -49,5 +49,6 @@ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_ACCEL_STR "Accelerator%d" #define ADF_HEARTBEAT_TIMER "HeartbeatTimer" +#define ADF_SRIOV_ENABLED "SriovEnabled" #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 8c062d5a8db2..10891c9da6e7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -192,6 +192,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); +void adf_reenable_sriov(struct adf_accel_dev *accel_dev); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask); void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev); bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev); @@ -212,6 +213,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } +static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ +} + static inline int adf_init_pf_wq(void) { return 0; diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index cb2a9830f192..87a70c00c41e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) /* This ptr will be populated when VFs will be created */ vf_info->accel_dev = accel_dev; vf_info->vf_nr = i; - vf_info->vf_compat_ver = 0; mutex_init(&vf_info->pf2vf_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, @@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) return pci_enable_sriov(pdev, totalvfs); } +void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + unsigned long val = 0; + + if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SRIOV_ENABLED, cfg)) + return; + + if (!accel_dev->pf.vf_info) + return; + + if (adf_cfg_add_key_value_param(accel_dev, 
ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC)) + return; + + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC)) + return; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + dev_dbg(&pdev->dev, "Re-enabling SRIOV\n"); + adf_enable_sriov(accel_dev); +} + /** * adf_disable_sriov() - Disable SRIOV for the device * @accel_dev: Pointer to accel device. @@ -116,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) mutex_destroy(&vf->pf2vf_lock); - kfree(accel_dev->pf.vf_info); - accel_dev->pf.vf_info = NULL; + if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { + kfree(accel_dev->pf.vf_info); + accel_dev->pf.vf_info = NULL; + } } EXPORT_SYMBOL_GPL(adf_disable_sriov); @@ -195,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) if (ret) return ret; + val = 1; + adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, + &val, ADF_DEC); + return numvfs; } EXPORT_SYMBOL_GPL(adf_sriov_configure); -- Gitee From 62c80facaad58d5ecb53b7069b878f4facbad887 Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:21 +0800 Subject: [PATCH 767/953] crypto: qat - add fatal error notification ANBZ: #8589 commit 2aaa1995a94a3187e52ddb9f127fa1307ee8ad00 upstream. Intel-SIG: commit 2aaa1995a94a crypto: qat - add fatal error notification Backport to support Intel QAT in-tree driver Notify a fatal error condition and optionally reset the device in the following cases: * if the device reports an uncorrectable fatal error through an interrupt * if the heartbeat feature detects that the device is not responding This patch is based on earlier work done by Shashank Gupta. Signed-off-by: Mun Chun Yep Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_heartbeat.c | 3 +++ drivers/crypto/intel/qat/qat_common/adf_isr.c | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index f88b1bc6857e..fe8428d4ff39 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -229,6 +229,9 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, "Heartbeat ERROR: QAT is not responding.\n"); *hb_status = HB_DEV_UNRESPONSIVE; hb->hb_failed_counter++; + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), + "Failed to notify fatal error\n"); return; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index a13d9885d60f..020d213f4c99 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -139,8 +139,13 @@ static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev) if (ras_ops->handle_interrupt && ras_ops->handle_interrupt(accel_dev, &reset_required)) { - if (reset_required) + if (reset_required) { dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n"); + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), + "Failed to notify fatal error\n"); + } + return true; } -- Gitee From 8e6c24b4f720f2a4d8ef7732b5245d4eb2b912a3 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 2 
Feb 2024 18:53:22 +0800 Subject: [PATCH 768/953] crypto: qat - add auto reset on error ANBZ: #8589 commit f5419a4239af8b3951f990c83d0d8c865a485475 upstream. Intel-SIG: commit f5419a4239af crypto: qat - add auto reset on error Backport to support Intel QAT in-tree driver Expose the `auto_reset` sysfs attribute to configure the driver to reset the device when a fatal error is detected. When auto reset is enabled, the driver resets the device when it detects either an heartbeat failure or a fatal error through an interrupt. This patch is based on earlier work done by Shashank Gupta. Signed-off-by: Damian Muszynski Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat | 20 ++++++++++ .../intel/qat/qat_common/adf_accel_devices.h | 1 + drivers/crypto/intel/qat/qat_common/adf_aer.c | 11 +++++- .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_sysfs.c | 37 +++++++++++++++++++ 5 files changed, 69 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index bbf329cf0d67..6778f1fea874 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -141,3 +141,23 @@ Description: 64 This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/auto_reset +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Reports the current state of the autoreset feature + for a QAT device + + Write to the attribute to enable or disable device auto reset. + + Device auto reset is disabled by default. + + The values are:: + + * 1/Yy/on: auto reset enabled. If the device encounters an + unrecoverable error, it will be reset automatically. + * 0/Nn/off: auto reset disabled. If the device encounters an + unrecoverable error, it will not be reset. + + This attribute is only available for qat_4xxx devices. 
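
For illustration, the attribute can also be toggled programmatically like any
sysfs file; in this minimal user-space sketch the PCI address 0000:6b:00.0 is
a made-up placeholder::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/bus/pci/devices/0000:6b:00.0/qat/auto_reset";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Writing "1" enables automatic reset on unrecoverable errors. */
        if (write(fd, "1", 1) != 1) {
            perror("write");
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }
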
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 4a3c36aaa7ca..0f26aa976c8c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -402,6 +402,7 @@ struct adf_accel_dev { struct adf_error_counters ras_errors; struct mutex state_lock; /* protect state of the device */ bool is_vf; + bool autoreset_on_error; u32 accel_id; }; #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index daa5180b4e25..79599fdb8779 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -214,6 +214,14 @@ const struct pci_error_handlers adf_err_handler = { }; EXPORT_SYMBOL_GPL(adf_err_handler); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->autoreset_on_error) + return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC); + + return 0; +} + static void adf_notify_fatal_error_worker(struct work_struct *work) { struct adf_fatal_error_data *wq_data = @@ -225,10 +233,11 @@ static void adf_notify_fatal_error_worker(struct work_struct *work) if (!accel_dev->is_vf) { /* Disable arbitration to stop processing of new requests */ - if (hw_device->exit_arb) + if (accel_dev->autoreset_on_error && hw_device->exit_arb) hw_device->exit_arb(accel_dev); if (accel_dev->pf.vf_info) adf_pf2vf_notify_fatal_error(accel_dev); + adf_dev_autoreset(accel_dev); } kfree(wq_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 10891c9da6e7..57328249c89e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -87,6 +87,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); extern const struct pci_error_handlers adf_err_handler; void adf_reset_sbr(struct adf_accel_dev *accel_dev); void adf_reset_flr(struct adf_accel_dev *accel_dev); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index d450dad32c9e..4e7f70d4049d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -204,6 +204,42 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute } static DEVICE_ATTR_RW(pm_idle_enabled); +static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char *auto_reset; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + auto_reset = accel_dev->autoreset_on_error ? 
"on" : "off"; + + return sysfs_emit(buf, "%s\n", auto_reset); +} + +static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + bool enabled = false; + int ret; + + ret = kstrtobool(buf, &enabled); + if (ret) + return ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + accel_dev->autoreset_on_error = enabled; + + return count; +} +static DEVICE_ATTR_RW(auto_reset); + static DEVICE_ATTR_RW(state); static DEVICE_ATTR_RW(cfg_services); @@ -291,6 +327,7 @@ static struct attribute *qat_attrs[] = { &dev_attr_pm_idle_enabled.attr, &dev_attr_rp2srv.attr, &dev_attr_num_rps.attr, + &dev_attr_auto_reset.attr, NULL, }; -- Gitee From 700f9e2fe53237d3d971da2c16972827dd19dbd6 Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:23 +0800 Subject: [PATCH 769/953] crypto: qat - limit heartbeat notifications ANBZ: #8589 commit 750fa7c20e60926431ec50d63899771ffcd9fd5c upstream. Intel-SIG: commit 750fa7c20e60 crypto: qat - limit heartbeat notifications Backport to support Intel QAT in-tree driver When the driver detects an heartbeat failure, it starts the recovery flow. Set a limit so that the number of events is limited in case the heartbeat status is read too frequently. Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../crypto/intel/qat/qat_common/adf_heartbeat.c | 17 ++++++++++++++--- .../crypto/intel/qat/qat_common/adf_heartbeat.h | 3 +++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index fe8428d4ff39..b19aa1ef8eee 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -205,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev) return ret; } +static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev) +{ + u64 curr_time = adf_clock_get_current_time(); + u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time; + + if (time_since_reset < ADF_CFG_HB_RESET_MS) + return; + + accel_dev->heartbeat->last_hb_reset_time = curr_time; + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to notify fatal error\n"); +} + void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status) { @@ -229,9 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, "Heartbeat ERROR: QAT is not responding.\n"); *hb_status = HB_DEV_UNRESPONSIVE; hb->hb_failed_counter++; - if (adf_notify_fatal_error(accel_dev)) - dev_err(&GET_DEV(accel_dev), - "Failed to notify fatal error\n"); + adf_heartbeat_reset(accel_dev); return; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index 24c3f4f24c86..16fdfb48b196 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -13,6 +13,8 @@ struct dentry; #define ADF_CFG_HB_TIMER_DEFAULT_MS 500 #define ADF_CFG_HB_COUNT_THRESHOLD 3 +#define ADF_CFG_HB_RESET_MS 5000 + enum adf_device_heartbeat_status { HB_DEV_UNRESPONSIVE = 0, 
 	HB_DEV_ALIVE,
@@ -30,6 +32,7 @@ struct adf_heartbeat {
 	unsigned int hb_failed_counter;
 	unsigned int hb_timer;
 	u64 last_hb_check_time;
+	u64 last_hb_reset_time;
 	bool ctrs_cnt_checked;
 	struct hb_dma_addr {
 		dma_addr_t phy_addr;
--
Gitee

From b6d7412d6e49860068e00707fb4e66ff8248e599 Mon Sep 17 00:00:00 2001
From: Mun Chun Yep
Date: Fri, 2 Feb 2024 18:53:24 +0800
Subject: [PATCH 770/953] crypto: qat - improve aer error reset handling

ANBZ: #8589

commit 9567d3dc760931afc38f7f1144c66dd8c4b8c680 upstream.

Intel-SIG: commit 9567d3dc7609 crypto: qat - improve aer error reset handling
Backport to support Intel QAT in-tree driver

Rework the AER reset and recovery flow to take into account root port
integrated devices that get reset between the error-detected and the
slot-reset callbacks.

In adf_error_detected() the device is gracefully shut down. The worker
threads are disabled, the error conditions are notified to listeners
and through PFVF comms and finally the device is reset as part of
adf_dev_down().

In adf_slot_reset(), the device is brought up again. If SRIOV VFs were
enabled before reset, these are re-enabled and VFs are notified of
restarting through PFVF comms.

Signed-off-by: Mun Chun Yep
Reviewed-by: Ahsan Atta
Reviewed-by: Markas Rapoportas
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 drivers/crypto/intel/qat/qat_common/adf_aer.c | 26 ++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 79599fdb8779..9da2278bd5b7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -33,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
+	set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+	if (accel_dev->hw_device->exit_arb) {
+		dev_dbg(&pdev->dev, "Disabling arbitration\n");
+		accel_dev->hw_device->exit_arb(accel_dev);
+	}
+	adf_error_notifier(accel_dev);
+	adf_pf2vf_notify_fatal_error(accel_dev);
+	adf_dev_restarting_notify(accel_dev);
+	adf_pf2vf_notify_restarting(accel_dev);
+	adf_pf2vf_wait_for_restarting_complete(accel_dev);
+	pci_clear_master(pdev);
+	adf_dev_down(accel_dev, false);
+
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -190,14 +203,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
 static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
 {
 	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+	int res = 0;
 
 	if (!accel_dev) {
 		pr_err("QAT: Can't find acceleration device\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
-	if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+
+	if (!pdev->is_busmaster)
+		pci_set_master(pdev);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+	res = adf_dev_up(accel_dev, false);
+	if (res && res != -EALREADY)
 		return PCI_ERS_RESULT_DISCONNECT;
 
+	adf_reenable_sriov(accel_dev);
+	adf_pf2vf_notify_restarted(accel_dev);
+	adf_dev_restarted_notify(accel_dev);
+	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 	return PCI_ERS_RESULT_RECOVERED;
 }
--
Gitee

From 06f2139543cc335c0187cd4753c42fc99052e2ba Mon Sep 17 00:00:00 2001
From: Damian Muszynski
Date: Fri, 9 Feb 2024 13:42:07 +0100
Subject: [PATCH 771/953] crypto: qat - change SLAs cleanup flow at shutdown

ANBZ: #8589

commit c2304e1a0b8051a60d4eb9c99a1c509d90380ae5 upstream.

Intel-SIG: commit c2304e1a0b80 crypto: qat - change SLAs cleanup flow at
 shutdown
Backport to support Intel QAT in-tree driver

The implementation of the Rate Limiting (RL) feature includes the
cleanup of all SLAs during device shutdown. For each SLA, the firmware
is notified of the removal through an admin message, the data
structures that take into account the budgets are updated and the
memory is freed.

However, this explicit cleanup is not necessary as (1) the device is
reset and the firmware state is lost, and (2) all RL data structures
are freed anyway. In addition, if the device is unresponsive, for
example after a PCI AER error is detected, the admin interface might
not be available. This might slow down the shutdown sequence and cause
a timeout in the recovery flows, which in turn makes the driver believe
that the device is not recoverable.

Fix by replacing the explicit SLAs removal with just a free of the SLA
data structures.

Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx")
Cc:
Signed-off-by: Damian Muszynski
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 drivers/crypto/intel/qat/qat_common/adf_rl.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index de1b214dba1f..d4f2db3c53d8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -788,6 +788,24 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
 	sla_type_arr[node_id] = NULL;
 }
 
+static void free_all_sla(struct adf_accel_dev *accel_dev)
+{
+	struct adf_rl *rl_data = accel_dev->rate_limiting;
+	int sla_id;
+
+	mutex_lock(&rl_data->rl_lock);
+
+	for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) {
+		if (!rl_data->sla[sla_id])
+			continue;
+
+		kfree(rl_data->sla[sla_id]);
+		rl_data->sla[sla_id] = NULL;
+	}
+
+	mutex_unlock(&rl_data->rl_lock);
+}
+
 /**
  * add_update_sla() - handles the creation and the update of an SLA
  * @accel_dev: pointer to acceleration device structure
@@ -1155,7 +1173,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev)
 		return;
 
 	adf_sysfs_rl_rm(accel_dev);
-	adf_rl_remove_sla_all(accel_dev, true);
+	free_all_sla(accel_dev);
 }
 
 void adf_rl_exit(struct adf_accel_dev *accel_dev)
--
Gitee

From da13ad31e34cad89e3c7bc8420672a79112a0151 Mon Sep 17 00:00:00 2001
From: Giovanni Cabiddu
Date: Mon, 12 Feb 2024 13:05:09 +0000
Subject: [PATCH 772/953] Documentation: qat: fix auto_reset section

ANBZ: #8589

commit 2ecd43413d7668d67b9b8a56f882aa1ea12b8a62 upstream.

Intel-SIG: commit 2ecd43413d76 Documentation: qat: fix auto_reset section
Backport to support Intel QAT in-tree driver

Remove unneeded colon in the auto_reset section. This resolves the
following errors when building the documentation:

Documentation/ABI/testing/sysfs-driver-qat:146: ERROR: Unexpected indentation.
Documentation/ABI/testing/sysfs-driver-qat:146: WARNING: Block quote ends without a blank line; unexpected unindent.
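
For reference, in reST a trailing "::" opens a literal block whose body
must be indented as preformatted text; the bullet list that follows
"The values are::" sits at the same indentation level, which is what
Sphinx rejects here. With a single ":" the list is parsed as normal
body text, so the entry renders as intended, roughly (illustration
drawn from the hunk below, not additional patch content):

	The values are:

	* 1/Yy/on: auto reset enabled. If the device encounters an
	  unrecoverable error, it will be reset automatically.
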
Fixes: f5419a4239af ("crypto: qat - add auto reset on error")
Reported-by: Stephen Rothwell
Closes: https://lore.kernel.org/linux-kernel/20240212144830.70495d07@canb.auug.org.au/T/
Signed-off-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 Documentation/ABI/testing/sysfs-driver-qat | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat
index 6778f1fea874..96020fb051c3 100644
--- a/Documentation/ABI/testing/sysfs-driver-qat
+++ b/Documentation/ABI/testing/sysfs-driver-qat
@@ -153,7 +153,7 @@ Description:	(RW) Reports the current state of the autoreset feature
 
 		Device auto reset is disabled by default.
 
-		The values are::
+		The values are:
 
 		* 1/Yy/on: auto reset enabled. If the device encounters an
 		  unrecoverable error, it will be reset automatically.
--
Gitee

From a65009513d0b8af5a340589889096065d7d5eb35 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Tue, 13 Feb 2024 21:09:41 +0300
Subject: [PATCH 773/953] crypto: qat - uninitialized variable in
 adf_hb_error_inject_write()

ANBZ: #8589

commit bcc06e1b3dadc76140203753a08979374c965ada upstream.

Intel-SIG: commit bcc06e1b3dad crypto: qat - uninitialized variable in
 adf_hb_error_inject_write()
Backport to support Intel QAT in-tree driver

There are a few issues in this code. If *ppos is non-zero then the
first part of the buffer is not initialized. We never initialize the
last character of the buffer. The return is not checked, so it's
possible that none of the buffer is initialized.

This is debugfs code, which is root only, and the impact of these bugs
is very small. However, it's still worth fixing.

To fix this:
1) Check that *ppos is zero.
2) Use copy_from_user() instead of simple_write_to_buffer().
3) Explicitly add a NUL terminator.
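
The fixed handler follows the usual pattern for a tiny, fixed-size
debugfs write; a minimal sketch of that pattern (illustrative only,
the actual hunk follows below):

	char buf[3];

	if (*ppos != 0 || count != 2)		/* reject offset and partial writes */
		return -EINVAL;
	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;
	buf[count] = '\0';			/* buf is now a NUL-terminated string */
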
Fixes: e2b67859ab6e ("crypto: qat - add heartbeat error simulator")
Signed-off-by: Dan Carpenter
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 .../crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
index 5cd6c2d6f90a..cccdff24b48d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -160,16 +160,17 @@ static ssize_t adf_hb_error_inject_write(struct file *file,
 					 size_t count, loff_t *ppos)
 {
 	struct adf_accel_dev *accel_dev = file->private_data;
-	size_t written_chars;
 	char buf[3];
 	int ret;
 
 	/* last byte left as string termination */
-	if (count != 2)
+	if (*ppos != 0 || count != 2)
 		return -EINVAL;
 
-	written_chars = simple_write_to_buffer(buf, sizeof(buf) - 1,
-					       ppos, user_buf, count);
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+	buf[count] = '\0';
+
 	if (buf[0] != '1')
 		return -EINVAL;
 
@@ -183,7 +184,7 @@ static ssize_t adf_hb_error_inject_write(struct file *file,
 	dev_info(&GET_DEV(accel_dev),
 		 "Heartbeat error injection enabled\n");
 
-	return written_chars;
+	return count;
 }
 
 static const struct file_operations adf_hb_error_inject_fops = {
--
Gitee

From 55e7926cf0616eb65febb07f0821247beef177d3 Mon Sep 17 00:00:00 2001
From: Adam Guerin
Date: Fri, 16 Feb 2024 15:19:55 +0000
Subject: [PATCH 774/953] crypto: qat - remove unused macros in qat_comp_alg.c

ANBZ: #8589

commit dfff0e35fa5dd84ae75052ba129b0219d83e46dc upstream.

Intel-SIG: commit dfff0e35fa5d crypto: qat - remove unused macros in
 qat_comp_alg.c
Backport to support Intel QAT in-tree driver

As a result of the removal of qat_zlib_deflate, some defines were not
removed. Remove them.
This is to fix the following warning when compiling the QAT driver
using the clang compiler with CC=clang W=2:

drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:21:9: warning: macro is not used [-Wunused-macros]
   21 | #define QAT_RFC_1950_CM_OFFSET 4
      |         ^
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:16:9: warning: macro is not used [-Wunused-macros]
   16 | #define QAT_RFC_1950_HDR_SIZE 2
      |         ^
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:17:9: warning: macro is not used [-Wunused-macros]
   17 | #define QAT_RFC_1950_FOOTER_SIZE 4
      |         ^
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:22:9: warning: macro is not used [-Wunused-macros]
   22 | #define QAT_RFC_1950_DICT_MASK 0x20
      |         ^
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:18:9: warning: macro is not used [-Wunused-macros]
   18 | #define QAT_RFC_1950_CM_DEFLATE 8
      |         ^
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:20:9: warning: macro is not used [-Wunused-macros]
   20 | #define QAT_RFC_1950_CM_MASK 0x0f
      |         ^
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:23:9: warning: macro is not used [-Wunused-macros]
   23 | #define QAT_RFC_1950_COMP_HDR 0x785e
      |         ^
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:19:9: warning: macro is not used [-Wunused-macros]
   19 | #define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
      |         ^

Fixes: e9dd20e0e5f6 ("crypto: qat - Remove zlib-deflate")
Signed-off-by: Adam Guerin
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index bf8c0ee62917..2ba4aa22e092 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -13,15 +13,6 @@
 #include "qat_compression.h"
 #include "qat_algs_send.h"
 
-#define QAT_RFC_1950_HDR_SIZE 2
-#define QAT_RFC_1950_FOOTER_SIZE 4
-#define QAT_RFC_1950_CM_DEFLATE 8
-#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
-#define QAT_RFC_1950_CM_MASK 0x0f
-#define QAT_RFC_1950_CM_OFFSET 4
-#define QAT_RFC_1950_DICT_MASK 0x20
-#define QAT_RFC_1950_COMP_HDR 0x785e
-
 static DEFINE_MUTEX(algs_lock);
 static unsigned int active_devs;
--
Gitee

From 6c2fa6c8ad17ba494f186fb1cefed096b7fa1a0d Mon Sep 17 00:00:00 2001
From: Adam Guerin
Date: Fri, 16 Feb 2024 15:19:56 +0000
Subject: [PATCH 775/953] crypto: qat - removed unused macro in adf_cnv_dbgfs.c

ANBZ: #8589

commit 9a5dcada14d5e027856a1bc38443e54111438da6 upstream.

Intel-SIG: commit 9a5dcada14d5 crypto: qat - removed unused macro in
 adf_cnv_dbgfs.c
Backport to support Intel QAT in-tree driver

This macro was added but never used; remove it.
This is to fix the following warning when compiling the QAT driver
using the clang compiler with CC=clang W=2:

drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c:19:9: warning: macro is not used [-Wunused-macros]
   19 | #define CNV_SLICE_ERR_MASK GENMASK(7, 0)
      |         ^

Fixes: d807f0240c71 ("crypto: qat - add cnv_errors debugfs file")
Signed-off-by: Adam Guerin
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
index 07119c487da0..627953a72d47 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
@@ -16,7 +16,6 @@
 
 #define CNV_ERR_INFO_MASK	GENMASK(11, 0)
 #define CNV_ERR_TYPE_MASK	GENMASK(15, 12)
-#define CNV_SLICE_ERR_MASK	GENMASK(7, 0)
 #define CNV_SLICE_ERR_SIGN_BIT_INDEX	7
 #define CNV_DELTA_ERR_SIGN_BIT_INDEX	11
--
Gitee

From afdf9680ea2f19793eb672198a3e5378dadc9c7e Mon Sep 17 00:00:00 2001
From: Adam Guerin
Date: Fri, 16 Feb 2024 15:19:58 +0000
Subject: [PATCH 776/953] crypto: qat - remove double initialization of value

ANBZ: #8589

commit a66cf93ab33853f17b8cc33a99263dd0a383a1a1 upstream.

Intel-SIG: commit a66cf93ab338 crypto: qat - remove double initialization of
 value
Backport to support Intel QAT in-tree driver

Remove double initialization of the reg variable.

This is to fix the following warning when compiling the QAT driver
using clang scan-build:

drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c:1010:6: warning: Value stored to 'reg' during its initialization is never read [deadcode.DeadStores]
 1010 |         u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR);
      |             ^~~   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c:1109:6: warning: Value stored to 'reg' during its initialization is never read [deadcode.DeadStores]
 1109 |         u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH);
      |             ^~~   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Fixes: 99b1c9826e48 ("crypto: qat - count QAT GEN4 errors")
Signed-off-by: Adam Guerin
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
index 048c24607939..2dd3772bf58a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
@@ -1007,8 +1007,7 @@ static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev,
 static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
 				     void __iomem *csr, u32 iastatssm)
 {
-	u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR);
-	u32 bits_num = BITS_PER_REG(reg);
+	u32 reg, bits_num = BITS_PER_REG(reg);
 	bool reset_required = false;
 	unsigned long errs_bits;
 	u32 bit_iterator;
@@ -1106,8 +1105,7 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
 static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
 				     void __iomem *csr, u32 iastatssm)
 {
-	u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH);
-	u32 bits_num = BITS_PER_REG(reg);
+	u32 reg, bits_num = BITS_PER_REG(reg);
 	bool reset_required = false;
 	unsigned long errs_bits;
 	u32 bit_iterator;
--
Gitee

From e3b3f73b6addfca8886a17caffc86f75ac759edb Mon Sep 17 00:00:00 2001
From: Adam Guerin
Date: Fri, 16 Feb 2024 15:19:59 +0000
Subject: [PATCH 777/953] crypto: qat - remove unnecessary description from
 comment

ANBZ: #8589

commit ff391345141e727320ca906e6928c6a1f14e7e37 upstream.

Intel-SIG: commit ff391345141e crypto: qat - remove unnecessary description
 from comment
Backport to support Intel QAT in-tree driver

Remove extra description from comments as it is not required.
This is to fix the following warning when compiling the QAT driver
using the clang compiler with CC=clang W=2:

drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c:65: warning: contents before sections
drivers/crypto/intel/qat/qat_common/adf_isr.c:380: warning: contents before sections
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c:298: warning: contents before sections

Signed-off-by: Adam Guerin
Reviewed-by: Giovanni Cabiddu
Signed-off-by: Herbert Xu
[ Aichun Shi: amend commit log ]
Signed-off-by: Aichun Shi
Reviewed-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2954
---
 drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c | 4 ++--
 drivers/crypto/intel/qat/qat_common/adf_isr.c     | 2 --
 drivers/crypto/intel/qat/qat_common/adf_vf_isr.c  | 2 --
 3 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 86ee36feefad..f07b748795f7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake)
 
 /**
  * adf_clean_vf_map() - Cleans VF id mapings
- *
- * Function cleans internal ids for virtual functions.
  * @vf: flag indicating whether mappings is cleaned
  *	for vfs only or for vfs and pfs
+ *
+ * Function cleans internal ids for virtual functions.
  */
 void adf_clean_vf_map(bool vf)
 {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 020d213f4c99..cae1aee5479a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -380,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
 /**
  * adf_init_misc_wq() - Init misc workqueue
  *
- * Function init workqueue 'qat_misc_wq' for general purpose.
- *
  * Return: 0 on success, error code otherwise.
  */
 int __init adf_init_misc_wq(void)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index b05c3957a160..cdbb2d687b1b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
 /**
  * adf_init_vf_wq() - Init workqueue for VF
  *
- * Function init workqueue 'adf_vf_stop_wq' for VF.
- *
 * Return: 0 on success, error code otherwise.
  */
 int __init adf_init_vf_wq(void)
--
Gitee

From cf0f843424af29ca933ee68b46e1429960aabd33 Mon Sep 17 00:00:00 2001
From: Adam Guerin
Date: Fri, 16 Feb 2024 15:20:00 +0000
Subject: [PATCH 778/953] crypto: qat - fix comment structure

ANBZ: #8589

commit bca79b9f5639b2fd4692904bce696291336e0246 upstream.

Intel-SIG: commit bca79b9f5639 crypto: qat - fix comment structure
Backport to support Intel QAT in-tree driver

Move comment description to the same line as the function name.
Fixes: fcf60f4bcf54 ("crypto: qat - add support for 420xx devices") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index a87d29ae724f..7909b51e97c3 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -372,6 +372,13 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) if (!fw_config) return 0; + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + for (i = 0; i < RP_GROUP_COUNT; i++) { switch (fw_config[i].ae_mask) { case ADF_AE_GROUP_0: @@ -400,6 +407,7 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) } } +set_mask: ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | -- Gitee From 96bcd66b77686def8ffdc745d2dc2d6670588c85 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 16 Feb 2024 18:21:56 +0100 Subject: [PATCH 780/953] crypto: qat - make ring to service map common for QAT GEN4 ANBZ: #8589 commit ed3d95fe788dec7c23bb20b41f8af47cbce04715 upstream. Intel-SIG: commit ed3d95fe788d crypto: qat - make ring to service map common for QAT GEN4 Backport to support Intel QAT in-tree driver The function get_ring_to_svc_map() is present in both 420xx and 4xxx drivers. Rework the logic to make it generic to GEN4 devices and move it to qat_common/adf_gen4_hw_data.c. 
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 72 +++++-------------- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 72 +++++-------------- .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../intel/qat/qat_common/adf_gen4_hw_data.c | 56 +++++++++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 1 + 5 files changed, 90 insertions(+), 112 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 7909b51e97c3..1102c47f8293 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -361,61 +361,6 @@ static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) } } -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) -{ - enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; - const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; - - fw_config = get_fw_config(accel_dev); - if (!fw_config) - return 0; - - /* If dcc, all rings handle compression requests */ - if (adf_get_service_enabled(accel_dev) == SVC_DCC) { - for (i = 0; i < RP_GROUP_COUNT; i++) - rps[i] = COMP; - goto set_mask; - } - - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } - } - -set_mask: - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; - - return ring_to_svc_map; -} - static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, const char * const fw_objs[], int num_objs) { @@ -441,6 +386,20 @@ static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -504,12 +463,13 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_mmp_name = ADF_420XX_MMP; hw_data->uof_get_name = uof_get_name_420xx; hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->get_rp_group = get_rp_group; hw_data->get_ena_thd_mask = get_ena_thd_mask; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = 
adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index e171cddf6f02..927506cf271d 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -320,61 +320,6 @@ static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num) } } -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) -{ - enum adf_cfg_service_type rps[RP_GROUP_COUNT]; - const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; - - fw_config = get_fw_config(accel_dev); - if (!fw_config) - return 0; - - /* If dcc, all rings handle compression requests */ - if (adf_get_service_enabled(accel_dev) == SVC_DCC) { - for (i = 0; i < RP_GROUP_COUNT; i++) - rps[i] = COMP; - goto set_mask; - } - - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } - } - -set_mask: - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; - - return ring_to_svc_map; -} - static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, const char * const fw_objs[], int num_objs) { @@ -407,6 +352,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -487,11 +446,12 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) break; } hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->get_rp_group = get_rp_group; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 0f26aa976c8c..08658c3a01e9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -248,6 +248,7 @@ struct adf_hw_device_data { void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); + int (*uof_get_obj_type)(struct adf_accel_dev 
*accel_dev, u32 obj_num); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index f752653ccb47..d28e1921940a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -4,6 +4,7 @@ #include "adf_accel_devices.h" #include "adf_cfg_services.h" #include "adf_common_drv.h" +#include "adf_fw_config.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" @@ -433,3 +434,58 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) return 0; } EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); + +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; + unsigned int ae_mask, start_id, worker_obj_cnt, i; + u16 ring_to_svc_map; + int rp_group; + + if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask || + !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs) + return 0; + + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + start_id = worker_obj_cnt - RP_GROUP_COUNT; + + for (i = start_id; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return 0; + + switch (hw_data->uof_get_obj_type(accel_dev, i)) { + case ADF_FW_SYM_OBJ: + rps[rp_group] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[rp_group] = ASYM; + break; + case ADF_FW_DC_OBJ: + rps[rp_group] = COMP; + break; + default: + rps[rp_group] = 0; + break; + } + } + +set_mask: + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 7d8a774cadc8..c6e80df5a85a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -235,5 +235,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); #endif -- Gitee From 8ef9c316fb73da59133773e253cf2b47e8610a9b Mon Sep 17 00:00:00 2001 From: Aichun Shi Date: Tue, 26 Mar 2024 15:48:22 +0800 Subject: [PATCH 781/953] x86: configs: Add Intel QuickAssist Technology(QAT) kernel config ANBZ: #8589 Intel-SIG: no upstream x86: configs: Add Intel QuickAssist Technology(QAT) kernel config Backport to support Intel QAT in-tree driver Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: 
https://gitee.com/anolis/cloud-kernel/pulls/2954 --- arch/x86/configs/anolis-debug_defconfig | 4 +++- arch/x86/configs/anolis_defconfig | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index e9f3e126b433..707eaf6a15ef 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7436,10 +7436,12 @@ CONFIG_CRYPTO_DEV_QAT=m CONFIG_CRYPTO_DEV_QAT_DH895xCC=m CONFIG_CRYPTO_DEV_QAT_C3XXX=m CONFIG_CRYPTO_DEV_QAT_C62X=m -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +CONFIG_CRYPTO_DEV_QAT_4XXX=m +# CONFIG_CRYPTO_DEV_QAT_420XX is not set CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index caace118a265..ad45ec0bbcbf 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7427,10 +7427,12 @@ CONFIG_CRYPTO_DEV_QAT=m CONFIG_CRYPTO_DEV_QAT_DH895xCC=m CONFIG_CRYPTO_DEV_QAT_C3XXX=m CONFIG_CRYPTO_DEV_QAT_C62X=m -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +CONFIG_CRYPTO_DEV_QAT_4XXX=m +# CONFIG_CRYPTO_DEV_QAT_420XX is not set CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set -- Gitee From 99746d859674370d2ab0fe8794d11944f745679b Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:12 +0800 Subject: [PATCH 782/953] KVM: x86: Consolidate flags for __linearize() ANBZ: #8355 commit 7b0dd9430cf0c1ae19645d2a6608a5fb57faffe4 upstream. Consolidate @write and @fetch of __linearize() into a set of flags so that additional flags can be added without needing more/new boolean parameters, to precisely identify the access type. No functional change intended. Intel-SIG: commit 7b0dd9430cf0 KVM: x86: Consolidate flags for __linearize() Backport KVM Linear Address Masking (LAM) support. 
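
The conversion at each call site is mechanical; for illustration
(mirroring the hunks below):

	/* before: access type passed as two booleans */
	rc = __linearize(ctxt, addr, &max_size, size, write, false,
			 ctxt->mode, linear);

	/* after: a single extensible flags word */
	rc = __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
			 write ? X86EMUL_F_WRITE : 0);
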
Signed-off-by: Binbin Wu
Reviewed-by: Chao Gao
Acked-by: Kai Huang
Tested-by: Xuelian Guo
Link: https://lore.kernel.org/r/20230913124227.12574-2-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li
Reviewed-by: Xuchun Shang
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2805
---
 arch/x86/kvm/emulate.c     | 21 +++++++++++----------
 arch/x86/kvm/kvm_emulate.h |  4 ++++
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2673cd5c46cb..87ee1802166a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 				       struct segmented_address addr,
 				       unsigned *max_size, unsigned size,
-				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       enum x86emul_mode mode, ulong *linear,
+				       unsigned int flags)
 {
 	struct desc_struct desc;
 	bool usable;
@@ -717,11 +717,11 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		if (!usable)
 			goto bad;
 		/* code segment in protected mode or read-only data segment */
-		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
-		     || !(desc.type & 2)) && write)
+		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
+		    (flags & X86EMUL_F_WRITE))
 			goto bad;
 		/* unreadable code segment */
-		if (!fetch && (desc.type & 8) && !(desc.type & 2))
+		if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
 			goto bad;
 		lim = desc_limit_scaled(&desc);
 		if (!(desc.type & 8) && (desc.type & 4)) {
@@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     ulong *linear)
 {
 	unsigned max_size;
-	return __linearize(ctxt, addr, &max_size, size, write, false,
-			   ctxt->mode, linear);
+	return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
+			   write ? X86EMUL_F_WRITE : 0);
 }
 
 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
+			 X86EMUL_F_FETCH);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->_eip = addr.ea;
 	return rc;
@@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * boundary check itself. Instead, we use max_size to check
 	 * against op_size.
 	 */
-	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-			 &linear);
+	rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
+			 X86EMUL_F_FETCH);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index be7aeb9b8ea3..e24c8ac7b930 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -88,6 +88,10 @@ struct x86_instruction_info {
 #define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */
 #define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */
 
+/* x86-specific emulation flags */
+#define X86EMUL_F_WRITE		BIT(0)
+#define X86EMUL_F_FETCH		BIT(1)
+
 struct x86_emulate_ops {
 	void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
 	/*
--
Gitee

From 9a0271af26767271797f2fa376a3a8dd3cae2f2a Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Wed, 13 Sep 2023 20:42:14 +0800
Subject: [PATCH 783/953] KVM: x86: Add an emulation flag for implicit system
 access

ANBZ: #8355

commit 3963c52df42231f72277cd138994ac94f1183d2b upstream.

Add an emulation flag X86EMUL_F_IMPLICIT to identify implicit system
access in instruction emulation. Don't bother wiring up any usage at
this point, as Linear Address Space Separation (LASS) will be the
first "real" consumer of the flag and LASS support will require
dedicated hooks, i.e. there aren't any existing calls where passing
X86EMUL_F_IMPLICIT is meaningful.

Add the IMPLICIT flag even though there's no imminent usage so that
Linear Address Masking (LAM) support can reference the flag to
document that addresses for implicit accesses aren't untagged.

Intel-SIG: commit 3963c52df422 KVM: x86: Add an emulation flag for implicit
 system access
Backport KVM Linear Address Masking (LAM) support.
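
"Implicit" here means accesses the CPU generates on its own, e.g. a GDT
or LDT descriptor read during a far transfer, as opposed to an
instruction operand. A future caller would look roughly like this
(assumed shape; nothing in this patch passes the flag yet):

	rc = __linearize(ctxt, addr, &max_size, size, ctxt->mode, &linear,
			 X86EMUL_F_IMPLICIT);
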
Signed-off-by: Binbin Wu
Tested-by: Xuelian Guo
Link: https://lore.kernel.org/r/20230913124227.12574-4-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li
Reviewed-by: Xuchun Shang
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2805
---
 arch/x86/kvm/kvm_emulate.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index e24c8ac7b930..65fc7ef5ca3d 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -91,6 +91,7 @@ struct x86_instruction_info {
 /* x86-specific emulation flags */
 #define X86EMUL_F_WRITE		BIT(0)
 #define X86EMUL_F_FETCH		BIT(1)
+#define X86EMUL_F_IMPLICIT	BIT(2)
 
 struct x86_emulate_ops {
 	void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
--
Gitee

From 267dbb58284a3efcbd789e19cf5de41969cc63b5 Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Wed, 13 Sep 2023 20:42:15 +0800
Subject: [PATCH 784/953] KVM: x86: Add X86EMUL_F_INVLPG and pass it in
 em_invlpg()

ANBZ: #8355

commit 538ac9a92d669c4ccfc64739a32efab2793cea1d upstream.

Add an emulation flag X86EMUL_F_INVLPG, which is used to identify an
instruction that does TLB invalidation without true memory access.

Only invlpg & invlpga implemented in emulator belong to this kind.
invlpga doesn't need additional information for emulation. Just pass
the flag to em_invlpg().

Linear Address Masking (LAM) and Linear Address Space Separation
(LASS) don't apply to addresses that are inputs to TLB invalidation.
The flag will be consumed to support LAM/LASS virtualization.

Intel-SIG: commit 538ac9a92d66 KVM: x86: Add X86EMUL_F_INVLPG and pass it in
 em_invlpg()
Backport KVM Linear Address Masking (LAM) support.

Signed-off-by: Binbin Wu
Tested-by: Xuelian Guo
Link: https://lore.kernel.org/r/20230913124227.12574-5-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li
Reviewed-by: Xuchun Shang
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2805
---
 arch/x86/kvm/emulate.c     | 4 +++-
 arch/x86/kvm/kvm_emulate.h | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 87ee1802166a..ceec8c5f9687 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3440,8 +3440,10 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
 	ulong linear;
+	unsigned int max_size;
 
-	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
+			 &linear, X86EMUL_F_INVLPG);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->ops->invlpg(ctxt, linear);
 	/* Disable writeback. */
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 65fc7ef5ca3d..8bd9b23543cc 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -92,6 +92,7 @@ struct x86_instruction_info {
 #define X86EMUL_F_WRITE		BIT(0)
 #define X86EMUL_F_FETCH		BIT(1)
 #define X86EMUL_F_IMPLICIT	BIT(2)
+#define X86EMUL_F_INVLPG	BIT(3)
 
 struct x86_emulate_ops {
 	void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
--
Gitee

From 0494b095e25372bcd195da04bf1614433680e06b Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Wed, 13 Sep 2023 20:42:16 +0800
Subject: [PATCH 785/953] KVM: x86/mmu: Drop non-PA bits when getting GFN for
 guest's PGD

ANBZ: #8355

commit a130066f74008858ac425b7497d231742474a0ea upstream.

Drop non-PA bits when getting GFN for guest's PGD with the maximum
theoretical mask for guest MAXPHYADDR.

Do it unconditionally because it's harmless for 32-bit guests,
querying 64-bit mode would be more expensive, and for EPT the mask
isn't tied to guest mode. Using PT_BASE_ADDR_MASK would be technically
wrong (PAE paging has 64-bit elements _except_ for CR3, which has only
32 valid bits), it wouldn't matter in practice though.

Opportunistically use GENMASK_ULL() to define __PT_BASE_ADDR_MASK.

Intel-SIG: commit a130066f7400 KVM: x86/mmu: Drop non-PA bits when getting
 GFN for guest's PGD
Backport KVM Linear Address Masking (LAM) support.
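
As a worked example of why the masking matters once CR3 can carry
non-address bits: with __PT_BASE_ADDR_MASK = GENMASK_ULL(51, 12), a
root_pgd of (BIT_ULL(61) | 0x123000), i.e. a LAM control bit plus the
page-table base, still yields GFN 0x123:

	root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;	/* 0x123 */

whereas shifting the raw value would fold bit 61 into the GFN.
(Illustrative arithmetic; the change itself is in the hunks below.)
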
Signed-off-by: Binbin Wu
Tested-by: Xuelian Guo
Link: https://lore.kernel.org/r/20230913124227.12574-6-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li
Reviewed-by: Xuchun Shang
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2805
---
 arch/x86/kvm/mmu/mmu.c          | 2 +-
 arch/x86/kvm/mmu/mmu_internal.h | 1 +
 arch/x86/kvm/mmu/paging_tmpl.h  | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f7901cb4d2fa..1551657ff77d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3774,7 +3774,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	hpa_t root;
 
 	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
-	root_gfn = root_pgd >> PAGE_SHIFT;
+	root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
 	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
 		mmu->root.hpa = kvm_mmu_get_dummy_root();
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index decc1f153669..68f8564d85a9 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -13,6 +13,7 @@
 #endif
 
 /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
+#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
 #define __PT_LEVEL_SHIFT(level, bits_per_level)	\
 	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
 #define __PT_INDEX(address, level, bits_per_level) \
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index c85255073f67..4d4e98fe4f35 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -62,7 +62,7 @@
 #endif
 
 /* Common logic, but per-type values.  These also need to be undefined. */
-#define PT_BASE_ADDR_MASK	((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
+#define PT_BASE_ADDR_MASK	((pt_element_t)__PT_BASE_ADDR_MASK)
 #define PT_LVL_ADDR_MASK(lvl)	__PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
 #define PT_LVL_OFFSET_MASK(lvl)	__PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
 #define PT_INDEX(addr, lvl)	__PT_INDEX(addr, lvl, PT_LEVEL_BITS)
--
Gitee

From 825e67bf60f3a3b5c3b56b4768a9da9be75b3aab Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Wed, 13 Sep 2023 20:42:17 +0800
Subject: [PATCH 786/953] KVM: x86: Add & use kvm_vcpu_is_legal_cr3() to check
 CR3's legality

ANBZ: #8355

commit 2c49db455ee27c72a680c9e4fad1c12433902ee3 upstream.

Add and use kvm_vcpu_is_legal_cr3() to check CR3's legality to provide
a clear distinction between CR3 and GPA checks. This will allow
exempting bits from kvm_vcpu_is_legal_cr3() without affecting general
GPA checks, e.g. for upcoming features that will use high bits in CR3
for feature enabling.

No functional change intended.

Intel-SIG: commit 2c49db455ee2 KVM: x86: Add & use kvm_vcpu_is_legal_cr3()
 to check CR3's legality
Backport KVM Linear Address Masking (LAM) support.
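
This split is what later lets LAM's CR3 control bits be exempted
without touching general GPA checks; a sketch of the kind of follow-up
the helper enables (assumed shape, not part of this patch):

	static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
	{
		if (guest_can_use(vcpu, X86_FEATURE_LAM))
			cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

		return kvm_vcpu_is_legal_gpa(vcpu, cr3);
	}
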
Signed-off-by: Binbin Wu
Tested-by: Xuelian Guo
Link: https://lore.kernel.org/r/20230913124227.12574-7-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li
Reviewed-by: Xuchun Shang
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2805
---
 arch/x86/kvm/cpuid.h      | 5 +++++
 arch/x86/kvm/svm/nested.c | 4 ++--
 arch/x86/kvm/vmx/nested.c | 4 ++--
 arch/x86/kvm/x86.c        | 4 ++--
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 284fa4704553..ca1fdab31d1e 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -278,4 +278,9 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
 			vcpu->arch.governed_features.enabled);
 }
 
+static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
+}
+
 #endif
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 60891b9ce25f..b780bd468d51 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -296,7 +296,7 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
 	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
 		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
 		    CC(!(save->cr0 & X86_CR0_PE)) ||
-		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
+		    CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
 			return false;
 	}
 
@@ -505,7 +505,7 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
 static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 			       bool nested_npt, bool reload_pdptrs)
 {
-	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
+	if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
 		return -EINVAL;
 
 	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c5ec0ef51ff7..db61cf8e3128 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1085,7 +1085,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
 			       bool nested_ept, bool reload_pdptrs,
 			       enum vm_entry_failure_code *entry_failure_code)
 {
-	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
+	if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) {
 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
 		return -EINVAL;
 	}
@@ -2912,7 +2912,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 
 	if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
 	    CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
-	    CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
+	    CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3)))
 		return -EINVAL;
 
 	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 665e77008531..2bf3f2a1e636 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1284,7 +1284,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	 * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
 	 * the current vCPU mode is accurate.
 	 */
-	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
+	if (!kvm_vcpu_is_legal_cr3(vcpu, cr3))
 		return 1;
 
 	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
@@ -11526,7 +11526,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		 */
 		if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
 			return false;
-		if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3))
+		if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3))
 			return false;
 	} else {
 		/*
--
Gitee

From 088aed9c60806e6c54e94af636f8e08f2c644ad1 Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Wed, 13 Sep 2023 20:42:18 +0800
Subject: [PATCH 787/953] KVM: x86: Remove kvm_vcpu_is_illegal_gpa()

ANBZ: #8355

commit 9c8021d4ae85f1531230fc33653e06e9f1fdb7f1 upstream.

Remove kvm_vcpu_is_illegal_gpa() and use !kvm_vcpu_is_legal_gpa()
instead. The "illegal" helper actually predates the "legal" helper;
the only reason the "illegal" variant wasn't removed by commit
4bda0e97868a ("KVM: x86: Add a helper to check for a legal GPA") was
to avoid code churn. Now that CR3 has a dedicated helper, there are
fewer callers, and so the code churn isn't that much of a deterrent.

No functional change intended.

Intel-SIG: commit 9c8021d4ae85 KVM: x86: Remove kvm_vcpu_is_illegal_gpa()
Backport KVM Linear Address Masking (LAM) support.

Signed-off-by: Binbin Wu
Tested-by: Xuelian Guo
Link: https://lore.kernel.org/r/20230913124227.12574-8-binbin.wu@linux.intel.com
[sean: provide a bit of history in the changelog]
Signed-off-by: Sean Christopherson
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li
Reviewed-by: Xuchun Shang
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2805
---
 arch/x86/kvm/cpuid.h      | 5 -----
 arch/x86/kvm/vmx/nested.c | 2 +-
 arch/x86/kvm/vmx/vmx.c    | 2 +-
 3 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index ca1fdab31d1e..31b7def60282 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -47,11 +47,6 @@ static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 	return !(gpa & vcpu->arch.reserved_gpa_bits);
 }
 
-static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
-{
-	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
-}
-
 static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
 						 gpa_t gpa, gpa_t alignment)
 {
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index db61cf8e3128..51622878d6e4 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2717,7 +2717,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
 	}
 
 	/* Reserved bits should not be set */
-	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
+	if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
 		return false;
 
 	/* AD, if set, should be supported */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0ca940187502..507048e618c7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5791,7 +5791,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	 * would also use advanced VM-exit information for EPT violations to
 	 * reconstruct the page fault error code.
 	 */
-	if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
+	if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
 		return kvm_emulate_instruction(vcpu, 0);
 
 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
--
Gitee

From e1d4ea07d4207b2b175be1d4c03e99312597c6c2 Mon Sep 17 00:00:00 2001
From: Binbin Wu
Date: Wed, 13 Sep 2023 20:42:19 +0800
Subject: [PATCH 788/953] KVM: x86: Introduce get_untagged_addr() in
 kvm_x86_ops and call it in emulator

ANBZ: #8355

commit 37a41847b770c722e98ace72f3851fb49b360c08 upstream.

Introduce a new interface get_untagged_addr() to kvm_x86_ops to untag
the metadata from linear address. Call the interface in linearization
of instruction emulator for 64-bit mode.

When enabled feature like Intel Linear Address Masking (LAM) or AMD
Upper Address Ignore (UAI), linear addresses may be tagged with
metadata that needs to be dropped prior to canonicality checks, i.e.
the metadata is ignored.

Introduce get_untagged_addr() to kvm_x86_ops to hide the vendor
specific code, as sadly LAM and UAI have different semantics. Pass the
emulator flags to allow vendor specific implementation to precisely
identify the access type (LAM doesn't untag certain accesses).

Intel-SIG: commit 37a41847b770 KVM: x86: Introduce get_untagged_addr() in
 kvm_x86_ops and call it in emulator
Backport KVM Linear Address Masking (LAM) support.

Signed-off-by: Binbin Wu
Reviewed-by: Chao Gao
Tested-by: Xuelian Guo
Link: https://lore.kernel.org/r/20230913124227.12574-9-binbin.wu@linux.intel.com
[sean: massage changelog]
Signed-off-by: Sean Christopherson
[ Zhiquan Li: amend commit log and resolve the conflict ]
Signed-off-by: Zhiquan Li
Reviewed-by: Xuchun Shang
Acked-by: Zelin Deng
Link: https://gitee.com/anolis/cloud-kernel/pulls/2805
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  2 ++
 arch/x86/kvm/emulate.c             |  2 +-
 arch/x86/kvm/kvm_emulate.h         |  3 +++
 arch/x86/kvm/x86.c                 | 10 ++++++++++
 5 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index b54e72a0100b..0c540ac3872e 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -135,6 +135,7 @@ KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL(get_untagged_addr)
 KVM_X86_OP_OPTIONAL(vm_attestation)
 KVM_X86_OP_OPTIONAL(control_pre_system_reset)
 KVM_X86_OP_OPTIONAL(control_post_system_reset)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 36587c6db312..0c4445de260d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1751,6 +1751,8 @@ struct kvm_x86_ops {
 	 */
 	unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
 
+	gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
+
 	int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len);
 	int (*control_pre_system_reset)(struct kvm *kvm);
 	int (*control_post_system_reset)(struct kvm *kvm);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index ceec8c5f9687..e223043ef5b2 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -701,7 +701,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	*max_size = 0;
 	switch (mode) {
 	case X86EMUL_MODE_PROT64:
-		*linear = la;
+		*linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
va_bits = ctxt_virt_addr_bits(ctxt); if (!__is_canonical_address(la, va_bits)) goto bad; diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 8bd9b23543cc..e6d149825169 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -230,6 +230,9 @@ struct x86_emulate_ops { int (*leave_smm)(struct x86_emulate_ctxt *ctxt); void (*triple_fault)(struct x86_emulate_ctxt *ctxt); int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr); + + gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr, + unsigned int flags); }; /* Type, address-of, and value of an instruction's operand. */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2bf3f2a1e636..7673143f173f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8357,6 +8357,15 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) kvm_vm_bugged(kvm); } +static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt, + gva_t addr, unsigned int flags) +{ + if (!kvm_x86_ops.get_untagged_addr) + return addr; + + return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt), addr, flags); +} + static const struct x86_emulate_ops emulate_ops = { .vm_bugged = emulator_vm_bugged, .read_gpr = emulator_read_gpr, @@ -8401,6 +8410,7 @@ static const struct x86_emulate_ops emulate_ops = { .leave_smm = emulator_leave_smm, .triple_fault = emulator_triple_fault, .set_xcr = emulator_set_xcr, + .get_untagged_addr = emulator_get_untagged_addr, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) -- Gitee From 94464db44ad4d4929b678eb265ab8710e529adc4 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:20 +0800 Subject: [PATCH 789/953] KVM: x86: Untag addresses for LAM emulation where applicable ANBZ: #8355 commit b39bd520a60c667a339e315ce7a3de2f7178f6e3 upstream. Stub in vmx_get_untagged_addr() and wire up calls from the emulator (via get_untagged_addr()) and "direct" calls from various VM-Exit handlers in VMX where LAM untagging is supposed to be applied. Defer implementing the guts of vmx_get_untagged_addr() to future patches purely to make the changes easier to consume. LAM is active only for 64-bit linear addresses and several types of accesses are exempted. - Cases need to untag address (handled in get_vmx_mem_address()) Operand(s) of VMX instructions and INVPCID. Operand(s) of SGX ENCLS. - Cases LAM doesn't apply to (no change needed) Operand of INVLPG. Linear address in INVPCID descriptor. Linear address in INVVPID descriptor. BASEADDR specified in SECS of ECREATE. Note: - LAM doesn't apply to write to control registers or MSRs - LAM masking is applied before walking page tables, i.e. the faulting linear address in CR2 doesn't contain the metadata. - The guest linear address saved in VMCS doesn't contain metadata. Intel-SIG: commit b39bd520a60c KVM: x86: Untag addresses for LAM emulation where applicable Backport KVM Linear Address Masking (LAM) support. 
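Why untagging must run before the canonicality checks can be seen in a standalone sketch; the LAM57-style metadata layout (bits 62:57) and the sample address are assumptions for illustration, and the masking expression anticipates the real implementation added later in this series:

#include <stdint.h>
#include <stdio.h>

static int64_t sign_extend64(uint64_t v, int b)  /* like the kernel helper */
{
	int s = 63 - b;
	return (int64_t)(v << s) >> s;
}

static int is_canonical(uint64_t la, int va_bits)
{
	return (uint64_t)sign_extend64(la, va_bits - 1) == la;
}

int main(void)
{
	/* user pointer carrying LAM57-style metadata in bits 62:57 */
	uint64_t tagged = 0x00007f0012345678ULL | (0x2dULL << 57);

	/* unmodified, the emulator's canonicality check would reject it */
	printf("canonical before untag: %d\n", is_canonical(tagged, 48));

	/* untag first (sign-extend from bit 56, preserve bit 63), then check */
	uint64_t la = ((uint64_t)sign_extend64(tagged, 56) & ~(1ULL << 63)) |
		      (tagged & (1ULL << 63));
	printf("canonical after untag:  %d\n", is_canonical(la, 48));
	return 0;
}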
Signed-off-by: Binbin Wu Reviewed-by: Chao Gao Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-10-binbin.wu@linux.intel.com [sean: massage changelog] Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/vmx/nested.c | 5 +++++ arch/x86/kvm/vmx/sgx.c | 1 + arch/x86/kvm/vmx/vmx.c | 7 +++++++ arch/x86/kvm/vmx/vmx.h | 2 ++ arch/x86/kvm/x86.c | 4 ++++ 5 files changed, 19 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 51622878d6e4..4ba46e1b29d2 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4980,6 +4980,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, else *ret = off; + *ret = vmx_get_untagged_addr(vcpu, *ret, 0); /* Long mode: #GP(0)/#SS(0) if the memory address is in a * non-canonical form. This is the only check on the memory * destination for long mode! @@ -5797,6 +5798,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. + */ if (!operand.vpid || is_noncanonical_address(operand.gla, vcpu)) return nested_vmx_fail(vcpu, diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 3e822e582497..6fef01e0536e 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, if (!IS_ALIGNED(*gva, alignment)) { fault = true; } else if (likely(is_64_bit_mode(vcpu))) { + *gva = vmx_get_untagged_addr(vcpu, *gva, 0); fault = is_noncanonical_address(*gva, vcpu); } else { *gva &= 0xffffffff; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 507048e618c7..29f2cc5a2084 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8222,6 +8222,11 @@ static void vmx_vm_destroy(struct kvm *kvm) free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm)); } +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) +{ + return gva; +} + static struct kvm_x86_ops vmx_x86_ops __initdata = { .name = KBUILD_MODNAME, @@ -8362,6 +8367,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .complete_emulated_msr = kvm_complete_insn_gp, .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, + + .get_untagged_addr = vmx_get_untagged_addr, }; static unsigned int vmx_handle_intel_pt_intr(void) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index c2130d2c8e24..45cee1a8bc0a 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -420,6 +420,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type); u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu); u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu); +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool value) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7673143f173f..8e4981e06ede 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13460,6 +13460,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) switch (type) { case INVPCID_TYPE_INDIV_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. 
+ */ if ((!pcid_enabled && (operand.pcid != 0)) || is_noncanonical_address(operand.gla, vcpu)) { kvm_inject_gp(vcpu, 0); -- Gitee From 386fb035c52c5ccf9296b6d680ede9658ce556b7 Mon Sep 17 00:00:00 2001 From: Robert Hoo Date: Wed, 13 Sep 2023 20:42:21 +0800 Subject: [PATCH 790/953] KVM: x86: Virtualize LAM for supervisor pointer ANBZ: #8355 commit 93d1c9f498a7505e0e0a0198f3b3d7f97fcc5fa6 upstream. Add support to allow guests to set the new CR4 control bit for LAM, and add the implementation to get the untagged address for supervisor pointers. LAM modifies the canonicality check applied to 64-bit linear addresses for data accesses, allowing software to use the untranslated address bits for metadata; hardware masks the metadata bits off before using the addresses to access memory. LAM uses CR4.LAM_SUP (bit 28) to configure and enable LAM for supervisor pointers. It also changes VMENTER to allow the bit to be set in VMCS's HOST_CR4 and GUEST_CR4 to support virtualization. Note, CR4.LAM_SUP is allowed to be set even when not in 64-bit mode, but it will not take effect since LAM only applies to 64-bit linear addresses. Move CR4.LAM_SUP out of CR4_RESERVED_BITS; whether it is reserved now depends on whether the vCPU supports LAM. Leave it intercepted, both to prevent the guest from setting the bit if LAM is not exposed to the guest and to avoid a vmread every time KVM fetches its value, with the expectation that the guest won't toggle the bit frequently. Set the CR4.LAM_SUP bit in the emulated IA32_VMX_CR4_FIXED1 MSR for guests, to allow guests to enable LAM for supervisor pointers in nested VMX operation. Hardware is not required to do a TLB flush when CR4.LAM_SUP is toggled, so KVM doesn't need to emulate a TLB flush for it. There is no connection to other features or vmx_exec_controls, and no other code is needed in {kvm,vmx}_set_cr4(). Skip address untagging for instruction fetches (which include branch targets), the operand of INVLPG instructions, and implicit system accesses, all of which are not subject to untagging. Note, get_untagged_addr() isn't invoked for implicit system accesses as there is no reason to do so, but check the flag anyway for documentation purposes. Intel-SIG: commit 93d1c9f498a7 KVM: x86: Virtualize LAM for supervisor pointer Backport KVM Linear Address Masking (LAM) support.
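Concretely, the supervisor-pointer rule can be checked with a small standalone program. The metadata layout (bits 62:48 with 4-level paging) and the sample address are illustrative assumptions; the masking expression is the one this patch adds to vmx_get_untagged_addr():

#include <stdint.h>
#include <stdio.h>

static int64_t sign_extend64(uint64_t v, int b)
{
	int s = 63 - b;
	return (int64_t)(v << s) >> s;
}

int main(void)
{
	uint64_t gva = 0xffff888812345678ULL;    /* canonical supervisor VA */
	/* LAM_SUP with 4-level paging: metadata may live in bits 62:48 */
	uint64_t tagged = (1ULL << 63) | (0x1234ULL << 48) |
			  (gva & ((1ULL << 48) - 1));
	int lam_bit = 47;                        /* CR4.LA57 set would give 56 */

	/* sign-extend from lam_bit, but keep the raw bit 63 so a user
	 * access never turns into a supervisor access or vice versa */
	uint64_t untagged = ((uint64_t)sign_extend64(tagged, lam_bit) &
			     ~(1ULL << 63)) | (tagged & (1ULL << 63));

	printf("tagged:   %#llx\n", (unsigned long long)tagged);
	printf("untagged: %#llx\n", (unsigned long long)untagged);  /* == gva */
	return 0;
}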
Signed-off-by: Robert Hoo Co-developed-by: Binbin Wu Signed-off-by: Binbin Wu Reviewed-by: Chao Gao Reviewed-by: Kai Huang Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-11-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/vmx/vmx.c | 39 ++++++++++++++++++++++++++++++++- arch/x86/kvm/x86.h | 2 ++ 3 files changed, 42 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0c4445de260d..c2442ffe3056 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -125,7 +125,8 @@ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \ - | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP)) + | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \ + | X86_CR4_LAM_SUP)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 29f2cc5a2084..82489168c63e 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7690,6 +7690,9 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); + entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1); + cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM)); + #undef cr4_fixed1_update } @@ -8222,9 +8225,43 @@ static void vmx_vm_destroy(struct kvm *kvm) free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm)); } +/* + * Note, the SDM states that the linear address is masked *after* the modified + * canonicality check, whereas KVM masks (untags) the address and then performs + * a "normal" canonicality check. Functionally, the two methods are identical, + * and when the masking occurs relative to the canonicality check isn't visible + * to software, i.e. KVM's behavior doesn't violate the SDM. + */ gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) { - return gva; + int lam_bit; + + if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG)) + return gva; + + if (!is_64_bit_mode(vcpu)) + return gva; + + /* + * Bit 63 determines if the address should be treated as user address + * or a supervisor address. + */ + if (!(gva & BIT_ULL(63))) { + /* KVM doesn't yet virtualize LAM_U{48,57}. */ + return gva; + } else { + if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP)) + return gva; + + lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47; + } + + /* + * Untag the address by sign-extending the lam_bit, but NOT to bit 63. + * Bit 63 is retained from the raw virtual address so that untagging + * doesn't change a user access to a supervisor access, and vice versa. 
+ */ + return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63)); } static struct kvm_x86_ops vmx_x86_ops __initdata = { diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 1e7be1f6ab29..53e883721e71 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -529,6 +529,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type); __reserved_bits |= X86_CR4_VMXE; \ if (!__cpu_has(__c, X86_FEATURE_PCID)) \ __reserved_bits |= X86_CR4_PCIDE; \ + if (!__cpu_has(__c, X86_FEATURE_LAM)) \ + __reserved_bits |= X86_CR4_LAM_SUP; \ __reserved_bits; \ }) -- Gitee From ebc9e0ce38f6cf723541a114493f015daf02565c Mon Sep 17 00:00:00 2001 From: Robert Hoo Date: Wed, 13 Sep 2023 20:42:22 +0800 Subject: [PATCH 791/953] KVM: x86: Virtualize LAM for user pointer ANBZ: #8355 commit 3098e6eca88e543ea0d190d1fa72b1c047bb3e7d upstream. Add support to allow guests to set the new CR3 control bits for Linear Address Masking (LAM), and add the implementation to get the untagged address for user pointers. LAM modifies the canonical check for 64-bit linear addresses, allowing software to use the masked/ignored address bits for metadata. Hardware masks off the metadata bits before using the linear addresses to access memory. LAM uses two new CR3 non-address bits, LAM_U48 (bit 62) and LAM_U57 (bit 61), to configure LAM for user pointers. LAM also changes VMENTER to allow both bits to be set in VMCS's HOST_CR3 and GUEST_CR3 for virtualization. When EPT is on, CR3 is not trapped by KVM and it's up to the guest to set any of the two LAM control bits. However, when EPT is off, the actual CR3 used by the guest is generated from the shadow MMU root, which is different from the CR3 that is *set* by the guest, and KVM needs to manually apply any active control bits to VMCS's GUEST_CR3 based on the cached CR3 *seen* by the guest. KVM manually checks the guest's CR3 to make sure it points to a valid guest physical address (i.e. to support a smaller MAXPHYSADDR in the guest). Extend this check to allow the two LAM control bits to be set. After the check, the LAM bits of the guest CR3 are stripped off to extract the guest physical address. In the nested case, for a guest which supports LAM, both VMCS12's HOST_CR3 and GUEST_CR3 are allowed to have the new LAM control bits set, i.e. when L0 enters L1 to emulate a VMEXIT from L2 to L1 or when L0 enters L2 directly. KVM also manually checks that VMCS12's HOST_CR3 and GUEST_CR3 are valid physical addresses. Extend that check to allow the new LAM control bits too. Note, LAM doesn't have a global control bit to turn it on/off completely, but purely depends on hardware's CPUID to determine whether it can be enabled. That means, when EPT is on, even when KVM doesn't expose LAM to the guest, the guest can still set LAM control bits in CR3 w/o causing problems. This is an unfortunate virtualization hole. KVM could choose to intercept CR3 in this case and inject a fault, but this would hurt performance when running a normal VM w/o LAM support. This is undesirable. Just let the guest do such an illegal thing; the worst case is that the guest gets killed when KVM eventually finds out it is misbehaving. Intel-SIG: commit 3098e6eca88e KVM: x86: Virtualize LAM for user pointer Backport KVM Linear Address Masking (LAM) support.
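The CR3 decode added here can likewise be exercised standalone. The CR3 bit positions come from the changelog above; untag_user() is a userspace model of the user-pointer branch, not the kernel function itself:

#include <stdint.h>
#include <stdio.h>

#define CR3_LAM_U57 (1ULL << 61)
#define CR3_LAM_U48 (1ULL << 62)

static int64_t sign_extend64(uint64_t v, int b)
{
	int s = 63 - b;
	return (int64_t)(v << s) >> s;
}

/* user-pointer branch (bit 63 clear): LAM_U57 takes precedence */
static uint64_t untag_user(uint64_t gva, uint64_t cr3)
{
	if (!(cr3 & (CR3_LAM_U57 | CR3_LAM_U48)))
		return gva;                      /* LAM off for user pointers */

	int lam_bit = (cr3 & CR3_LAM_U57) ? 56 : 47;
	return ((uint64_t)sign_extend64(gva, lam_bit) & ~(1ULL << 63)) |
	       (gva & (1ULL << 63));
}

int main(void)
{
	uint64_t tagged = 0x00007f0012345678ULL | (0x15ULL << 57);

	printf("LAM_U57: %#llx\n", (unsigned long long)untag_user(tagged, CR3_LAM_U57));
	printf("LAM off: %#llx\n", (unsigned long long)untag_user(tagged, 0));
	return 0;
}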
Suggested-by: Sean Christopherson Signed-off-by: Robert Hoo Co-developed-by: Binbin Wu Signed-off-by: Binbin Wu Reviewed-by: Kai Huang Reviewed-by: Chao Gao Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-12-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.h | 4 ++++ arch/x86/kvm/mmu.h | 9 +++++++++ arch/x86/kvm/vmx/vmx.c | 12 +++++++++--- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 31b7def60282..3c579ce2f60f 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -275,6 +275,10 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { + if (kvm_cpu_cap_has(X86_FEATURE_LAM) && + guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57); + return kvm_vcpu_is_legal_gpa(vcpu, cr3); } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 253fb2093d5d..e700f1f854ae 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -146,6 +146,15 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu)); } +static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu) +{ + if (!kvm_cpu_cap_has(X86_FEATURE_LAM) || + !guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + return 0; + + return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57); +} + static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) { u64 root_hpa = vcpu->arch.mmu->root.hpa; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 82489168c63e..43dab32d5a6d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3409,7 +3409,8 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, update_guest_cr3 = false; vmx_ept_load_pdptrs(vcpu); } else { - guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu); + guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) | + kvm_get_active_cr3_lam_bits(vcpu); } if (update_guest_cr3) @@ -8235,6 +8236,7 @@ static void vmx_vm_destroy(struct kvm *kvm) gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) { int lam_bit; + unsigned long cr3_bits; if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG)) return gva; @@ -8247,8 +8249,12 @@ gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags * or a supervisor address. */ if (!(gva & BIT_ULL(63))) { - /* KVM doesn't yet virtualize LAM_U{48,57}. */ - return gva; + cr3_bits = kvm_get_active_cr3_lam_bits(vcpu); + if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48))) + return gva; + + /* LAM_U48 is ignored if LAM_U57 is set. */ + lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47; } else { if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP)) return gva; -- Gitee From ce9dc57cfd66b5553e237ca6052c638bdcdccc5b Mon Sep 17 00:00:00 2001 From: Robert Hoo Date: Wed, 13 Sep 2023 20:42:23 +0800 Subject: [PATCH 792/953] KVM: x86: Advertise and enable LAM (user and supervisor) ANBZ: #8355 commit 703d794cb8cb28c07b22c1c845f5c4d4c419aff7 upstream. LAM is enumerated by CPUID.7.1:EAX.LAM[bit 26]. Advertise the feature to userspace and enable it as the final step after the LAM virtualization support for supervisor and user pointers. SGX LAM support is not advertised yet. 
SGX LAM support is enumerated in SGX's own CPUID and there's no hard requirement that it must be supported when LAM is reported in CPUID leaf 0x7. Intel-SIG: commit 703d794cb8cb KVM: x86: Advertise and enable LAM (user and supervisor) Backport KVM Linear Address Masking (LAM) support. Signed-off-by: Robert Hoo Signed-off-by: Binbin Wu Reviewed-by: Jingqi Liu Reviewed-by: Chao Gao Reviewed-by: Kai Huang Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-13-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index d65f91d0a8bc..ca52b05c1f4f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -669,7 +669,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_mask(CPUID_7_1_EAX, F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | F(FZRM) | F(FSRS) | F(FSRC) | - F(AMX_FP16) | F(AVX_IFMA) + F(AMX_FP16) | F(AVX_IFMA) | F(LAM) ); kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX, -- Gitee From a206c4c018a11c5a400f717921ceacaae75f50ec Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:24 +0800 Subject: [PATCH 793/953] KVM: x86: Use KVM-governed feature framework to track "LAM enabled" ANBZ: #8355 commit 183bdd161c2b773a62f01d1c030f5a3a5b7c33b5 upstream. Use the governed feature framework to track if Linear Address Masking (LAM) is "enabled", i.e. if LAM can be used by the guest. Using the framework to avoid the relative expensive call guest_cpuid_has() during cr3 and vmexit handling paths for LAM. No functional change intended. Intel-SIG: commit 183bdd161c2b KVM: x86: Use KVM-governed feature framework to track "LAM enabled" Backport KVM Linear Address Masking (LAM) support. 
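The framework boils down to caching the expensive CPUID-derived answer once per vCPU. A rough standalone model of the idea follows; KVM's real bookkeeping lives in governed_features.h and struct kvm_vcpu_arch, and the names below are invented for this sketch:

#include <stdint.h>
#include <stdio.h>

enum { GF_LAM, GF_NR };                  /* invented index for the sketch */

struct vcpu_model {
	uint32_t governed;               /* filled once, read on hot paths */
};

/* slow path, run when guest CPUID is (re)set: combine host capability
 * with what the guest's CPUID actually exposes */
static void after_set_cpuid(struct vcpu_model *v, int host_has_lam,
			    int guest_cpuid_has_lam)
{
	if (host_has_lam && guest_cpuid_has_lam)
		v->governed |= 1u << GF_LAM;
}

/* hot path (CR3 loads, VM-exits): a bit test instead of a CPUID walk */
static int guest_can_use(const struct vcpu_model *v, int f)
{
	return !!(v->governed & (1u << f));
}

int main(void)
{
	struct vcpu_model v = { 0 };

	after_set_cpuid(&v, 1, 1);
	printf("guest can use LAM: %d\n", guest_can_use(&v, GF_LAM));
	return 0;
}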
Signed-off-by: Binbin Wu Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-14-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.h | 3 +-- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/mmu.h | 3 +-- arch/x86/kvm/vmx/vmx.c | 1 + 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 3c579ce2f60f..93c63ba29337 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -275,8 +275,7 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { - if (kvm_cpu_cap_has(X86_FEATURE_LAM) && - guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + if (guest_can_use(vcpu, X86_FEATURE_LAM)) cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57); return kvm_vcpu_is_legal_gpa(vcpu, cr3); diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 423a73395c10..ad463b1ed4e4 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -16,6 +16,7 @@ KVM_GOVERNED_X86_FEATURE(PAUSEFILTER) KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD) KVM_GOVERNED_X86_FEATURE(VGIF) KVM_GOVERNED_X86_FEATURE(VNMI) +KVM_GOVERNED_X86_FEATURE(LAM) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index e700f1f854ae..f04cc5ade1cd 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -148,8 +148,7 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu) { - if (!kvm_cpu_cap_has(X86_FEATURE_LAM) || - !guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + if (!guest_can_use(vcpu, X86_FEATURE_LAM)) return 0; return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 43dab32d5a6d..f54929487b84 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7780,6 +7780,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM); vmx_setup_uret_msrs(vmx); -- Gitee From 9d36811eea530c4cc54efe30baedcb9e057fb547 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Mon, 26 Feb 2024 11:54:35 +0800 Subject: [PATCH 794/953] fuse: add support for explicit export disabling ANBZ: #8702 commit e022f6a1c711ab6d76e9e59dce77e2b25df75076 upstream. open_by_handle_at(2) can fail with -ESTALE with a valid handle returned by a previous name_to_handle_at(2) for evicted fuse inodes, which is especially common when entry_valid_timeout is 0, e.g. when the fuse daemon is in "cache=none" mode. The time sequence is like: name_to_handle_at(2) # succeed evict fuse inode open_by_handle_at(2) # fail The root cause is that, with 0 entry_valid_timeout, the dput() called in name_to_handle_at(2) will trigger iput -> evict(), which will send FUSE_FORGET to the daemon. The following open_by_handle_at(2) will send a new FUSE_LOOKUP request upon inode cache miss since the previous inode eviction. Then the fuse daemon may fail the FUSE_LOOKUP request with -ENOENT as the cached metadata of the requested inode has already been cleaned up during the previous FUSE_FORGET. 
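The failing sequence can be reproduced from userspace along these lines; a minimal sketch following the name_to_handle_at(2) man page, with error handling trimmed, and note that open_by_handle_at(2) requires CAP_DAC_READ_SEARCH:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	/* argv[1]: file on the fuse mount, argv[2]: the mount point */
	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	int mount_id, mfd, fd;

	if (argc < 3)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;
	if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");     /* succeeds in the scenario */
		return 1;
	}

	/* ...dentry put, fuse inode evicted, FUSE_FORGET sent... */

	mfd = open(argv[2], O_RDONLY | O_DIRECTORY);
	fd = open_by_handle_at(mfd, fh, O_RDONLY);
	if (fd < 0 && errno == ESTALE)
		fprintf(stderr, "handle went stale; fall back to open(2)\n");
	return 0;
}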
The returned -ENOENT is treated as -ESTALE when open_by_handle_at(2) returns. This confuses the application somehow, as open_by_handle_at(2) fails when the previous name_to_handle_at(2) succeeds. The returned errno is also confusing as the requested file is not deleted and already there. It is reasonable to fail name_to_handle_at(2) early in this case, after which the application can fallback to open(2) to access files. Since this issue typically appears when entry_valid_timeout is 0 which is configured by the fuse daemon, the fuse daemon is the right person to explicitly disable the export when required. Also considering FUSE_EXPORT_SUPPORT actually indicates the support for lookups of "." and "..", and there are existing fuse daemons supporting export without FUSE_EXPORT_SUPPORT set, for compatibility, we add a new INIT flag for such purpose. Reviewed-by: Amir Goldstein Signed-off-by: Jingbo Xu Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3008 --- fs/fuse/inode.c | 11 ++++++++++- include/uapi/linux/fuse.h | 5 +++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 8e5fba985097..aaad201d68d8 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1118,6 +1118,11 @@ static struct dentry *fuse_get_parent(struct dentry *child) return parent; } +/* only for fid encoding; no support for file handle */ +static const struct export_operations fuse_export_fid_operations = { + .encode_fh = fuse_encode_fh, +}; + static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, @@ -1292,6 +1297,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, fc->create_supp_group = 1; if (flags & FUSE_DIRECT_IO_ALLOW_MMAP) fc->direct_io_allow_mmap = 1; + if (flags & FUSE_NO_EXPORT_SUPPORT) + fm->sb->s_export_op = &fuse_export_fid_operations; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1338,7 +1345,8 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA | FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | - FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP; + FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | + FUSE_NO_EXPORT_SUPPORT; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; @@ -1533,6 +1541,7 @@ static int fuse_fill_super_submount(struct super_block *sb, sb->s_bdi = bdi_get(parent_sb->s_bdi); sb->s_xattr = parent_sb->s_xattr; + sb->s_export_op = parent_sb->s_export_op; sb->s_time_gran = parent_sb->s_time_gran; sb->s_blocksize = parent_sb->s_blocksize; sb->s_blocksize_bits = parent_sb->s_blocksize_bits; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index e7418d15fe39..33b56d9e4803 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -211,6 +211,9 @@ * 7.39 * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures + * + * 7.40 + * - add FUSE_NO_EXPORT_SUPPORT init flag */ #ifndef _LINUX_FUSE_H @@ -410,6 +413,7 @@ struct fuse_file_lock { * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. 
+ * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -449,6 +453,7 @@ struct fuse_file_lock { #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) #define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) +#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP -- Gitee From 55920717ae2198647ea63126751c9f183f04f5c4 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Sat, 18 Apr 2020 19:35:55 +0800 Subject: [PATCH 795/953] anolis: sched: add kconfig SCHED_SLI ANBZ: #8657 This introduces the new bool kconfig SCHED_SLI, determining whether the scheduler SLI feature should be built-in or not. Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- init/Kconfig | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/init/Kconfig b/init/Kconfig index d1904381ffc8..61c7ed19fa7b 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1124,6 +1124,21 @@ config CGROUP_DEVICE Provides a cgroup controller implementing whitelists for devices which a process in the cgroup can mknod or open. +config SCHED_SLI + bool "cgroup CPU usage and additional scheduler statistics" + depends on CGROUP_CPUACCT + depends on FAIR_GROUP_SCHED + default Y + help + This accounts CPU time spent by tasks in a cgroup into "usr%" "sys%" + "idle" "steal%" "irq%" "softirq%" "guest%". And this exports + nr_migrations, nr_running, nr_uninterruptible of a cgroup. + + The corresponding interface is cpuacct.proc_stat. + + Note CPU usage requires cpuacct and cpu cgroup subsys to be mounted + together to take effect. + config CGROUP_CPUACCT bool "Simple CPU accounting controller" help -- Gitee From 6adcf540c4d0e278e5559e5d69a9cadd097e1542 Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Mon, 11 Mar 2024 15:29:03 +0800 Subject: [PATCH 796/953] anolis: configs: enable CONFIG_SCHED_SLI ANBZ: #8657 Enable CONFIG_SCHED_SLI by default. 
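Once the config is enabled, the new interface can be read like any other cgroup file. A minimal sketch, assuming a conventional cgroup v1 "cpu,cpuacct" mount and a cgroup named "mygroup" (both are assumptions, adjust to the local setup):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/fs/cgroup/cpu,cpuacct/mygroup/cpuacct.proc_stat";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* one "name value" pair per line: user, nice, ..., nr_running, ... */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}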
Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + 4 files changed, 4 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 1c039289682b..a9dd69260bb5 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -195,6 +195,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 4b995ee48cd6..1935cdb56003 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -194,6 +194,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 707eaf6a15ef..ba7a304585a4 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -214,6 +214,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index ad45ec0bbcbf..1595b3a3616d 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -213,6 +213,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y -- Gitee From 59a2a648e797d69aad807eb3cb70320c1a1eae89 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Thu, 3 Jun 2021 15:14:39 +0800 Subject: [PATCH 797/953] anolis: sched: Maintain "nr_uninterruptible" in runqueue ANBZ: #8657 It's relatively easy to maintain nr_uninterruptible in the scheduler compared to doing it in cpuacct. We assume that the "cpu" and "cpuacct" controllers are bound together, so that the counter can be used for per-cgroup load. This will be needed to calculate the per-cgroup load average later.
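For a sense of how the counter gets used, a per-cgroup load sample could feed the same fixed-point decay the kernel uses for the global avenrun. A sketch assuming the kernel's 1-minute constants, with "active" standing in for a sampled h_nr_running + nr_uninterruptible of one cgroup:

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)
#define EXP_1	1884		/* 1/exp(5s/1min) in fixed point */

/* same shape as the kernel's calc_load() */
static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload = load * exp + active * (FIXED_1 - exp);

	if (active >= load)
		newload += FIXED_1 - 1;
	return newload / FIXED_1;
}

int main(void)
{
	unsigned long load = 0;
	int i;

	/* 3 runnable-or-uninterruptible tasks over two minutes of 5s ticks */
	for (i = 0; i < 24; i++)
		load = calc_load(load, EXP_1, 3 * FIXED_1);

	printf("load1 ~= %lu.%02lu\n", load >> FSHIFT,
	       ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}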
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/cgroup.h | 2 ++ include/linux/sched.h | 4 ++++ kernel/cgroup/cgroup.c | 6 ++++++ kernel/sched/core.c | 26 ++++++++++++++++++++++++-- kernel/sched/fair.c | 13 +++++++++++++ kernel/sched/rt.c | 13 +++++++++++++ kernel/sched/sched.h | 14 +++++++++++++- 7 files changed, 75 insertions(+), 3 deletions(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b307013b9c6c..f1219e29d309 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -121,6 +121,8 @@ void cgroup_file_show(struct cgroup_file *cfile, bool show); int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); +extern struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid); void cgroup_fork(struct task_struct *p); extern int cgroup_can_fork(struct task_struct *p, diff --git a/include/linux/sched.h b/include/linux/sched.h index 06117e8d7420..5190119e03a4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -133,6 +133,10 @@ struct user_event_mm; #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0) #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0) #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0) +#define task_contributes_to_load(task) \ + ((READ_ONCE((task)->__state) & TASK_UNINTERRUPTIBLE) != 0 && \ + (READ_ONCE((task)->__state) & TASK_FROZEN) == 0 && \ + (READ_ONCE((task)->__state) & TASK_NOLOAD) == 0) /* * Special states are those that do not use the normal wait-loop pattern. See diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index e4cfe8f49fe5..255c400953a9 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -493,6 +493,12 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, return &cgrp->self; } +struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid) +{ + return cgroup_css(cgrp, cgroup_subsys[(ssid)]); +} + /** * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss * @cgrp: the cgroup of interest diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 286dcd3de048..6e39e68d88be 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2136,6 +2136,12 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) p->sched_class->dequeue_task(rq, p, flags); } +static void update_nr_uninterruptible(struct task_struct *tsk, long inc) +{ + if (tsk->sched_class->update_nr_uninterruptible) + tsk->sched_class->update_nr_uninterruptible(tsk, inc); +} + void activate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_on_rq_migrating(p)) @@ -3789,8 +3795,10 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, lockdep_assert_rq_held(rq); - if (p->sched_contributes_to_load) + if (p->sched_contributes_to_load) { + update_nr_uninterruptible(p, -1); rq->nr_uninterruptible--; + } #ifdef CONFIG_SMP if (wake_flags & WF_MIGRATED) @@ -6803,8 +6811,10 @@ static void __sched notrace __schedule(unsigned int sched_mode) !(prev_state & TASK_NOLOAD) && !(prev_state & TASK_FROZEN); - if (prev->sched_contributes_to_load) + if (prev->sched_contributes_to_load) { + update_nr_uninterruptible(prev, 1); rq->nr_uninterruptible++; + } /* * __schedule() ttwu() @@ -10702,8 +10712,20 @@ void sched_move_task(struct task_struct 
*tsk) if (running) put_prev_task(rq, tsk); + /* decrease old group */ + if ((!queued && task_contributes_to_load(tsk)) || + (READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, -1); + sched_change_group(tsk, group); + /* increase new group after change */ + if ((!queued && task_contributes_to_load(tsk)) || + (READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, 1); + if (queued) enqueue_task(rq, tsk, queue_flags); if (running) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 44d9f645c817..3321918c0e39 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -13110,6 +13110,16 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task return rr_interval; } +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_fair(struct task_struct *p, long inc) +{ + struct sched_entity *se = &p->se; + + for_each_sched_entity(se) + cfs_rq_of(se)->nr_uninterruptible += inc; +} +#endif + /* * All the scheduling class methods: */ @@ -13157,6 +13167,9 @@ DEFINE_SCHED_CLASS(fair) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = task_is_throttled_fair, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_fair, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 4ac36eb4cdee..c75e1a62c6fa 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2704,6 +2704,16 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu) } #endif +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_rt(struct task_struct *p, long inc) +{ + struct sched_rt_entity *se = &p->rt; + + for_each_sched_rt_entity(se) + rt_rq_of_se(se)->nr_uninterruptible += inc; +} +#endif + DEFINE_SCHED_CLASS(rt) = { .enqueue_task = enqueue_task_rt, @@ -2740,6 +2750,9 @@ DEFINE_SCHED_CLASS(rt) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = task_is_throttled_rt, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_rt, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index fe396568d71c..2862e421e411 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -660,6 +660,8 @@ struct cfs_rq { #endif #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ + + unsigned long nr_uninterruptible; }; static inline int rt_bandwidth_enabled(void) @@ -706,6 +708,8 @@ struct rt_rq { struct rq *rq; struct task_group *tg; #endif + + unsigned long nr_uninterruptible; }; static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) @@ -1028,7 +1032,14 @@ struct rq { struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; - struct mm_struct *prev_mm; + + /* + * Frequent writing to prev_mm and clock_update_flags on local + * CPU causes cacheline containing idle to be invalidated on + * other CPUs. Put prev_mm and sequential fields on a new + * cacheline to fix it. 
+ */ + struct mm_struct *prev_mm ____cacheline_aligned; unsigned int clock_update_flags; u64 clock; @@ -2318,6 +2329,7 @@ struct sched_class { #ifdef CONFIG_SCHED_CORE int (*task_is_throttled)(struct task_struct *p, int cpu); #endif + void (*update_nr_uninterruptible)(struct task_struct *p, long inc); }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) -- Gitee From aedbcfb4f989362b4187b096a31ec6bc0cb82c09 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Thu, 3 Jun 2021 16:23:39 +0800 Subject: [PATCH 798/953] anolis: cpuacct: export cpuacct.proc_stat interface ANBZ: #8657 Add the cgroup file "cpuacct.proc_stat", we'll export per-cgroup cpu usages and some other scheduler statistics in this interface. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/stat.c | 2 +- include/linux/sched.h | 5 ++ kernel/sched/cpuacct.c | 148 +++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 5 ++ 4 files changed, 159 insertions(+), 1 deletion(-) diff --git a/fs/proc/stat.c b/fs/proc/stat.c index da60956b2915..ce8a751185f5 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -38,7 +38,7 @@ u64 get_idle_time(struct kernel_cpustat *kcs, int cpu) return idle; } -static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) +u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) { u64 iowait, iowait_usecs = -1ULL; diff --git a/include/linux/sched.h b/include/linux/sched.h index 5190119e03a4..4736b0902102 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2507,4 +2507,9 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); +struct cpuacct_usage_result { + u64 user, nice, system, irq, softirq; + u64 steal, iowait, idle, guest, guest_nice; +}; + #endif diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 0de9dda09949..67b6c7817397 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -20,11 +20,17 @@ static const char * const cpuacct_stat_desc[] = { [CPUACCT_STAT_SYSTEM] = "system", }; +struct cpuacct_prev_cputime { + struct prev_cputime prev_cputime1; /* utime and stime */ + struct prev_cputime prev_cputime2; /* user and nice */ +} ____cacheline_aligned; + /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { struct cgroup_subsys_state css; /* cpuusage holds pointer to a u64-type object on every CPU */ u64 __percpu *cpuusage; + struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; }; @@ -45,8 +51,10 @@ static inline struct cpuacct *parent_ca(struct cpuacct *ca) } static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); +static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime); static struct cpuacct root_cpuacct = { .cpustat = &kernel_cpustat, + .prev_cputime = &root_cpuacct_prev_cputime, .cpuusage = &root_cpuacct_cpuusage, }; @@ -55,6 +63,7 @@ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) { struct cpuacct *ca; + int i; if (!parent_css) return &root_cpuacct.css; @@ -71,8 +80,21 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!ca->cpustat) goto out_free_cpuusage; + ca->prev_cputime = alloc_percpu(struct cpuacct_prev_cputime); + if (!ca->prev_cputime) + goto out_free_cpustat; + + for_each_possible_cpu(i) { + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime1); + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime2); + } + return 
&ca->css; +out_free_cpustat: + free_percpu(ca->cpustat); out_free_cpuusage: free_percpu(ca->cpuusage); out_free_ca: @@ -86,6 +108,7 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css) { struct cpuacct *ca = css_ca(css); + free_percpu(ca->prev_cputime); free_percpu(ca->cpustat); free_percpu(ca->cpuusage); kfree(ca); @@ -289,6 +312,125 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) return 0; } +#ifdef CONFIG_SCHED_SLI +#ifndef arch_idle_time +#define arch_idle_time(cpu) 0 +#endif + +static inline struct task_group *cgroup_tg(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), + struct task_group, css); +} + +static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, + struct task_group *tg, struct cpuacct_usage_result *res) +{ + struct kernel_cpustat *kcpustat; + u64 *cpuusage; + struct cpuacct_prev_cputime *prev_cputime; + struct task_cputime cputime; + u64 tick_user, tick_nice, tick_sys, left, right; + struct sched_entity *se; + + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + if (unlikely(!tg)) { + memset(res, 0, sizeof(*res)); + return; + } + + cpuusage = per_cpu_ptr(ca->cpuusage, cpu); + + se = tg->se[cpu]; + prev_cputime = per_cpu_ptr(ca->prev_cputime, cpu); + tick_user = kcpustat->cpustat[CPUTIME_USER]; + tick_nice = kcpustat->cpustat[CPUTIME_NICE]; + tick_sys = kcpustat->cpustat[CPUTIME_SYSTEM]; + + /* Calculate system run time */ + cputime.sum_exec_runtime = *cpuusage; + cputime.utime = tick_user + tick_nice; + cputime.stime = tick_sys; + cputime_adjust(&cputime, &prev_cputime->prev_cputime1, &left, &right); + res->system = right; + + /* Calculate user and nice run time */ + cputime.sum_exec_runtime = left; /* user + nice */ + cputime.utime = tick_user; + cputime.stime = tick_nice; + cputime_adjust(&cputime, &prev_cputime->prev_cputime2, &left, &right); + res->user = left; + res->nice = right; + + res->irq = kcpustat->cpustat[CPUTIME_IRQ]; + res->softirq = kcpustat->cpustat[CPUTIME_SOFTIRQ]; + if (se) + res->steal = __schedstats_from_se(se)->wait_sum; + else + res->steal = 0; + res->guest = res->guest_nice = 0; /* currently always 0 */ +} + +static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + struct cgroup *cgrp = seq_css(sf)->cgroup; + u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + int cpu; + + user = nice = system = idle = iowait = + irq = softirq = steal = guest = 0; + + if (ca != &root_cpuacct) { + struct cpuacct_usage_result res; + + for_each_possible_cpu(cpu) { + rcu_read_lock(); + __cpuacct_get_usage_result(ca, cpu, + cgroup_tg(cgrp), &res); + rcu_read_unlock(); + + user += res.user; + nice += res.nice; + system += res.system; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + iowait += res.iowait; + idle += res.idle; + } + } else { + struct kernel_cpustat *kcpustat; + + for_each_possible_cpu(cpu) { + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + user += kcpustat->cpustat[CPUTIME_USER]; + nice += kcpustat->cpustat[CPUTIME_NICE]; + system += kcpustat->cpustat[CPUTIME_SYSTEM]; + irq += kcpustat->cpustat[CPUTIME_IRQ]; + softirq += kcpustat->cpustat[CPUTIME_SOFTIRQ]; + guest += kcpustat->cpustat[CPUTIME_GUEST]; + idle += get_idle_time(kcpustat, cpu); + iowait += get_iowait_time(kcpustat, cpu); + steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + } + } + + seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); + seq_printf(sf, "nice %lld\n", nsec_to_clock_t(nice)); + seq_printf(sf, "system 
%lld\n", nsec_to_clock_t(system)); + seq_printf(sf, "idle %lld\n", nsec_to_clock_t(idle)); + seq_printf(sf, "iowait %lld\n", nsec_to_clock_t(iowait)); + seq_printf(sf, "irq %lld\n", nsec_to_clock_t(irq)); + seq_printf(sf, "softirq %lld\n", nsec_to_clock_t(softirq)); + seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); + seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + + return 0; +} +#endif + static struct cftype files[] = { { .name = "usage", @@ -323,6 +465,12 @@ static struct cftype files[] = { .name = "stat", .seq_show = cpuacct_stats_show, }, +#ifdef CONFIG_SCHED_SLI + { + .name = "proc_stat", + .seq_show = cpuacct_proc_stats_show, + }, +#endif { } /* terminate */ }; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 2862e421e411..562d0bbcc060 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3568,4 +3568,9 @@ static inline void init_sched_mm_cid(struct task_struct *t) { } extern u64 avg_vruntime(struct cfs_rq *cfs_rq); extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); +#ifdef CONFIG_SCHED_SLI +extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); +extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); +#endif + #endif /* _KERNEL_SCHED_SCHED_H */ -- Gitee From 52ea035aa5d139fa3e8f689d9c7697abdf29511b Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 6 Nov 2019 20:13:02 +0800 Subject: [PATCH 799/953] anolis: cpuacct/proc_stat: Consider isolcpus ANBZ: #8657 When "isolcpus=" is passed, skip all its accountings. Signed-off-by: Xunlei Pang Tested-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 67b6c7817397..8ae703c442a1 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -385,6 +385,9 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) struct cpuacct_usage_result res; for_each_possible_cpu(cpu) { + if (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN)) + continue; + rcu_read_lock(); __cpuacct_get_usage_result(ca, cpu, cgroup_tg(cgrp), &res); -- Gitee From aec48f1eaced337dd70ce4ca261d9ada4b37869d Mon Sep 17 00:00:00 2001 From: Shanpei Chen Date: Thu, 3 Jun 2021 18:44:29 +0800 Subject: [PATCH 800/953] anolis: sched/cputime: Fix guest cputime of cpuacct.proc_stat ANBZ: #8657 For container only cases, since guest cputime is always 0, we don't calculate it and return 0 directly before. Howerver, when running vm inside a cgroup, we expect the cgroup to maintain guest cputime correctly. 
Signed-off-by: Shanpei Chen Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 5 ++++- kernel/sched/cputime.c | 6 ++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 8ae703c442a1..b3f33c8ae160 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -368,7 +368,8 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->steal = __schedstats_from_se(se)->wait_sum; else res->steal = 0; - res->guest = res->guest_nice = 0; /* currently always 0 */ + res->guest = kcpustat->cpustat[CPUTIME_GUEST]; + res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) @@ -400,6 +401,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) softirq += res.softirq; steal += res.steal; guest += res.guest; + guest += res.guest_nice; iowait += res.iowait; idle += res.idle; } @@ -414,6 +416,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) irq += kcpustat->cpustat[CPUTIME_IRQ]; softirq += kcpustat->cpustat[CPUTIME_SOFTIRQ]; guest += kcpustat->cpustat[CPUTIME_GUEST]; + guest += kcpustat->cpustat[CPUTIME_GUEST_NICE]; idle += get_idle_time(kcpustat, cpu); iowait += get_iowait_time(kcpustat, cpu); steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 2a6701040cf6..27dd9be809dc 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -142,8 +142,6 @@ void account_user_time(struct task_struct *p, u64 cputime) */ void account_guest_time(struct task_struct *p, u64 cputime) { - u64 *cpustat = kcpustat_this_cpu->cpustat; - /* Add guest time to process. */ p->utime += cputime; account_group_user_time(p, cputime); @@ -152,10 +150,10 @@ void account_guest_time(struct task_struct *p, u64 cputime) /* Add guest time to cpustat. */ if (task_nice(p) > 0) { task_group_account_field(p, CPUTIME_NICE, cputime); - cpustat[CPUTIME_GUEST_NICE] += cputime; + task_group_account_field(p, CPUTIME_GUEST_NICE, cputime); } else { task_group_account_field(p, CPUTIME_USER, cputime); - cpustat[CPUTIME_GUEST] += cputime; + task_group_account_field(p, CPUTIME_GUEST, cputime); } } -- Gitee From f6f109f9a9177f67b4ee01860857a5d048519380 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 17 Jan 2020 01:03:44 +0800 Subject: [PATCH 801/953] anolis: cpuacct: Export nr_running & nr_uninterruptible ANBZ: #8657 cpu cgroup's nr_running and nr_uninterruptible are useful for troubleshooting. Export them in cpuacct.proc_stat. Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 101 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index b3f33c8ae160..7e8b40df52de 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -323,6 +323,95 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp) struct task_group, css); } +static inline unsigned long nr_uninterruptible(void) +{ + unsigned long i, sum = 0; + + for_each_possible_cpu(i) + sum += cpu_rq(i)->nr_uninterruptible; + + /* + * Since we read the counters lockless, it might be slightly + * inaccurate. 
Do not allow it to go below zero though: + */ + if (unlikely((long)sum < 0)) + sum = 0; + + return sum; +} + +#ifdef CONFIG_CFS_BANDWIDTH +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return tg->cfs_rq[cpu]->throttle_count; +} +#else +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return false; +} +#endif + +#ifdef CONFIG_RT_GROUP_SCHED +static inline bool tg_rt_throttled(struct task_group *tg, int cpu) +{ + return tg->rt_rq[cpu]->rt_throttled && !tg->rt_rq[cpu]->rt_nr_boosted; +} +#endif + +static unsigned long ca_running(struct cpuacct *ca, int cpu) +{ + unsigned long nr_running = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return 0; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out; + + if (!tg_cfs_throttled(tg, cpu)) + nr_running += tg->cfs_rq[cpu]->h_nr_running; +#ifdef CONFIG_RT_GROUP_SCHED + if (!tg_rt_throttled(tg, cpu)) + nr_running += tg->rt_rq[cpu]->rt_nr_running; +#endif + /* SCHED_DEADLINE doesn't support cgroup yet */ + +out: + rcu_read_unlock(); + return nr_running; +} + +static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu) +{ + unsigned long nr = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return nr; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out_rcu_unlock; + + nr = tg->cfs_rq[cpu]->nr_uninterruptible; +#ifdef CONFIG_RT_GROUP_SCHED + nr += tg->rt_rq[cpu]->nr_uninterruptible; +#endif + +out_rcu_unlock: + rcu_read_unlock(); + return nr; +} + static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, struct task_group *tg, struct cpuacct_usage_result *res) { @@ -377,6 +466,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) struct cpuacct *ca = css_ca(seq_css(sf)); struct cgroup *cgrp = seq_css(sf)->cgroup; u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + unsigned long nr_run = 0, nr_uninter = 0; int cpu; user = nice = system = idle = iowait = @@ -404,6 +494,9 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) guest += res.guest_nice; iowait += res.iowait; idle += res.idle; + + nr_run += ca_running(ca, cpu); + nr_uninter += ca_uninterruptible(ca, cpu); } } else { struct kernel_cpustat *kcpustat; @@ -421,6 +514,9 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) iowait += get_iowait_time(kcpustat, cpu); steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; } + + nr_run = nr_running(); + nr_uninter = nr_uninterruptible(); } seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); @@ -433,6 +529,11 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + seq_printf(sf, "nr_running %lld\n", (u64)nr_run); + if ((long) nr_uninter < 0) + nr_uninter = 0; + seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter); + return 0; } #endif -- Gitee From 13f759898e5424f604c2a9f41aaa3e4aed18ffba Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 4 Jun 2021 11:18:37 +0800 Subject: [PATCH 802/953] anolis: sched: Introduce per-cgroup idle accounting ANBZ: #8657 Since we concern idle, let's take idle as the center state. And omit transition between other stats. 
Below is the state transition graph: sleep->deque +-----------+ cpumask +-------+ exit->deque +-------+ |ineffective|-------- | idle | <-----------|running| +-----------+ +-------+ +-------+ ^ | unthrtl child -> deque | | wake -> deque | |thrtl chlid -> enque migrate -> deque | |migrate -> enque | v +-------+ | steal | +-------+ We conclude idle state condition as: !se->on_rq && !my_q->throttled && cpu allowed. From this graph and condition, we can hook (de|en)queue_task_fair update_cpumasks_hier, (un|)throttle_cfs_rq to account idle state. In the hooked functions, we also check the conditions, to avoid accounting unwanted cpu clocks. Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/cgroup.h | 12 +++++ include/linux/sched.h | 4 ++ kernel/cgroup/cpuset.c | 16 +++++++ kernel/sched/cpuacct.c | 100 +++++++++++++++++++++++++++++++++++++++-- kernel/sched/fair.c | 29 +++++++++++- kernel/sched/sched.h | 5 +++ 6 files changed, 161 insertions(+), 5 deletions(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f1219e29d309..5eda25d7c3a5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -857,4 +857,16 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} #endif /* CONFIG_CGROUP_BPF */ +#ifdef CONFIG_SCHED_SLI +void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added); +void cgroup_idle_end(struct sched_entity *se); +void cgroup_idle_start(struct sched_entity *se); +#else +static inline void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added) { } +static inline void cgroup_idle_end(struct sched_entity *se) { } +static inline void cgroup_idle_start(struct sched_entity *se) { } +#endif + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 4736b0902102..da0fd92af229 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -572,6 +572,10 @@ struct sched_entity { s64 vlag; u64 slice; + u64 cg_idle_start; + u64 cg_idle_sum; + seqlock_t idle_seqlock; + u64 nr_migrations; #ifdef CONFIG_FAIR_GROUP_SCHED diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index f5626ccdc786..a0edf8070dc3 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1600,6 +1600,8 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, return 0; } +static struct cpumask added, deleted, old_cpus; + /* * update_cpumasks_hier() flags */ @@ -1656,6 +1658,11 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, parent->child_ecpus_count--; } + if (cpumask_empty(cp->effective_cpus)) + cpumask_copy(&old_cpus, parent->effective_cpus); + else + cpumask_copy(&old_cpus, cp->effective_cpus); + /* * Skip the whole subtree if * 1) the cpumask remains the same, @@ -1747,8 +1754,16 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); + /* add = new - old = new & (~old) */ + cpumask_andnot(&added, tmp->new_cpus, &old_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, &added); + update_tasks_cpumask(cp, tmp->new_cpus); + /* deleted = old - new = old & (~new) */ + cpumask_andnot(&deleted, &old_cpus, tmp->new_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, &deleted, NULL); + /* * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE * from parent if current cpuset isn't a valid partition root @@ -3328,6 +3343,7 @@ static int 
cpuset_css_online(struct cgroup_subsys_state *css) cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, cs->effective_cpus); spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 7e8b40df52de..12a455867d0a 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -412,6 +412,84 @@ static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu) return nr; } +void cgroup_idle_start(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); + + write_seqlock(&se->idle_seqlock); + __schedstat_set(se->cg_idle_start, clock); + write_sequnlock(&se->idle_seqlock); + + local_irq_restore(flags); +} + +void cgroup_idle_end(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + u64 idle_start; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); + + write_seqlock(&se->idle_seqlock); + idle_start = schedstat_val(se->cg_idle_start); + __schedstat_add(se->cg_idle_sum, clock - idle_start); + __schedstat_set(se->cg_idle_start, 0); + write_sequnlock(&se->idle_seqlock); + + local_irq_restore(flags); +} + +void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, + struct cpumask *added) +{ + struct task_group *tg; + struct sched_entity *se; + int cpu; + + if (!schedstat_enabled()) + return; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + + if (!tg) { + rcu_read_unlock(); + return; + } + + if (added) { + /* Mark newly added cpus as newly-idle */ + for_each_cpu(cpu, added) { + se = tg->se[cpu]; + cgroup_idle_start(se); + } + } + + if (deleted) { + /* Mark ineffective_cpus as idle-invalid */ + for_each_cpu(cpu, deleted) { + se = tg->se[cpu]; + cgroup_idle_end(se); + } + } + + rcu_read_unlock(); +} + static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, struct task_group *tg, struct cpuacct_usage_result *res) { @@ -453,10 +531,26 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->irq = kcpustat->cpustat[CPUTIME_IRQ]; res->softirq = kcpustat->cpustat[CPUTIME_SOFTIRQ]; - if (se) - res->steal = __schedstats_from_se(se)->wait_sum; - else + + if (se && schedstat_enabled()) { + unsigned int seq; + u64 idle_start; + u64 clock = cpu_clock(cpu); + + do { + seq = read_seqbegin(&se->idle_seqlock); + res->idle = schedstat_val(se->cg_idle_sum); + idle_start = schedstat_val(se->cg_idle_start); + clock = cpu_clock(cpu); + if (idle_start && clock > idle_start) + res->idle += clock - idle_start; + } while (read_seqretry(&se->idle_seqlock, seq)); + res->steal = 0; + } else { + res->idle = res->iowait = res->steal = 0; + } + res->guest = kcpustat->cpustat[CPUTIME_GUEST]; res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3321918c0e39..180bba6e9798 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5704,6 +5704,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se->on_rq) goto done; + if (se->my_q != cfs_rq) + cgroup_idle_start(se); + dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -5752,6 +5755,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) { + struct cfs_rq 
*bottom_cfs_rq = cfs_rq; struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; @@ -5795,6 +5799,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (se->on_rq) break; + + if (se->my_q != bottom_cfs_rq) + cgroup_idle_end(se); enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -6661,6 +6668,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_end(se); + cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; @@ -6668,8 +6678,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (cfs_rq->nr_running == 1) + cgroup_idle_end(se->parent); +#endif goto enqueue_throttle; + } flags = ENQUEUE_WAKEUP; } @@ -6739,6 +6754,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_start(se); + cfs_rq->h_nr_running--; cfs_rq->idle_h_nr_running -= idle_h_nr_running; @@ -6746,8 +6764,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (!cfs_rq->nr_running) + cgroup_idle_start(se->parent); +#endif goto dequeue_throttle; + } /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -12952,6 +12975,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, /* guarantee group entities always have weight */ update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; + seqlock_init(&se->idle_seqlock); + se->cg_idle_start = cpu_clock(cpu); } static DEFINE_MUTEX(shares_mutex); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 562d0bbcc060..b67bae32dc7f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1503,6 +1503,11 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) extern void update_rq_clock(struct rq *rq); +static inline u64 __rq_clock_broken(struct rq *rq) +{ + return READ_ONCE(rq->clock); +} + /* * rq::clock_update_flags bits * -- Gitee From 3960616999c48f305eb5875513d71c68216f8771 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 4 Jun 2021 14:32:01 +0800 Subject: [PATCH 803/953] anolis: sched: Introduce per-cgroup steal accounting ANBZ: #8657 From the previous patch we know there are 4 possible states. Since the steal state's transitions are complex, we choose to account for its complement: steal = elapse - idle - sum_exec_raw - ineffective where elapse is the time since the cgroup was created, sum_exec_raw is the running time including IRQ time, and ineffective is the total time during which the cpuacct-bound cpuset doesn't allow this cpu for the cgroup.
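As an illustrative worked example (numbers invented for this note, not taken from a real trace): suppose a cgroup has existed on some cpu for elapse = 10s, during which it accumulated idle = 6s, sum_exec_raw = 3s and ineffective = 0s. Then

    steal = elapse - idle - sum_exec_raw - ineffective
          = 10s - 6s - 3s - 0s
          = 1s

i.e. the time that was neither idle, nor spent running, nor disallowed by the cpuset is attributed to steal.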
Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/sched.h | 6 ++++++ kernel/sched/cpuacct.c | 22 ++++++++++++++++++++-- kernel/sched/fair.c | 13 ++++++++++++- 3 files changed, 38 insertions(+), 3 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index da0fd92af229..52e4bc239ed2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -572,8 +572,14 @@ struct sched_entity { s64 vlag; u64 slice; + /* irq time is included */ + u64 exec_start_raw; + u64 sum_exec_raw; u64 cg_idle_start; u64 cg_idle_sum; + u64 cg_init_time; + u64 cg_ineffective_sum; + u64 cg_ineffective_start; seqlock_t idle_seqlock; u64 nr_migrations; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 12a455867d0a..339b1c4069d4 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -476,6 +476,10 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, for_each_cpu(cpu, added) { se = tg->se[cpu]; cgroup_idle_start(se); + __schedstat_add(se->cg_ineffective_sum, + __rq_clock_broken(cpu_rq(cpu)) - + se->cg_ineffective_start); + __schedstat_set(se->cg_ineffective_start, 0); } } @@ -484,6 +488,9 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, for_each_cpu(cpu, deleted) { se = tg->se[cpu]; cgroup_idle_end(se); + /* Use __rq_clock_broken to avoid warning */ + __schedstat_set(se->cg_ineffective_start, + __rq_clock_broken(cpu_rq(cpu))); } } @@ -534,8 +541,8 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (se && schedstat_enabled()) { unsigned int seq; - u64 idle_start; - u64 clock = cpu_clock(cpu); + u64 idle_start, ineff, ineff_start, elapse, complement; + u64 clock; do { seq = read_seqbegin(&se->idle_seqlock); @@ -546,7 +553,18 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->idle += clock - idle_start; } while (read_seqretry(&se->idle_seqlock, seq)); + ineff = schedstat_val(se->cg_ineffective_sum); + ineff_start = schedstat_val(se->cg_ineffective_start); + if (ineff_start) + __schedstat_add(ineff, clock - ineff_start); + res->steal = 0; + + elapse = clock - schedstat_val(se->cg_init_time); + complement = res->idle + se->sum_exec_raw + ineff; + if (elapse > complement) + res->steal = elapse - complement; + } else { res->idle = res->iowait = res->steal = 0; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 180bba6e9798..7301c4cf3241 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1153,6 +1153,15 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq) } #endif /* CONFIG_SMP */ +static inline void +update_exec_raw(struct cfs_rq *cfs_rq, struct sched_entity *curr) +{ + u64 now = rq_clock(rq_of(cfs_rq)); + + curr->sum_exec_raw += now - curr->exec_start_raw; + curr->exec_start_raw = now; +} + /* * Update the current task's runtime statistics. 
*/ @@ -1195,6 +1204,7 @@ static void update_curr(struct cfs_rq *cfs_rq) } account_cfs_rq_runtime(cfs_rq, delta_exec); + update_exec_raw(cfs_rq, curr); } static void update_curr_fair(struct rq *rq) @@ -1321,6 +1331,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) * We are starting a new run period: */ se->exec_start = rq_clock_task(rq_of(cfs_rq)); + se->exec_start_raw = rq_clock(rq_of(cfs_rq)); } /************************************************** @@ -12976,7 +12987,7 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; seqlock_init(&se->idle_seqlock); - se->cg_idle_start = cpu_clock(cpu); + se->cg_idle_start = se->cg_init_time = cpu_clock(cpu); } static DEFINE_MUTEX(shares_mutex); -- Gitee From a70d2bff19c01709a5d557a4b98175f91e243fd7 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 4 Jun 2021 16:31:35 +0800 Subject: [PATCH 804/953] anolis: sched: Introduce per-cgroup iowait accounting ANBZ: #8657 We account iowait when the cgroup's se is idle, and it has blocked task on the hierarchy of se->my_q. To achieve this, we also add cg_nr_running to track the hierarchical number of blocked tasks. We do it when a blocked task wakes up or a task is blocked. Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/sched.h | 4 ++++ kernel/sched/core.c | 3 +++ kernel/sched/cpuacct.c | 26 +++++++++++++++++++++-- kernel/sched/fair.c | 47 ++++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 6 ++++++ 5 files changed, 84 insertions(+), 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 52e4bc239ed2..55d9768ace8a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -578,9 +578,13 @@ struct sched_entity { u64 cg_idle_start; u64 cg_idle_sum; u64 cg_init_time; + u64 cg_nr_iowait; + u64 cg_iowait_sum; + u64 cg_iowait_start; u64 cg_ineffective_sum; u64 cg_ineffective_start; seqlock_t idle_seqlock; + spinlock_t iowait_lock; u64 nr_migrations; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6e39e68d88be..47370c27889b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3808,6 +3808,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } activate_task(rq, p, en_flags); @@ -4371,6 +4372,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } wake_flags |= WF_MIGRATED; @@ -6831,6 +6833,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) if (prev->in_iowait) { atomic_inc(&rq->nr_iowait); + update_nr_iowait(prev, 1); delayacct_blkio_start(); } } diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 339b1c4069d4..093d16cf3daa 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -428,6 +428,11 @@ void cgroup_idle_start(struct sched_entity *se) __schedstat_set(se->cg_idle_start, clock); write_sequnlock(&se->idle_seqlock); + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) + __schedstat_set(se->cg_iowait_start, clock); + spin_unlock(&se->iowait_lock); + local_irq_restore(flags); } @@ -435,7 +440,7 @@ void cgroup_idle_end(struct sched_entity *se) { unsigned long flags; u64 clock; - u64 idle_start; + u64 idle_start, iowait_start; if (!schedstat_enabled()) 
return; @@ -450,6 +455,14 @@ void cgroup_idle_end(struct sched_entity *se) __schedstat_set(se->cg_idle_start, 0); write_sequnlock(&se->idle_seqlock); + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) { + iowait_start = schedstat_val(se->cg_iowait_start); + __schedstat_add(se->cg_iowait_sum, clock - iowait_start); + __schedstat_set(se->cg_iowait_start, 0); + } + spin_unlock(&se->iowait_lock); + local_irq_restore(flags); } @@ -541,8 +554,9 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (se && schedstat_enabled()) { unsigned int seq; + unsigned long flags; u64 idle_start, ineff, ineff_start, elapse, complement; - u64 clock; + u64 clock, iowait_start; do { seq = read_seqbegin(&se->idle_seqlock); @@ -558,6 +572,13 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (ineff_start) __schedstat_add(ineff, clock - ineff_start); + spin_lock_irqsave(&se->iowait_lock, flags); + res->iowait = schedstat_val(se->cg_iowait_sum); + iowait_start = schedstat_val(se->cg_iowait_start); + if (iowait_start) + __schedstat_add(res->iowait, clock - iowait_start); + spin_unlock_irqrestore(&se->iowait_lock, flags); + res->steal = 0; elapse = clock - schedstat_val(se->cg_init_time); @@ -565,6 +586,7 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (elapse > complement) res->steal = elapse - complement; + res->idle -= res->iowait; } else { res->idle = res->iowait = res->steal = 0; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7301c4cf3241..48e174183343 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -12834,6 +12834,46 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) } #ifdef CONFIG_FAIR_GROUP_SCHED + +#ifdef CONFIG_SCHED_SLI +static void update_nr_iowait_fair(struct task_struct *p, long inc) +{ + unsigned long flags; + struct sched_entity *se = p->se.parent; + u64 clock; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(cpu_rq(task_cpu(p))); + + for_each_sched_entity(se) { + /* + * Avoid locking rq->lock from try_to_wakeup hot path, in + * the price of poor consistency among cgroup hierarchy, + * which we can tolerate. + * While accessing se->on_rq does need to hold rq->lock. 
We + * already do, because when inc==1, the caller is __schedule + * and task_move_group_fair + */ + spin_lock_irqsave(&se->iowait_lock, flags); + if (!se->on_rq && !schedstat_val(se->cg_nr_iowait) && inc > 0) + __schedstat_set(se->cg_iowait_start, clock); + if (schedstat_val(se->cg_iowait_start) > 0 && + schedstat_val(se->cg_nr_iowait) + inc == 0) { + __schedstat_add(se->cg_iowait_sum, clock - + schedstat_val(se->cg_iowait_start)); + __schedstat_set(se->cg_iowait_start, 0); + } + __schedstat_add(se->cg_nr_iowait, inc); + spin_unlock_irqrestore(&se->iowait_lock, flags); + } +} +#else +static void update_nr_iowait_fair(struct task_struct *p, long inc) {} +#endif + + static void task_change_group_fair(struct task_struct *p) { /* @@ -12843,6 +12883,9 @@ static void task_change_group_fair(struct task_struct *p) if (READ_ONCE(p->__state) == TASK_NEW) return; + if (p->in_iowait) + update_nr_iowait_fair(p, -1); + detach_task_cfs_rq(p); #ifdef CONFIG_SMP @@ -12851,6 +12894,8 @@ static void task_change_group_fair(struct task_struct *p) #endif set_task_rq(p, task_cpu(p)); attach_task_cfs_rq(p); + if (p->in_iowait) + update_nr_iowait_fair(p, 1); } void free_fair_sched_group(struct task_group *tg) @@ -12987,6 +13032,7 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; seqlock_init(&se->idle_seqlock); + spin_lock_init(&se->iowait_lock); se->cg_idle_start = se->cg_init_time = cpu_clock(cpu); } @@ -13205,6 +13251,7 @@ DEFINE_SCHED_CLASS(fair) = { #endif #ifdef CONFIG_SCHED_SLI .update_nr_uninterruptible = update_nr_uninterruptible_fair, + .update_nr_iowait = update_nr_iowait_fair, #endif #ifdef CONFIG_UCLAMP_TASK diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b67bae32dc7f..55b26da8eb98 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2335,6 +2335,7 @@ struct sched_class { int (*task_is_throttled)(struct task_struct *p, int cpu); #endif void (*update_nr_uninterruptible)(struct task_struct *p, long inc); + void (*update_nr_iowait)(struct task_struct *p, long inc); }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) @@ -2348,6 +2349,11 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } +static inline void update_nr_iowait(struct task_struct *p, long inc) +{ + if (p->sched_class->update_nr_iowait) + p->sched_class->update_nr_iowait(p, inc); +} /* * Helper to define a sched_class instance; each one is placed in a separate -- Gitee From 5553eb6220d7965206be242b4c3fb101ce88dafb Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 10 Sep 2021 20:44:15 +0800 Subject: [PATCH 805/953] anolis: sched: Fix cg_nr_iowait race condition ANBZ: #8657 Consider p->in_iowait and p->on_rq. Intuitively, they seem to be exclusive. But there are actually exact two exceptions: 1) and 3) in the time series: +----------------+ | |io_schedule_... | | |in_iowait=1 | | +----------------+ | | | 1) sees in_iowait=1 & on_rq=1 | +-rq_lock--------+ | | | | | __schedule out:| | | nr_iowait ++ | | | deactivate | | | on_rq = 0 | | +----------------+ | | | 2) sees in_iowait=1 & on_rq=0 | +----------------+ | | try_to_wake_up:| | | on_rq = 1 | | | nr_iowait -- | | +----------------+ | | | 3) sees in_iowait=1 & on_rq=1 | | +-rq_lock--------+ | | __schedule in: | | +----------------+ | +----------------+ | |in_iowait=0 | | +----------------+ | In case 1, there's a window between updating in_iowait and on_rq. 
And this window is not protected by rq->lock or p->pi_lock. And case 3 it's more obvious. on_rq is updated by try_to_wake_up, while in_iowait is updated by io_schedule in the near future. case 2 is a trivial one (meaning it's not buggy) In conclusion, there's no guarantee atomically updating on_rq and in_iowait. Next, we concentrate on nr_iowait, and take task_move_group_fair into consideration too. ==== case 1 ==== without bugfix: old cgroup new cgroup | task_move_group_fair -- -> -1 ++ -> 1 | __schedule out ++ -> 2 v try_to_wake_up -- -> 1 with bugfix: old cgroup new cgroup | task_move_group_fair | __schedule out ++ -> 1 v try_to_wake_up -- -> 0 ==== case 3 ==== without bugfix: old cgroup new cgroup | __schedule out ++ -> 1 | try_to_wake_up -- -> 0 v task_move_group_fair -- -> -1 ++ -> 1 with bugfix: old cgroup new cgroup | __schedule out ++ -> 1 | try_to_wake_up -- -> 0 v task_move_group_fair The bugfix is simple, use !on_rq to filter out case 1 and 3, since they are false-positive cases and should not update nr_iowait. Signed-off-by: Yihao Wu Signed-off-by: Yi Tao --- kernel/sched/fair.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 48e174183343..42bacd5e5dd6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -12883,7 +12883,20 @@ static void task_change_group_fair(struct task_struct *p) if (READ_ONCE(p->__state) == TASK_NEW) return; - if (p->in_iowait) + /* + * p->in_iowait is obvious. If p is in_iowait, we should transfer + * iowait to the new cgroup, otherwise try_to_wake_up will decrease + * from the new cgroup, leaving old cgroup's nr_iowait to be 1, and + * new cgroup's nr_iowait to be -1 + * + * !p->on_rq is necessary too, because iowait and on_rq are not + * updated at the same time. After try_to_wake_up, p->in_iowait + * remains 1, while on_rq becomes 1. In this case, p is not at all + * in_iowait already, so don't be stupid to transfer nr_iowait. + * Similarly, when io_schedule, there's a window between setting + * p->in_iowait to 1 and setting p->on_rq to 0, don't either. + */ + if (p->in_iowait && !p->on_rq) update_nr_iowait_fair(p, -1); detach_task_cfs_rq(p); @@ -12894,7 +12907,8 @@ static void task_change_group_fair(struct task_struct *p) #endif set_task_rq(p, task_cpu(p)); attach_task_cfs_rq(p); - if (p->in_iowait) + /* Same as above */ + if (p->in_iowait && !p->on_rq) update_nr_iowait_fair(p, 1); } -- Gitee From 99b79c3163bdaf6e0bf353916bf5e25243cbe092 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 4 Jun 2021 16:57:15 +0800 Subject: [PATCH 806/953] anolis: configs: Enable rich container ANBZ: #8657 Enable CONFIG_RICH_CONTAINER in all configure files, for both x86 and arm64. 
It is off by default; to turn it on: echo 1 > /proc/sys/kernel/rich_container_enable Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + init/Kconfig | 14 ++++++++++++++ 5 files changed, 18 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index a9dd69260bb5..65ebc3694ef2 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -196,6 +196,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 1935cdb56003..a563537ca907 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -195,6 +195,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ba7a304585a4..6f4e977693dd 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -215,6 +215,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 1595b3a3616d..47269a2f1c76 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -214,6 +214,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/init/Kconfig b/init/Kconfig index 61c7ed19fa7b..2c9af56b108a 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1139,6 +1139,20 @@ config SCHED_SLI Note CPU usage requires cpuacct and cpu cgroup subsys to be mounted together to take effect. +config RICH_CONTAINER + bool "Alibaba rich container" + depends on CGROUP_CPUACCT + depends on CFS_BANDWIDTH + depends on CPUSETS + default n + help + Make containers feel like VMs. Change the following interfaces + to reflect information from containers, like: + "/proc/cpuinfo", "/proc/meminfo", "/sys/devices/system/cpu/online". + Then tools (e.g. top, free) can work in containers as they do in VMs. + Note that it requires a shared "cpu,cpuacct,cpuset" v1 hierarchy to + work properly. + config CGROUP_CPUACCT bool "Simple CPU accounting controller" help -- Gitee From d80179c67c46f5380296298cfb0ae38efe26e7f3 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 4 Jun 2021 17:19:11 +0800 Subject: [PATCH 807/953] anolis: x86: cpuinfo: Add cpuinfo support for rich container ANBZ: #8657 Make /proc/cpuinfo container aware. E.g. if cpuset.cpus is 4-7, the rich container will see them as the faked cpu0~cpu3.
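The remapping rule can be condensed into the following sketch (faked_index() is our name for illustration, not a helper added by this patch; the real logic lives in check_rich_container() below):

    /* cpuset_allowed stands in for the container's cpuset.cpus, e.g. 4-7 */
    static int faked_index(const struct cpumask *cpuset_allowed, unsigned int cpu)
    {
            unsigned int i, id = 0;

            for_each_cpu(i, cpuset_allowed) {
                    if (i == cpu)
                            return id;      /* visible: report the faked id */
                    id++;
            }
            return -1;                      /* not allowed: hide this cpu */
    }

With cpuset.cpus = 4-7, physical cpu 5 is therefore reported as processor 1.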
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/x86/kernel/cpu/proc.c | 32 +++++++++++++++------- include/linux/pid_namespace.h | 12 +++++++++ include/linux/sched.h | 13 +++++++++ kernel/sched/cpuacct.c | 50 +++++++++++++++++++++++++++++++++++ 4 files changed, 97 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index a0f81db51eac..2941134c47da 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -17,14 +17,22 @@ extern const char * const x86_vmx_flags[NVMXINTS*32]; * Get CPU information for use by the procfs. */ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, - unsigned int cpu) + unsigned int cpu, unsigned int index, + bool rich_container, unsigned int total) { #ifdef CONFIG_SMP - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpumask_weight(topology_core_cpumask(cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + if (rich_container) { + seq_puts(m, "physical id\t: 0\n"); + seq_printf(m, "siblings\t: %d\n", total); + seq_printf(m, "core id\t\t: %d\n", index); + seq_printf(m, "cpu cores\t: %d\n", total); + } else { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpumask_weight(topology_core_cpumask(cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } seq_printf(m, "apicid\t\t: %d\n", c->apicid); seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); #endif @@ -63,16 +71,20 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - unsigned int cpu; + unsigned int cpu, index, total; int i; + bool rich_container = false; + + index = cpu = c->cpu_index; + if (check_rich_container(cpu, &index, &rich_container, &total)) + return 0; - cpu = c->cpu_index; seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" "model\t\t: %u\n" "model name\t: %s\n", - cpu, + index, c->x86_vendor_id[0] ? 
c->x86_vendor_id : "unknown", c->x86, c->x86_model, @@ -95,7 +107,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (c->x86_cache_size) seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); - show_cpuinfo_core(m, c, cpu); + show_cpuinfo_core(m, c, cpu, index, rich_container, total); show_cpuinfo_misc(m, c); seq_puts(m, "flags\t\t:"); diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index f9f9931e02d6..ea8b24c3b4ec 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -123,4 +123,16 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) return task_active_pid_ns(tsk) == &init_pid_ns; } +#ifdef CONFIG_RICH_CONTAINER +static inline bool in_rich_container(struct task_struct *tsk) +{ + return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); +} +#else +static inline bool in_rich_container(struct task_struct *tsk) +{ + return false; +} +#endif + #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 55d9768ace8a..e2d5c0fe053a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2526,4 +2526,17 @@ struct cpuacct_usage_result { u64 steal, iowait, idle, guest, guest_nice; }; +#ifdef CONFIG_RICH_CONTAINER +bool child_cpuacct(struct task_struct *tsk); +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total); + +#else +static inline bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + return false; +} +#endif + #endif diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 093d16cf3daa..1daefd079ea5 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -750,3 +750,53 @@ struct cgroup_subsys cpuacct_cgrp_subsys = { .legacy_cftypes = files, .early_init = true, }; + +#ifdef CONFIG_RICH_CONTAINER +bool child_cpuacct(struct task_struct *tsk) +{ + struct cpuacct *ca = task_ca(tsk); + + if (ca && ca != &root_cpuacct) + return true; + + return false; +} + +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + struct cpumask cpuset_allowed; + struct task_struct *init_tsk; + bool in_rich; + int i, id = 0; + + rcu_read_lock(); + in_rich = in_rich_container(current); + rcu_read_unlock(); + if (!in_rich) + return false; + + *rich_container = true; + + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + put_task_struct(init_tsk); + + *total = cpumask_weight(&cpuset_allowed); + if (cpumask_test_cpu(cpu, &cpuset_allowed)) { + for_each_cpu(i, &cpuset_allowed) { + if (i == cpu) + break; + id++; + } + *index = id; + return false; + } + + /* Hide this cpu in the container */ + return true; +} +#endif -- Gitee From a2978206d6d9f40be38dc7f7fa66331b7aeb723d Mon Sep 17 00:00:00 2001 From: zou cao Date: Mon, 23 Nov 2020 10:56:02 +0800 Subject: [PATCH 808/953] anolis: arm64: cpuinfo: Add cpuinfo support for rich container ANBZ: #8657 add arm64 cpuinfo support for rich container Signed-off-by: zou cao Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/arm64/kernel/cpuinfo.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 98fda8500535..07ee88abfa0d 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -178,17 +178,24 @@ static int 
c_show(struct seq_file *m, void *v) { int i, j; bool compat = personality(current->personality) == PER_LINUX32; + unsigned int cpu, index, total; + bool rich_container = false; for_each_online_cpu(i) { struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); u32 midr = cpuinfo->reg_midr; + index = cpu = i; + + if (check_rich_container(cpu, &index, &rich_container, &total)) + continue; + /* * glibc reads /proc/cpuinfo to determine the number of * online processors, looking for lines beginning with * "processor". Give glibc what it expects. */ - seq_printf(m, "processor\t: %d\n", i); + seq_printf(m, "processor\t: %d\n", index); if (compat) seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); -- Gitee From 7bf1924a8e2c37b3980d5fae4e7dc27f9cb2661f Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Mon, 23 Nov 2020 10:45:55 +0800 Subject: [PATCH 809/953] anolis: sysfs/cpu: Add online cpus support for rich container ANBZ: #8657 Make /sys/devices/system/cpu/online container aware. E.g. cpuset.cpus is 4-7, then it will show "4-7" in the rich container. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- drivers/base/cpu.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index ef427ee787a9..925dacc2d266 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include "base.h" @@ -216,8 +218,31 @@ static ssize_t show_cpus_attr(struct device *dev, char *buf) { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); + struct cpumask cpuset_allowed; + struct task_struct *init_tsk; + bool rich_container; + + rcu_read_lock(); + rich_container = in_rich_container(current); + if (rich_container) { + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + } else { + init_tsk = NULL; + } + rcu_read_unlock(); + + if (rich_container && !strcmp(attr->attr.name, "online")) + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + else + cpumask_copy(&cpuset_allowed, ca->map); + + if (init_tsk) + put_task_struct(init_tsk); - return cpumap_print_to_pagebuf(true, buf, ca->map); + return cpumap_print_to_pagebuf(true, buf, &cpuset_allowed); } #define _CPU_ATTR(name, map) \ -- Gitee From 21f4b625a1f53d110944ff1640022a8e63c9f1f5 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sat, 5 Jun 2021 00:30:06 +0800 Subject: [PATCH 810/953] anolis: pidns: Support rich container switch on/off ANBZ: #8657 Introduce "/proc/sys/kernel/rich_container_enable" to control rich container at runtime. Default off. 
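For call sites the switch is transparent: everything that presents a container view is already guarded by in_rich_container(), so the common pattern looks roughly like the sketch below (show_container_view() is a hypothetical placeholder, not a function from this series):

    rcu_read_lock();
    if (in_rich_container(current))     /* always false while the sysctl is 0 */
            show_container_view();      /* hypothetical per-container code path */
    rcu_read_unlock();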
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/pid_namespace.h | 4 ++++ kernel/pid_namespace.c | 4 ++++ kernel/sysctl.c | 11 +++++++++++ 3 files changed, 19 insertions(+) diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index ea8b24c3b4ec..a71999081213 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -124,8 +124,12 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) } #ifdef CONFIG_RICH_CONTAINER +extern int sysctl_rich_container_enable; static inline bool in_rich_container(struct task_struct *tsk) { + if (sysctl_rich_container_enable == 0) + return false; + return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); } #else diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 619972c78774..26a3517568ee 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -25,6 +25,10 @@ #include #include "pid_sysctl.h" +#ifdef CONFIG_RICH_CONTAINER +int sysctl_rich_container_enable; +#endif + static DEFINE_MUTEX(pid_caches_mutex); static struct kmem_cache *pid_ns_cachep; /* Write once array, filled from the beginning. */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 47bdd8216fc5..6d0bcc3c205e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2079,6 +2079,17 @@ static struct ctl_table kern_table[] = { .extra2 = SYSCTL_ONE, }, #endif /* CONFIG_SCHED_ACPU*/ +#ifdef CONFIG_RICH_CONTAINER + { + .procname = "rich_container_enable", + .data = &sysctl_rich_container_enable, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif { } }; -- Gitee From 1a83c6f964f313d8114e15bfc8805ed1310a62e7 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 15:54:11 +0800 Subject: [PATCH 811/953] anolis: proc/stat: Add top support for rich container ANBZ: #8657 Add in_rich_container() helper to support rich container. Make /proc/stat container aware. E.g. cpuset.cpus is 4-7, then it will show 4-cpu top data in the rich container. 
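Condensed, the rich-container branch of show_stat() in the hunk below boils down to summing the cgroup-local cpuacct results over the container's allowed cpus, instead of summing raw kcpustat over all possible cpus (a sketch of the change that follows, not extra code):

    cpuset_cpus_allowed(init_tsk, &cpuset_allowed);
    for_each_cpu(i, &cpuset_allowed) {
            cpuacct_get_usage_result(init_tsk, i, &res);
            user += res.user;       /* likewise nice, system, iowait, ... */
            idle += res.idle;
    }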
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/stat.c | 178 ++++++++++++++++++++++++++++++----------- include/linux/sched.h | 11 +++ kernel/sched/cpuacct.c | 17 ++++ 3 files changed, 158 insertions(+), 48 deletions(-) diff --git a/fs/proc/stat.c b/fs/proc/stat.c index ce8a751185f5..d2ec9dcddb31 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #ifndef arch_irq_stat_cpu #define arch_irq_stat_cpu(cpu) 0 @@ -81,13 +83,18 @@ static void show_all_irqs(struct seq_file *p) static int show_stat(struct seq_file *p, void *v) { - int i, j; + int i, j, seq = 0; u64 user, nice, system, idle, iowait, irq, softirq, steal; u64 guest, guest_nice; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec64 boottime; + struct cpumask cpuset_allowed; + unsigned int nr_runnable = 0; + struct task_struct *init_tsk = NULL; + struct cpuacct_usage_result res; + bool rich_container; user = nice = system = idle = iowait = irq = softirq = steal = 0; @@ -96,24 +103,54 @@ static int show_stat(struct seq_file *p, void *v) /* shift boot timestamp according to the timens offset */ timens_sub_boottime(&boottime); + rcu_read_lock(); + rich_container = in_rich_container(current); + if (rich_container) { + /* fix btime in containers */ + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + boottime.tv_sec += init_tsk->start_time / NSEC_PER_SEC; + + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + for_each_cpu(i, &cpuset_allowed) { + cpuacct_get_usage_result(init_tsk, i, &res); + user += res.user; + nice += res.nice; + system += res.system; + idle += res.idle; + iowait += res.iowait; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + guest_nice += res.guest_nice; + } + } else { + for_each_possible_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + user += cpustat[CPUTIME_USER]; + nice += cpustat[CPUTIME_NICE]; + system += cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(&kcpustat, i); + iowait += get_iowait_time(&kcpustat, i); + irq += cpustat[CPUTIME_IRQ]; + softirq += cpustat[CPUTIME_SOFTIRQ]; + steal += cpustat[CPUTIME_STEAL]; + guest += cpustat[CPUTIME_GUEST]; + guest_nice += cpustat[CPUTIME_GUEST_NICE]; + } + } + rcu_read_unlock(); + for_each_possible_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - user += cpustat[CPUTIME_USER]; - nice += cpustat[CPUTIME_NICE]; - system += cpustat[CPUTIME_SYSTEM]; - idle += get_idle_time(&kcpustat, i); - iowait += get_iowait_time(&kcpustat, i); - irq += cpustat[CPUTIME_IRQ]; - softirq += cpustat[CPUTIME_SOFTIRQ]; - steal += cpustat[CPUTIME_STEAL]; - guest += cpustat[CPUTIME_GUEST]; - guest_nice += cpustat[CPUTIME_GUEST_NICE]; - sum += kstat_cpu_irqs_sum(i); - sum += arch_irq_stat_cpu(i); + sum += kstat_cpu_irqs_sum(i); + sum += arch_irq_stat_cpu(i); for (j = 0; j < NR_SOFTIRQS; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); @@ -136,40 +173,85 @@ static int show_stat(struct seq_file *p, void *v) seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); seq_putc(p, '\n'); - for_each_online_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ 
- user = cpustat[CPUTIME_USER]; - nice = cpustat[CPUTIME_NICE]; - system = cpustat[CPUTIME_SYSTEM]; - idle = get_idle_time(&kcpustat, i); - iowait = get_iowait_time(&kcpustat, i); - irq = cpustat[CPUTIME_IRQ]; - softirq = cpustat[CPUTIME_SOFTIRQ]; - steal = cpustat[CPUTIME_STEAL]; - guest = cpustat[CPUTIME_GUEST]; - guest_nice = cpustat[CPUTIME_GUEST_NICE]; - seq_printf(p, "cpu%d", i); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); - seq_putc(p, '\n'); + rcu_read_lock(); + if (rich_container) { + for_each_cpu(i, &cpuset_allowed) { + cpuacct_get_usage_result(init_tsk, i, &res); + + seq_printf(p, "cpu%d", seq++); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.user)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.nice)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.system)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.idle)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.iowait)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.irq)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.softirq)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.steal)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest_nice)); + seq_putc(p, '\n'); + } + } else { + for_each_online_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ + user = cpustat[CPUTIME_USER]; + nice = cpustat[CPUTIME_NICE]; + system = cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(&kcpustat, i); + iowait = get_iowait_time(&kcpustat, i); + irq = cpustat[CPUTIME_IRQ]; + softirq = cpustat[CPUTIME_SOFTIRQ]; + steal = cpustat[CPUTIME_STEAL]; + guest = cpustat[CPUTIME_GUEST]; + guest_nice = cpustat[CPUTIME_GUEST_NICE]; + + seq_printf(p, "cpu%d", i); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(guest_nice)); + seq_putc(p, '\n'); + } } + rcu_read_unlock(); + seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); show_all_irqs(p); + rcu_read_lock(); + if (rich_container) { + for_each_cpu(i, &cpuset_allowed) + nr_runnable += task_ca_running(init_tsk, i); + } else + nr_runnable = nr_running(); + rcu_read_unlock(); + + if (rich_container) + put_task_struct(init_tsk); + seq_printf(p, "\nctxt %llu\n" "btime %llu\n" @@ -179,7 +261,7 @@ static int show_stat(struct seq_file *p, void *v) nr_context_switches(), (unsigned long long)boottime.tv_sec, total_forks, - nr_running(), + nr_runnable, nr_iowait()); seq_put_decimal_ull(p, "softirq ", 
(unsigned long long)sum_softirq); diff --git a/include/linux/sched.h b/include/linux/sched.h index e2d5c0fe053a..55b127caa406 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2528,10 +2528,21 @@ struct cpuacct_usage_result { #ifdef CONFIG_RICH_CONTAINER bool child_cpuacct(struct task_struct *tsk); +void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, + struct cpuacct_usage_result *res); +unsigned long task_ca_running(struct task_struct *tsk, int cpu); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); #else +static inline void cpuacct_get_usage_result(struct task_struct *tsk, + int cpu, struct cpuacct_usage_result *res) { } + +static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) +{ + return 0; +} + static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 093d16cf3daa..18f6d31ffbbe 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -317,6 +317,13 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) #define arch_idle_time(cpu) 0 #endif +static unsigned long ca_running(struct cpuacct *ca, int cpu); + +unsigned long task_ca_running(struct task_struct *tsk, int cpu) +{ + return ca_running(task_ca(tsk), cpu); +} + static inline struct task_group *cgroup_tg(struct cgroup *cgrp) { return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), @@ -595,6 +602,16 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } +void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, + struct cpuacct_usage_result *res) +{ + struct cpuacct *ca = task_ca(tsk); + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg = cgroup_tg(cgrp); + + __cpuacct_get_usage_result(ca, cpu, tg, res); +} + static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) { struct cpuacct *ca = css_ca(seq_css(sf)); -- Gitee From e7f280216f47590abf19e7dda20ce66ed1d76a02 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 17:05:24 +0800 Subject: [PATCH 812/953] anolis: proc/loadavg: Add load support for rich container ANBZ: #8657 Fetch nr_running and nr_uninterruptible from the scheduler (cpu cgroup) and use them to calculate the per-cgroup load the same way the global system load is calculated. Note that calc_cgroup_load() walks the whole hierarchy and calculates load periodically; it can cause overhead if there are too many cpu cgroups. Make /proc/loadavg container aware. The "uptime" command will show the per-cgroup load of the rich container.
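For intuition, the per-cgroup avenrun decays exactly like the global one; with the usual fixed-point constants (FIXED_1 = 2048 and EXP_1 for the 1-minute window, as in kernel/sched/loadavg.c), each load-calculation tick computes roughly:

    active     = (nr_running + nr_uninterruptible) * FIXED_1;
    avenrun[0] = calc_load(avenrun[0], EXP_1, active);  /* decays toward active */

so a cgroup holding a steady 2 runnable-plus-uninterruptible tasks converges to a reported 1-minute load of 2.00 (illustrative numbers, not from a real run).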
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/loadavg.c | 26 ++++++++++++-- include/linux/sched.h | 5 +++ kernel/sched/cpuacct.c | 81 ++++++++++++++++++++++++++++++++++++++++++ kernel/sched/loadavg.c | 2 ++ kernel/sched/sched.h | 3 ++ 5 files changed, 115 insertions(+), 2 deletions(-) diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 817981e57223..7205049d2935 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -9,19 +9,41 @@ #include #include #include +#include #include "internal.h" static int loadavg_proc_show(struct seq_file *m, void *v) { unsigned long avnrun[3]; + unsigned int nr_R = 0; + struct cpumask cpuset_allowed; + int i; - get_avenrun(avnrun, FIXED_1/200, 0); + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0); + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + for_each_cpu(i, &cpuset_allowed) + nr_R += task_ca_running(init_tsk, i); + put_task_struct(init_tsk); + } else { + get_avenrun(avnrun, FIXED_1/200, 0); + nr_R = nr_running(); + } + rcu_read_unlock(); seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %u/%d %d\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), - nr_running(), nr_threads, + nr_R, nr_threads, idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/include/linux/sched.h b/include/linux/sched.h index 55b127caa406..91dea8b2bb81 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2531,6 +2531,8 @@ bool child_cpuacct(struct task_struct *tsk); void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, struct cpuacct_usage_result *res); unsigned long task_ca_running(struct task_struct *tsk, int cpu); +void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, + unsigned long offset, int shift); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); @@ -2543,6 +2545,9 @@ static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) return 0; } +static inline void get_cgroup_avenrun(struct task_struct *tsk, + unsigned long *loads, unsigned long offset, int shift) { } + static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 18f6d31ffbbe..61c6e962cc81 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -32,6 +32,7 @@ struct cpuacct { u64 __percpu *cpuusage; struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; + unsigned long avenrun[3]; }; static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) @@ -319,6 +320,23 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) static unsigned long ca_running(struct cpuacct *ca, int cpu); +static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, + unsigned long offset, int shift) +{ + unsigned long *avenrun; + + avenrun = ca->avenrun; + loads[0] = (avenrun[0] + offset) << shift; + loads[1] = (avenrun[1] + offset) << shift; + loads[2] = (avenrun[2] + offset) << shift; +} + +void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, + unsigned long offset, int shift) +{ + 
__get_cgroup_avenrun(task_ca(tsk), loads, offset, shift); +} + unsigned long task_ca_running(struct task_struct *tsk, int cpu) { return ca_running(task_ca(tsk), cpu); @@ -517,6 +535,57 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, rcu_read_unlock(); } +static void cpuacct_calc_load(struct cpuacct *acct) +{ + if (acct != &root_cpuacct) { + long active = 0; + int cpu; + + for_each_possible_cpu(cpu) { + active += ca_running(acct, cpu); + active += ca_uninterruptible(acct, cpu); + } + active = active > 0 ? active * FIXED_1 : 0; + acct->avenrun[0] = calc_load(acct->avenrun[0], EXP_1, active); + acct->avenrun[1] = calc_load(acct->avenrun[1], EXP_5, active); + acct->avenrun[2] = calc_load(acct->avenrun[2], EXP_15, active); + } else { + acct->avenrun[0] = avenrun[0]; + acct->avenrun[1] = avenrun[1]; + acct->avenrun[2] = avenrun[2]; + } +} + +/* + * Currently we walk the whole cpuacct tree to perform per-cgroup + * load calculation, but it can cause overhead if there're too many + * cgroups. + * + * TODO: A better way to avoid the possible overhead. + * Consider NO_HZ. + */ +void calc_cgroup_load(void) +{ + struct cgroup_subsys_state *css; + struct cpuacct *acct; + + rcu_read_lock(); + css_for_each_descendant_pre(css, &root_cpuacct.css) { + acct = NULL; + if (css && css_tryget(css)) + acct = container_of(css, struct cpuacct, css); + rcu_read_unlock(); + if (acct) { + cpuacct_calc_load(acct); + css_put(&acct->css); + } + rcu_read_lock(); + if (!css) + break; + } + rcu_read_unlock(); +} + static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, struct task_group *tg, struct cpuacct_usage_result *res) { @@ -617,6 +686,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) struct cpuacct *ca = css_ca(seq_css(sf)); struct cgroup *cgrp = seq_css(sf)->cgroup; u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + unsigned long load, avnrun[3]; unsigned long nr_run = 0, nr_uninter = 0; int cpu; @@ -649,6 +719,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_run += ca_running(ca, cpu); nr_uninter += ca_uninterruptible(ca, cpu); } + + __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0); } else { struct kernel_cpustat *kcpustat; @@ -668,6 +740,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_run = nr_running(); nr_uninter = nr_uninterruptible(); + + get_avenrun(avnrun, FIXED_1/200, 0); } seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); @@ -680,6 +754,13 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + load = LOAD_INT(avnrun[0]) * 100 + LOAD_FRAC(avnrun[0]); + seq_printf(sf, "load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[1]) * 100 + LOAD_FRAC(avnrun[1]); + seq_printf(sf, "load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[2]) * 100 + LOAD_FRAC(avnrun[2]); + seq_printf(sf, "load average(15min) %lld\n", (u64)load); + seq_printf(sf, "nr_running %lld\n", (u64)nr_run); if ((long) nr_uninter < 0) nr_uninter = 0; diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 52c8f8226b0d..fc4bee183fce 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -371,6 +371,8 @@ void calc_global_load(void) WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); + calc_cgroup_load(); + /* * In case we went to NO_HZ for multiple LOAD_FREQ intervals * catch up in bulk. 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 55b26da8eb98..0e28246a6158 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3582,6 +3582,9 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); #ifdef CONFIG_SCHED_SLI extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); +void calc_cgroup_load(void); +#else +static inline void calc_cgroup_load(void) { } #endif #endif /* _KERNEL_SCHED_SCHED_H */ -- Gitee From 9f325c69761b3b462b4078b53ac4362de815a0b3 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 17:53:57 +0800 Subject: [PATCH 813/953] anolis: proc/uptime: Add uptime support for rich container ANBZ: #8657 Make /proc/uptime container aware. "uptime" in the container will show the time elapsed based on the container's initial task creation time. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/uptime.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index b5343d209381..95622fd62885 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include "internal.h" static int uptime_proc_show(struct seq_file *m, void *v) @@ -17,16 +19,37 @@ static int uptime_proc_show(struct seq_file *m, void *v) u32 rem; int i; + ktime_get_boottime_ts64(&uptime); + timens_add_boottime(&uptime); + idle_nsec = 0; - for_each_possible_cpu(i) { - struct kernel_cpustat kcs; - kcpustat_cpu_fetch(&kcs, i); - idle_nsec += get_idle_time(&kcs, i); - } + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + struct cpuacct_usage_result res; - ktime_get_boottime_ts64(&uptime); - timens_add_boottime(&uptime); + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + for_each_possible_cpu(i) { + cpuacct_get_usage_result(init_tsk, i, &res); + idle_nsec += res.idle; + } + uptime = timespec64_sub(uptime, + ns_to_timespec64(init_tsk->start_time)); + put_task_struct(init_tsk); + } else { + for_each_possible_cpu(i) { + struct kernel_cpustat kcs; + + kcpustat_cpu_fetch(&kcs, i); + idle_nsec += get_idle_time(&kcs, i); + } + } + rcu_read_unlock(); idle.tv_sec = div_u64_rem(idle_nsec, NSEC_PER_SEC, &rem); idle.tv_nsec = rem; -- Gitee From 25e1297be1421cff0e0f934c320685d0fc06cbb5 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 18:13:07 +0800 Subject: [PATCH 814/953] anolis: pidstat: Add task uptime support for rich container ANBZ: #8657 Make /proc/pid/stat task uptime container aware. "ps" in the container will show the time elapsed based on the container's initial task creation time. 
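A worked example with invented numbers: if the container's init task started at boottime 1000s and a task inside it started at 1250s, "ps" in the container should report the task as starting 250s into the container's uptime, hence the subtraction applied in the hunk below:

    start_time -= nsec_to_clock_t(init_tsk->start_boottime);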
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/array.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/fs/proc/array.c b/fs/proc/array.c index 37b8061d84bb..c9d00328bd8f 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -484,6 +484,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, unsigned long rsslim = 0; unsigned long flags; int exit_code = task->exit_code; + struct task_struct *init_tsk; state = *get_task_state(task); vsize = eip = esp = 0; @@ -579,6 +580,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, start_time = nsec_to_clock_t(timens_add_boottime_ns(task->start_boottime)); + /* + * While uptime in container is fixed to container start time, + * task start time needs to be fixed too, otherwise a wrong start + * time will show in "ps". + */ + rcu_read_lock(); + if (in_rich_container(current)) { + init_tsk = task_active_pid_ns(current)->child_reaper; + start_time -= nsec_to_clock_t(init_tsk->start_boottime); + } + rcu_read_unlock(); + seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns)); seq_puts(m, " ("); proc_task_name(m, task, false); -- Gitee From 5b3285d02f000b4d6b75a108826c7e4a60a42fc9 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 2 Aug 2019 18:12:09 +0800 Subject: [PATCH 815/953] anolis: fs,quota: Restrict privileged hardlimit in rich container ANBZ: #8657 For CAP_SYS_RESOURCE privileged users, fs quota's hardlimit is ignored. But in an Alibaba rich container, we expect all users, including privileged ones, to have their quota limited. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/quota/dquot.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 7a2c9b153be6..96dac4daff30 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -83,6 +83,7 @@ #include "../internal.h" /* ugh */ #include +#include /* * There are five quota SMP locks: @@ -1292,6 +1293,13 @@ static void flush_warnings(struct dquot_warn *warn) static int ignore_hardlimit(struct dquot *dquot) { struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; + bool rich_container; + + rcu_read_lock(); + rich_container = in_rich_container(current); + rcu_read_unlock(); + if (rich_container) + return 0; return capable(CAP_SYS_RESOURCE) && (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || -- Gitee From cc9139b402bd0e9771804d68e6be061c2cd1a55a Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Sun, 6 Jun 2021 19:02:10 +0800 Subject: [PATCH 816/953] anolis: sched: Add SLI switch for cpuacct ANBZ: #8657 Add an interface to switch on/off some heavy calculations of the CPU SLI features. This interface allows the user to control which cpuacct SLI data need to be tracked. Huge overhead can be avoided when there are too many cgroups. The switch is on for rich containers and pod cgroups by default.
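Assuming the enable_sli_read()/enable_sli_write() handlers below are wired up as a cpuacct cftype (that hunk is not part of this excerpt, so the exact file name is an assumption), the switch would be flipped per cgroup with something like: echo 1 > /sys/fs/cgroup/cpuacct/<cg>/cpuacct.enable_sli. Internally it only links or unlinks the cgroup on sli_ca_list under sli_ca_lock, so cgroups left disabled are simply skipped by the SLI bookkeeping that iterates that list.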
Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/sched.h | 6 +++ kernel/fork.c | 2 + kernel/sched/cpuacct.c | 111 +++++++++++++++++++++++++++++++++-------- 3 files changed, 98 insertions(+), 21 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 91dea8b2bb81..0b7cefd516f5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2555,4 +2555,10 @@ static inline bool check_rich_container(unsigned int cpu, unsigned int *index, } #endif +#ifdef CONFIG_SCHED_SLI +void create_rich_container_reaper(struct task_struct *tsk); +#else +static inline void create_rich_container_reaper(struct task_struct *tsk) { } +#endif + #endif diff --git a/kernel/fork.c b/kernel/fork.c index daf5c6e2c314..4474fbb6aa8b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2733,6 +2733,8 @@ __latent_entropy struct task_struct *copy_process( proc_fork_connector(p); sched_post_fork(p); cgroup_post_fork(p, args); + if (likely(p->pid) && is_child_reaper(pid)) + create_rich_container_reaper(p); perf_event_fork(p); trace_task_newtask(p, clone_flags); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 61c6e962cc81..0f9f16b979ca 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -32,6 +32,10 @@ struct cpuacct { u64 __percpu *cpuusage; struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; +#ifdef CONFIG_SCHED_SLI + struct list_head sli_list; + bool sli_enabled; +#endif unsigned long avenrun[3]; }; @@ -59,6 +63,57 @@ static struct cpuacct root_cpuacct = { .cpuusage = &root_cpuacct_cpuusage, }; +#ifdef CONFIG_SCHED_SLI +static DEFINE_SPINLOCK(sli_ca_lock); +LIST_HEAD(sli_ca_list); + +static void ca_enable_sli(struct cpuacct *ca, bool val) +{ + spin_lock(&sli_ca_lock); + if (val && !READ_ONCE(ca->sli_enabled)) + list_add_tail_rcu(&ca->sli_list, &sli_ca_list); + else if (!val && READ_ONCE(ca->sli_enabled)) + list_del_rcu(&ca->sli_list); + WRITE_ONCE(ca->sli_enabled, val); + spin_unlock(&sli_ca_lock); +} + +void create_rich_container_reaper(struct task_struct *tsk) +{ + struct cpuacct *ca; + struct cpuacct *parent_ca; + struct cgroup_subsys_state *css; + + if (thread_group_leader(tsk)) { + rcu_read_lock(); + css = task_css(tsk, cpuacct_cgrp_id); + ca = css_ca(css); + if (!ca || !in_rich_container(tsk)) { + rcu_read_unlock(); + return; + } + + ca_enable_sli(ca, true); + parent_ca = css_ca(css->parent); + if (parent_ca && parent_ca != &root_cpuacct) + ca_enable_sli(parent_ca, true); + rcu_read_unlock(); + } +} + +static int enable_sli_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + ca_enable_sli(css_ca(css), !!val); + return 0; +} + +static u64 enable_sli_read(struct cgroup_subsys_state *css, struct cftype *cft) +{ + return READ_ONCE(css_ca(css)->sli_enabled); +} +#endif + /* Create a new CPU accounting group */ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) @@ -85,6 +140,10 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!ca->prev_cputime) goto out_free_cpustat; +#ifdef CONFIG_SCHED_SLI + INIT_LIST_HEAD(&ca->sli_list); +#endif + for_each_possible_cpu(i) { prev_cputime_init( &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime1); @@ -104,6 +163,13 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return ERR_PTR(-ENOMEM); } +#ifdef CONFIG_SCHED_SLI +static void cpuacct_css_offline(struct cgroup_subsys_state *css) +{ + ca_enable_sli(css_ca(css), false); +} +#endif + /* 
Destroy an existing CPU accounting group */
 static void cpuacct_css_free(struct cgroup_subsys_state *css)
 {
@@ -557,32 +623,16 @@ static void cpuacct_calc_load(struct cpuacct *acct)
 }
 
 /*
- * Currently we walk the whole cpuacct tree to perform per-cgroup
- * load calculation, but it can cause overhead if there're too many
- * cgroups.
- *
- * TODO: A better way to avoid the possible overhead.
- * Consider NO_HZ.
+ * We walk cpuacct whose SLI is enabled to perform per-cgroup load calculation
+ * the overhead is acceptable if SLI is not enabled for most of the cgroups.
  */
 void calc_cgroup_load(void)
 {
-	struct cgroup_subsys_state *css;
-	struct cpuacct *acct;
+	struct cpuacct *ca;
 
 	rcu_read_lock();
-	css_for_each_descendant_pre(css, &root_cpuacct.css) {
-		acct = NULL;
-		if (css && css_tryget(css))
-			acct = container_of(css, struct cpuacct, css);
-		rcu_read_unlock();
-		if (acct) {
-			cpuacct_calc_load(acct);
-			css_put(&acct->css);
-		}
-		rcu_read_lock();
-		if (!css)
-			break;
-	}
+	list_for_each_entry_rcu(ca, &sli_ca_list, sli_list)
+		cpuacct_calc_load(ca);
 	rcu_read_unlock();
 }
 
@@ -809,6 +859,11 @@ static struct cftype files[] = {
 		.name = "proc_stat",
 		.seq_show = cpuacct_proc_stats_show,
 	},
+	{
+		.name = "enable_sli",
+		.read_u64 = enable_sli_read,
+		.write_u64 = enable_sli_write
+	},
 #endif
 	{ }	/* terminate */
 };
@@ -842,9 +897,23 @@ void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
 		__this_cpu_add(ca->cpustat->cpustat[index], val);
 }
 
+static void cpuacct_cgroup_attach(struct cgroup_taskset *tset)
+{
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+
+	cgroup_taskset_for_each(task, css, tset)
+		if (task->pid && is_child_reaper(task_pid(task)))
+			create_rich_container_reaper(task);
+}
+
 struct cgroup_subsys cpuacct_cgrp_subsys = {
 	.css_alloc	= cpuacct_css_alloc,
 	.css_free	= cpuacct_css_free,
+#ifdef CONFIG_SCHED_SLI
	.css_offline	= cpuacct_css_offline,
+#endif
+	.attach		= cpuacct_cgroup_attach,
 	.legacy_cftypes	= files,
 	.early_init	= true,
 };
-- 
Gitee

From 971b9c733df1e5c8878a1cad7bb5b0dd6b88d7e3 Mon Sep 17 00:00:00 2001
From: Michael Wang
Date: Sun, 6 Jun 2021 19:13:58 +0800
Subject: [PATCH 817/953] anolis: sched: introduce asynchronous cgroup load
 calculation.

ANBZ: #8657

Nowadays there are too many cgroups with deep hierarchies, and
calculating the load for all of them together takes a very long time,
with irqs disabled. This led to a big performance issue in our
production environment, so move the work into a dedicated kthread.
The work still takes a long time, but it no longer disables irqs, and
preemption remains possible.

The side effect of this approach is that the load calculation period
of cpuacct will not be precisely 5HZ; however, since the calculation
itself takes too long, either way has this problem anyway.
The feature is enabled by default and can be turned off with:

  echo 0 > /proc/async_load_calc

Signed-off-by: Michael Wang
Signed-off-by: Erwei Deng
Signed-off-by: Yi Tao
---
 kernel/sched/cpuacct.c | 156 +++++++++++++++++++++++++++++++++++++++++
 kernel/sched/loadavg.c |   3 +-
 kernel/sched/sched.h   |   5 ++
 3 files changed, 163 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 0f9f16b979ca..983d97d050a6 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -35,6 +35,7 @@ struct cpuacct {
 #ifdef CONFIG_SCHED_SLI
 	struct list_head sli_list;
 	bool sli_enabled;
+	u64 next_load_update;
 #endif
 	unsigned long avenrun[3];
 };
@@ -918,6 +919,161 @@ struct cgroup_subsys cpuacct_cgrp_subsys = {
 	.early_init	= true,
 };
 
+#ifdef CONFIG_SCHED_SLI
+static DEFINE_STATIC_KEY_FALSE(async_load_calc);
+
+bool async_load_calc_enabled(void)
+{
+	return static_branch_likely(&async_load_calc);
+}
+
+static int async_load_calc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%d\n", async_load_calc_enabled());
+	return 0;
+}
+
+static int async_load_calc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, async_load_calc_show, NULL);
+}
+
+static void async_calc_cgroup_load(void)
+{
+	int cnt;
+	struct cpuacct *ca;
+
+again:
+	cnt = 1;
+	rcu_read_lock();
+	list_for_each_entry_rcu(ca, &sli_ca_list, sli_list) {
+		unsigned long next_update = ca->next_load_update;
+
+		/*
+		 * Need per ca check since after break the list
+		 * could have been changed, otherwise the loop
+		 * will be endless.
+		 */
+		if (time_before(jiffies, next_update + 10))
+			continue;
+
+		cpuacct_calc_load(ca);
+		ca->next_load_update = jiffies + LOAD_FREQ;
+
+		/* Take a break for every 100 ca */
+		if (cnt++ >= 100) {
+			rcu_read_unlock();
+			cond_resched();
+			goto again;
+		}
+	}
+	rcu_read_unlock();
+}
+
+int load_calc_func(void *unsed)
+{
+	unsigned long next_update = jiffies + LOAD_FREQ;
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HZ/5);
+		set_current_state(TASK_RUNNING);
+
+		if (time_before(jiffies, next_update + 10))
+			continue;
+
+		async_calc_cgroup_load();
+		next_update += LOAD_FREQ;
+	}
+
+	return 0;
+}
+
+static struct task_struct *load_calc_p;
+
+static int mod_async_load_calc(bool enable)
+{
+	if (enable == async_load_calc_enabled())
+		return 0;
+
+	if (enable) {
+		load_calc_p = kthread_create(load_calc_func, NULL, "load_calc");
+		if (!load_calc_p)
+			return -ENOMEM;
+
+		wake_up_process(load_calc_p);
+		static_branch_enable(&async_load_calc);
+	} else {
+		kthread_stop(load_calc_p);
+		load_calc_p = NULL;
+
+		static_branch_disable(&async_load_calc);
+	}
+
+	return 0;
+}
+
+static DEFINE_MUTEX(load_calc_mutex);
+
+static ssize_t async_load_calc_write(struct file *file,
+				     const char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char val = -1;
+	int ret = 0;
+
+	if (count < 1 || *ppos) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (copy_from_user(&val, ubuf, 1)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	mutex_lock(&load_calc_mutex);
+
+	switch (val) {
+	case '0':
+		ret = mod_async_load_calc(false);
+		break;
+	case '1':
+		ret = mod_async_load_calc(true);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&load_calc_mutex);
+out:
+	return ret ?
ret : count; +} + +static const struct proc_ops async_load_calc_opt = { + .proc_open = async_load_calc_open, + .proc_read = seq_read, + .proc_write = async_load_calc_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init async_load_calc_init(void) +{ + if (!proc_create("async_load_calc", 0600, NULL, + &async_load_calc_opt)) { + pr_err("Failed to register async_load_calc interface\n"); + return 0; + } + + if (mod_async_load_calc(true)) + pr_err("Failed to enable async_load_calc\n"); + + return 0; +} +late_initcall_sync(async_load_calc_init); +#endif + #ifdef CONFIG_RICH_CONTAINER bool child_cpuacct(struct task_struct *tsk) { diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index fc4bee183fce..c11a84d4676d 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -371,7 +371,8 @@ void calc_global_load(void) WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); - calc_cgroup_load(); + if (!async_load_calc_enabled()) + calc_cgroup_load(); /* * In case we went to NO_HZ for multiple LOAD_FREQ intervals diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0e28246a6158..f10f83a68c52 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3583,8 +3583,13 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); void calc_cgroup_load(void); +bool async_load_calc_enabled(void); #else static inline void calc_cgroup_load(void) { } +static inline bool async_load_calc_enabled(void) +{ + return false; +} #endif #endif /* _KERNEL_SCHED_SCHED_H */ -- Gitee From 21f14d1ffd2ad3210eaa8fdb6db9341347afefb6 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 4 Jun 2021 18:47:36 +0800 Subject: [PATCH 818/953] anolis: meminfo: Add meminfo support for rich container ANBZ: #8657 Make /proc/meminfo container aware. "free" command will show the memory cgroup information of the rich container. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/meminfo.c | 137 ++++++++++++++++++++++++------------- include/linux/memcontrol.h | 9 +++ include/linux/vmstat.h | 17 +++++ mm/memcontrol.c | 75 ++++++++++++++++++++ 4 files changed, 192 insertions(+), 46 deletions(-) diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 45af9a989d40..e3246083c8ce 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -20,6 +20,7 @@ #include #include #include "internal.h" +#include void __attribute__((weak)) arch_report_meminfo(struct seq_file *m) { @@ -35,43 +36,89 @@ static int meminfo_proc_show(struct seq_file *m, void *v) { struct sysinfo i; unsigned long committed; - long cached; - long available; - unsigned long pages[NR_LRU_LISTS]; unsigned long sreclaimable, sunreclaim; int lru; - si_meminfo(&i); - si_swapinfo(&i); - committed = vm_memory_committed(); - - cached = global_node_page_state(NR_FILE_PAGES) - - total_swapcache_pages() - i.bufferram; - if (cached < 0) - cached = 0; + struct mem_cgroup *memcg = NULL; + struct sysinfo_ext ext; + +#ifdef CONFIG_MEMCG + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + + /* + * current may be in a subcgroup, use reaper instead. + * We assume the reaper always be in the container's + * top group. 
+ */ + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + memcg = mem_cgroup_from_task(init_tsk); + if (mem_cgroup_is_root(memcg)) + memcg = NULL; + else + css_get(&memcg->css); + put_task_struct(init_tsk); + } + rcu_read_unlock(); +#endif - for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_node_page_state(NR_LRU_BASE + lru); + if (!memcg) { + si_meminfo(&i); + si_swapinfo(&i); - available = si_mem_available(); + ext.cached = global_node_page_state(NR_FILE_PAGES) - + total_swapcache_pages() - i.bufferram; + if (ext.cached < 0) + ext.cached = 0; + + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) { + ext.lrupages[lru] = + global_node_page_state(NR_LRU_BASE + lru); + } + ext.available = si_mem_available(); + ext.file_dirty = global_node_page_state(NR_FILE_DIRTY); + ext.writeback = global_node_page_state(NR_WRITEBACK); + ext.anon_mapped = global_node_page_state(NR_ANON_MAPPED); + ext.file_mapped = global_node_page_state(NR_FILE_MAPPED); + ext.slab_reclaimable = + global_node_page_state(NR_SLAB_RECLAIMABLE_B); + ext.slab_unreclaimable = + global_node_page_state(NR_SLAB_UNRECLAIMABLE_B); + ext.kernel_stack_kb = + global_node_page_state(NR_KERNEL_STACK_KB); + ext.writeback_temp = global_node_page_state(NR_WRITEBACK_TEMP); + ext.anon_thps = global_node_page_state(NR_ANON_THPS); + ext.shmem_thps = global_node_page_state(NR_SHMEM_THPS); + ext.shmem_pmd_mapped = + global_node_page_state(NR_SHMEM_PMDMAPPED); + } else { + memcg_meminfo(memcg, &i, &ext); + } + + committed = percpu_counter_read_positive(&vm_committed_as); sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); show_val_kb(m, "MemTotal: ", i.totalram); show_val_kb(m, "MemFree: ", i.freeram); - show_val_kb(m, "MemAvailable: ", available); + show_val_kb(m, "MemAvailable: ", ext.available); show_val_kb(m, "Buffers: ", i.bufferram); - show_val_kb(m, "Cached: ", cached); + show_val_kb(m, "Cached: ", ext.cached); show_val_kb(m, "SwapCached: ", total_swapcache_pages()); - show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] + - pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] + - pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]); - show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]); - show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]); + show_val_kb(m, "Active: ", ext.lrupages[LRU_ACTIVE_ANON] + + ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive: ", ext.lrupages[LRU_INACTIVE_ANON] + + ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Active(anon): ", ext.lrupages[LRU_ACTIVE_ANON]); + show_val_kb(m, "Inactive(anon): ", ext.lrupages[LRU_INACTIVE_ANON]); + show_val_kb(m, "Active(file): ", ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive(file): ", ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Unevictable: ", ext.lrupages[LRU_UNEVICTABLE]); show_val_kb(m, "Mlocked: ", global_zone_page_state(NR_MLOCK)); #ifdef CONFIG_HIGHMEM @@ -95,22 +142,19 @@ static int meminfo_proc_show(struct seq_file *m, void *v) (unsigned long)atomic_read(&zswap_stored_pages) << (PAGE_SHIFT - 10)); #endif - show_val_kb(m, "Dirty: ", - global_node_page_state(NR_FILE_DIRTY)); - show_val_kb(m, "Writeback: ", - global_node_page_state(NR_WRITEBACK)); - 
show_val_kb(m, "AnonPages: ", - global_node_page_state(NR_ANON_MAPPED)); - show_val_kb(m, "Mapped: ", - global_node_page_state(NR_FILE_MAPPED)); + show_val_kb(m, "Dirty: ", ext.file_dirty); + show_val_kb(m, "Writeback: ", ext.writeback); + show_val_kb(m, "AnonPages: ", ext.anon_mapped); + show_val_kb(m, "Mapped: ", ext.file_mapped); show_val_kb(m, "Shmem: ", i.sharedram); show_val_kb(m, "KReclaimable: ", sreclaimable + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE)); - show_val_kb(m, "Slab: ", sreclaimable + sunreclaim); - show_val_kb(m, "SReclaimable: ", sreclaimable); - show_val_kb(m, "SUnreclaim: ", sunreclaim); - seq_printf(m, "KernelStack: %8lu kB\n", - global_node_page_state(NR_KERNEL_STACK_KB)); + show_val_kb(m, "Slab: ", + ext.slab_reclaimable + ext.slab_unreclaimable); + + show_val_kb(m, "SReclaimable: ", ext.slab_reclaimable); + show_val_kb(m, "SUnreclaim: ", ext.slab_unreclaimable); + seq_printf(m, "KernelStack: %8lu kB\n", ext.kernel_stack_kb); #ifdef CONFIG_SHADOW_CALL_STACK seq_printf(m, "ShadowCallStack:%8lu kB\n", global_node_page_state(NR_KERNEL_SCS_KB)); @@ -123,8 +167,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "NFS_Unstable: ", 0); show_val_kb(m, "Bounce: ", global_zone_page_state(NR_BOUNCE)); - show_val_kb(m, "WritebackTmp: ", - global_node_page_state(NR_WRITEBACK_TEMP)); + show_val_kb(m, "WritebackTmp: ", ext.writeback_temp); show_val_kb(m, "CommitLimit: ", vm_commit_limit()); show_val_kb(m, "Committed_AS: ", committed); seq_printf(m, "VmallocTotal: %8lu kB\n", @@ -141,12 +184,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - show_val_kb(m, "AnonHugePages: ", - global_node_page_state(NR_ANON_THPS)); - show_val_kb(m, "ShmemHugePages: ", - global_node_page_state(NR_SHMEM_THPS)); - show_val_kb(m, "ShmemPmdMapped: ", - global_node_page_state(NR_SHMEM_PMDMAPPED)); + show_val_kb(m, "AnonHugePages: ", ext.anon_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemHugePages: ", ext.shmem_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemPmdMapped: ", ext.shmem_pmd_mapped * HPAGE_PMD_NR); show_val_kb(m, "FileHugePages: ", global_node_page_state(NR_FILE_THPS)); show_val_kb(m, "FilePmdMapped: ", @@ -168,6 +208,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v) arch_report_meminfo(m); +#ifdef CONFIG_MEMCG + if (memcg) + css_put(&memcg->css); +#endif + return 0; } diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e4e24da16d2c..95aa1c4cbde4 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1159,6 +1159,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); +void memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext); + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1580,6 +1583,12 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } +static inline void +memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext) +{ +} + static inline void split_page_memcg(struct page *head, unsigned int nr) { } diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index fed855bae6d8..a3e76af9bf07 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -34,6 +34,23 @@ struct reclaim_stat { unsigned nr_lazyfree_fail; }; +struct sysinfo_ext { + unsigned long lrupages[NR_LRU_LISTS]; + unsigned long cached; + unsigned long available; + unsigned long file_dirty; + unsigned 
long writeback; + unsigned long anon_mapped; + unsigned long file_mapped; + unsigned long slab_reclaimable; + unsigned long slab_unreclaimable; + unsigned long kernel_stack_kb; + unsigned long writeback_temp; + unsigned long anon_thps; + unsigned long shmem_thps; + unsigned long shmem_pmd_mapped; +}; + enum writeback_stat_item { NR_DIRTY_THRESHOLD, NR_DIRTY_BG_THRESHOLD, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b26eb7525886..1ff9a77ff343 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7932,4 +7932,79 @@ static int __init mem_cgroup_swap_init(void) } subsys_initcall(mem_cgroup_swap_init); +void memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext) +{ + struct mem_cgroup *iter; + unsigned long limit, memsw_limit, usage, totalram_pages_tmp; + unsigned long pagecache, memcg_wmark, swap_size; + int i; + + ext->cached = memcg_page_state(memcg, NR_FILE_PAGES); + ext->file_dirty = memcg_page_state(memcg, NR_FILE_DIRTY); + ext->writeback = memcg_page_state(memcg, NR_WRITEBACK); + ext->anon_mapped = memcg_page_state(memcg, NR_ANON_MAPPED); + ext->file_mapped = memcg_page_state(memcg, NR_FILE_MAPPED); + ext->slab_reclaimable = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B); + ext->slab_unreclaimable = + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B); + ext->kernel_stack_kb = memcg_page_state(memcg, NR_KERNEL_STACK_KB); + ext->writeback_temp = 0; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + ext->anon_thps = memcg_page_state(memcg, NR_ANON_THPS); +#endif + ext->shmem_thps = 0; + ext->shmem_pmd_mapped = 0; + + swap_size = memcg_page_state(memcg, MEMCG_SWAP); + limit = memsw_limit = PAGE_COUNTER_MAX; + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) { + limit = min(limit, iter->memory.max); + memsw_limit = min(memsw_limit, iter->memsw.max); + } + usage = mem_cgroup_usage(memcg, false); + totalram_pages_tmp = totalram_pages(); + info->totalram = limit > totalram_pages_tmp ? totalram_pages_tmp : limit; + info->sharedram = memcg_page_state(memcg, NR_SHMEM); + info->freeram = info->totalram - usage; + /* these are not accounted by memcg yet */ + /* if give bufferram the global value, free may show a quite + * large number in the ±buffers/caches row, the reason is + * it's equal to group_used - global_buffer - group_cached, + * if global_buffer > group_used, we get a rewind large value. + */ + info->bufferram = 0; + info->totalhigh = totalhigh_pages(); + info->freehigh = nr_free_highpages(); + info->mem_unit = PAGE_SIZE; + + /* fill in swinfo */ + si_swapinfo(info); + if (memsw_limit < info->totalswap) + info->totalswap = memsw_limit; + info->freeswap = info->totalswap - swap_size; + + for (i = 0; i < NR_LRU_LISTS; i++) + ext->lrupages[i] = memcg_page_state(memcg, NR_LRU_BASE + i); + + /* Like what si_mem_available() does */ + + // TODO: memcg_wmark depends on background async page reclaim, waiting + // for it. 
+
+	//memcg_wmark = memcg->memory.wmark_high;
+	//if (memcg->wmark_ratio && info->totalram > memcg_wmark)
+	//	memcg_wmark = info->totalram - memcg_wmark;
+	//else
+	//	memcg_wmark = 0;
+	memcg_wmark = 0;
+
+	pagecache = ext->lrupages[LRU_ACTIVE_FILE] +
+			ext->lrupages[LRU_INACTIVE_FILE];
+	pagecache -= min(pagecache / 2, memcg_wmark);
+	ext->available = info->freeram + pagecache;
+	ext->available += ext->slab_reclaimable -
+			min(ext->slab_reclaimable / 2, memcg_wmark);
+}
+
 #endif /* CONFIG_SWAP */
-- 
Gitee

From 90e2b1f0008b1ceaa8c972e99857a211a398637b Mon Sep 17 00:00:00 2001
From: Xunlei Pang
Date: Tue, 8 Jun 2021 18:42:07 +0800
Subject: [PATCH 819/953] anolis: sched/fair: Add parent_wait_contrib
 statistics

ANBZ: #8657

Add a new field "parent_wait_contrib" to struct sched_statistics. It
holds the parent's wait_sum accumulated while this se is running,
i.e. the time preempted by other groups outside the parent group.

For a cgroup se:
  "sum_exec_runtime" stands for "ON CPU" time.
  "sum_exec_runtime + wait_sum" stands for "Serve" time.
  "wait_sum" stands for "Queue" time.
  "parent_wait_contrib" stands for "Queue other" time.
  "wait_sum - parent_wait_contrib" stands for "Queue self" time.

This is useful for container collision analysis.

Signed-off-by: Xunlei Pang
Signed-off-by: Yihao Wu
Signed-off-by: Erwei Deng
Signed-off-by: Yi Tao
---
 include/linux/sched.h |  2 ++
 kernel/sched/debug.c  |  2 ++
 kernel/sched/fair.c   | 35 +++++++++++++++++++++++++++++++++--
 3 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 91dea8b2bb81..0b7cefd516f5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -513,6 +513,8 @@ struct sched_statistics {
 	u64				wait_max;
 	u64				wait_count;
 	u64				wait_sum;
+	u64				parent_wait_sum_base;
+	u64				parent_wait_contrib;
 	u64				iowait_count;
 	u64				iowait_sum;
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 0baa877597df..075750fbf7b1 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -519,6 +519,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 		PN_SCHEDSTAT(slice_max);
 		PN_SCHEDSTAT(wait_max);
 		PN_SCHEDSTAT(wait_sum);
+		PN_SCHEDSTAT(parent_wait_contrib);
 		P_SCHEDSTAT(wait_count);
 	}
 
@@ -1022,6 +1023,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 		PN_SCHEDSTAT(slice_max);
 		PN_SCHEDSTAT(wait_max);
 		PN_SCHEDSTAT(wait_sum);
+		PN_SCHEDSTAT(parent_wait_contrib);
 		P_SCHEDSTAT(wait_count);
 		PN_SCHEDSTAT(iowait_sum);
 		P_SCHEDSTAT(iowait_count);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 42bacd5e5dd6..272e27ba6c93 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1215,8 +1215,10 @@ static void update_curr_fair(struct rq *rq)
 static inline void
 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct sched_statistics *stats;
+	struct sched_statistics *stats, *pstats;
 	struct task_struct *p = NULL;
+	u64 parent_wait_sum, delta, clock = rq_clock(rq_of(cfs_rq));
+	struct sched_entity *pse = parent_entity(se);
 
 	if (!schedstat_enabled())
 		return;
@@ -1227,13 +1229,28 @@ update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		p = task_of(se);
 
 	__update_stats_wait_start(rq_of(cfs_rq), p, stats);
+
+	if (!pse)
+		return;
+
+	pstats = __schedstats_from_se(pse);
+
+	if (schedstat_val(pstats->wait_start))
+		delta = clock - schedstat_val(pstats->wait_start);
+	else
+		delta = 0;
+	parent_wait_sum = schedstat_val(pstats->wait_sum) + delta;
+	__schedstat_set(stats->parent_wait_sum_base, parent_wait_sum);
 }
 
 static
inline void
update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct sched_statistics *stats;
+	struct sched_statistics *stats, *pstats;
 	struct task_struct *p = NULL;
+	struct sched_entity *pse = parent_entity(se);
+	u64 parent_wait_sum, clock = rq_clock(rq_of(cfs_rq));
+	u64 delta;
 
 	if (!schedstat_enabled())
 		return;
@@ -1253,6 +1270,20 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		p = task_of(se);
 
 	__update_stats_wait_end(rq_of(cfs_rq), p, stats);
+
+	if (!pse)
+		return;
+
+	pstats = __schedstats_from_se(pse);
+
+	/* pick_next_task_fair() can update parent wait_start to 0 */
+	if (schedstat_val(pstats->wait_start))
+		delta = clock - schedstat_val(pstats->wait_start);
+	else
+		delta = 0;
+	parent_wait_sum = schedstat_val(pstats->wait_sum) + delta;
+	delta = parent_wait_sum - schedstat_val(stats->parent_wait_sum_base);
+	__schedstat_add(stats->parent_wait_contrib, delta);
 }
 
 static inline void
-- 
Gitee

From b0331752bb2ecbbd8201b091f96d549c1674c781 Mon Sep 17 00:00:00 2001
From: Xunlei Pang
Date: Tue, 8 Jun 2021 18:10:25 +0800
Subject: [PATCH 820/953] anolis: sched/fair: Add sched_cfs_statistics to
 export some

ANBZ: #8657

Export the following cfs statistics of cgroups:

  cat cpuacct.sched_cfs_statistics
  [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time]

These values include the throttle time exported by "cgroup/cpu/cpu.stat".

Signed-off-by: Xunlei Pang
Signed-off-by: Yihao Wu
Signed-off-by: Erwei Deng
Signed-off-by: Yi Tao
---
 kernel/sched/cpuacct.c | 45 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 983d97d050a6..b7d2e32c2387 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -819,6 +819,47 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
 
 	return 0;
 }
+
+static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v)
+{
+	struct cgroup *cgrp = seq_css(sf)->cgroup;
+	struct task_group *tg = cgroup_tg(cgrp);
+	struct sched_entity *se;
+	struct sched_statistics *stats;
+	int cpu;
+	u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0;
+
+	if (!schedstat_enabled())
+		goto out_show;
+
+	rcu_read_lock();
+	tg = cgroup_tg(cgrp);
+	if (unlikely(!tg)) {
+		WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together");
+		goto rcu_unlock_show;
+	}
+
+	for_each_online_cpu(cpu) {
+		se = tg->se[cpu];
+		if (!se)
+			continue;
+		stats = __schedstats_from_se(se);
+		exec_sum += schedstat_val(se->sum_exec_runtime);
+		wait_sum_other +=
+			schedstat_val(stats->parent_wait_contrib);
+		wait_sum += schedstat_val(stats->wait_sum);
+		wait_max = max(wait_max, schedstat_val(stats->wait_max));
+	}
+rcu_unlock_show:
+	rcu_read_unlock();
+out_show:
+	/* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */
+	seq_printf(sf, "%lld %lld %lld %lld %lld\n",
+		exec_sum + wait_sum, exec_sum, wait_sum_other,
+		wait_sum - wait_sum_other, wait_max);
+
+	return 0;
+}
 #endif
 
 static struct cftype files[] = {
@@ -865,6 +906,10 @@ static struct cftype files[] = {
 		.read_u64 = enable_sli_read,
 		.write_u64 = enable_sli_write
 	},
+	{
+		.name = "sched_cfs_statistics",
+		.seq_show = cpuacct_sched_cfs_show,
+	},
 #endif
 	{ }	/* terminate */
 };
-- 
Gitee

From 442fcfe7649bb9145a0653a742b075a93ad80f4c Mon Sep 17 00:00:00 2001
From: Yihao Wu
Date: Tue, 8 Jun 2021 19:42:36 +0800
Subject: [PATCH 821/953] anolis: cpuacct: make cpuacct record nr_migrations

ANBZ: #8657

This patch enables cpuacct
to monitor the number of cross-CPU migrations. Output as follows:

  [root@caspar /sys/fs/cgroup/cpuacct] # cat cpuacct.proc_stat
  user 7727
  nice 4
  nr_migrations 48432

Signed-off-by: Zhu Yanhai
Signed-off-by: Caspar Zhang
Signed-off-by: Yihao Wu
Signed-off-by: Erwei Deng
Signed-off-by: Yi Tao
---
 kernel/sched/core.c    |  1 +
 kernel/sched/cpuacct.c | 44 ++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h   |  2 ++
 3 files changed, 47 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 47370c27889b..0517646191da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3411,6 +3411,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.nr_migrations++;
 		rseq_migrate(p);
 		sched_mm_cid_migrate_from(p);
+		task_ca_increase_nr_migrations(p);
 		perf_event_task_migrate(p);
 	}
 
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index b7d2e32c2387..f66127f79c39 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -25,6 +25,13 @@ struct cpuacct_prev_cputime {
 	struct prev_cputime prev_cputime2; /* user and nice */
 } ____cacheline_aligned;
 
+#ifdef CONFIG_SCHED_SLI
+/* Maintain various statistics */
+struct cpuacct_alistats {
+	u64		nr_migrations;
+} ____cacheline_aligned;
+#endif
+
 /* track CPU usage of a group of tasks and its child groups */
 struct cpuacct {
 	struct cgroup_subsys_state css;
@@ -33,6 +40,7 @@ struct cpuacct {
 	struct cpuacct_prev_cputime __percpu *prev_cputime;
 	struct kernel_cpustat __percpu *cpustat;
 #ifdef CONFIG_SCHED_SLI
+	struct cpuacct_alistats __percpu *alistats;
 	struct list_head sli_list;
 	bool sli_enabled;
 	u64 next_load_update;
@@ -58,12 +66,32 @@ static inline struct cpuacct *parent_ca(struct cpuacct *ca)
 static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
 static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime);
 
+#ifdef CONFIG_SCHED_SLI
+static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats);
+#endif
+
 static struct cpuacct root_cpuacct = {
 	.cpustat	= &kernel_cpustat,
 	.prev_cputime	= &root_cpuacct_prev_cputime,
 	.cpuusage	= &root_cpuacct_cpuusage,
+#ifdef CONFIG_SCHED_SLI
+	.alistats	= &root_alistats,
+#endif
 };
 
+#ifdef CONFIG_SCHED_SLI
+void task_ca_increase_nr_migrations(struct task_struct *tsk)
+{
+	struct cpuacct *ca;
+
+	rcu_read_lock();
+	ca = task_ca(tsk);
+	if (ca)
+		this_cpu_ptr(ca->alistats)->nr_migrations++;
+	rcu_read_unlock();
+}
+#endif
+
 #ifdef CONFIG_SCHED_SLI
 static DEFINE_SPINLOCK(sli_ca_lock);
 LIST_HEAD(sli_ca_list);
@@ -143,6 +171,10 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 
 #ifdef CONFIG_SCHED_SLI
 	INIT_LIST_HEAD(&ca->sli_list);
+
+	ca->alistats = alloc_percpu(struct cpuacct_alistats);
+	if (!ca->alistats)
+		goto out_free_pre_cputime;
 #endif
 
 	for_each_possible_cpu(i) {
@@ -154,6 +186,8 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 
 	return &ca->css;
 
+out_free_pre_cputime:
+	free_percpu(ca->prev_cputime);
 out_free_cpustat:
 	free_percpu(ca->cpustat);
 out_free_cpuusage:
@@ -179,6 +213,9 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css)
 	free_percpu(ca->prev_cputime);
 	free_percpu(ca->cpustat);
 	free_percpu(ca->cpuusage);
+#ifdef CONFIG_SCHED_SLI
+	free_percpu(ca->alistats);
+#endif
 	kfree(ca);
 }
 
@@ -737,6 +774,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
 	struct cpuacct *ca = css_ca(seq_css(sf));
 	struct cgroup *cgrp = seq_css(sf)->cgroup;
 	u64 user, nice, system, idle, iowait, irq, softirq, steal, guest;
+	u64 nr_migrations = 0;
+	struct cpuacct_alistats *alistats;
 	unsigned long load, avnrun[3];
 	unsigned
long nr_run = 0, nr_uninter = 0;
 	int cpu;
 
@@ -767,6 +806,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
 			iowait += res.iowait;
 			idle += res.idle;
 
+			alistats = per_cpu_ptr(ca->alistats, cpu);
+			nr_migrations += alistats->nr_migrations;
 			nr_run += ca_running(ca, cpu);
 			nr_uninter += ca_uninterruptible(ca, cpu);
 		}
@@ -787,6 +828,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
 			idle += get_idle_time(kcpustat, cpu);
 			iowait += get_iowait_time(kcpustat, cpu);
 			steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+			alistats = per_cpu_ptr(ca->alistats, cpu);
+			nr_migrations += alistats->nr_migrations;
 		}
 
 		nr_run = nr_running();
@@ -816,6 +859,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
 	if ((long) nr_uninter < 0)
 		nr_uninter = 0;
 	seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter);
+	seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations);
 
 	return 0;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f10f83a68c52..1e5190b2514b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3582,9 +3582,11 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
 #ifdef CONFIG_SCHED_SLI
 extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
 extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu);
+extern void task_ca_increase_nr_migrations(struct task_struct *tsk);
 void calc_cgroup_load(void);
 bool async_load_calc_enabled(void);
 #else
+static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { }
 static inline void calc_cgroup_load(void) { }
 static inline bool async_load_calc_enabled(void)
 {
-- 
Gitee

From dff02857218bce1f21c83c7b54aaf9c70b402e39 Mon Sep 17 00:00:00 2001
From: Yihao Wu
Date: Wed, 9 Jun 2021 10:39:58 +0800
Subject: [PATCH 822/953] anolis: Introduce cfs scheduling latency histograms

ANBZ: #8657

Export wait_latency in "cpuacct.wait_latency", which indicates the
time that tasks in a cpuacct cgroup wait on a cfs_rq to be scheduled.
This is like "perf sched", but with lower overhead, so it can be used
as a constant monitor.

wait_latency is useful for debugging an application's high-RT
problems. It can tell whether they are caused by scheduling or not.
If they are, loadavg can tell whether the cause is bad scheduling
behaviour or system overload.

System admins can also use wait_latency to define an SLA. To ensure
the SLA is guaranteed, there are various ways to decrease
wait_latency.

This feature is disabled by default for performance concerns.
It can be switched on dynamically with "echo 1 >
/proc/cpusli/sched_lat_enabled".

Example:

  $ cat /sys/fs/cgroup/cpuacct/a/cpuacct.wait_latency
  0-1ms:          4139
  1-4ms:          317
  4-7ms:          568
  7-10ms:         0
  10-100ms:       42324
  100-500ms:      9131
  500-1000ms:     95
  1000-5000ms:    134
  5000-10000ms:   0
  >=10000ms:      0
  total(ms):      4256455

Signed-off-by: Yihao Wu
Signed-off-by: Erwei Deng
Signed-off-by: Yi Tao
---
 kernel/sched/cpuacct.c | 255 ++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/fair.c    |   6 +-
 kernel/sched/sched.h   |   3 +
 3 files changed, 262 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index f66127f79c39..0e60d1ad84fd 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -32,6 +32,72 @@ struct cpuacct_alistats {
 } ____cacheline_aligned;
 #endif
 
+enum sched_lat_stat_item {
+	SCHED_LAT_WAIT,
+	SCHED_LAT_NR_STAT
+};
+
+/*
+ * [0, 1ms)
+ * [1, 4ms)
+ * [4, 7ms)
+ * [7, 10ms)
+ * [10, 100ms)
+ * [100, 500ms)
+ * [500, 1000ms)
+ * [1000, 5000ms)
+ * [5000, 10000ms)
+ * [10000ms, INF)
+ * total(ms)
+ */
+/* Scheduler latency histogram distribution, in milliseconds */
+enum sched_lat_count_t {
+	SCHED_LAT_0_1,
+	SCHED_LAT_1_4,
+	SCHED_LAT_4_7,
+	SCHED_LAT_7_10,
+	SCHED_LAT_10_100,
+	SCHED_LAT_100_500,
+	SCHED_LAT_500_1000,
+	SCHED_LAT_1000_5000,
+	SCHED_LAT_5000_10000,
+	SCHED_LAT_10000_INF,
+	SCHED_LAT_TOTAL,
+	SCHED_LAT_NR_COUNT,
+};
+
+struct sched_cgroup_lat_stat_cpu {
+	unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT];
+};
+
+static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs)
+{
+	enum sched_lat_count_t idx;
+
+	if (msecs < 1)
+		idx = SCHED_LAT_0_1;
+	else if (msecs < 4)
+		idx = SCHED_LAT_1_4;
+	else if (msecs < 7)
+		idx = SCHED_LAT_4_7;
+	else if (msecs < 10)
+		idx = SCHED_LAT_7_10;
+	else if (msecs < 100)
+		idx = SCHED_LAT_10_100;
+	else if (msecs < 500)
+		idx = SCHED_LAT_100_500;
+	else if (msecs < 1000)
+		idx = SCHED_LAT_500_1000;
+	else if (msecs < 5000)
+		idx = SCHED_LAT_1000_5000;
+	else if (msecs < 10000)
+		idx = SCHED_LAT_5000_10000;
+	else
+		idx = SCHED_LAT_10000_INF;
+
+	return idx;
+}
+
 /* track CPU usage of a group of tasks and its child groups */
 struct cpuacct {
 	struct cgroup_subsys_state css;
@@ -41,6 +107,7 @@ struct cpuacct {
 	struct kernel_cpustat __percpu *cpustat;
 #ifdef CONFIG_SCHED_SLI
 	struct cpuacct_alistats __percpu *alistats;
+	struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu;
 	struct list_head sli_list;
 	bool sli_enabled;
 	u64 next_load_update;
@@ -68,6 +135,7 @@ static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
 static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime);
 #ifdef CONFIG_SCHED_SLI
 static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats);
+static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu);
 #endif
 
 static struct cpuacct root_cpuacct = {
@@ -76,10 +144,83 @@ static struct cpuacct root_cpuacct = {
 	.cpuusage	= &root_cpuacct_cpuusage,
 #ifdef CONFIG_SCHED_SLI
 	.alistats	= &root_alistats,
+	.lat_stat_cpu	= &root_lat_stat_cpu,
 #endif
 };
 
 #ifdef CONFIG_SCHED_SLI
+static DEFINE_STATIC_KEY_TRUE(cpuacct_no_sched_lat);
+static int cpuacct_sched_lat_enabled_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%d\n", !static_key_enabled(&cpuacct_no_sched_lat));
+	return 0;
+}
+
+static int cpuacct_sched_lat_enabled_open(struct inode *inode,
+					  struct file *file)
+{
+	return single_open(file, cpuacct_sched_lat_enabled_show, NULL);
+}
+
+static ssize_t cpuacct_sched_lat_enabled_write(struct file *file,
+					       const char __user *ubuf,
+					       size_t count, loff_t *ppos)
+{
+
char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cpuacct_no_sched_lat); + break; + case '1': + static_branch_disable(&cpuacct_no_sched_lat); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops cpuacct_sched_lat_enabled_fops = { + .proc_open = cpuacct_sched_lat_enabled_open, + .proc_read = seq_read, + .proc_write = cpuacct_sched_lat_enabled_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init init_cpuacct_sched_lat_enabled(void) +{ + struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; + + ca_dir = proc_mkdir("cpusli", NULL); + if (!ca_dir) + return -ENOMEM; + + sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, + ca_dir, &cpuacct_sched_lat_enabled_fops); + if (!sched_lat_enabled_file) { + remove_proc_entry("cpusli", NULL); + return -ENOMEM; + } + + return 0; +} +device_initcall(init_cpuacct_sched_lat_enabled); + void task_ca_increase_nr_migrations(struct task_struct *tsk) { struct cpuacct *ca; @@ -90,6 +231,25 @@ void task_ca_increase_nr_migrations(struct task_struct *tsk) this_cpu_ptr(ca->alistats)->nr_migrations++; rcu_read_unlock(); } + +void cpuacct_update_latency(struct task_struct *tsk, u64 delta) +{ + enum sched_lat_count_t idx; + struct cpuacct *ca; + unsigned int msecs; + + if (static_branch_likely(&cpuacct_no_sched_lat)) + return; + + rcu_read_lock(); + ca = task_ca(tsk); + msecs = delta >> 20; /* Proximately to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][idx]); + this_cpu_add(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][SCHED_LAT_TOTAL], + delta); + rcu_read_unlock(); +} #endif #ifdef CONFIG_SCHED_SLI @@ -170,11 +330,16 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) goto out_free_cpustat; #ifdef CONFIG_SCHED_SLI + ca->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); + if (!ca->lat_stat_cpu) + goto out_free_pre_cputime; + + INIT_LIST_HEAD(&ca->sli_list); ca->alistats = alloc_percpu(struct cpuacct_alistats); if (!ca->alistats) - goto out_free_pre_cputime; + goto out_free_lat_stat_cpu; #endif for_each_possible_cpu(i) { @@ -186,8 +351,12 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return &ca->css; +#ifdef CONFIG_SCHED_SLI +out_free_lat_stat_cpu: + free_percpu(ca->lat_stat_cpu); out_free_pre_cputime: free_percpu(ca->prev_cputime); +#endif out_free_cpustat: free_percpu(ca->cpustat); out_free_cpuusage: @@ -215,6 +384,7 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css) free_percpu(ca->cpuusage); #ifdef CONFIG_SCHED_SLI free_percpu(ca->alistats); + free_percpu(ca->lat_stat_cpu); #endif kfree(ca); } @@ -904,6 +1074,83 @@ static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v) return 0; } + +#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct cpuacct *ca = (struct cpuacct *)info; \ + int i; \ + \ + for (i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ + this_cpu_write(ca->lat_stat_cpu->item[sidx][i], 0); \ +} \ + +SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); + +smp_call_func_t smp_sched_lat_write_funcs[] = { + smp_write_sched_wait_latency +}; + +static int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct cpuacct *ca = css_ca(css); + enum sched_lat_stat_item idx = cft->private; 
+ smp_call_func_t func = smp_sched_lat_write_funcs[idx]; + + if (val != 0) + return -EINVAL; + + func((void *)ca); + smp_call_function(func, (void *)ca, 1); + + return 0; +} + +static u64 sched_lat_stat_gather(struct cpuacct *ca, + enum sched_lat_stat_item sidx, + enum sched_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(ca->lat_stat_cpu, cpu)->item[sidx][cidx]; + + return sum; +} + +static int sched_lat_stat_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + enum sched_lat_stat_item s = seq_cft(sf)->private; + + /* CFS scheduling latency cgroup and task histgrams */ + seq_printf(sf, "0-1ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_0_1)); + seq_printf(sf, "1-4ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_1_4)); + seq_printf(sf, "4-7ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_4_7)); + seq_printf(sf, "7-10ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_7_10)); + seq_printf(sf, "10-100ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_10_100)); + seq_printf(sf, "100-500ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_100_500)); + seq_printf(sf, "500-1000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_500_1000)); + seq_printf(sf, "1000-5000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_1000_5000)); + seq_printf(sf, "5000-10000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_5000_10000)); + seq_printf(sf, ">=10000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_10000_INF)); + seq_printf(sf, "total(ms): \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_TOTAL) / 1000000); + + return 0; +} #endif static struct cftype files[] = { @@ -954,6 +1201,12 @@ static struct cftype files[] = { .name = "sched_cfs_statistics", .seq_show = cpuacct_sched_cfs_show, }, + { + .name = "wait_latency", + .private = SCHED_LAT_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, #endif { } /* terminate */ }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 272e27ba6c93..f1d522883151 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1257,6 +1257,8 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) stats = __schedstats_from_se(se); + delta = clock - schedstat_val(stats->wait_start); + /* * When the sched_schedstat changes from 0 to 1, some sched se * maybe already in the runqueue, the se->statistics.wait_start @@ -1266,8 +1268,10 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) if (unlikely(!schedstat_val(stats->wait_start))) return; - if (entity_is_task(se)) + if (entity_is_task(se)) { p = task_of(se); + cpuacct_update_latency(p, delta); + } __update_stats_wait_end(rq_of(cfs_rq), p, stats); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1e5190b2514b..4722da3c4785 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3583,10 +3583,13 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); extern void task_ca_increase_nr_migrations(struct task_struct *tsk); +void cpuacct_update_latency(struct task_struct *tsk, u64 delta); void calc_cgroup_load(void); bool async_load_calc_enabled(void); #else static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } +static inline void cpuacct_update_latency(struct task_struct *tsk, + u64 delta) { } static inline void 
calc_cgroup_load(void) { }
 static inline bool async_load_calc_enabled(void)
 {
-- 
Gitee

From bce1008e350405c6e785485dc046b6ff9815fe74 Mon Sep 17 00:00:00 2001
From: Yihao Wu
Date: Wed, 9 Jun 2021 11:03:59 +0800
Subject: [PATCH 823/953] anolis: sched: Add cgroup-level blocked time
 histograms
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

ANBZ: #8657

This patch measures the time that tasks in a cpuacct cgroup spend
blocked. There are two types: blocked due to IO, and others such as
locks. They are exported in "cpuacct.ioblock_latency" and
"cpuacct.block_latency" respectively.

According to the histogram, we know the detailed distribution of the
block duration. And according to total(ms), we know the percentage of
time tasks spent off the rq, waiting for resources:

  (△ioblock_latency.total(ms) + △block_latency.total(ms)) / △wall_time

The interface output format is identical to cpuacct.wait_latency.

Signed-off-by: Yihao Wu
Signed-off-by: Erwei Deng
Signed-off-by: Yi Tao
---
 kernel/sched/cpuacct.c | 52 +++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h   |  3 +++
 kernel/sched/stats.c   |  1 +
 3 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 0e60d1ad84fd..34b40e7b6d57 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -34,6 +34,8 @@ struct cpuacct_alistats {
 
 enum sched_lat_stat_item {
 	SCHED_LAT_WAIT,
+	SCHED_LAT_BLOCK,
+	SCHED_LAT_IOBLOCK,
 	SCHED_LAT_NR_STAT
 };
 
@@ -232,6 +234,34 @@ void task_ca_increase_nr_migrations(struct task_struct *tsk)
 	rcu_read_unlock();
 }
 
+void task_ca_update_block(struct task_struct *tsk, u64 runtime)
+{
+	int idx;
+	enum sched_lat_stat_item s;
+	struct cpuacct *ca;
+	unsigned int msecs;
+
+	if (static_branch_likely(&cpuacct_no_sched_lat))
+		return;
+
+	rcu_read_lock();
+	ca = task_ca(tsk);
+	if (!ca) {
+		rcu_read_unlock();
+		return;
+	}
+	if (tsk->in_iowait)
+		s = SCHED_LAT_IOBLOCK;
+	else
+		s = SCHED_LAT_BLOCK;
+
+	msecs = runtime >> 20; /* Proximately to speed up */
+	idx = get_sched_lat_count_idx(msecs);
+	this_cpu_inc(ca->lat_stat_cpu->item[s][idx]);
+	this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime);
+	rcu_read_unlock();
+}
+
 void cpuacct_update_latency(struct task_struct *tsk, u64 delta)
 {
 	enum sched_lat_count_t idx;
@@ -243,6 +273,10 @@ void cpuacct_update_latency(struct task_struct *tsk, u64 delta)
 
 	rcu_read_lock();
 	ca = task_ca(tsk);
+	if (!ca) {
+		rcu_read_unlock();
+		return;
+	}
 	msecs = delta >> 20; /* Proximately to speed up */
 	idx = get_sched_lat_count_idx(msecs);
 	this_cpu_inc(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][idx]);
@@ -1086,9 +1120,13 @@ static void smp_write_##name(void *info)			\
 }									\
 
 SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT);
+SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK);
+SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK);
 
 smp_call_func_t smp_sched_lat_write_funcs[] = {
-	smp_write_sched_wait_latency
+	smp_write_sched_wait_latency,
+	smp_write_sched_block_latency,
+	smp_write_sched_ioblock_latency
 };
 
 static int sched_lat_stat_write(struct cgroup_subsys_state *css,
@@ -1207,6 +1245,18 @@ static struct cftype files[] = {
 		.write_u64 = sched_lat_stat_write,
 		.seq_show = sched_lat_stat_show
 	},
+	{
+		.name = "block_latency",
+		.private = SCHED_LAT_BLOCK,
+		.write_u64 = sched_lat_stat_write,
+		.seq_show = sched_lat_stat_show
+	},
+	{
+		.name = "ioblock_latency",
+		.private = SCHED_LAT_IOBLOCK,
+		.write_u64 = sched_lat_stat_write,
+		.seq_show = sched_lat_stat_show
+	},
 #endif
 	{ }	/*
terminate */
 };
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4722da3c4785..fb6e68549b6d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3584,12 +3584,15 @@ extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
 extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu);
 extern void task_ca_increase_nr_migrations(struct task_struct *tsk);
 void cpuacct_update_latency(struct task_struct *tsk, u64 delta);
+void task_ca_update_block(struct task_struct *tsk, u64 runtime);
 void calc_cgroup_load(void);
 bool async_load_calc_enabled(void);
 #else
 static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { }
 static inline void cpuacct_update_latency(struct task_struct *tsk,
 					  u64 delta) { }
+static inline void task_ca_update_block(struct task_struct *tsk,
+					u64 runtime) { }
 static inline void calc_cgroup_load(void) { }
 static inline bool async_load_calc_enabled(void)
 {
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 857f837f52cb..3da2fdf4aca7 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -85,6 +85,7 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
 
 	if (p) {
 		if (p->in_iowait) {
+			task_ca_update_block(p, delta);
 			__schedstat_add(stats->iowait_sum, delta);
 			__schedstat_inc(stats->iowait_count);
 			trace_sched_stat_iowait(p, delta);
-- 
Gitee

From d1d0d2f79a75da2b4721daaff261fe4dbe2bf6a6 Mon Sep 17 00:00:00 2001
From: Yihao Wu
Date: Wed, 9 Jun 2021 14:12:44 +0800
Subject: [PATCH 824/953] anolis: sched: Add cgroup's scheduling latency
 histograms

ANBZ: #8657

This patch adds the cpuacct.cgroup_wait_latency interface. It exports
the histogram of the sched entity's schedule latency. Unlike
wait_latency, the sched entity here is a cgroup rather than a task.

This is useful when tasks are not directly clustered under one
cgroup. For example:

  cgroup1 --- cgroupA --- task1
          --- cgroupB --- task2
  cgroup2 --- cgroupC --- task3
          --- cgroupD --- task4

This is a common cgroup hierarchy used by many applications. With
cgroup_wait_latency, we can just read from cgroup1 to know the
aggregated wait latency information of task1 and task2.

The interface output format is identical to cpuacct.wait_latency.

Signed-off-by: Yihao Wu
Signed-off-by: Erwei Deng
Signed-off-by: Yi Tao
---
 kernel/sched/cpuacct.c | 37 +++++++++++++++++++++++++++++------
 kernel/sched/fair.c    |  6 +++---
 kernel/sched/sched.h   |  4 ++--
 3 files changed, 36 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 34b40e7b6d57..5c562b2fee25 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -36,6 +36,7 @@ enum sched_lat_stat_item {
 	SCHED_LAT_WAIT,
 	SCHED_LAT_BLOCK,
 	SCHED_LAT_IOBLOCK,
+	SCHED_LAT_CGROUP_WAIT,
 	SCHED_LAT_NR_STAT
 };
 
@@ -117,6 +118,12 @@ struct cpuacct {
 	unsigned long avenrun[3];
 };
 
+static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+{
+	return container_of(global_cgroup_css(cgrp, cpuacct_cgrp_id),
+				struct cpuacct, css);
+}
+
 static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
 {
 	return css ?
container_of(css, struct cpuacct, css) : NULL; @@ -262,26 +269,36 @@ void task_ca_update_block(struct task_struct *tsk, u64 runtime) rcu_read_unlock(); } -void cpuacct_update_latency(struct task_struct *tsk, u64 delta) +void cpuacct_update_latency(struct sched_entity *se, u64 delta) { - enum sched_lat_count_t idx; + int idx; + enum sched_lat_stat_item s; struct cpuacct *ca; unsigned int msecs; + struct task_group *tg; if (static_branch_likely(&cpuacct_no_sched_lat)) return; + tg = se->cfs_rq->tg; + if (task_group_is_autogroup(tg)) + return; + rcu_read_lock(); - ca = task_ca(tsk); + ca = cgroup_ca(tg->css.cgroup); if (!ca) { rcu_read_unlock(); return; } + if (entity_is_task(se)) + s = SCHED_LAT_WAIT; + else + s = SCHED_LAT_CGROUP_WAIT; + msecs = delta >> 20; /* Proximately to speed up */ idx = get_sched_lat_count_idx(msecs); - this_cpu_inc(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][idx]); - this_cpu_add(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][SCHED_LAT_TOTAL], - delta); + this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); + this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); rcu_read_unlock(); } #endif @@ -1120,11 +1137,13 @@ static void smp_write_##name(void *info) \ } \ SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); smp_call_func_t smp_sched_lat_write_funcs[] = { smp_write_sched_wait_latency, + smp_write_sched_wait_cgroup_latency, smp_write_sched_block_latency, smp_write_sched_ioblock_latency }; @@ -1245,6 +1264,12 @@ static struct cftype files[] = { .write_u64 = sched_lat_stat_write, .seq_show = sched_lat_stat_show }, + { + .name = "cgroup_wait_latency", + .private = SCHED_LAT_CGROUP_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, { .name = "block_latency", .private = SCHED_LAT_BLOCK, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f1d522883151..89b24d93e03c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1268,10 +1268,10 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) if (unlikely(!schedstat_val(stats->wait_start))) return; - if (entity_is_task(se)) { + if (entity_is_task(se)) p = task_of(se); - cpuacct_update_latency(p, delta); - } + + cpuacct_update_latency(se, delta); __update_stats_wait_end(rq_of(cfs_rq), p, stats); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index fb6e68549b6d..34720a51c5df 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3583,13 +3583,13 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); extern void task_ca_increase_nr_migrations(struct task_struct *tsk); -void cpuacct_update_latency(struct task_struct *tsk, u64 delta); +void cpuacct_update_latency(struct sched_entity *se, u64 delta); void task_ca_update_block(struct task_struct *tsk, u64 runtime); void calc_cgroup_load(void); bool async_load_calc_enabled(void); #else static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } -static inline void cpuacct_update_latency(struct task_struct *tsk, +static inline void cpuacct_update_latency(struct sched_entity *se, u64 delta) { } static inline void task_ca_update_block(struct task_struct *tsk, u64 runtime) { } -- Gitee From 
92bcc935b9b0c236e3aa7d4ccf827081ac2f5932 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Thu, 18 Jun 2020 18:30:20 +0800 Subject: [PATCH 825/953] anolis: sched: Add "nr" to sched latency histogram ANBZ: #8657 Sometimes the histogram is not precise enough, because each sample is only roughly accounted into a histogram bucket, and an average latency is more practical for some users. This patch adds a "nr" field to the 4 latency histogram interfaces, so that lat(avg) = total(ms) / nr. Compared to the histogram, the average latency is also better suited for use as an SLI because of its simplicity. Example: $ cat /sys/fs/cgroup/cpuacct/a/cpuacct.wait_latency 0-1ms: 4139 1-4ms: 317 4-7ms: 568 7-10ms: 0 10-100ms: 42324 100-500ms: 9131 500-1000ms: 95 1000-5000ms: 134 5000-10000ms: 0 >=10000ms: 0 total(ms): 4256455 nr: 182128 Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 5c562b2fee25..aaff1693edd3 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -66,6 +66,7 @@ enum sched_lat_count_t { SCHED_LAT_5000_10000, SCHED_LAT_10000_INF, SCHED_LAT_TOTAL, + SCHED_LAT_NR, SCHED_LAT_NR_COUNT, }; @@ -265,6 +266,7 @@ void task_ca_update_block(struct task_struct *tsk, u64 runtime) msecs = runtime >> 20; /* Proximately to speed up */ idx = get_sched_lat_count_idx(msecs); this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); + this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); rcu_read_unlock(); } @@ -298,6 +300,7 @@ void cpuacct_update_latency(struct sched_entity *se, u64 delta) msecs = delta >> 20; /* Proximately to speed up */ idx = get_sched_lat_count_idx(msecs); this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); + this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); rcu_read_unlock(); } @@ -1205,6 +1208,8 @@ static int sched_lat_stat_show(struct seq_file *sf, void *v) sched_lat_stat_gather(ca, s, SCHED_LAT_10000_INF)); seq_printf(sf, "total(ms): \t%llu\n", sched_lat_stat_gather(ca, s, SCHED_LAT_TOTAL) / 1000000); + seq_printf(sf, "nr: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_NR)); return 0; } -- Gitee From a8509fc191b4f4e1aced4af6ecdb8a77a0cb0057 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Thu, 21 May 2020 13:13:06 +0800 Subject: [PATCH 826/953] anolis: sched: Finer grain of sched latency ANBZ: #8657 Many samples fall between 10ms and 50ms. To display a more informative latency distribution, divide 10ms-50ms uniformly into 5 parts.
Example: $ cat /sys/fs/cgroup/cpuacct/a/cpuacct.wait_latency 0-1ms: 59726433 1-4ms: 167 4-7ms: 0 7-10ms: 0 10-20ms: 5 20-30ms: 0 30-40ms: 3 40-50ms: 0 50-100ms: 0 100-500ms: 0 500-1000ms: 0 1000-5000ms: 0 5000-10000ms: 0 >=10000ms: 0 total(ms): 45554 nr: 59726600 Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index aaff1693edd3..3f264a22a28d 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -59,7 +59,11 @@ enum sched_lat_count_t { SCHED_LAT_1_4, SCHED_LAT_4_7, SCHED_LAT_7_10, - SCHED_LAT_10_100, + SCHED_LAT_10_20, + SCHED_LAT_20_30, + SCHED_LAT_30_40, + SCHED_LAT_40_50, + SCHED_LAT_50_100, SCHED_LAT_100_500, SCHED_LAT_500_1000, SCHED_LAT_1000_5000, @@ -86,8 +90,16 @@ static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) idx = SCHED_LAT_4_7; else if (msecs < 10) idx = SCHED_LAT_7_10; + else if (msecs < 20) + idx = SCHED_LAT_10_20; + else if (msecs < 30) + idx = SCHED_LAT_20_30; + else if (msecs < 40) + idx = SCHED_LAT_30_40; + else if (msecs < 50) + idx = SCHED_LAT_40_50; else if (msecs < 100) - idx = SCHED_LAT_10_100; + idx = SCHED_LAT_50_100; else if (msecs < 500) idx = SCHED_LAT_100_500; else if (msecs < 1000) @@ -1194,8 +1206,16 @@ static int sched_lat_stat_show(struct seq_file *sf, void *v) sched_lat_stat_gather(ca, s, SCHED_LAT_4_7)); seq_printf(sf, "7-10ms: \t%llu\n", sched_lat_stat_gather(ca, s, SCHED_LAT_7_10)); - seq_printf(sf, "10-100ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_10_100)); + seq_printf(sf, "10-20ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_10_20)); + seq_printf(sf, "20-30ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_20_30)); + seq_printf(sf, "30-40ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_30_40)); + seq_printf(sf, "40-50ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_40_50)); + seq_printf(sf, "50-100ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_50_100)); seq_printf(sf, "100-500ms: \t%llu\n", sched_lat_stat_gather(ca, s, SCHED_LAT_100_500)); seq_printf(sf, "500-1000ms: \t%llu\n", -- Gitee From d9b0431388ffdff60f87fe470b388fc26ab8e5bb Mon Sep 17 00:00:00 2001 From: Erwei Deng Date: Thu, 27 Aug 2020 16:47:42 +0800 Subject: [PATCH 827/953] anolis: sched: get_sched_lat_count_idx optimization ANBZ: #8657 Optimize the get_sched_lat_count_idx() function. The original function uses a long if-else chain, which costs more branches and brings a small performance loss. Use another method with fewer if-else branches instead, and test the performance improvement: generate 10000 random numbers in each of the ranges below and run both methods, recording the total running time (us). See the result table: --------------------------------------- range | raw | own | perf --------------------------------------- [0, 10) | 163 | 57 | +65.03% [0, 50) | 209 | 81 | +61.24% [0, 100) | 174 | 131 | +24.71% [0, 1000) | 192 | 73 | +61.98% [0, 10000) | 203 | 79 | +61.08% [0, 100000) | 141 | 69 | +51.06% We can see that the new method gives the better result.
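For reference, the equivalence of the two mappings can be cross-checked from userspace with a minimal harness like the one below; this is not part of the patch, the bucket indices are written as plain integers in the same order as the enum, and the helper names are invented:

	/* Cross-check: build with "gcc -O2 latidx.c && ./a.out". */
	#include <assert.h>
	#include <stdio.h>

	/* idx_chain(): the original if-else chain, buckets 0..13 in enum order */
	static int idx_chain(unsigned long long ms)
	{
		if (ms < 1) return 0;		/* SCHED_LAT_0_1 */
		if (ms < 4) return 1;
		if (ms < 7) return 2;
		if (ms < 10) return 3;		/* SCHED_LAT_7_10 */
		if (ms < 20) return 4;
		if (ms < 30) return 5;
		if (ms < 40) return 6;
		if (ms < 50) return 7;		/* SCHED_LAT_40_50 */
		if (ms < 100) return 8;		/* SCHED_LAT_50_100 */
		if (ms < 500) return 9;
		if (ms < 1000) return 10;
		if (ms < 5000) return 11;
		if (ms < 10000) return 12;
		return 13;			/* SCHED_LAT_10000_INF */
	}

	/* idx_arith(): the arithmetic version introduced by this patch */
	static int idx_arith(unsigned long long ms)
	{
		if (ms < 1)
			return 0;
		if (ms < 10)
			return 0 + (ms + 2) / 3;	/* 1-4, 4-7, 7-10 */
		if (ms < 50)
			return 3 + ms / 10;		/* 10-20 .. 40-50 */
		if (ms < 100)
			return 8;
		if (ms < 1000)
			return 9 + ms / 500;		/* 100-500, 500-1000 */
		if (ms < 10000)
			return 11 + ms / 5000;		/* 1000-5000, 5000-10000 */
		return 13;
	}

	int main(void)
	{
		for (unsigned long long ms = 0; ms <= 20000; ms++)
			assert(idx_chain(ms) == idx_arith(ms));
		puts("both mappings agree for 0..20000 ms");
		return 0;
	}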
Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 44 +++++++++++++----------------------------- 1 file changed, 13 insertions(+), 31 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 3f264a22a28d..a15a8e761188 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -80,38 +80,20 @@ struct sched_cgroup_lat_stat_cpu { static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) { - enum sched_lat_count_t idx; - if (msecs < 1) - idx = SCHED_LAT_0_1; - else if (msecs < 4) - idx = SCHED_LAT_1_4; - else if (msecs < 7) - idx = SCHED_LAT_4_7; - else if (msecs < 10) - idx = SCHED_LAT_7_10; - else if (msecs < 20) - idx = SCHED_LAT_10_20; - else if (msecs < 30) - idx = SCHED_LAT_20_30; - else if (msecs < 40) - idx = SCHED_LAT_30_40; - else if (msecs < 50) - idx = SCHED_LAT_40_50; - else if (msecs < 100) - idx = SCHED_LAT_50_100; - else if (msecs < 500) - idx = SCHED_LAT_100_500; - else if (msecs < 1000) - idx = SCHED_LAT_500_1000; - else if (msecs < 5000) - idx = SCHED_LAT_1000_5000; - else if (msecs < 10000) - idx = SCHED_LAT_5000_10000; - else - idx = SCHED_LAT_10000_INF; - - return idx; + return SCHED_LAT_0_1; + if (msecs < 10) + return SCHED_LAT_0_1 + (msecs + 2) / 3; + if (msecs < 50) + return SCHED_LAT_7_10 + msecs / 10; + if (msecs < 100) + return SCHED_LAT_50_100; + if (msecs < 1000) + return SCHED_LAT_100_500 + (msecs / 500); + if (msecs < 10000) + return SCHED_LAT_1000_5000 + (msecs / 5000); + + return SCHED_LAT_10000_INF; } /* track CPU usage of a group of tasks and its child groups */ -- Gitee From c26807ffb16a33bbf947430d446717b34621210a Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 9 Jun 2021 17:54:47 +0800 Subject: [PATCH 828/953] anolis: sched: Introduce load 1/5/15 for running tasks ANBZ: #8657 Traditional load 1/5/15 includes both running and uninterruptible tasks, sometimes we need to distinguish the two types of loads, thus adding a separate load 1/5/15 for running tasks only. 
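As background for the math, both avenrun[] and the new avenrun_r[] follow the kernel's 11-bit fixed-point exponential-decay recurrence. A minimal userspace sketch of that recurrence, with the constants taken from include/linux/sched/loadavg.h (illustration only, not patch code):

	#include <stdio.h>

	#define FSHIFT	11			/* bits of precision */
	#define FIXED_1	(1UL << FSHIFT)		/* 1.0 in fixed point */
	#define EXP_1	1884			/* 1/exp(5sec/1min) in fixed point */

	/* One ~5s sample: load = load*exp + active*(1-exp), rounded up while rising */
	static unsigned long calc_load(unsigned long load, unsigned long exp,
				       unsigned long active)
	{
		unsigned long newload = load * exp + active * (FIXED_1 - exp);

		if (active >= load)
			newload += FIXED_1 - 1;
		return newload / FIXED_1;
	}

	int main(void)
	{
		unsigned long avenrun_r = 0;
		int tick;

		/* Feed one minute (12 ticks of ~5s) of 3 runnable tasks. */
		for (tick = 0; tick < 12; tick++)
			avenrun_r = calc_load(avenrun_r, EXP_1, 3 * FIXED_1);

		/* Same LOAD_INT/LOAD_FRAC split that /proc/loadavg uses. */
		printf("running load1 ~ %lu.%02lu\n", avenrun_r >> FSHIFT,
		       ((avenrun_r & (FIXED_1 - 1)) * 100) >> FSHIFT);
		return 0;
	}

With a constant 3 runnable tasks this converges toward 3.00, reaching roughly 1.9 after the first minute, which is the expected 1-minute decay behaviour.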
Signed-off-by: Xunlei Pang Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/loadavg.c | 2 +- include/linux/sched.h | 5 +- include/linux/sched/loadavg.h | 10 ++++ kernel/sched/core.c | 8 ++++ kernel/sched/cpuacct.c | 54 +++++++++++++++++---- kernel/sched/loadavg.c | 89 +++++++++++++++++++++++++++++++++-- kernel/sched/sched.h | 13 +++++ 7 files changed, 163 insertions(+), 18 deletions(-) diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 7205049d2935..73d956336bfa 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -27,8 +27,8 @@ static int loadavg_proc_show(struct seq_file *m, void *v) init_tsk = task_active_pid_ns(current)->child_reaper; get_task_struct(init_tsk); read_unlock(&tasklist_lock); + get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0, false); - get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0); cpuset_cpus_allowed(init_tsk, &cpuset_allowed); for_each_cpu(i, &cpuset_allowed) nr_R += task_ca_running(init_tsk, i); diff --git a/include/linux/sched.h b/include/linux/sched.h index fbdafb8f5440..48744e4755d0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2534,7 +2534,7 @@ void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, struct cpuacct_usage_result *res); unsigned long task_ca_running(struct task_struct *tsk, int cpu); void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, - unsigned long offset, int shift); + unsigned long offset, int shift, bool running); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); @@ -2548,7 +2548,8 @@ static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) } static inline void get_cgroup_avenrun(struct task_struct *tsk, - unsigned long *loads, unsigned long offset, int shift) { } + unsigned long *loads, unsigned long offset, + int shift, bool running) { } static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 83ec54b65e79..20165894027d 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -13,8 +13,18 @@ * 11 bit fractions. 
*/ extern unsigned long avenrun[]; /* Load averages */ +extern unsigned long avenrun_r[]; /* R load averages */ + extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); +#ifdef CONFIG_SCHED_SLI +extern void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift); +#else +static inline void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift) { } +#endif + #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<__lock); rq->nr_running = 0; rq->calc_load_active = 0; +#ifdef CONFIG_SCHED_SLI + rq->calc_load_active_r = 0; +#endif rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs); init_rt_rq(&rq->rt); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index a15a8e761188..ef183fb41911 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -111,6 +111,9 @@ struct cpuacct { u64 next_load_update; #endif unsigned long avenrun[3]; +#ifdef CONFIG_SCHED_SLI + unsigned long avenrun_r[3]; +#endif }; static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) @@ -397,6 +400,10 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime2); } + ca->avenrun[0] = ca->avenrun[1] = ca->avenrun[2] = 0; +#ifdef CONFIG_SCHED_SLI + ca->avenrun_r[0] = ca->avenrun_r[1] = ca->avenrun_r[2] = 0; +#endif return &ca->css; #ifdef CONFIG_SCHED_SLI @@ -643,20 +650,24 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) static unsigned long ca_running(struct cpuacct *ca, int cpu); static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, - unsigned long offset, int shift) + unsigned long offset, int shift, bool running) { unsigned long *avenrun; - avenrun = ca->avenrun; + if (running) + avenrun = ca->avenrun_r; + else + avenrun = ca->avenrun; + loads[0] = (avenrun[0] + offset) << shift; loads[1] = (avenrun[1] + offset) << shift; loads[2] = (avenrun[2] + offset) << shift; } void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, - unsigned long offset, int shift) + unsigned long offset, int shift, bool running) { - __get_cgroup_avenrun(task_ca(tsk), loads, offset, shift); + __get_cgroup_avenrun(task_ca(tsk), loads, offset, shift, running); } unsigned long task_ca_running(struct task_struct *tsk, int cpu) @@ -859,22 +870,36 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, static void cpuacct_calc_load(struct cpuacct *acct) { - if (acct != &root_cpuacct) { - long active = 0; - int cpu; + long active = 0, active_r = 0, nr_r; + int cpu; + if (acct != &root_cpuacct) { for_each_possible_cpu(cpu) { - active += ca_running(acct, cpu); + nr_r = ca_running(acct, cpu); + active += nr_r; + active_r += nr_r; active += ca_uninterruptible(acct, cpu); } active = active > 0 ? active * FIXED_1 : 0; acct->avenrun[0] = calc_load(acct->avenrun[0], EXP_1, active); acct->avenrun[1] = calc_load(acct->avenrun[1], EXP_5, active); acct->avenrun[2] = calc_load(acct->avenrun[2], EXP_15, active); + + active_r = active_r > 0 ? 
active_r * FIXED_1 : 0; + acct->avenrun_r[0] = calc_load(acct->avenrun_r[0], + EXP_1, active_r); + acct->avenrun_r[1] = calc_load(acct->avenrun_r[1], + EXP_5, active_r); + acct->avenrun_r[2] = calc_load(acct->avenrun_r[2], + EXP_15, active_r); } else { acct->avenrun[0] = avenrun[0]; acct->avenrun[1] = avenrun[1]; acct->avenrun[2] = avenrun[2]; + + acct->avenrun_r[0] = avenrun_r[0]; + acct->avenrun_r[1] = avenrun_r[1]; + acct->avenrun_r[2] = avenrun_r[2]; } } @@ -994,7 +1019,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; u64 nr_migrations = 0; struct cpuacct_alistats *alistats; - unsigned long load, avnrun[3]; + unsigned long load, avnrun[3], avnrun_r[3]; unsigned long nr_run = 0, nr_uninter = 0; int cpu; @@ -1030,7 +1055,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_uninter += ca_uninterruptible(ca, cpu); } - __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0); + __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0, false); + __get_cgroup_avenrun(ca, avnrun_r, FIXED_1/200, 0, true); } else { struct kernel_cpustat *kcpustat; @@ -1054,6 +1080,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_uninter = nr_uninterruptible(); get_avenrun(avnrun, FIXED_1/200, 0); + get_avenrun_r(avnrun_r, FIXED_1/200, 0); } seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); @@ -1079,6 +1106,13 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter); seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations); + load = LOAD_INT(avnrun_r[0]) * 100 + LOAD_FRAC(avnrun_r[0]); + seq_printf(sf, "running load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[1]) * 100 + LOAD_FRAC(avnrun_r[1]); + seq_printf(sf, "running load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[2]) * 100 + LOAD_FRAC(avnrun_r[2]); + seq_printf(sf, "running load average(15min) %lld\n", (u64)load); + return 0; } diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index c11a84d4676d..13761c5d1bfb 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -56,8 +56,10 @@ /* Variables and functions for calc_load */ atomic_long_t calc_load_tasks; -unsigned long calc_load_update; +atomic_long_t calc_load_tasks_r; unsigned long avenrun[3]; +unsigned long avenrun_r[3]; +unsigned long calc_load_update; EXPORT_SYMBOL(avenrun); /* should be removed */ /** @@ -90,6 +92,29 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) return delta; } +#ifdef CONFIG_SCHED_SLI +void get_avenrun_r(unsigned long *loads, unsigned long offset, int shift) +{ + loads[0] = (avenrun_r[0] + offset) << shift; + loads[1] = (avenrun_r[1] + offset) << shift; + loads[2] = (avenrun_r[2] + offset) << shift; +} + +long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + long nr_active, delta = 0; + + nr_active = this_rq->nr_running - adjust; + + if (nr_active != this_rq->calc_load_active_r) { + delta = nr_active - this_rq->calc_load_active_r; + this_rq->calc_load_active_r = nr_active; + } + + return delta; +} +#endif + /** * fixed_power_int - compute: x^n, in O(log n) time * @@ -203,6 +228,9 @@ calc_load_n(unsigned long load, unsigned long exp, * When making the ILB scale, we should try to pull this in as well. 
*/ static atomic_long_t calc_load_nohz[2]; +#ifdef CONFIG_SCHED_SLI +static atomic_long_t calc_load_nohz_r[2]; +#endif static int calc_load_idx; static inline int calc_load_write_idx(void) @@ -233,13 +261,17 @@ static inline int calc_load_read_idx(void) static void calc_load_nohz_fold(struct rq *rq) { long delta; + int idx = calc_load_write_idx(); delta = calc_load_fold_active(rq, 0); - if (delta) { - int idx = calc_load_write_idx(); - + if (delta) atomic_long_add(delta, &calc_load_nohz[idx]); - } + +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_nohz_r[idx]); +#endif } void calc_load_nohz_start(void) @@ -291,6 +323,19 @@ static long calc_load_nohz_read(void) return delta; } +#ifdef CONFIG_SCHED_SLI +static long calc_load_nohz_r_read(void) +{ + int idx = calc_load_read_idx(); + long delta = 0; + + if (atomic_long_read(&calc_load_nohz_r[idx])) + delta = atomic_long_xchg(&calc_load_nohz_r[idx], 0); + + return delta; +} +#endif + /* * NO_HZ can leave us missing all per-CPU ticks calling * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into @@ -320,6 +365,16 @@ static void calc_global_nohz(void) avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); +#ifdef CONFIG_SCHED_SLI + /* Calc avenrun_r */ + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? active * FIXED_1 : 0; + + avenrun_r[0] = calc_load_n(avenrun_r[0], EXP_1, active, n); + avenrun_r[1] = calc_load_n(avenrun_r[1], EXP_5, active, n); + avenrun_r[2] = calc_load_n(avenrun_r[2], EXP_15, active, n); +#endif + WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); } @@ -336,6 +391,7 @@ static void calc_global_nohz(void) #else /* !CONFIG_NO_HZ_COMMON */ static inline long calc_load_nohz_read(void) { return 0; } +static inline long calc_load_nohz_r_read(void) { return 0; } static inline void calc_global_nohz(void) { } #endif /* CONFIG_NO_HZ_COMMON */ @@ -369,6 +425,23 @@ void calc_global_load(void) avenrun[1] = calc_load(avenrun[1], EXP_5, active); avenrun[2] = calc_load(avenrun[2], EXP_15, active); +#ifdef CONFIG_SCHED_SLI + /* + * Calculate load 1/5/15 for running tasks only. We do not + * invent common functions to keep the same layout as upstream. + */ + delta = calc_load_nohz_r_read(); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); + + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? 
active * FIXED_1 : 0; + avenrun_r[0] = calc_load(avenrun_r[0], EXP_1, active); + avenrun_r[1] = calc_load(avenrun_r[1], EXP_5, active); + avenrun_r[2] = calc_load(avenrun_r[2], EXP_15, active); +#endif + WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); if (!async_load_calc_enabled()) @@ -396,5 +469,11 @@ void calc_global_load_tick(struct rq *this_rq) if (delta) atomic_long_add(delta, &calc_load_tasks); +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(this_rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); +#endif + this_rq->calc_load_update += LOAD_FREQ; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 34720a51c5df..e5d9f3680897 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -113,12 +113,22 @@ extern __read_mostly int scheduler_running; extern unsigned long calc_load_update; extern atomic_long_t calc_load_tasks; +extern atomic_long_t calc_load_tasks_r; extern unsigned int sysctl_sched_child_runs_first; extern void calc_global_load_tick(struct rq *this_rq); extern long calc_load_fold_active(struct rq *this_rq, long adjust); +#ifdef CONFIG_SCHED_SLI +extern long calc_load_fold_active_r(struct rq *this_rq, long adjust); +#else +static inline long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + return 0; +} +#endif + extern void call_trace_sched_update_nr_running(struct rq *rq, int count); extern unsigned int sysctl_sched_rt_period; @@ -1125,6 +1135,9 @@ struct rq { /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; +#ifdef CONFIG_SCHED_SLI + long calc_load_active_r; +#endif #ifdef CONFIG_SCHED_HRTICK #ifdef CONFIG_SMP -- Gitee From f8d409ba6cd08dc87f33f8bcb342039428441139 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 15 Jun 2021 11:17:39 +0800 Subject: [PATCH 829/953] anolis: make the rich container support k8s. ANBZ: #8657 For k8s, multiple containers share one pid_namespace, and the child reaper lives in a special "pause" container among them. Then we should use current rather than the child reaper to get the information, so this deserves a runtime interface. Introduce "/proc/sys/kernel/rich_container_source": - 0 means to use cgroups of "current" by default. - 1 means to use cgroups of "child reaper".
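A hypothetical userspace helper for flipping the knob at runtime could look like the sketch below; only the sysctl path comes from this patch, while the helper name and error handling are illustrative:

	#include <stdio.h>

	#define KNOB "/proc/sys/kernel/rich_container_source"

	/* 0: account against current's cgroups, 1: against the child reaper's */
	static int set_rich_container_source(int val)
	{
		FILE *f = fopen(KNOB, "w");

		if (!f)
			return -1;	/* e.g. kernel without CONFIG_RICH_CONTAINER */
		fprintf(f, "%d\n", val);
		return fclose(f);
	}

	int main(void)
	{
		/* k8s-style pods: the reaper is "pause", so follow current. */
		if (set_rich_container_source(0))
			perror(KNOB);
		return 0;
	}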
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- drivers/base/cpu.c | 14 +----- fs/proc/loadavg.c | 8 +-- fs/proc/meminfo.c | 19 +------ fs/proc/stat.c | 10 ++-- fs/proc/uptime.c | 4 +- include/linux/memcontrol.h | 9 ++++ include/linux/pid_namespace.h | 8 +++ include/linux/sched.h | 40 +++++++++++---- kernel/cgroup/cpuset.c | 24 +++++++++ kernel/pid_namespace.c | 1 + kernel/sched/cpuacct.c | 94 ++++++++++++++++++++++++----------- kernel/sysctl.c | 10 ++++ mm/memcontrol.c | 34 +++++++++++++ 13 files changed, 199 insertions(+), 76 deletions(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 925dacc2d266..91d16e663099 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -219,29 +219,17 @@ static ssize_t show_cpus_attr(struct device *dev, { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); struct cpumask cpuset_allowed; - struct task_struct *init_tsk; bool rich_container; rcu_read_lock(); rich_container = in_rich_container(current); - if (rich_container) { - read_lock(&tasklist_lock); - init_tsk = task_active_pid_ns(current)->child_reaper; - get_task_struct(init_tsk); - read_unlock(&tasklist_lock); - } else { - init_tsk = NULL; - } rcu_read_unlock(); if (rich_container && !strcmp(attr->attr.name, "online")) - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + rich_container_get_cpuset_cpus(&cpuset_allowed); else cpumask_copy(&cpuset_allowed, ca->map); - if (init_tsk) - put_task_struct(init_tsk); - return cpumap_print_to_pagebuf(true, buf, &cpuset_allowed); } diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 73d956336bfa..7a7e443a58c8 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -22,16 +22,18 @@ static int loadavg_proc_show(struct seq_file *m, void *v) rcu_read_lock(); if (in_rich_container(current)) { struct task_struct *init_tsk; + enum rich_container_source from; read_lock(&tasklist_lock); init_tsk = task_active_pid_ns(current)->child_reaper; get_task_struct(init_tsk); read_unlock(&tasklist_lock); - get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0, false); - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + rich_container_source(&from); + rich_container_get_avenrun(from, init_tsk, avnrun, FIXED_1/200, 0, false); + rich_container_get_cpuset_cpus(&cpuset_allowed); for_each_cpu(i, &cpuset_allowed) - nr_R += task_ca_running(init_tsk, i); + nr_R += rich_container_get_running(from, init_tsk, i); put_task_struct(init_tsk); } else { get_avenrun(avnrun, FIXED_1/200, 0); diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index e3246083c8ce..fc14e63d0c15 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -45,24 +45,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #ifdef CONFIG_MEMCG rcu_read_lock(); if (in_rich_container(current)) { - struct task_struct *init_tsk; - - /* - * current may be in a subcgroup, use reaper instead. - * We assume the reaper always be in the container's - * top group. 
- */ - read_lock(&tasklist_lock); - init_tsk = task_active_pid_ns(current)->child_reaper; - get_task_struct(init_tsk); - read_unlock(&tasklist_lock); - - memcg = mem_cgroup_from_task(init_tsk); - if (mem_cgroup_is_root(memcg)) - memcg = NULL; - else - css_get(&memcg->css); - put_task_struct(init_tsk); + memcg = rich_container_get_memcg(); } rcu_read_unlock(); #endif diff --git a/fs/proc/stat.c b/fs/proc/stat.c index d2ec9dcddb31..9c1d734f0069 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -94,6 +94,7 @@ static int show_stat(struct seq_file *p, void *v) unsigned int nr_runnable = 0; struct task_struct *init_tsk = NULL; struct cpuacct_usage_result res; + enum rich_container_source from; bool rich_container; user = nice = system = idle = iowait = @@ -113,9 +114,10 @@ static int show_stat(struct seq_file *p, void *v) read_unlock(&tasklist_lock); boottime.tv_sec += init_tsk->start_time / NSEC_PER_SEC; - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + rich_container_get_cpuset_cpus(&cpuset_allowed); + rich_container_source(&from); for_each_cpu(i, &cpuset_allowed) { - cpuacct_get_usage_result(init_tsk, i, &res); + rich_container_get_usage(from, init_tsk, i, &res); user += res.user; nice += res.nice; system += res.system; @@ -176,7 +178,7 @@ static int show_stat(struct seq_file *p, void *v) rcu_read_lock(); if (rich_container) { for_each_cpu(i, &cpuset_allowed) { - cpuacct_get_usage_result(init_tsk, i, &res); + rich_container_get_usage(from, init_tsk, i, &res); seq_printf(p, "cpu%d", seq++); seq_put_decimal_ull(p, " ", @@ -244,7 +246,7 @@ static int show_stat(struct seq_file *p, void *v) rcu_read_lock(); if (rich_container) { for_each_cpu(i, &cpuset_allowed) - nr_runnable += task_ca_running(init_tsk, i); + nr_runnable += rich_container_get_running(from, init_tsk, i); } else nr_runnable = nr_running(); rcu_read_unlock(); diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 95622fd62885..591909b4d111 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -26,6 +26,7 @@ static int uptime_proc_show(struct seq_file *m, void *v) rcu_read_lock(); if (in_rich_container(current)) { + enum rich_container_source from; struct task_struct *init_tsk; struct cpuacct_usage_result res; @@ -34,8 +35,9 @@ static int uptime_proc_show(struct seq_file *m, void *v) get_task_struct(init_tsk); read_unlock(&tasklist_lock); + rich_container_source(&from); for_each_possible_cpu(i) { - cpuacct_get_usage_result(init_tsk, i, &res); + rich_container_get_usage(from, init_tsk, i, &res); idle_nsec += res.idle; } uptime = timespec64_sub(uptime, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 95aa1c4cbde4..e8498d088bb2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1162,6 +1162,15 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, void memcg_meminfo(struct mem_cgroup *memcg, struct sysinfo *info, struct sysinfo_ext *ext); +#ifdef CONFIG_RICH_CONTAINER +struct mem_cgroup *rich_container_get_memcg(void); +#else +static inline struct mem_cgroup *rich_container_get_memcg(void) +{ + return NULL; +} +#endif + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index a71999081213..d913d86d29d8 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -125,6 +125,8 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) #ifdef CONFIG_RICH_CONTAINER extern int sysctl_rich_container_enable; +extern int sysctl_rich_container_source; + 
static inline bool in_rich_container(struct task_struct *tsk) { if (sysctl_rich_container_enable == 0) @@ -132,11 +134,17 @@ static inline bool in_rich_container(struct task_struct *tsk) return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); } + +void rich_container_get_cpuset_cpus(struct cpumask *pmask); #else static inline bool in_rich_container(struct task_struct *tsk) { return false; } + +static inline void rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ +} #endif #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 48744e4755d0..6faee810d18a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2528,28 +2528,50 @@ struct cpuacct_usage_result { u64 steal, iowait, idle, guest, guest_nice; }; +enum rich_container_source { + RICH_CONTAINER_REAPER, + RICH_CONTAINER_CURRENT, +}; + #ifdef CONFIG_RICH_CONTAINER +void rich_container_source(enum rich_container_source *from); bool child_cpuacct(struct task_struct *tsk); -void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, struct cpuacct_usage_result *res); -unsigned long task_ca_running(struct task_struct *tsk, int cpu); -void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, +unsigned long rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu); +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, unsigned long offset, int shift, bool running); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); #else -static inline void cpuacct_get_usage_result(struct task_struct *tsk, - int cpu, struct cpuacct_usage_result *res) { } +static inline void +rich_container_source(enum rich_container_source *from) +{ +} -static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) +static inline void +rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ +} + +static inline unsigned long +rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) { return 0; } -static inline void get_cgroup_avenrun(struct task_struct *tsk, - unsigned long *loads, unsigned long offset, - int shift, bool running) { } +static inline void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ +} static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index a0edf8070dc3..5d7357fb4494 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -43,6 +43,7 @@ #include #include #include +#include DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); @@ -4002,6 +4003,29 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) spin_unlock_irqrestore(&callback_lock, flags); } +#ifdef CONFIG_RICH_CONTAINER +void rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ + unsigned long flags; + struct task_struct *p; + + rcu_read_lock(); + if (sysctl_rich_container_source == 1) { + read_lock(&tasklist_lock); + p = task_active_pid_ns(current)->child_reaper; + read_unlock(&tasklist_lock); + + } else { + p = 
current; + } + + spin_lock_irqsave(&callback_lock, flags); + guarantee_online_cpus(p, pmask); + spin_unlock_irqrestore(&callback_lock, flags); + rcu_read_unlock(); +} +#endif + /** * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. * @tsk: pointer to task_struct with which the scheduler is struggling diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 26a3517568ee..bd99e88d74eb 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -27,6 +27,7 @@ #ifdef CONFIG_RICH_CONTAINER int sysctl_rich_container_enable; +int sysctl_rich_container_source; /* 0 - current; 1 - child_reaper */ #endif static DEFINE_MUTEX(pid_caches_mutex); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index ef183fb41911..a1a0e0a23ed9 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -664,17 +664,6 @@ static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, loads[2] = (avenrun[2] + offset) << shift; } -void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, - unsigned long offset, int shift, bool running) -{ - __get_cgroup_avenrun(task_ca(tsk), loads, offset, shift, running); -} - -unsigned long task_ca_running(struct task_struct *tsk, int cpu) -{ - return ca_running(task_ca(tsk), cpu); -} - static inline struct task_group *cgroup_tg(struct cgroup *cgrp) { return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), @@ -1002,16 +991,6 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } -void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, - struct cpuacct_usage_result *res) -{ - struct cpuacct *ca = task_ca(tsk); - struct cgroup *cgrp = ca->css.cgroup; - struct task_group *tg = cgroup_tg(cgrp); - - __cpuacct_get_usage_result(ca, cpu, tg, res); -} - static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) { struct cpuacct *ca = css_ca(seq_css(sf)); @@ -1543,11 +1522,11 @@ bool child_cpuacct(struct task_struct *tsk) return false; } + bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { struct cpumask cpuset_allowed; - struct task_struct *init_tsk; bool in_rich; int i, id = 0; @@ -1559,12 +1538,7 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, *rich_container = true; - read_lock(&tasklist_lock); - init_tsk = task_active_pid_ns(current)->child_reaper; - get_task_struct(init_tsk); - read_unlock(&tasklist_lock); - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); - put_task_struct(init_tsk); + rich_container_get_cpuset_cpus(&cpuset_allowed); *total = cpumask_weight(&cpuset_allowed); if (cpumask_test_cpu(cpu, &cpuset_allowed)) { @@ -1580,4 +1554,68 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, /* Hide this cpu in the container */ return true; } + +void rich_container_source(enum rich_container_source *from) +{ + if (sysctl_rich_container_source == 1) + *from = RICH_CONTAINER_REAPER; + else + *from = RICH_CONTAINER_CURRENT; +} + +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ + struct cpuacct *ca_src; + struct task_group *tg; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + tg = cgroup_tg(ca_src->css.cgroup); + __cpuacct_get_usage_result(ca_src, cpu, tg, res); + rcu_read_unlock(); +} + +unsigned long 
rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) +{ + struct cpuacct *ca_src; + unsigned long nr; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + nr = ca_running(ca_src, cpu); + rcu_read_unlock(); + + return nr; +} + +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ + struct cpuacct *ca_src; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + __get_cgroup_avenrun(ca_src, loads, offset, shift, running); + rcu_read_unlock(); +} + #endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6d0bcc3c205e..15aaa24bf595 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "../lib/kstrtox.h" @@ -2089,6 +2090,15 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "rich_container_source", + .data = &sysctl_rich_container_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, #endif { } }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1ff9a77ff343..3639bd87d6c6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -64,6 +64,7 @@ #include #include #include +#include #include "internal.h" #include #include @@ -7932,6 +7933,39 @@ static int __init mem_cgroup_swap_init(void) } subsys_initcall(mem_cgroup_swap_init); +#endif /* CONFIG_MEMCG_SWAP */ + +#ifdef CONFIG_RICH_CONTAINER +static inline struct mem_cgroup *css_memcg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct mem_cgroup, css) : NULL; +} + +/* with rcu lock held */ +struct mem_cgroup *rich_container_get_memcg(void) +{ + struct cgroup_subsys_state *css; + struct mem_cgroup *memcg_src; + + if (sysctl_rich_container_source == 1) + css = NULL; + else + css = task_css(current, memory_cgrp_id); + + if (css) { + memcg_src = css_memcg(css); + } else { + read_lock(&tasklist_lock); + memcg_src = mem_cgroup_from_task(task_active_pid_ns(current)->child_reaper); + read_unlock(&tasklist_lock); + } + + if (css_tryget(&memcg_src->css)) + return memcg_src; + else + return NULL; +} + void memcg_meminfo(struct mem_cgroup *memcg, struct sysinfo *info, struct sysinfo_ext *ext) { -- Gitee From fd1b3083e5eb89322208dc5c957512d6f7100246 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 15 Jun 2021 15:00:35 +0800 Subject: [PATCH 830/953] anolis: cpuinfo: Add cpuinfo support of cpu quota and cpu share ANBZ: #8657 lxcfs supports rich container cpuinfo from cpu quota, also sigma cpushare has the requirement of getting cpuinfo from cpu.shares. Thus, introduce new knobs to support these: /proc/sys/kernel/rich_container_cpuinfo_source - 0 uses cpu quota "quota/period" by default. It will fall back to use cpuset.cpus if quota not set. - 1 uses cpuset.cpus. - 2 uses cpushare "cpu.shares/1024" (1024 can be configured below) /proc/sys/kernel/rich_container_cpuinfo_sharesbase - when rich_container_cpuinfo_source is 2, this is the divisor. 
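For illustration, the CPU count each source implies can be reproduced in userspace as below; the helper names are invented, and the ceiling-plus-clamp arithmetic mirrors the patch (result clamped to 1..online CPUs):

	#include <stdio.h>

	static long clamp_cpus(long cpus, long online)
	{
		if (cpus < 1)
			return 1;
		return cpus > online ? online : cpus;
	}

	/* source 0: ceil(quota/period); quota == -1 falls back to cpuset.cpus */
	static long cpus_from_quota(long quota_us, long period_us, long online)
	{
		if (quota_us == -1)
			return -1;	/* caller should use cpuset.cpus instead */
		return clamp_cpus((quota_us + period_us - 1) / period_us, online);
	}

	/* source 2: ceil(cpu.shares/sharesbase), sharesbase defaults to 1024 */
	static long cpus_from_shares(long shares, long sharesbase, long online)
	{
		return clamp_cpus((shares + sharesbase - 1) / sharesbase, online);
	}

	int main(void)
	{
		printf("quota 150ms/100ms -> %ld cpus\n",
		       cpus_from_quota(150000, 100000, 8));	/* ceil(1.5) = 2 */
		printf("cpu.shares 3072   -> %ld cpus\n",
		       cpus_from_shares(3072, 1024, 8));	/* 3072/1024 = 3 */
		return 0;
	}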
Note that, after faking cpuinfo in dockers, it's impossible to make /proc/stat match them accordingly, but we only care about the following stats not /proc/stat: /proc/cpuinfo, sysfs online and /proc/meminfo Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- drivers/base/cpu.c | 13 +++++- include/linux/pid_namespace.h | 15 +++++++ include/linux/sched.h | 9 +++- kernel/sched/core.c | 4 +- kernel/sched/cpuacct.c | 79 ++++++++++++++++++++++++++++++++++- kernel/sched/sched.h | 3 ++ kernel/sysctl.c | 17 ++++++++ 7 files changed, 134 insertions(+), 6 deletions(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 91d16e663099..d186a8b17a4c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -219,14 +219,23 @@ static ssize_t show_cpus_attr(struct device *dev, { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; bool rich_container; rcu_read_lock(); rich_container = in_rich_container(current); rcu_read_unlock(); - if (rich_container && !strcmp(attr->attr.name, "online")) - rich_container_get_cpuset_cpus(&cpuset_allowed); + if (rich_container && !strcmp(attr->attr.name, "online")) { + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + + rich_container_get_cpus(scenario, &cpuset_allowed); + + put_task_struct(scenario); + } else cpumask_copy(&cpuset_allowed, ca->map); diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index d913d86d29d8..9da7d0da722c 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -126,6 +126,16 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) #ifdef CONFIG_RICH_CONTAINER extern int sysctl_rich_container_enable; extern int sysctl_rich_container_source; +extern int sysctl_rich_container_cpuinfo_source; +extern unsigned int sysctl_rich_container_cpuinfo_sharesbase; + +static inline struct task_struct *rich_container_get_scenario(void) +{ + if (sysctl_rich_container_source == 1) + return task_active_pid_ns(current)->child_reaper; + + return current; +} static inline bool in_rich_container(struct task_struct *tsk) { @@ -145,6 +155,11 @@ static inline bool in_rich_container(struct task_struct *tsk) static inline void rich_container_get_cpuset_cpus(struct cpumask *pmask) { } + +static inline struct task_struct *rich_container_get_scenario(void) +{ + return NULL; +} #endif #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 6faee810d18a..5ce57c478069 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2547,7 +2547,9 @@ void rich_container_get_avenrun(enum rich_container_source from, bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); -#else +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask); + +#else /* CONFIG_RICH_CONTAINER */ static inline void rich_container_source(enum rich_container_source *from) { @@ -2578,6 +2580,11 @@ static inline bool check_rich_container(unsigned int cpu, unsigned int *index, { return false; } + +static inline +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ +} #endif #ifdef CONFIG_SCHED_SLI diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a38518b721db..b482fbb0cee7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11172,7 +11172,7 @@ static int tg_set_cfs_quota(struct 
task_group *tg, long cfs_quota_us) return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_quota(struct task_group *tg) +long tg_get_cfs_quota(struct task_group *tg) { u64 quota_us; @@ -11200,7 +11200,7 @@ static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_period(struct task_group *tg) +long tg_get_cfs_period(struct task_group *tg) { u64 cfs_period_us; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index a1a0e0a23ed9..71abcc0b79c5 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -1512,6 +1512,77 @@ late_initcall_sync(async_load_calc_init); #endif #ifdef CONFIG_RICH_CONTAINER + +/* 0 - cpu quota; 1 - cpuset.cpus; 2 - cpu.shares */ +int sysctl_rich_container_cpuinfo_source; +/* when cpu.shares */ +unsigned int sysctl_rich_container_cpuinfo_sharesbase = 1024; + +static inline struct task_group *css_tg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct task_group, css) : NULL; +} + +static inline struct task_group *task_tg(struct task_struct *tsk) +{ + return css_tg(task_css(tsk, cpu_cgrp_id)); +} + +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ + struct task_group *tg; + int i, cpus; + + /* cfs quota source */ + if (sysctl_rich_container_cpuinfo_source == 0) { + long quota, period; + + rcu_read_lock(); + tg = task_tg(tsk); + quota = tg_get_cfs_quota(tg); + period = tg_get_cfs_period(tg); + rcu_read_unlock(); + + if (quota == -1) { + /* Fallback to use cpuset.cpus if quota not set */ + goto cpuset_source; + } else { + /* period can't be 0 */ + cpus = (quota + period - 1) / period; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + } + + return; + } + + /* cpu.shares source */ + if (sysctl_rich_container_cpuinfo_source == 2) { + unsigned long shares; + + rcu_read_lock(); + tg = task_tg(tsk); + shares = scale_load_down(tg->shares); + rcu_read_unlock(); + + /* sysctl_rich_container_cpuinfo_sharesbase can't be 0 */ + cpus = (shares + sysctl_rich_container_cpuinfo_sharesbase - 1) / + sysctl_rich_container_cpuinfo_sharesbase; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + + return; + } + +cpuset_source: + /* cpuset.cpus source */ + cpuset_cpus_allowed(tsk, pmask); +} + bool child_cpuacct(struct task_struct *tsk) { struct cpuacct *ca = task_ca(tsk); @@ -1527,6 +1598,7 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; bool in_rich; int i, id = 0; @@ -1538,7 +1610,12 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, *rich_container = true; - rich_container_get_cpuset_cpus(&cpuset_allowed); + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + rich_container_get_cpus(scenario, &cpuset_allowed); + put_task_struct(scenario); *total = cpumask_weight(&cpuset_allowed); if (cpumask_test_cpu(cpu, &cpuset_allowed)) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index e5d9f3680897..acd1436fba19 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3613,4 +3613,7 @@ static inline bool async_load_calc_enabled(void) } #endif +long 
tg_get_cfs_quota(struct task_group *tg); +long tg_get_cfs_period(struct task_group *tg); + #endif /* _KERNEL_SCHED_SCHED_H */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 15aaa24bf595..777baff4d527 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2099,6 +2099,23 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "rich_container_cpuinfo_source", + .data = &sysctl_rich_container_cpuinfo_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_TWO, + }, + { + .procname = "rich_container_cpuinfo_sharesbase", + .data = &sysctl_rich_container_cpuinfo_sharesbase, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_TWO, + }, #endif { } }; -- Gitee From a5ab50e9fb6a685ebe053691296d12d99d352772 Mon Sep 17 00:00:00 2001 From: Erwei Deng Date: Wed, 15 Dec 2021 10:10:54 +0800 Subject: [PATCH 831/953] anolis: Kconfig: RICH_CONTAINER should select SCHED_SLI. ANBZ: #8657 SCHED_SLI should be set automatically when RICH_CONTAINER is set. If RICH_CONTAINER is enabled with SCHED_SLI disabled, enabling the rich container makes no sense. Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- init/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/init/Kconfig b/init/Kconfig index 2c9af56b108a..d6fa4c01ddd6 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1144,6 +1144,7 @@ config RICH_CONTAINER depends on CGROUP_CPUACCT depends on CFS_BANDWIDTH depends on CPUSETS + select SCHED_SLI default n help Make containers feel like VMs. Change the following interface -- Gitee From 799eaf3af885c14d2492919719f5c5ca20f9956e Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Wed, 6 Sep 2023 12:06:24 +0800 Subject: [PATCH 832/953] anolis: sched/fair: Support sched_cfs_statistics in cgroup v2 ANBZ: #8657 Export the following cfs statistics of cgroups: cat cpu.sched_cfs_statistics [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] Signed-off-by: Yi Tao --- kernel/sched/core.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b482fbb0cee7..4401d0c1b032 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11780,6 +11780,41 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, } #endif +#ifdef CONFIG_SCHED_SLI +static int cpu_sched_cfs_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = css_tg(seq_css(sf)); + struct sched_entity *se; + struct sched_statistics *stats; + int cpu; + u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0; + + if (!schedstat_enabled()) + goto out_show; + + rcu_read_lock(); + for_each_online_cpu(cpu) { + se = tg->se[cpu]; + if (!se) + continue; + stats = __schedstats_from_se(se); + exec_sum += schedstat_val(se->sum_exec_runtime); + wait_sum_other += + schedstat_val(stats->parent_wait_contrib); + wait_sum += schedstat_val(stats->wait_sum); + wait_max = max(wait_max, schedstat_val(stats->wait_max)); + } + rcu_read_unlock(); +out_show: + /* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */ + seq_printf(sf, "%lld %lld %lld %lld %lld\n", + exec_sum + wait_sum, exec_sum, wait_sum_other, + wait_sum - wait_sum_other, wait_max); + + return 0; +} +#endif + static struct cftype cpu_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { @@ -11841,6 +11876,12 @@ static struct cftype cpu_files[] = { .read_u64 = cpu_ht_ratio_read,
.write_u64 = cpu_ht_ratio_write, }, +#endif +#ifdef CONFIG_SCHED_SLI + { + .name = "sched_cfs_statistics", + .seq_show = cpu_sched_cfs_show, + }, #endif { } /* terminate */ }; -- Gitee From 3c92537eb0d7e9b522ea6888f8907a49177503d8 Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Tue, 12 Sep 2023 17:03:19 +0800 Subject: [PATCH 833/953] anolis: sched, cpuacct: Move scheduling latency data from cpuacct to task_group ANBZ: #8657 Because cgroup v2 disables the cpuacct controller, to support retrieving scheduling latency data in cgroup v2, move the relevant structures from cpuacct to task_group. Signed-off-by: Yi Tao --- kernel/sched/core.c | 280 ++++++++++++++++++++++++++++++++++++- kernel/sched/cpuacct.c | 308 +---------------------------------------- kernel/sched/fair.c | 2 +- kernel/sched/sched.h | 61 +++++++- kernel/sched/stats.c | 2 +- 5 files changed, 339 insertions(+), 314 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4401d0c1b032..86950ffc05a3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10085,11 +10085,19 @@ int in_sched_functions(unsigned long addr) } #ifdef CONFIG_CGROUP_SCHED +#ifdef CONFIG_SCHED_SLI +static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu); +#endif + /* * Default task group. * Every task in system belongs to this group at bootup. */ -struct task_group root_task_group; +struct task_group root_task_group = { +#ifdef CONFIG_SCHED_SLI + .lat_stat_cpu = &root_lat_stat_cpu, +#endif +}; LIST_HEAD(task_groups); /* Cacheline aligned slab cache for task_group */ @@ -10562,6 +10570,12 @@ static void sched_free_group(struct task_group *tg) free_fair_sched_group(tg); free_rt_sched_group(tg); autogroup_free(tg); + +#ifdef CONFIG_SCHED_SLI + if (tg->lat_stat_cpu) + free_percpu(tg->lat_stat_cpu); +#endif + kmem_cache_free(task_group_cache, tg); } @@ -10596,6 +10610,12 @@ struct task_group *sched_create_group(struct task_group *parent) if (!alloc_rt_sched_group(tg, parent)) goto err; +#ifdef CONFIG_SCHED_SLI + tg->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); + if (!tg->lat_stat_cpu) + goto err; +#endif + alloc_uclamp_sched_group(tg, parent); #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) @@ -11781,6 +11801,264 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, #endif #ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_TRUE(cpu_no_sched_lat); +static int cpu_sched_lat_enabled_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", !static_key_enabled(&cpu_no_sched_lat)); + return 0; +} + +static int cpu_sched_lat_enabled_open(struct inode *inode, + struct file *file) +{ + return single_open(file, cpu_sched_lat_enabled_show, NULL); +} + +static ssize_t cpu_sched_lat_enabled_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cpu_no_sched_lat); + break; + case '1': + static_branch_disable(&cpu_no_sched_lat); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops cpu_sched_lat_enabled_fops = { + .proc_open = cpu_sched_lat_enabled_open, + .proc_read = seq_read, + .proc_write = cpu_sched_lat_enabled_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init init_cpu_sched_lat_enabled(void) +{ + struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; + 
+ ca_dir = proc_mkdir("cpusli", NULL); + if (!ca_dir) + return -ENOMEM; + + sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, + ca_dir, &cpu_sched_lat_enabled_fops); + if (!sched_lat_enabled_file) { + remove_proc_entry("cpusli", NULL); + return -ENOMEM; + } + + return 0; +} +device_initcall(init_cpu_sched_lat_enabled); + +static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) +{ + if (msecs < 1) + return SCHED_LAT_0_1; + if (msecs < 10) + return SCHED_LAT_0_1 + (msecs + 2) / 3; + if (msecs < 50) + return SCHED_LAT_7_10 + msecs / 10; + if (msecs < 100) + return SCHED_LAT_50_100; + if (msecs < 1000) + return SCHED_LAT_100_500 + (msecs / 500); + if (msecs < 10000) + return SCHED_LAT_1000_5000 + (msecs / 5000); + + return SCHED_LAT_10000_INF; +} + +struct task_group *cgroup_tg(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), + struct task_group, css); +} + +void task_cpu_update_block(struct task_struct *tsk, u64 runtime) +{ + int idx; + enum sched_lat_stat_item s; + struct task_group *tg; + unsigned int msecs; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = css_tg(task_css(tsk, cpu_cgrp_id)); + if (!tg) { + rcu_read_unlock(); + return; + } + if (tsk->in_iowait) + s = SCHED_LAT_IOBLOCK; + else + s = SCHED_LAT_BLOCK; + + msecs = runtime >> 20; /* Proximately to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); + rcu_read_unlock(); +} + +void cpu_update_latency(struct sched_entity *se, u64 delta) +{ + int idx; + enum sched_lat_stat_item s; + unsigned int msecs; + struct task_group *tg; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = se->cfs_rq->tg; + if (!tg) { + rcu_read_unlock(); + return; + } + if (entity_is_task(se)) + s = SCHED_LAT_WAIT; + else + s = SCHED_LAT_CGROUP_WAIT; + + msecs = delta >> 20; /* Proximately to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); + rcu_read_unlock(); +} + +#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct task_group *tg = (struct task_group *)info; \ + int i; \ + \ + for (i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ + this_cpu_write(tg->lat_stat_cpu->item[sidx][i], 0); \ +} \ + +SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); +SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); + +smp_call_func_t smp_sched_lat_write_funcs[] = { + smp_write_sched_wait_latency, + smp_write_sched_block_latency, + smp_write_sched_ioblock_latency, + smp_write_sched_wait_cgroup_latency +}; + +int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct cgroup *cgrp = css->cgroup; + struct task_group *tg = cgroup_tg(cgrp); + enum sched_lat_stat_item idx = cft->private; + smp_call_func_t func = smp_sched_lat_write_funcs[idx]; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + return -EOPNOTSUPP; + } + + if (val != 0) + return -EINVAL; + + func((void *)tg); + smp_call_function(func, (void 
*)tg, 1); + + return 0; +} + +static u64 sched_lat_stat_gather(struct task_group *tg, + enum sched_lat_stat_item sidx, + enum sched_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(tg->lat_stat_cpu, cpu)->item[sidx][cidx]; + + return sum; +} + +int sched_lat_stat_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = cgroup_tg(seq_css(sf)->cgroup); + enum sched_lat_stat_item s = seq_cft(sf)->private; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + return -EOPNOTSUPP; + } + + /* CFS scheduling latency cgroup and task histgrams */ + seq_printf(sf, "0-1ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_0_1)); + seq_printf(sf, "1-4ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1_4)); + seq_printf(sf, "4-7ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_4_7)); + seq_printf(sf, "7-10ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_7_10)); + seq_printf(sf, "10-20ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10_20)); + seq_printf(sf, "20-30ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_20_30)); + seq_printf(sf, "30-40ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_30_40)); + seq_printf(sf, "40-50ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_40_50)); + seq_printf(sf, "50-100ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_50_100)); + seq_printf(sf, "100-500ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_100_500)); + seq_printf(sf, "500-1000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_500_1000)); + seq_printf(sf, "1000-5000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1000_5000)); + seq_printf(sf, "5000-10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_5000_10000)); + seq_printf(sf, ">=10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10000_INF)); + seq_printf(sf, "total(ms): \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_TOTAL) / 1000000); + seq_printf(sf, "nr: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_NR)); + + return 0; +} + static int cpu_sched_cfs_show(struct seq_file *sf, void *v) { struct task_group *tg = css_tg(seq_css(sf)); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 71abcc0b79c5..6d87a617d00e 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -32,69 +32,6 @@ struct cpuacct_alistats { } ____cacheline_aligned; #endif -enum sched_lat_stat_item { - SCHED_LAT_WAIT, - SCHED_LAT_BLOCK, - SCHED_LAT_IOBLOCK, - SCHED_LAT_CGROUP_WAIT, - SCHED_LAT_NR_STAT -}; - -/* - * [0, 1ms) - * [1, 4ms) - * [4, 7ms) - * [7, 10ms) - * [10, 100ms) - * [100, 500ms) - * [500, 1000ms) - * [1000, 5000ms) - * [5000, 10000ms) - * [10000ms, INF) - * total(ms) - */ -/* Scheduler latency histogram distribution, in milliseconds */ -enum sched_lat_count_t { - SCHED_LAT_0_1, - SCHED_LAT_1_4, - SCHED_LAT_4_7, - SCHED_LAT_7_10, - SCHED_LAT_10_20, - SCHED_LAT_20_30, - SCHED_LAT_30_40, - SCHED_LAT_40_50, - SCHED_LAT_50_100, - SCHED_LAT_100_500, - SCHED_LAT_500_1000, - SCHED_LAT_1000_5000, - SCHED_LAT_5000_10000, - SCHED_LAT_10000_INF, - SCHED_LAT_TOTAL, - SCHED_LAT_NR, - SCHED_LAT_NR_COUNT, -}; - -struct sched_cgroup_lat_stat_cpu { - unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT]; -}; - -static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) -{ - if (msecs < 1) - return SCHED_LAT_0_1; - if (msecs < 10) - return SCHED_LAT_0_1 + (msecs + 2) / 3; - if (msecs < 50) - return SCHED_LAT_7_10 + msecs / 10; - if (msecs < 100) - return 
SCHED_LAT_50_100; - if (msecs < 1000) - return SCHED_LAT_100_500 + (msecs / 500); - if (msecs < 10000) - return SCHED_LAT_1000_5000 + (msecs / 5000); - - return SCHED_LAT_10000_INF; -} /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { @@ -105,7 +42,6 @@ struct cpuacct { struct kernel_cpustat __percpu *cpustat; #ifdef CONFIG_SCHED_SLI struct cpuacct_alistats __percpu *alistats; - struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; struct list_head sli_list; bool sli_enabled; u64 next_load_update; @@ -142,7 +78,6 @@ static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime); #ifdef CONFIG_SCHED_SLI static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats); -static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu); #endif static struct cpuacct root_cpuacct = { @@ -151,82 +86,10 @@ static struct cpuacct root_cpuacct = { .cpuusage = &root_cpuacct_cpuusage, #ifdef CONFIG_SCHED_SLI .alistats = &root_alistats, - .lat_stat_cpu = &root_lat_stat_cpu, #endif }; #ifdef CONFIG_SCHED_SLI -static DEFINE_STATIC_KEY_TRUE(cpuacct_no_sched_lat); -static int cpuacct_sched_lat_enabled_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%d\n", !static_key_enabled(&cpuacct_no_sched_lat)); - return 0; -} - -static int cpuacct_sched_lat_enabled_open(struct inode *inode, - struct file *file) -{ - return single_open(file, cpuacct_sched_lat_enabled_show, NULL); -} - -static ssize_t cpuacct_sched_lat_enabled_write(struct file *file, - const char __user *ubuf, - size_t count, loff_t *ppos) -{ - char val = -1; - int ret = count; - - if (count < 1 || *ppos) { - ret = -EINVAL; - goto out; - } - - if (copy_from_user(&val, ubuf, 1)) { - ret = -EFAULT; - goto out; - } - - switch (val) { - case '0': - static_branch_enable(&cpuacct_no_sched_lat); - break; - case '1': - static_branch_disable(&cpuacct_no_sched_lat); - break; - default: - ret = -EINVAL; - } - -out: - return ret; -} - -static const struct proc_ops cpuacct_sched_lat_enabled_fops = { - .proc_open = cpuacct_sched_lat_enabled_open, - .proc_read = seq_read, - .proc_write = cpuacct_sched_lat_enabled_write, - .proc_lseek = seq_lseek, - .proc_release = single_release, -}; - -static int __init init_cpuacct_sched_lat_enabled(void) -{ - struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; - - ca_dir = proc_mkdir("cpusli", NULL); - if (!ca_dir) - return -ENOMEM; - - sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, - ca_dir, &cpuacct_sched_lat_enabled_fops); - if (!sched_lat_enabled_file) { - remove_proc_entry("cpusli", NULL); - return -ENOMEM; - } - - return 0; -} -device_initcall(init_cpuacct_sched_lat_enabled); void task_ca_increase_nr_migrations(struct task_struct *tsk) { @@ -239,68 +102,6 @@ void task_ca_increase_nr_migrations(struct task_struct *tsk) rcu_read_unlock(); } -void task_ca_update_block(struct task_struct *tsk, u64 runtime) -{ - int idx; - enum sched_lat_stat_item s; - struct cpuacct *ca; - unsigned int msecs; - - if (static_branch_likely(&cpuacct_no_sched_lat)) - return; - - rcu_read_lock(); - ca = task_ca(tsk); - if (!ca) { - rcu_read_unlock(); - return; - } - if (tsk->in_iowait) - s = SCHED_LAT_IOBLOCK; - else - s = SCHED_LAT_BLOCK; - - msecs = runtime >> 20; /* Proximately to speed up */ - idx = get_sched_lat_count_idx(msecs); - this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); - this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); - this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); - 
rcu_read_unlock(); -} - -void cpuacct_update_latency(struct sched_entity *se, u64 delta) -{ - int idx; - enum sched_lat_stat_item s; - struct cpuacct *ca; - unsigned int msecs; - struct task_group *tg; - - if (static_branch_likely(&cpuacct_no_sched_lat)) - return; - - tg = se->cfs_rq->tg; - if (task_group_is_autogroup(tg)) - return; - - rcu_read_lock(); - ca = cgroup_ca(tg->css.cgroup); - if (!ca) { - rcu_read_unlock(); - return; - } - if (entity_is_task(se)) - s = SCHED_LAT_WAIT; - else - s = SCHED_LAT_CGROUP_WAIT; - - msecs = delta >> 20; /* Proximately to speed up */ - idx = get_sched_lat_count_idx(msecs); - this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); - this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); - this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); - rcu_read_unlock(); -} #endif #ifdef CONFIG_SCHED_SLI @@ -381,16 +182,11 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) goto out_free_cpustat; #ifdef CONFIG_SCHED_SLI - ca->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); - if (!ca->lat_stat_cpu) - goto out_free_pre_cputime; - - INIT_LIST_HEAD(&ca->sli_list); ca->alistats = alloc_percpu(struct cpuacct_alistats); if (!ca->alistats) - goto out_free_lat_stat_cpu; + goto out_free_pre_cputime; #endif for_each_possible_cpu(i) { @@ -407,8 +203,6 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return &ca->css; #ifdef CONFIG_SCHED_SLI -out_free_lat_stat_cpu: - free_percpu(ca->lat_stat_cpu); out_free_pre_cputime: free_percpu(ca->prev_cputime); #endif @@ -439,7 +233,6 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css) free_percpu(ca->cpuusage); #ifdef CONFIG_SCHED_SLI free_percpu(ca->alistats); - free_percpu(ca->lat_stat_cpu); #endif kfree(ca); } @@ -664,12 +457,6 @@ static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, loads[2] = (avenrun[2] + offset) << shift; } -static inline struct task_group *cgroup_tg(struct cgroup *cgrp) -{ - return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), - struct task_group, css); -} - static inline unsigned long nr_uninterruptible(void) { unsigned long i, sum = 0; @@ -1135,99 +922,6 @@ static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v) return 0; } - -#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ -static void smp_write_##name(void *info) \ -{ \ - struct cpuacct *ca = (struct cpuacct *)info; \ - int i; \ - \ - for (i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ - this_cpu_write(ca->lat_stat_cpu->item[sidx][i], 0); \ -} \ - -SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); -SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); -SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); -SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); - -smp_call_func_t smp_sched_lat_write_funcs[] = { - smp_write_sched_wait_latency, - smp_write_sched_wait_cgroup_latency, - smp_write_sched_block_latency, - smp_write_sched_ioblock_latency -}; - -static int sched_lat_stat_write(struct cgroup_subsys_state *css, - struct cftype *cft, u64 val) -{ - struct cpuacct *ca = css_ca(css); - enum sched_lat_stat_item idx = cft->private; - smp_call_func_t func = smp_sched_lat_write_funcs[idx]; - - if (val != 0) - return -EINVAL; - - func((void *)ca); - smp_call_function(func, (void *)ca, 1); - - return 0; -} - -static u64 sched_lat_stat_gather(struct cpuacct *ca, - enum sched_lat_stat_item sidx, - enum sched_lat_count_t cidx) -{ - u64 sum = 0; - int cpu; - - for_each_possible_cpu(cpu) - sum += per_cpu_ptr(ca->lat_stat_cpu, 
cpu)->item[sidx][cidx]; - - return sum; -} - -static int sched_lat_stat_show(struct seq_file *sf, void *v) -{ - struct cpuacct *ca = css_ca(seq_css(sf)); - enum sched_lat_stat_item s = seq_cft(sf)->private; - - /* CFS scheduling latency cgroup and task histgrams */ - seq_printf(sf, "0-1ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_0_1)); - seq_printf(sf, "1-4ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_1_4)); - seq_printf(sf, "4-7ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_4_7)); - seq_printf(sf, "7-10ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_7_10)); - seq_printf(sf, "10-20ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_10_20)); - seq_printf(sf, "20-30ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_20_30)); - seq_printf(sf, "30-40ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_30_40)); - seq_printf(sf, "40-50ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_40_50)); - seq_printf(sf, "50-100ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_50_100)); - seq_printf(sf, "100-500ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_100_500)); - seq_printf(sf, "500-1000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_500_1000)); - seq_printf(sf, "1000-5000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_1000_5000)); - seq_printf(sf, "5000-10000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_5000_10000)); - seq_printf(sf, ">=10000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_10000_INF)); - seq_printf(sf, "total(ms): \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_TOTAL) / 1000000); - seq_printf(sf, "nr: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_NR)); - - return 0; -} #endif static struct cftype files[] = { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 89b24d93e03c..e07e96c9bca6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1271,7 +1271,7 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) if (entity_is_task(se)) p = task_of(se); - cpuacct_update_latency(se, delta); + cpu_update_latency(se, delta); __update_stats_wait_end(rq_of(cfs_rq), p, stats); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index acd1436fba19..c5e21508bf6b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -348,6 +348,52 @@ struct rt_rq; extern struct list_head task_groups; +enum sched_lat_stat_item { + SCHED_LAT_WAIT, + SCHED_LAT_BLOCK, + SCHED_LAT_IOBLOCK, + SCHED_LAT_CGROUP_WAIT, + SCHED_LAT_NR_STAT +}; + +/* + * [0, 1ms) + * [1, 4ms) + * [4, 7ms) + * [7, 10ms) + * [10, 100ms) + * [100, 500ms) + * [500, 1000ms) + * [1000, 5000ms) + * [5000, 10000ms) + * [10000ms, INF) + * total(ms) + */ +/* Scheduler latency histogram distribution, in milliseconds */ +enum sched_lat_count_t { + SCHED_LAT_0_1, + SCHED_LAT_1_4, + SCHED_LAT_4_7, + SCHED_LAT_7_10, + SCHED_LAT_10_20, + SCHED_LAT_20_30, + SCHED_LAT_30_40, + SCHED_LAT_40_50, + SCHED_LAT_50_100, + SCHED_LAT_100_500, + SCHED_LAT_500_1000, + SCHED_LAT_1000_5000, + SCHED_LAT_5000_10000, + SCHED_LAT_10000_INF, + SCHED_LAT_TOTAL, + SCHED_LAT_NR, + SCHED_LAT_NR_COUNT, +}; + +struct sched_cgroup_lat_stat_cpu { + unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT]; +}; + struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH raw_spinlock_t lock; @@ -434,6 +480,9 @@ struct task_group { unsigned int ht_ratio; #endif +#ifdef CONFIG_SCHED_SLI + struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; +#endif }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -3596,15 +3645,19 @@ extern int 
entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
 extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
 extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu);
 extern void task_ca_increase_nr_migrations(struct task_struct *tsk);
-void cpuacct_update_latency(struct sched_entity *se, u64 delta);
-void task_ca_update_block(struct task_struct *tsk, u64 runtime);
+void cpu_update_latency(struct sched_entity *se, u64 delta);
+void task_cpu_update_block(struct task_struct *tsk, u64 runtime);
 void calc_cgroup_load(void);
 bool async_load_calc_enabled(void);
+struct task_group *cgroup_tg(struct cgroup *cgrp);
+int sched_lat_stat_show(struct seq_file *sf, void *v);
+int sched_lat_stat_write(struct cgroup_subsys_state *css,
+		struct cftype *cft, u64 val);
 #else
 static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { }
-static inline void cpuacct_update_latency(struct sched_entity *se,
+static inline void cpu_update_latency(struct sched_entity *se,
 		u64 delta) { }
-static inline void task_ca_update_block(struct task_struct *tsk,
+static inline void task_cpu_update_block(struct task_struct *tsk,
 		u64 runtime) { }
 static inline void calc_cgroup_load(void) { }
 static inline bool async_load_calc_enabled(void)
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 3da2fdf4aca7..f43ae791bc72 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -85,7 +85,7 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
 	if (p) {
 		if (p->in_iowait) {
-			task_ca_update_block(p, delta);
+			task_cpu_update_block(p, delta);
 			__schedstat_add(stats->iowait_sum, delta);
 			__schedstat_inc(stats->iowait_count);
 			trace_sched_stat_iowait(p, delta);
-- 
Gitee
From 9367322005ee6d887d06f09e8f929b974395bb0b Mon Sep 17 00:00:00 2001
From: Yi Tao
Date: Tue, 12 Sep 2023 17:39:56 +0800
Subject: [PATCH 834/953] anolis: sched: Support latency histograms in cpu
 controller

ANBZ: #8657

Export latency histograms in the cpu controller. The relevant interface
names are as follows:
cpu.wait_latency
cpu.cgroup_wait_latency
cpu.block_latency
cpu.ioblock_latency

Signed-off-by: Yi Tao
---
 kernel/sched/core.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 86950ffc05a3..18b57ff2b5dc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -12160,6 +12160,30 @@ static struct cftype cpu_files[] = {
 		.name = "sched_cfs_statistics",
 		.seq_show = cpu_sched_cfs_show,
 	},
+	{
+		.name = "wait_latency",
+		.private = SCHED_LAT_WAIT,
+		.write_u64 = sched_lat_stat_write,
+		.seq_show = sched_lat_stat_show
+	},
+	{
+		.name = "cgroup_wait_latency",
+		.private = SCHED_LAT_CGROUP_WAIT,
+		.write_u64 = sched_lat_stat_write,
+		.seq_show = sched_lat_stat_show
+	},
+	{
+		.name = "block_latency",
+		.private = SCHED_LAT_BLOCK,
+		.write_u64 = sched_lat_stat_write,
+		.seq_show = sched_lat_stat_show
+	},
+	{
+		.name = "ioblock_latency",
+		.private = SCHED_LAT_IOBLOCK,
+		.write_u64 = sched_lat_stat_write,
+		.seq_show = sched_lat_stat_show
+	},
 #endif
 	{ }	/* terminate */
 };
-- 
Gitee
From 0fd97a3135cb18bdb8336cbc56d989dfbba2ac8a Mon Sep 17 00:00:00 2001
From: "Carrie.Cai"
Date: Sun, 7 Apr 2024 15:51:32 +0800
Subject: [PATCH 835/953] anolis: crypto: update Mont-TSSE driver for new
 function

ANBZ: #8710

Mont-TSSE(TM) is a high-speed crypto algorithm accelerator; it supports
SM2/3/4, AES and SHA algorithms.
The new function supports updating the firmware file via sysfs. Besides,
coding-standard problems in the Mont-TSSE driver have been fixed.
Performance: SM4-CBC: 179Gbps; SM3-HASH: 195Gbps; SM2-SIGN: 955223ops;
SM2-VERIFY: 345385ops; SM2-ENC: 207253ops; SM2-DEC: 239520ops

Signed-off-by: Carrie.Cai
Reviewed-by: Tianjia Zhang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3014
---
 drivers/crypto/montage/tsse/tsse_dev.h        |   3 +-
 drivers/crypto/montage/tsse/tsse_dev_drv.c    |  65 +++++++-
 drivers/crypto/montage/tsse/tsse_dev_mgr.c    |  30 +---
 drivers/crypto/montage/tsse/tsse_fw_service.c | 103 ++++++------
 drivers/crypto/montage/tsse/tsse_fw_service.h |   6 +-
 drivers/crypto/montage/tsse/tsse_ipc.c        | 153 +++++++++---------
 drivers/crypto/montage/tsse/tsse_ipc.h        |  15 +-
 drivers/crypto/montage/tsse/tsse_service.c    |   8 +-
 8 files changed, 202 insertions(+), 181 deletions(-)

diff --git a/drivers/crypto/montage/tsse/tsse_dev.h b/drivers/crypto/montage/tsse/tsse_dev.h
index d1dafee61300..c16d2ae7c414 100644
--- a/drivers/crypto/montage/tsse/tsse_dev.h
+++ b/drivers/crypto/montage/tsse/tsse_dev.h
@@ -18,6 +18,7 @@
 #define TSSE_PCI_MAX_BARS 4
 #define TSSE_FW_VERSION_LEN 32
+
 struct tsse_bar {
 	void __iomem *virt_addr;
 	resource_size_t addr;
@@ -58,6 +59,7 @@ struct tsse_dev {
 	void *mbx_hw;
 	const struct firmware *fw;
 	char fw_version[TSSE_FW_VERSION_LEN];
+	bool fw_version_exist;
 };
 #define TSSEDEV_TO_DEV(tssedev) (&((tssedev)->tsse_pci_dev.pci_dev->dev))
 #define TSSE_DEV_BARS(tssedev) ((tssedev)->tsse_pci_dev.bars)
@@ -72,7 +74,6 @@ int tsse_devmgr_add_dev(struct tsse_dev *tsse_dev);
 void tsse_devmgr_rm_dev(struct tsse_dev *tdev);
 int tsse_prepare_restart_dev(struct tsse_dev *tdev);
 int tsse_start_dev(struct tsse_dev *tdev);
-struct tsse_dev *get_tssedev(int id);
 
 static inline struct tsse_dev *pci_to_tsse_dev(struct pci_dev *pci_dev)
 {
diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.c b/drivers/crypto/montage/tsse/tsse_dev_drv.c
index 9e914576a129..86c619d64f5e 100644
--- a/drivers/crypto/montage/tsse/tsse_dev_drv.c
+++ b/drivers/crypto/montage/tsse/tsse_dev_drv.c
@@ -11,18 +11,25 @@
 #include
 #include
 #include
+#include
 
 #include "tsse_dev_drv.h"
 #include "tsse_vuart.h"
 #include "tsse_ipc.h"
 #include "tsse_fw_service.h"
 
+#define CLUSTER_SLOT_CONFIG_OFFSET 0x5780000
+#define QPAIR_SETTING_OFFSET 0x50000
+#define BAR_START 2
+#define BAR_END 4
+
 static DEFINE_IDA(tsse_ida);
 
 static inline void tsse_qpair_enable_pf(struct tsse_dev *tdev, bool enable)
 {
 	writel(enable ? 1 : 0,
-	       TSSE_DEV_BARS(tdev)[2].virt_addr + 0x5780000 + 0x50000);
+	       TSSE_DEV_BARS(tdev)[2].virt_addr +
+	       CLUSTER_SLOT_CONFIG_OFFSET + QPAIR_SETTING_OFFSET);
 }
 static int tsse_sriov_disable(struct tsse_dev *tdev)
 {
@@ -107,6 +114,40 @@ static int tsse_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 	return num_vfs_param;
 }
 
+/**
+ * tsse_image_load_store() - This function will be called when user
+ * writes string to /sys/bus/pci/devices/.../tsse_image_load.
+ * Driver will always load /lib/firmware/tsse_firmware.bin.
+ * @dev: device
+ * @attr: device attribute
+ * @buf: string that user writes
+ * @count: string length that user writes
+ * Return: the number of bytes used from the buffer, here it is just the count argument.
+ */ +static ssize_t tsse_image_load_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = NULL; + struct tsse_dev *tdev = NULL; + + pdev = container_of(dev, struct pci_dev, dev); + if (pdev) + tdev = pci_to_tsse_dev(pdev); + if (buf && count && tdev) { + tsse_dev_info(tdev, "receive command to load firmware %s\n", TSSE_FIRMWARE); + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + if (tsse_fw_manual_load_ipc(pdev)) + dev_err(&pdev->dev, "%s %d: firmware update failed\n", + __func__, __LINE__); + } + } + return count; +} + +DEVICE_ATTR_WO(tsse_image_load); + static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int status = 0; @@ -163,7 +204,7 @@ static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_err; } - for (bar = 2; bar < 4;) { + for (bar = BAR_START; bar < BAR_END;) { TSSE_DEV_BARS(tdev)[bar].addr = pci_resource_start(pdev, bar); TSSE_DEV_BARS(tdev)[bar].size = pci_resource_len(pdev, bar); TSSE_DEV_BARS(tdev) @@ -219,9 +260,13 @@ static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) status = -EFAULT; goto out_err_port_init; } + + tdev->fw_version_exist = false; /* Its result not break driver init process */ - if (!tsse_fw_load(pdev)) - get_firmware_version((char *)tdev->fw->data, tdev->fw->size, tdev->fw_version); + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + } if (tsse_ipc_init(pdev)) { dev_err(&pdev->dev, @@ -231,13 +276,22 @@ static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_err_ipc; } + if (sysfs_create_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr)) { + dev_err(&pdev->dev, + "%s %d: sysfs_create_file failed for tsse image load.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_image_load; + } + tsse_dev_info(tdev, "successful\n"); pci_read_config_dword(pdev, 0x720, &tmp_val); tsse_dev_dbg(tdev, "the value of FILTER_MASK_2_REG is 0x%x\n", tmp_val); return 0; - +out_err_image_load: + tsse_ipc_deinit(tdev); out_err_ipc: vuart_uninit_port(pdev); out_err_port_init: @@ -261,6 +315,7 @@ static void device_remove(struct pci_dev *pdev) release_firmware(tdev->fw); tdev->fw = NULL; } + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr); tsse_ipc_deinit(tdev); vuart_uninit_port(pdev); tsse_devmgr_rm_dev(tdev); diff --git a/drivers/crypto/montage/tsse/tsse_dev_mgr.c b/drivers/crypto/montage/tsse/tsse_dev_mgr.c index 159f75c8f46f..39553eb96832 100644 --- a/drivers/crypto/montage/tsse/tsse_dev_mgr.c +++ b/drivers/crypto/montage/tsse/tsse_dev_mgr.c @@ -34,12 +34,6 @@ static inline void tsse_list_add(struct list_head *new, struct list_head *prev, WRITE_ONCE(prev->next, new); } -static inline void tsse_list_add_tail(struct list_head *new, - struct list_head *head) -{ - tsse_list_add(new, head->prev, head); -} - static int tsse_dev_pf_get(struct tsse_dev *vf_tsse_dev) { int ret = 0; @@ -104,7 +98,7 @@ void tsse_dev_put(struct tsse_dev *tdev) } } -int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) +static int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) { int times, max_retry = 150; @@ -172,13 +166,11 @@ int tsse_start_dev(struct tsse_dev *tdev) clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); return ret; } -EXPORT_SYMBOL_GPL(tsse_start_dev); int 
tsse_prepare_restart_dev(struct tsse_dev *tdev) { return tsse_stop_dev(tdev, false); } -EXPORT_SYMBOL_GPL(tsse_prepare_restart_dev); void tsse_devmgr_rm_dev(struct tsse_dev *tdev) { @@ -186,7 +178,6 @@ void tsse_devmgr_rm_dev(struct tsse_dev *tdev) tsse_dev_free_irq_vectors(tdev); msleep(300); } -EXPORT_SYMBOL_GPL(tsse_devmgr_rm_dev); int tsse_devmgr_add_dev(struct tsse_dev *tdev) { @@ -203,27 +194,8 @@ int tsse_devmgr_add_dev(struct tsse_dev *tdev) } return ret; } -EXPORT_SYMBOL_GPL(tsse_devmgr_add_dev); struct list_head *tsse_devmgr_get_head(void) { return &tsse_dev_table; } - -struct tsse_dev *get_tssedev(int id) -{ - struct list_head *itr; - struct tsse_dev *ptr; - - mutex_lock(&tsse_dev_table_lock); - - list_for_each(itr, &tsse_dev_table) { - ptr = list_entry(itr, struct tsse_dev, list); - break; - } - - mutex_unlock(&tsse_dev_table_lock); - - return ptr; -} -EXPORT_SYMBOL_GPL(get_tssedev); diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.c b/drivers/crypto/montage/tsse/tsse_fw_service.c index fc3907a7c503..486352bc8f84 100644 --- a/drivers/crypto/montage/tsse/tsse_fw_service.c +++ b/drivers/crypto/montage/tsse/tsse_fw_service.c @@ -21,25 +21,28 @@ #include "tsse_service.h" #define SEARCH_PATTERN "MT_CFG_BUILD_VERSION_DETAIL" -#define SEARCH_PATTERN_LEN 28 +#define SPACE_CH ' ' -int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +static int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) { u8 *h2d; u32 int_reg; - u32 rc; mutex_lock(&tsseipc->list_lock); int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); if ((int_reg & IPC_REGISTER_INT_SET) != 0) { - rc = -1; mutex_unlock(&tsseipc->list_lock); - return rc; + return -EFAULT; + } + if (msg->header.i_len < sizeof(struct ipc_header) + + sizeof(struct msg_info) + sizeof(struct fw_load)) { + dev_err(tsseipc->dev, "msg format error\n"); + return -EFAULT; } h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); memcpy_toio(h2d, msg, sizeof(struct ipc_header)); - memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data, msg->header.i_len - sizeof(struct ipc_header)); writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); @@ -48,41 +51,30 @@ int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) return 0; } -void fw_free(void *msg_t) -{ - struct tsse_msg *tssemsg; - struct ipc_msg *payload; - - payload = (struct ipc_msg *)msg_t; - tssemsg = container_of(payload, struct tsse_msg, ipc_payload); - - kvfree(tssemsg); -} - -int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version) +/** + * get_firmware_version() - Get version information from firmware + * @fw: firmware pointer + * @fw_version_out: firmware version string output + * Return: 0 on success, error code otherwise + */ +int get_firmware_version(const struct firmware *fw, char *fw_version_out) { - char *pattern; - char *space_ch = " "; + const char *pattern = SEARCH_PATTERN; + const uint8_t *fw_buffer = fw->data; uint32_t pattern_i = 0, buffer_i = 0; - uint32_t pattern_len = SEARCH_PATTERN_LEN - 1; // Not include "\0" + uint32_t pattern_len = strlen(pattern); // Not include "\0" uint32_t version_start = 0; uint32_t version_len = 0; - pattern = kzalloc(SEARCH_PATTERN_LEN, GFP_KERNEL); - if (!pattern) - return -1; - - snprintf(pattern, SEARCH_PATTERN_LEN, SEARCH_PATTERN); - - while (buffer_i < buffer_len) { - if (pattern[pattern_i] == fw_buffer[buffer_i]) { + while (buffer_i < fw->size) { + if (pattern[pattern_i] == (char) 
fw_buffer[buffer_i]) { buffer_i++; pattern_i++; } if (pattern_i == pattern_len) { break; // pattern found - } else if ((buffer_i < buffer_len) && - (pattern[pattern_i] != fw_buffer[buffer_i])) { + } else if ((buffer_i < fw->size) && + (pattern[pattern_i] != (char) fw_buffer[buffer_i])) { // mismatch after pattern_i matches if (pattern_i != 0) { // since the pattern has no common prefix, when mismatch, @@ -93,22 +85,29 @@ int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version) } } } - kfree(pattern); if (pattern_i == pattern_len) { buffer_i++; version_start = buffer_i; - while (buffer_i < buffer_len) { - if (fw_buffer[buffer_i] == space_ch[0]) { + while (buffer_i < fw->size) { + if (fw_buffer[buffer_i] == SPACE_CH) { version_len = buffer_i - version_start; - strscpy(fw_version, fw_buffer + version_start, version_len + 1); + if (version_len >= TSSE_FW_VERSION_LEN - 1) + version_len = TSSE_FW_VERSION_LEN - 2; + strscpy(fw_version_out, fw_buffer + version_start, version_len + 1); return 0; } buffer_i++; } } - return -1; + return -EINVAL; } +/** + * fw_service() - Firmware service to handle IPC message from mainCPU. + * It will write init or manual load firmware to PCIe BAR and send message back. + * @tsseipc_t: pointer to a structure used for IPC + * @msg_t: pointer to IPC message + */ void fw_service(void *tsseipc_t, void *msg_t) { void __iomem *fw; @@ -120,16 +119,15 @@ void fw_service(void *tsseipc_t, void *msg_t) struct ipc_msg *msg = (struct ipc_msg *)msg_t; task_offset = sizeof(struct msg_info); - fw_task = (struct fw_load *)(msg->i_data + - task_offset / sizeof(uint32_t)); - + fw_task = (struct fw_load *)((uint8_t *)msg->i_data + task_offset); tdev = pci_to_tsse_dev(tsseipc->pdev); + if (!tdev || !tdev->fw) { fw_task->result = 1; fw_task->size = 0; dev_info(tsseipc->dev, "firmware loading failed\n"); - fw_send_msg(tsseipc, msg); - fw_free(msg); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); return; } @@ -140,24 +138,33 @@ void fw_service(void *tsseipc_t, void *msg_t) memcpy_toio((u8 *)fw, tdev->fw->data, size); dev_info(tsseipc->dev, "firmware loading done\n"); - fw_send_msg(tsseipc, msg); - fw_free(msg); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); - dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); + if (tdev->fw_version_exist) + dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); if (tdev->fw) { release_firmware(tdev->fw); tdev->fw = NULL; + memset(tdev->fw_version, 0, TSSE_FW_VERSION_LEN); + tdev->fw_version_exist = false; } } -int tsse_fw_load(struct pci_dev *pdev) +/** + * tsse_fw_load() - Load firmware from /lib/firmware + * @pdev: pci device + * @name: firmware file name + * @fw: pointer to firmware pointer + * Return: 0 on success, error code otherwise + */ +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw) { int result; - struct tsse_dev *tdev = pci_to_tsse_dev(pdev); - result = request_firmware(&tdev->fw, TSSE_FIRMWARE, &pdev->dev); + result = request_firmware(fw, name, &pdev->dev); if (result) - dev_err(&pdev->dev, "%s failed\n", __func__); + dev_err(&pdev->dev, "%s failed for %s\n", __func__, name); return result; } diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.h b/drivers/crypto/montage/tsse/tsse_fw_service.h index 973ca6a0bce9..706ea6d29769 100644 --- a/drivers/crypto/montage/tsse/tsse_fw_service.h +++ b/drivers/crypto/montage/tsse/tsse_fw_service.h @@ -8,10 +8,12 @@ #ifndef 
__TSSE_FW_SERVICE_H__ #define __TSSE_FW_SERVICE_H__ +#include + #define FW_BASE 0x7000000 #define TSSE_FIRMWARE "tsse_firmware.bin" void fw_service(void *tsseipc_t, void *msg_t); -int tsse_fw_load(struct pci_dev *pdev); -int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version); +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw); +int get_firmware_version(const struct firmware *fw, char *fw_version_out); #endif diff --git a/drivers/crypto/montage/tsse/tsse_ipc.c b/drivers/crypto/montage/tsse/tsse_ipc.c index 0f92c096f211..b75ca97db6b6 100644 --- a/drivers/crypto/montage/tsse/tsse_ipc.c +++ b/drivers/crypto/montage/tsse/tsse_ipc.c @@ -14,60 +14,43 @@ #include "tsse_dev.h" #include "tsse_service.h" -struct tsse_msg *get_msginf(void __iomem *d2h) +/** + * get_msginf() - Create ipc_msg and read message from BAR. + * Return the pointer to ipc_msg, the caller is responsible for free it. + * @d2h: device2host memory pointer + * Return: new ipc_msg pointer, which points to message read from device + */ +static struct ipc_msg *get_msginf(void __iomem *d2h) { - uint32_t u_len; - struct tsse_msg *tssemsg; + uint32_t u_len = 0; + struct ipc_msg *msg = NULL; + uint8_t *device_msg_data = NULL; struct ipc_header *ipc_info = (struct ipc_header *)d2h; + // The memory layout in d2h should at least contains: + // ipc_header, msg_info and fw_load (message body) + if (ipc_info->i_len < sizeof(struct ipc_header) + + sizeof(struct msg_info) + sizeof(struct fw_load)) { + pr_info("%s(): msg format error\n", __func__); + return NULL; + } u_len = ipc_info->i_len - sizeof(struct ipc_header); - - tssemsg = (struct tsse_msg *)(kzalloc(sizeof(struct tsse_msg) + u_len, + msg = (struct ipc_msg *)(kzalloc(sizeof(struct ipc_msg) + u_len, GFP_ATOMIC)); - - if (!tssemsg) { - pr_info("%s(): tssemsg kzalloc failed\n", __func__); + if (!msg) { + pr_info("%s(): ipc_msg kzalloc failed\n", __func__); return NULL; } - tssemsg->ipc_payload.header.inst_id = ipc_info->inst_id; - tssemsg->ipc_payload.header.tgid = ipc_info->tgid; - tssemsg->ipc_payload.header.i_len = ipc_info->i_len; - - return tssemsg; -} - -void ipc_recieve_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) -{ - uint32_t u_len = msg->header.i_len - sizeof(struct ipc_header); - uint32_t *msg_data = NULL; - void __iomem *d2h = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET; - - msg_data = (uint32_t *)(d2h + sizeof(struct ipc_header)); - memcpy_fromio(msg->i_data, msg_data, u_len); - return; - -} - -int msg_rout(struct tsse_ipc *tsseipc, struct tsse_msg *tssemsg) -{ - int ret = 0; - struct ipc_msg *msg; - struct msg_info *info; - uint32_t msg_class; + msg->header.inst_id = ipc_info->inst_id; + msg->header.tgid = ipc_info->tgid; + msg->header.i_len = ipc_info->i_len; - msg = &tssemsg->ipc_payload; + device_msg_data = (uint8_t *)(d2h + sizeof(struct ipc_header)); + memcpy_fromio((uint8_t *)msg->i_data, device_msg_data, u_len); - ipc_recieve_msg(tsseipc, msg); - info = (struct msg_info *)msg->i_data; - msg_class = info->msg_class; - if (msg_class == IPC_MESSAGE_BOOT) { - service_rout(tsseipc, msg); - return 0; - } - - return ret; + return msg; } static irqreturn_t tsse_ipc_d2h_irqhandler(int irq, void *dev_id) @@ -90,70 +73,59 @@ bool check_send_enbit(struct tsse_ipc *tsseipc) else return false; } -EXPORT_SYMBOL(check_send_enbit); void notify_device(struct tsse_ipc *tsseipc) { writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); return; -} -EXPORT_SYMBOL(notify_device); - -void ipc_send_msg(struct tsse_ipc 
*tsseipc, struct ipc_data *msg) -{ - u8 *h2d = NULL; - - h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); - memcpy_toio(h2d, msg, sizeof(struct ipc_header)); - memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_ptr, - msg->header.i_len - sizeof(struct ipc_header)); - return; - } -void ipc_hw_init(struct tsse_ipc *hw_ipc) +/** + * ipc_hw_init()- Enable main2host interrupt, cleanup interrupt + * set value in host2main and main2host. + * @hw_ipc: pointer to a structure used for IPC + */ +static void ipc_hw_init(struct tsse_ipc *hw_ipc) { writel(0x1, hw_ipc->virt_addr + MAIN2HOST_INTR_ENABLE_OFFSET); writel(0x0, hw_ipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); writel(0x0, hw_ipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET); } -int ipc_init_msg(struct tsse_ipc *tsseipc) +static int ipc_init_msg(struct tsse_ipc *tsseipc) { u8 *h2d; u32 int_reg; - u32 rc; u32 cmd_len; + u32 i_len; struct ipc_msg *msg; struct msg_info *info_msg; - msg = (struct ipc_msg *)(kzalloc( - sizeof(struct ipc_msg) + sizeof(struct msg_info), GFP_ATOMIC)); + cmd_len = sizeof(uint32_t); + i_len = sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len; + msg = (struct ipc_msg *)(kzalloc(i_len, GFP_ATOMIC)); if (!msg) { pr_info("%s(): msg kzalloc failed\n", __func__); - return -1; + return -EFAULT; } - cmd_len = sizeof(uint32_t); - msg->header.i_len = - sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len; + msg->header.i_len = i_len; info_msg = (struct msg_info *)msg->i_data; info_msg->msg_class = IPC_MESSAGE_BASIC; - *(msg->i_data + sizeof(struct msg_info) / 4) = IPC_BASIC_CMD_HOST_INIT; + *(uint32_t *)((uint8_t *)msg->i_data + sizeof(struct msg_info)) = IPC_BASIC_CMD_HOST_INIT; mutex_lock(&tsseipc->list_lock); int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); if ((int_reg & IPC_REGISTER_INT_SET) != 0) { - rc = -1; mutex_unlock(&tsseipc->list_lock); kfree(msg); - return rc; + return -EFAULT; } h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); memcpy_toio(h2d, msg, sizeof(struct ipc_header)); - memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data, sizeof(struct msg_info) + sizeof(uint32_t)); writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); @@ -168,13 +140,15 @@ static void tsse_ipc_bh_handler(unsigned long data) struct tsse_ipc *tsseipc = (struct tsse_ipc *)data; void __iomem *d2h_payload = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET; - struct tsse_msg *msg_tsse = get_msginf(d2h_payload); + struct ipc_msg *msg = get_msginf(d2h_payload); - if (!msg_tsse) { + if (!msg) { dev_err(tsseipc->dev, "get_msginf is NULL\n"); return; } - msg_rout(tsseipc, msg_tsse); + if (service_rout(tsseipc, msg)) + dev_err(tsseipc->dev, "illegal message class\n"); + kfree(msg); } int tsse_ipc_init(struct pci_dev *pdev) @@ -198,12 +172,18 @@ int tsse_ipc_init(struct pci_dev *pdev) rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL, tsse_ipc_d2h_irqhandler, IRQF_SHARED, "pf-ipc", ipc); + if (rc) { + dev_err(&pdev->dev, "request_threaded_irq failed\n"); + return rc; + } ipc_hw_init(ipc); - ipc_init_msg(ipc); - + rc = ipc_init_msg(ipc); + if (rc) { + dev_err(&pdev->dev, "ipc_init_msg failed\n"); + tsse_ipc_deinit(tdev); + } return rc; } -EXPORT_SYMBOL_GPL(tsse_ipc_init); void tsse_ipc_deinit(void *tdev_t) { @@ -214,8 +194,23 @@ void tsse_ipc_deinit(void *tdev_t) tdev = tdev_t; tsseipc = tdev->ipc; pdev = tsseipc->pdev; - free_irq(pci_irq_vector(pdev, 0), tdev->ipc); - return; + if (tsseipc) { + 
free_irq(pci_irq_vector(pdev, 0), tdev->ipc);
+		tdev->ipc = NULL;
+	}
+}
+int tsse_fw_manual_load_ipc(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_ipc *ipc = tdev->ipc;
+	int rc = -EFAULT;
+
+	if (ipc) {
+		ipc_hw_init(ipc);
+		rc = ipc_init_msg(ipc);
+		if (rc)
+			dev_err(&pdev->dev, "ipc_init_msg failed\n");
+	}
+	return rc;
 }
-EXPORT_SYMBOL_GPL(tsse_ipc_deinit);
diff --git a/drivers/crypto/montage/tsse/tsse_ipc.h b/drivers/crypto/montage/tsse/tsse_ipc.h
index 59dcbf6eafc4..82f8df71c983 100644
--- a/drivers/crypto/montage/tsse/tsse_ipc.h
+++ b/drivers/crypto/montage/tsse/tsse_ipc.h
@@ -38,11 +38,11 @@
 enum IPC_BASIC_CMD {
 	IPC_BASIC_CMD_HOST_INIT = 0x1,
-	IPC_BASIC_CMD_PING = 0x2,
+	IPC_BASIC_CMD_PING = 0x2
 };
 
 enum IPC_BOOT_CMD {
-	IPC_BOOT_CMD_GET_FIRMWARE = 0x1,
+	IPC_BOOT_CMD_GET_FIRMWARE = 0x1
 };
 
 enum IPC_MESSAGE_CLASS {
@@ -62,11 +62,6 @@ struct ipc_header {
 	uint32_t reserved[2];
 };
 
-struct ipc_data {
-	struct ipc_header header;
-	void *i_ptr;
-};
-
 struct ipc_msg {
 	struct ipc_header header;
 	uint32_t i_data[];
@@ -92,11 +87,6 @@ struct ipc_layout {
 	struct msg_info info;
 };
 
-struct tsse_msg {
-	struct list_head list;
-	struct ipc_msg ipc_payload;
-};
-
 struct tsse_ipc {
 	struct device *dev;
 	struct pci_dev *pdev;
@@ -107,6 +97,7 @@
 int tsse_ipc_init(struct pci_dev *pdev);
 void tsse_ipc_deinit(void *tdev);
+int tsse_fw_manual_load_ipc(struct pci_dev *pdev);
 bool check_send_enbit(struct tsse_ipc *tsseipc);
 void notify_device(struct tsse_ipc *tsseipc);
 #endif
diff --git a/drivers/crypto/montage/tsse/tsse_service.c b/drivers/crypto/montage/tsse/tsse_service.c
index 64121a655803..e4be85535b77 100644
--- a/drivers/crypto/montage/tsse/tsse_service.c
+++ b/drivers/crypto/montage/tsse/tsse_service.c
@@ -5,14 +5,13 @@
 * Copyright © 2023 Montage Technology. All rights reserved.
 */
 #include
-#include "tsse_ipc.h"
-#include "tsse_fw_service.h"
+#include "tsse_service.h"
 
 int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg)
 {
 	struct msg_info *info;
 	uint32_t msg_class;
-	int ret;
+	int ret = 0;
 
 	info = (struct msg_info *)msg->i_data;
 	msg_class = info->msg_class;
@@ -25,6 +24,5 @@
 		ret = -EINVAL;
 		break;
 	}
-	return 0;
-
+	return ret;
 }
-- 
Gitee
From 2f069ae5a14800739d735eb4c9b93029a74f2bf6 Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Wed, 11 Aug 2021 11:31:59 +0800
Subject: [PATCH 836/953] anolis: efi: cper: print raw data info of estatus
 for Yitian SoC

ANBZ: #8642

To report the error type more clearly, add the raw data of estatus.
The raw data follows any Generic Error Data Entries. It includes two
levels of information. The top level is a raw data header structure
which includes its type, subtype and the count of ras_reg_common
records. The next level is one or more ras_reg_common structures
providing the detailed, error-specific information. For now, we just
dump the raw data.
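For reference, the two-level layout described above can be walked with a
count-driven loop. The structures below mirror the ones this patch adds to
include/acpi/ghes.h; the standalone dump helper around them is an
illustrative sketch, not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the packed layout this patch adds to include/acpi/ghes.h. */
	#pragma pack(1)
	struct raw_data_header {
		uint32_t signature;	/* 'r' 'a' 'w' 'd' */
		uint8_t type;		/* e.g. 0x41 CORE, 0x43 CMN, 0x44 SMMU */
		uint8_t ras_count;	/* number of ras_reg_common records */
		uint8_t sub_type[6];	/* one sub-type per record, up to 6 */
	};

	struct ras_reg_common {
		uint64_t fr, ctrl, status, addr;
		uint64_t misc0, misc1, misc2, misc3;
	};
	#pragma pack()

	/* Walk the header, then the ras_count records placed right after it. */
	void dump_raw_data(const struct raw_data_header *hdr)
	{
		const struct ras_reg_common *reg =
			(const struct ras_reg_common *)(hdr + 1);
		int i;

		printf("type: 0x%x, ras_count: %d\n", hdr->type, hdr->ras_count);
		for (i = 0; i < hdr->ras_count; i++, reg++)
			printf("sub_type: 0x%x, status: 0x%llx, addr: 0x%llx\n",
			       hdr->sub_type[i],
			       (unsigned long long)reg->status,
			       (unsigned long long)reg->addr);
	}

Note that the sketch indexes by ras_count and steps by whole records; the
kernel macro introduced below initially did not, which a later patch in this
series ("fix raw data register iteration") corrects.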
Signed-off-by: Shuai Xue Reviewed-by: luanshi Reviewed-by: Baolin Wang [Ruidong: use pr_info instead of printk to fix checkpatch warning] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 28 +++++++++++++++++ include/acpi/ghes.h | 60 +++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 35c37f667781..2860a1efb133 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -630,6 +630,9 @@ void cper_estatus_print(const char *pfx, int sec_no = 0; char newpfx[64]; __u16 severity; + struct raw_data_header *r_data_header; + struct ras_reg_common *reg_common; + int sub_record_no = 0; severity = estatus->error_severity; if (severity == CPER_SEV_CORRECTED) @@ -643,6 +646,31 @@ void cper_estatus_print(const char *pfx, cper_estatus_print_section(newpfx, gdata, sec_no); sec_no++; } + + r_data_header = (struct raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + /* + * ONLY processor, CMN, GIC, and SMMU has raw error data which follow + * any Generic Error Data Entries. The raw error data format is vendor + * implementation defined. + */ + if (!r_data_header->ras_count) + return; + + pr_info("%s type: 0x%x, ras_count: %d\n", pfx, r_data_header->type, + r_data_header->ras_count); + + apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) { + pr_info("%s sub_type: 0x%x\n", pfx, + r_data_header->sub_type[sub_record_no]); + pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", + pfx, reg_common->fr, reg_common->ctrl, + reg_common->status, reg_common->addr); + pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", + pfx, reg_common->misc0, reg_common->misc1, + reg_common->misc2, reg_common->misc3); + sub_record_no++; + } } EXPORT_SYMBOL_GPL(cper_estatus_print); diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index be1dd4c1a917..af0fe2873b3a 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -133,4 +133,64 @@ static inline int ghes_notify_sea(void) { return -ENOENT; } struct notifier_block; extern void ghes_register_report_chain(struct notifier_block *nb); extern void ghes_unregister_report_chain(struct notifier_block *nb); + +#pragma pack(1) +struct raw_data_header { + uint32_t signature; /* 'r' 'a' 'w' 'd' */ + uint8_t type; + uint8_t ras_count; + /* one record may have multiple sub-record (up to 6) */ + uint8_t sub_type[6]; +}; + +struct ras_reg_common { + uint64_t fr; + uint64_t ctrl; + uint64_t status; + uint64_t addr; + uint64_t misc0; + uint64_t misc1; + uint64_t misc2; + uint64_t misc3; +}; + +enum ras_type { + ERR_TYPE_GENERIC = 0x40, + ERR_TYPE_CORE = 0x41, + ERR_TYPE_GIC = 0x42, + ERR_TYPE_CMN = 0x43, + ERR_TYPE_SMMU = 0x44, + ERR_TYPE_DDR = 0x50, + ERR_TYPE_PCI = 0x60 +}; +enum cmn_node_type { + NODE_TYPE_DVM = 0x1, + NODE_TYPE_CFG = 0x2, + NODE_TYPE_DTC = 0x3, + NODE_TYPE_HN_I = 0x4, + NODE_TYPE_HN_F = 0x5, + NODE_TYPE_XP = 0x6, + NODE_TYPE_SBSX = 0x7, + NODE_TYPE_MPAM_S = 0x8, + NODE_TYPE_MPAM_NS = 0x9, + NODE_TYPE_RN_I = 0xA, + NODE_TYPE_RN_D = 0xD, + NODE_TYPE_RN_SAM = 0xF, + NODE_TYPE_HN_P = 0x11, + /* Coherent Multichip Link (CML) node types */ + NODE_TYPE_CML_BASE = 0x100, + NODE_TYPE_CXRA = 0x100, + NODE_TYPE_CXHA = 0x101, + NODE_TYPE_CXLA = 0x102, + NODE_TYPE_CCRA = 0x103, + NODE_TYPE_CCHA = 0x104, + NODE_TYPE_CCLA = 0x105, +}; +#pragma pack() + +#define apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) \ + for 
(reg_common = (struct ras_reg_common *)(r_data_header + 1); \
	     (void *)(reg_common) - (void *)(r_data_header + 1) < r_data_header->ras_count; \
	     reg_common = (((void *)(reg_common)) + 1))
+
 #endif /* GHES_H */
-- 
Gitee
From 5e6d1f0a5d7f915146ca25dd1d51e164a472c678 Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Thu, 10 Feb 2022 21:46:47 +0800
Subject: [PATCH 837/953] anolis: efi: cper: print error type string of raw
 data info

ANBZ: #8642

The error type number of raw data info is not human-readable, so print
the error type string.

Signed-off-by: Shuai Xue
Reviewed-by: luanshi
Reviewed-by: Baolin Wang
Signed-off-by: Ruidong Tian
Link: https://gitee.com/anolis/cloud-kernel/pulls/2949
---
 drivers/firmware/efi/cper.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 2860a1efb133..d4559af9705b 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -623,6 +623,20 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
 	pr_err(FW_WARN "error section length is too small\n");
 }
 
+static char *cper_raw_err_type_str(u64 type)
+{
+	switch (type) {
+	case 0x40: return "GENERIC";
+	case 0x41: return "CORE";
+	case 0x42: return "GIC";
+	case 0x43: return "CMN";
+	case 0x44: return "SMMU";
+	case 0x50: return "DDR";
+	case 0x60: return "PCI";
+	default: return "Reserved";
+	}
+}
+
 void cper_estatus_print(const char *pfx,
 			const struct acpi_hest_generic_status *estatus)
 {
@@ -657,7 +671,8 @@ void cper_estatus_print(const char *pfx,
 	if (!r_data_header->ras_count)
 		return;
 
-	pr_info("%s type: 0x%x, ras_count: %d\n", pfx, r_data_header->type,
+	pr_info("%s type: %s (0x%x), ras_count:%d\n", pfx,
+		cper_raw_err_type_str(r_data_header->type), r_data_header->type,
 		r_data_header->ras_count);
 
 	apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) {
-- 
Gitee
From 5dd708307a0540300254d3291d15300996f0d498 Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Tue, 9 Aug 2022 12:58:07 +0800
Subject: [PATCH 838/953] anolis: efi: cper: move Yitian specific raw data
 handling into CONFIG_YITIAN_CPER_RAWDATA

ANBZ: #8642

Raw data is part of estatus and its format is vendor defined. Add a
separate config CONFIG_YITIAN_CPER_RAWDATA to enable or disable raw
data for Yitian 710 and move Yitian-specific raw data handling into
CONFIG_YITIAN_CPER_RAWDATA.

Signed-off-by: Shuai Xue
Link: https://gitee.com/anolis/cloud-kernel/pulls/638
Reviewed-by: Baolin Wang
Reviewed-by: Bixuan Cui
[Ruidong: Modify Kconfig help description to fix checkpatch warning]
Signed-off-by: Ruidong Tian
Link: https://gitee.com/anolis/cloud-kernel/pulls/2949
---
 drivers/firmware/efi/Kconfig | 9 +++++++++
 drivers/firmware/efi/cper.c  | 4 ++++
 include/acpi/ghes.h          | 3 +++
 3 files changed, 16 insertions(+)

diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 138491a4b494..46d1a358aabb 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -301,3 +301,12 @@ config UEFI_CPER_X86
 	bool
 	depends on UEFI_CPER && X86
 	default y
+
+config YITIAN_CPER_RAWDATA
+	bool "Print Yitian custom raw data about platform error info"
+	depends on EFI && ACPI
+	help
+	  Allow printing Yitian custom raw data about platform error info,
+	  including CMN, GIC, SMMU, DDR, etc. It gathers more useful error
+	  information from hardware, which helps to debug and test the RAS
+	  feature.
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d4559af9705b..5909deb0b176 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -623,6 +623,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
 	pr_err(FW_WARN "error section length is too small\n");
 }
 
+#ifdef CONFIG_YITIAN_CPER_RAWDATA
 static char *cper_raw_err_type_str(u64 type)
 {
 	switch (type) {
@@ -636,6 +637,7 @@ static char *cper_raw_err_type_str(u64 type)
 	default: return "Reserved";
 	}
 }
+#endif /* CONFIG_YITIAN_CPER_RAWDATA */
 
 void cper_estatus_print(const char *pfx,
 			const struct acpi_hest_generic_status *estatus)
@@ -661,6 +663,7 @@ void cper_estatus_print(const char *pfx,
 		sec_no++;
 	}
 
+#ifdef CONFIG_YITIAN_CPER_RAWDATA
 	r_data_header = (struct raw_data_header *)((void *)estatus +
 			estatus->raw_data_offset);
 	/*
@@ -686,6 +689,7 @@ void cper_estatus_print(const char *pfx,
 			reg_common->misc2, reg_common->misc3);
 		sub_record_no++;
 	}
+#endif /* CONFIG_YITIAN_CPER_RAWDATA */
 }
 EXPORT_SYMBOL_GPL(cper_estatus_print);
 
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index af0fe2873b3a..04269449927e 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -134,6 +134,7 @@ struct notifier_block;
 extern void ghes_register_report_chain(struct notifier_block *nb);
 extern void ghes_unregister_report_chain(struct notifier_block *nb);
 
+#ifdef CONFIG_YITIAN_CPER_RAWDATA
 #pragma pack(1)
 struct raw_data_header {
 	uint32_t signature; /* 'r' 'a' 'w' 'd' */
@@ -163,6 +164,7 @@ enum ras_type {
 	ERR_TYPE_DDR = 0x50,
 	ERR_TYPE_PCI = 0x60
 };
+
 enum cmn_node_type {
 	NODE_TYPE_DVM = 0x1,
 	NODE_TYPE_CFG = 0x2,
@@ -192,5 +194,6 @@ enum cmn_node_type {
 	for (reg_common = (struct ras_reg_common *)(r_data_header + 1); \
 	     (void *)(reg_common) - (void *)(r_data_header + 1) < r_data_header->ras_count; \
 	     reg_common = (((void *)(reg_common)) + 1))
+#endif /* CONFIG_YITIAN_CPER_RAWDATA */
 
 #endif /* GHES_H */
-- 
Gitee
From 8587abba1fac3a09f00fc607fe106c85da93cf93 Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Wed, 3 Aug 2022 21:13:37 +0800
Subject: [PATCH 839/953] anolis: efi: cper: check Yitian raw data signature

ANBZ: #8642

The raw data supported now applies only to the Yitian 710 SoC; add a
signature check before dumping any info.

Signed-off-by: Shuai Xue
Link: https://gitee.com/anolis/cloud-kernel/pulls/638
Reviewed-by: Baolin Wang
Reviewed-by: Bixuan Cui
Signed-off-by: Ruidong Tian
Link: https://gitee.com/anolis/cloud-kernel/pulls/2949
---
 drivers/firmware/efi/cper.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 5909deb0b176..69f97b273064 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -666,6 +666,16 @@ void cper_estatus_print(const char *pfx,
 	r_data_header = (struct raw_data_header *)((void *)estatus +
 			estatus->raw_data_offset);
+	if (estatus->raw_data_length < sizeof(struct raw_data_header))
+		return;
+
+#define YITIAN_SIGNATURE_16(A, B) ((A) | (B << 8))
+#define YITIAN_SIGNATURE_32(A, B, C, D) \
+	(YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16))
+
+	if (r_data_header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd'))
+		return;
+
 	/*
 	 * ONLY processor, CMN, GIC, and SMMU has raw error data which follow
 	 * any Generic Error Data Entries.
The raw error data format is vendor
-- 
Gitee
From ee6ae59c7c9874679260ea83fbc0c1a621d5f6ca Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Wed, 20 Jul 2022 18:52:27 +0800
Subject: [PATCH 840/953] anolis: efi: cper: refactor raw data handling for
 Yitian

ANBZ: #8642

The raw data format name for Yitian 710 is misleading; rename it with a
yitian prefix first. Refactor the core and uncore raw data handlers
into a separate function so that we can add a DDR raw data handler
easily later. There should be no functional change as a result of this
patch.

Signed-off-by: Shuai Xue
Link: https://gitee.com/anolis/cloud-kernel/pulls/638
Reviewed-by: Baolin Wang
Reviewed-by: Bixuan Cui
Signed-off-by: Ruidong Tian
Link: https://gitee.com/anolis/cloud-kernel/pulls/2949
---
 drivers/firmware/efi/cper.c | 115 +++++++++++++++++++++---------------
 include/acpi/ghes.h         |  16 ++---
 2 files changed, 77 insertions(+), 54 deletions(-)

diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 69f97b273064..bcc509c21388 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -624,19 +624,78 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
 }
 
 #ifdef CONFIG_YITIAN_CPER_RAWDATA
-static char *cper_raw_err_type_str(u64 type)
+static char *yitian_raw_err_type_str(u64 type)
 {
 	switch (type) {
-	case 0x40: return "GENERIC";
-	case 0x41: return "CORE";
-	case 0x42: return "GIC";
-	case 0x43: return "CMN";
-	case 0x44: return "SMMU";
-	case 0x50: return "DDR";
-	case 0x60: return "PCI";
+	case ERR_TYPE_GENERIC: return "GENERIC";
+	case ERR_TYPE_CORE: return "CORE";
+	case ERR_TYPE_GIC: return "GIC";
+	case ERR_TYPE_CMN: return "CMN";
+	case ERR_TYPE_SMMU: return "SMMU";
+	case ERR_TYPE_DDR: return "DDR";
+	case ERR_TYPE_PCI: return "PCI";
 	default: return "Reserved";
 	}
 }
+
+void yitian_platform_raw_data_print(const char *pfx,
+				    struct yitian_raw_data_header *header)
+{
+	struct yitian_ras_common_reg *common_reg;
+	int sub_record_no = 0;
+
+	yitian_estatus_for_each_raw_reg_common(header, common_reg) {
+		pr_info("%s sub_type: 0x%x\n", pfx,
+			header->sub_type[sub_record_no]);
+		pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n",
+			pfx, common_reg->fr, common_reg->ctrl,
+			common_reg->status, common_reg->addr);
+		pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n",
+			pfx, common_reg->misc0, common_reg->misc1,
+			common_reg->misc2, common_reg->misc3);
+		sub_record_no++;
+	}
+}
+
+void yitian_raw_data_print(const char *pfx,
+			   const struct acpi_hest_generic_status *estatus)
+{
+	struct yitian_raw_data_header *header;
+
+	if (estatus->raw_data_length < sizeof(*header))
+		return;
+
+	header = (struct yitian_raw_data_header *)((void *)estatus +
+						   estatus->raw_data_offset);
+
+#define YITIAN_SIGNATURE_16(A, B) ((A) | (B << 8))
+#define YITIAN_SIGNATURE_32(A, B, C, D) \
+	(YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16))
+
+	if (header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd'))
+		return;
+
+	/*
+	 * ONLY processor, CMN, GIC, and SMMU has raw error data which follow
+	 * any Generic Error Data Entries. The raw error data format is vendor
+	 * implementation defined.
+ */ + if (!header->common_reg_nr) + return; + + pr_info("%s type: %s (0x%x), common_reg_nr:%d\n", pfx, + yitian_raw_err_type_str(header->type), header->type, + header->common_reg_nr); + + switch (header->type) { + case ERR_TYPE_CORE: + case ERR_TYPE_GIC: + case ERR_TYPE_CMN: + case ERR_TYPE_SMMU: + yitian_platform_raw_data_print(pfx, header); + break; + } +} #endif /* CONFIG_YITIAN_CPER_RAWDATA */ void cper_estatus_print(const char *pfx, @@ -646,9 +705,6 @@ void cper_estatus_print(const char *pfx, int sec_no = 0; char newpfx[64]; __u16 severity; - struct raw_data_header *r_data_header; - struct ras_reg_common *reg_common; - int sub_record_no = 0; severity = estatus->error_severity; if (severity == CPER_SEV_CORRECTED) @@ -664,41 +720,8 @@ void cper_estatus_print(const char *pfx, } #ifdef CONFIG_YITIAN_CPER_RAWDATA - r_data_header = (struct raw_data_header *)((void *)estatus + - estatus->raw_data_offset); - if (estatus->raw_data_length < sizeof(struct raw_data_header)) - return; - -#define YITIAN_SIGNATURE_16(A, B) ((A) | (B << 8)) -#define YITIAN_SIGNATURE_32(A, B, C, D) \ - (YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16)) - - if (r_data_header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd')) - return; - - /* - * ONLY processor, CMN, GIC, and SMMU has raw error data which follow - * any Generic Error Data Entries. The raw error data format is vendor - * implementation defined. - */ - if (!r_data_header->ras_count) - return; - - pr_info("%s type: %s (0x%x), ras_count:%d\n", pfx, - cper_raw_err_type_str(r_data_header->type), r_data_header->type, - r_data_header->ras_count); - - apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) { - pr_info("%s sub_type: 0x%x\n", pfx, - r_data_header->sub_type[sub_record_no]); - pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", - pfx, reg_common->fr, reg_common->ctrl, - reg_common->status, reg_common->addr); - pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", - pfx, reg_common->misc0, reg_common->misc1, - reg_common->misc2, reg_common->misc3); - sub_record_no++; - } + if (estatus->raw_data_length) + yitian_raw_data_print(pfx, estatus); #endif /* CONFIG_YITIAN_CPER_RAWDATA */ } EXPORT_SYMBOL_GPL(cper_estatus_print); diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 04269449927e..9f236d61521c 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -136,15 +136,15 @@ extern void ghes_unregister_report_chain(struct notifier_block *nb); #ifdef CONFIG_YITIAN_CPER_RAWDATA #pragma pack(1) -struct raw_data_header { +struct yitian_raw_data_header { uint32_t signature; /* 'r' 'a' 'w' 'd' */ uint8_t type; - uint8_t ras_count; + uint8_t common_reg_nr; /* one record may have multiple sub-record (up to 6) */ uint8_t sub_type[6]; }; -struct ras_reg_common { +struct yitian_ras_common_reg { uint64_t fr; uint64_t ctrl; uint64_t status; @@ -155,7 +155,7 @@ struct ras_reg_common { uint64_t misc3; }; -enum ras_type { +enum yitian_ras_type { ERR_TYPE_GENERIC = 0x40, ERR_TYPE_CORE = 0x41, ERR_TYPE_GIC = 0x42, @@ -190,10 +190,10 @@ enum cmn_node_type { }; #pragma pack() -#define apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) \ - for (reg_common = (struct ras_reg_common *)(r_data_header + 1); \ - (void *)(reg_common) - (void *)(r_data_header + 1) < r_data_header->ras_count; \ - reg_common = (((void *)(reg_common)) + 1)) +#define yitian_estatus_for_each_raw_reg_common(header, reg) \ + for (reg = (struct yitian_ras_common_reg *)(header + 1); \ + (void *)(reg) - (void 
*)(header + 1) < header->common_reg_nr; \ + reg = (((void *)(reg)) + 1)) #endif /* CONFIG_YITIAN_CPER_RAWDATA */ #endif /* GHES_H */ -- Gitee From edea075c0cfbe9e42a2b7015d83c34a318f942e7 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 3 Aug 2022 21:23:26 +0800 Subject: [PATCH 841/953] anolis: efi: cper: add DDR raw data support for Yitian ANBZ: #8642 Firmware reports error information for DDR hardware error through ddr_raw_data, which following any Generic Error Data Entries in ACPI Platform Error Interfaces (APEI). The ddr_raw_data describe exception information, including: - exception: source of synchronous or asynchronous exception - system registers (optional): When a hardware error is consumed by core, synchronous external abort exception will be raised. The system registers of core provides the exception information, e.g. ELR_ELX for preferred exception link address, ESR_ELx for exception syndrome. Firmware reports system registers only when synchronous external abort occurs. - ECC registers (optional): A wide range of information about the detected errors can be obtained by reading the ECC error reporting registers from DDR controller. Firmware reports ECC registers only when ECC error occurs. Signed-off-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/638 Reviewed-by: Baolin Wang Reviewed-by: Bixuan Cui [Ruidong: Modify printk to pr_info to fix checkpatch warning] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 49 +++++++++++++++++++++++++++++++++++++ include/acpi/ghes.h | 37 ++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index bcc509c21388..b3c40759fb06 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -657,6 +657,52 @@ void yitian_platform_raw_data_print(const char *pfx, } } +static void yitian_ddr_raw_data_print(const char *pfx, + struct yitian_raw_data_header *header) +{ + struct yitian_ddr_raw_data *data; + + data = (struct yitian_ddr_raw_data *)(header + 1); + + switch (data->ex_type) { + case 0x1: + pr_info("%s Synchronous Exception taken in EL%d\n", pfx, data->el_nr); + break; + case 0x2: + pr_info("%s Interrupt: %d\n", pfx, data->intr); + break; + case 0x3: + pr_info("%s SError\n", pfx); + break; + default: + pr_info("%s Unknown interrupt type\n", pfx); + } + + /* System regs is valid only when it's a synchronous exception */ + if (data->ex_type == 1) { + struct yitian_ddr_sys_reg *sys_regs = &data->sys_regs; + + pr_info("%s ESR: 0x%llx, ELR: 0x%llx, FAR: 0x%llx, SCR: 0x%llx, SCTLR: 0x%llx, LR: 0x%llx\n", + pfx, sys_regs->esr, sys_regs->elr, sys_regs->far, + sys_regs->scr, sys_regs->sctlr, sys_regs->lr); + } + + /* ECC Data is valid only when it's a ECC error */ + if (data->err_type == 1) { + struct yitian_ddr_ecc_data *ecc_data = &data->ecc_data; + + pr_info("%s ECCERRCNT: 0x%x, ECCSTAT: 0x%x, ADVECCSTAT: 0x%x, ECCSYMBOL: 0x%x, ECCERRCNTSTAT: 0x%x, ECCERRCNT0: 0x%x, ECCERRCNT1: 0x%x, ECCCADDR0: 0x%x, ECCCADDR1: 0x%x, ECCCDATA0: 0x%x, ECCCDATA1: 0x%x, ECCUADDR0: 0x%x, ECCUADDR1: 0x%x, ECCUDATA0: 0x%x, ECCUDATA1: 0x%x\n", + pfx, ecc_data->eccerrcnt, ecc_data->eccstat, + ecc_data->adveccstat, ecc_data->eccsymbol, + ecc_data->eccerrcntstat, ecc_data->eccerrcnt0, + ecc_data->eccerrcnt1, ecc_data->ecccaddr0, + ecc_data->ecccaddr1, ecc_data->ecccdata0, + ecc_data->ecccdata1, ecc_data->eccuaddr0, + ecc_data->eccuaddr1, ecc_data->eccudata0, + ecc_data->eccudata1); + } +} + void 
yitian_raw_data_print(const char *pfx, const struct acpi_hest_generic_status *estatus) { @@ -694,6 +740,9 @@ void yitian_raw_data_print, case ERR_TYPE_SMMU: yitian_platform_raw_data_print(pfx, header); break; + case ERR_TYPE_DDR: + yitian_ddr_raw_data_print(pfx, header); + break; } } #endif /* CONFIG_YITIAN_CPER_RAWDATA */ diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 9f236d61521c..a7d853c78da3 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -188,6 +188,43 @@ enum cmn_node_type { NODE_TYPE_CCHA = 0x104, NODE_TYPE_CCLA = 0x105, }; + +struct yitian_ddr_sys_reg { + uint64_t esr; + uint64_t elr; + uint64_t far; + uint64_t scr; + uint64_t sctlr; + uint64_t lr; +}; + +struct yitian_ddr_ecc_data { + uint32_t eccerrcnt; + uint32_t eccstat; + uint32_t adveccstat; + uint32_t eccsymbol; + uint32_t eccerrcntstat; + uint32_t eccerrcnt0; + uint32_t eccerrcnt1; + uint32_t ecccaddr0; + uint32_t ecccaddr1; + uint32_t ecccdata0; + uint32_t ecccdata1; + uint32_t eccuaddr0; + uint32_t eccuaddr1; + uint32_t eccudata0; + uint32_t eccudata1; +}; + +struct yitian_ddr_raw_data { + uint32_t intr; /* interrupt num, valid for interrupt only, for exception intr=0 */ + uint8_t ex_type; /* 1:sync exception 2:interrupt 3:Serror */ + uint8_t el_nr; /* error el, only valid for ex_type==1, 0:el0 1:el1 2:el2 */ + uint8_t err_type; /* 1:ecc 2:CA parity 3:R/W CRC */ + struct yitian_ddr_sys_reg sys_regs; /* Only valid for ex_type==1 */ + struct yitian_ddr_ecc_data ecc_data; /* Only valid for err_type==1 */ +}; + #pragma pack() #define yitian_estatus_for_each_raw_reg_common(header, reg) \ -- Gitee From 86f8509506eb9244cf6def651e5674a51b408aca Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Thu, 13 Oct 2022 16:54:20 +0800 Subject: [PATCH 842/953] anolis: efi: cper: fix raw data register iteration ANBZ: #8642 One raw data record may have multiple sub-records, but the for_each macro currently iterates only the first one. Compare sub_record_no with common_reg_nr when iterating the sub-records.
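For illustration, the fixed iteration is equivalent to the sketch below (names taken from this patch; the loop body is reduced to a single print):

    struct yitian_ras_common_reg *reg;
    int nr = 0;

    /* Advance one whole register block per sub-record, up to common_reg_nr. */
    for (reg = (struct yitian_ras_common_reg *)(header + 1);
         nr < header->common_reg_nr;
         reg++, nr++)
            pr_info("sub_type: 0x%x\n", header->sub_type[nr]);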
Fixes: acb1ce6cffb8 ("anolis: efi: cper: print raw data info of estatus for Yitian SoC") Signed-off-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/767 Reviewed-by: Baolin Wang [Ruidong: Modify printk to pr_info to fix checkpatch warning] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 3 +-- include/acpi/ghes.h | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index b3c40759fb06..c60793101a7d 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -644,7 +644,7 @@ void yitian_platform_raw_data_print(const char *pfx, struct yitian_ras_common_reg *common_reg; int sub_record_no = 0; - yitian_estatus_for_each_raw_reg_common(header, common_reg) { + yitian_estatus_for_each_raw_reg_common(header, common_reg, sub_record_no) { pr_info("%s sub_type: 0x%x\n", pfx, header->sub_type[sub_record_no]); pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", @@ -653,7 +653,6 @@ void yitian_platform_raw_data_print(const char *pfx, pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", pfx, common_reg->misc0, common_reg->misc1, common_reg->misc2, common_reg->misc3); - sub_record_no++; } } diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index a7d853c78da3..82eba57ac423 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -227,10 +227,10 @@ struct yitian_ddr_raw_data { #pragma pack() -#define yitian_estatus_for_each_raw_reg_common(header, reg) \ - for (reg = (struct yitian_ras_common_reg *)(header + 1); \ - (void *)(reg) - (void *)(header + 1) < header->common_reg_nr; \ - reg = (((void *)(reg)) + 1)) +#define yitian_estatus_for_each_raw_reg_common(header, reg, nr) \ + for (reg = (struct yitian_ras_common_reg *)(header + 1); \ + nr < header->common_reg_nr; \ + reg++, nr++) #endif /* CONFIG_YITIAN_CPER_RAWDATA */ #endif /* GHES_H */ -- Gitee From a1fabbd46a8d76eda8182c98ff6eb054a73c757b Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Thu, 1 Dec 2022 19:32:56 +0800 Subject: [PATCH 843/953] anolis: efi: cper: refactor raw data header check to a function ANBZ: #8642 Move the Yitian raw data header sanity check into a separate function, yitian_estatus_check_header(), so that it can be used in GHES error handling later. There should be no functional change as a result of this patch.
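Callers are expected to validate the header before dereferencing the raw data; the intended pattern, sketched from the refactored yitian_raw_data_print():

    if (!yitian_estatus_check_header(estatus))
            return;

    header = (struct yitian_raw_data_header *)((void *)estatus +
                                               estatus->raw_data_offset);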
Signed-off-by: Shuai Xue Signed-off-by: Bixuan Cui Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1019 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/acpi/apei/apei-internal.h | 4 ++++ drivers/firmware/efi/cper.c | 21 +++++++++++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 67c2c3b959e1..448370641d1d 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -131,3 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) int apei_osc_setup(void); #endif + +#ifdef CONFIG_YITIAN_CPER_RAWDATA +bool yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index c60793101a7d..30a62a97ae98 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -702,13 +702,12 @@ static void yitian_ddr_raw_data_print(const char *pfx, } } -void yitian_raw_data_print(const char *pfx, - const struct acpi_hest_generic_status *estatus) +bool yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus) { struct yitian_raw_data_header *header; if (estatus->raw_data_length < sizeof(*header)) - return; + return false; header = (struct yitian_raw_data_header *)((void *)estatus + estatus->raw_data_offset); @@ -718,7 +717,7 @@ void yitian_raw_data_print(const char *pfx, (YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16)) if (header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd')) - return; + return false; /* * ONLY processor, CMN, GIC, and SMMU has raw error data which follow @@ -726,8 +725,22 @@ void yitian_raw_data_print(const char *pfx, * implementation defined. */ if (!header->common_reg_nr) + return false; + + return true; +} + +void yitian_raw_data_print(const char *pfx, + const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + + if (!yitian_estatus_check_header(estatus)) return; + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + pr_info("%s type: %s (0x%x), common_reg_nr:%d\n", pfx, yitian_raw_err_type_str(header->type), header->type, header->common_reg_nr); -- Gitee From 030f3c3909d4754f22be9fb69ba41ffe032d0612 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Thu, 10 Aug 2023 15:58:26 +0800 Subject: [PATCH 844/953] anolis: efi: cper: add ARM64 dependence for YITIAN_CPER_RAWDATA ANBZ: #8642 Add ARM64 dependence for YITIAN_CPER_RAWDATA to avoid it being enabled on other platforms. Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2046 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 46d1a358aabb..b76f9df4885a 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -304,7 +304,7 @@ config UEFI_CPER_X86 config YITIAN_CPER_RAWDATA bool "Print Yitian custom raw data about platform error info" - depends on EFI && ACPI + depends on EFI && ACPI && ARM64 help Allow print Yitian custom raw data about platform error info, including CMN, GIC, SMMU, DDR, etc. 
It gathers more useful error -- Gitee From 64f8bd00db39140b6c2a0c8db64475102c6aebad Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Tue, 31 Jan 2023 12:59:18 +0800 Subject: [PATCH 845/953] anolis: ACPI: APEI: set memory failure flags as MF_ACTION_REQUIRED on synchronous events ANBZ: #8642 There are two major types of uncorrected error (UC): - Action Required: The error is detected and the processor has already consumed the memory. The OS is required to take action (for example, offline the failure page/kill the failure thread) to recover from this uncorrectable error. - Action Optional: The error is detected out of processor execution context. Some data in the memory are corrupted, but the data have not been consumed. The OS may optionally take action to recover from this uncorrectable error. On the X86 platform, we can easily distinguish between these two types based on the MCA Bank. On the arm64 platform, however, the memory failure flags for all UCs whose severity is GHES_SEV_RECOVERABLE are currently set to 0, a.k.a. Action Optional. The main challenge is to tell whether APEI delivers signals synchronously; there is no hint for this in the industry-standard APEI. On Yitian 710, we have all hardware RAS information in the custom DDR raw data: yitian_ddr_raw_data::ex_type indicates whether the signal is a synchronous exception, an interrupt, or an SError. Set the memory failure flags to MF_ACTION_REQUIRED on synchronous events. Signed-off-by: Shuai Xue Signed-off-by: Bixuan Cui Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1124 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/acpi/apei/ghes.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index ab2a82cb1b0b..4f9ee26e01d9 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -673,6 +673,33 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, schedule_work(&entry->work); } +#ifdef CONFIG_YITIAN_CPER_RAWDATA +/* + * Check if the event is synchronous exception by Yitian DDR Raw data + * NOTE: only works for Yitian 710 now + */ +static bool is_sync_event(const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + struct yitian_ddr_raw_data *data; + + if (!yitian_estatus_check_header(estatus)) + return false; + + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + if (header->type != ERR_TYPE_DDR) + return false; + + data = (struct yitian_ddr_raw_data *)(header + 1); + /* 1 for synchronous exception */ + if (data->ex_type == 1) + return true; + + return false; +} +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -685,6 +712,10 @@ static bool ghes_do_proc(struct ghes *ghes, bool sync = is_hest_sync_notify(ghes); sev = ghes_severity(estatus->error_severity); +#ifdef CONFIG_YITIAN_CPER_RAWDATA + if (estatus->raw_data_length) + sync = is_sync_event(estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ apei_estatus_for_each_section(estatus, gdata) { sec_type = (guid_t *)gdata->section_type; sec_sev = ghes_severity(gdata->error_severity); -- Gitee From b6f1a603b929a197dec5225e333af579f4593063 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Tue, 31 Jan 2023 14:03:03 +0800 Subject: [PATCH 846/953] anolis: ACPI: APEI: handle synchronous exceptions in task work ANBZ: #8642 On the Arm64 platform, errors can be signaled by an asynchronous interrupt, e.g.
when an error is detected by a background scrubber, or by a synchronous exception, e.g. when an uncorrected error is consumed. Both synchronous and asynchronous errors are queued and handled by a dedicated kthread in a workqueue. Commit 7f17b4a121d0 ("ACPI: APEI: Kick the memory_failure() queue for synchronous errors") keeps track of whether memory_failure() work was queued, and makes task_work pending to flush out the workqueue so that the work for a synchronous error is processed before returning to user-space. The trick ensures that the corrupted page is unmapped and poisoned. After returning to user-space, the task restarts at the current instruction, which triggers a page fault, and the kernel sends SIGBUS due to VM_FAULT_HWPOISON. Although the task can be killed by the page fault, the memory failure is handled in a kthread context, so the hwpoison-aware mechanisms, e.g. PF_MCE_EARLY early kill, do not work as expected. For example, a hwpoison-aware user-space process like QEMU sets PF_MCE_EARLY through prctl() while initializing its SIGBUS handler. Then the early_kill mode of memory_failure() sends the wrong si_code with the SIGBUS signal: the user-space process actually accessing the corrupt memory location is collected by find_early_kill_thread() and is sent SIGBUS with the BUS_MCEERR_AO si_code instead of BUS_MCEERR_AR. KVM uses the si_code BUS_MCEERR_AO for 'action optional' early notifications, and BUS_MCEERR_AR for 'action required' synchronous/late notifications. To this end, separate synchronous and asynchronous error handling into different paths like the X86 platform does: - task work for synchronous errors. - workqueue for asynchronous errors. The task work function memory_failure_cb() includes three parts: - poison the page and unmap it, then send SIGBUS with the appropriate si_code to the process which is accessing the page in memory failure work. - free the task work struct. - send SIGBUS to current if memory failure fails. Signed-off-by: Shuai Xue Signed-off-by: Bixuan Cui Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1124 [Ruidong: remove unnecessary else] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/acpi/apei/ghes.c | 66 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 4f9ee26e01d9..64b6193e2475 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -483,9 +483,62 @@ static void ghes_kick_task_work(struct callback_head *head) gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); } +/* + * Tasks can handle task_work: + * + * - All user task: run task work before return to user. + */ +static bool should_add_task_work(struct task_struct *task) +{ + if (task->mm) + return true; + + return false; +} + +/** + * struct mce_task_work - for synchronous RAS event + * + * @twork: callback_head for task work + * @pfn: page frame number of corrupted page + * @flags: fine tune action taken + * + * Structure to pass task work to be handled before + * ret_to_user via task_work_add().
+ */ +struct mce_task_work { + struct callback_head twork; + u64 pfn; + int flags; +}; + +static void memory_failure_cb(struct callback_head *twork) +{ + int rc; + struct mce_task_work *twcb = + container_of(twork, struct mce_task_work, twork); + + rc = memory_failure(twcb->pfn, twcb->flags); + kfree(twcb); + + if (!rc) + return; + /* + * -EHWPOISON from memory_failure() means that it already sent SIGBUS + * to the current process with the proper error info, so no need to + * send SIGBUS here again. + */ + if (rc == -EHWPOISON) + return; + + pr_err("Memory error not recovered"); + force_sig(SIGBUS); +} + static bool ghes_do_memory_failure(u64 physical_addr, int flags) { unsigned long pfn; + struct mce_task_work *twcb; if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) return false; @@ -498,7 +551,20 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) return false; } + if (flags == MF_ACTION_REQUIRED && should_add_task_work(current)) { + twcb = kmalloc(sizeof(*twcb), GFP_ATOMIC); + if (!twcb) + return false; + + twcb->pfn = pfn; + twcb->flags = flags; + init_task_work(&twcb->twork, memory_failure_cb); + task_work_add(current, &twcb->twork, TWA_RESUME); + return false; + } + memory_failure_queue(pfn, flags); + return true; } -- Gitee From e328075dd5c180adb19b1b72c344aca6f390c832 Mon Sep 17 00:00:00 2001 From: Yao Hongbo Date: Mon, 7 Mar 2022 14:11:47 +0800 Subject: [PATCH 847/953] anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer ANBZ: #8642 On the Alibaba yitian710 SoC, the hardware always clears the PCIe config space and some key registers while resetting the secondary bus. As a result, the OS cannot recover from the fatal PCIe error, which ultimately causes an unexpected system error. Luckily, it seems a simple save/restore of these registers during the bus reset can fix the issue.
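The resulting reset sequence, reduced to its essentials (a sketch of the reworked pci_bridge_secondary_bus_reset(); both helpers are no-ops unless the quirk has set dev->broken_bus_reset):

    struct pci_saved_regs saved = { };

    pci_save_yitian710_regs(dev, &saved);
    pcibios_reset_secondary_bus(dev);    /* hot reset, the link goes down */
    rc = pci_bridge_wait_for_secondary_bus(dev, "bus reset");
    pci_restore_yitian710_regs(dev, &saved);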
Signed-off-by: Yao Hongbo Reviewed-by: Baolin Wang Reviewed-by: luanshi Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/pci/pci.c | 70 +++++++++++++++++++++++++++++++++++++++++++- drivers/pci/quirks.c | 15 ++++++++++ include/linux/pci.h | 11 +++++++ 3 files changed, 95 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d278d56824b1..a057967bcf27 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5194,6 +5194,65 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) PCIE_RESET_READY_POLL_MS - delay); } +static void pci_save_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + int i; + u16 ctrl, ctrl2; + + /* if not yitian 710, should return here */ + if (!dev->broken_bus_reset) + return; + + /* save pcie type1 config space header*/ + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); + + pci_read_config_word(dev, PCI_EXP_DEVCTL, &ctrl); + pci_read_config_word(dev, PCI_EXP_DEVCTL2, &ctrl2); + + saved->mps = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; + saved->mrrs = (ctrl & PCI_EXP_DEVCTL_READRQ) >> 12; + saved->comp_timeout_val = ctrl2 & PCI_EXP_DEVCTL2_COMP_TIMEOUT; + saved->comp_timeout_dis = (ctrl2 & PCI_EXP_DEVCTL2_COMP_TMOUT_DIS) >> 4; + if (dev->acs_cap) + pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + &saved->acs_cap_ctrl); + + pci_read_config_dword(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl_status); +} + +static void pci_restore_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + u16 regval; + + if (!dev->broken_bus_reset) + return; + + /* restore pcie type1 config space header */ + pci_restore_config_space_range(dev, 0, 15, 0, false); + + regval = (saved->mps) << 5; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_PAYLOAD, regval); + regval = (saved->mrrs) << 12; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, regval); + regval = saved->comp_timeout_val; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, regval); + regval = (saved->comp_timeout_dis) << 4; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, regval); + + if (dev->acs_cap) + pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + saved->acs_cap_ctrl); + + pci_write_config_dword(dev, PCI_EXP_SLTCTL, saved->slot_ctrl_status); +} + void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; @@ -5226,9 +5285,18 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) */ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { + int rc; + struct pci_saved_regs saved = { }; + + /* save key regs for yitian710 during bus rest*/ + pci_save_yitian710_regs(dev, &saved); + pcibios_reset_secondary_bus(dev); + rc = pci_bridge_wait_for_secondary_bus(dev, "bus reset"); - return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); + /* restore regs for yitian710*/ + pci_restore_yitian710_regs(dev, &saved); + return rc; } EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index cffdd0e0b90b..178d8bb1ec8b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -6255,3 +6255,18 @@ static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev) pdev->d3cold_delay = 1000; } DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec); +/* + * On Alibaba yitian710 Soc, the Hardware does always clear pcie config space + * and 
some key registers between resetting the secondary bus. This results in + * the OS cannot recover the fatal pcie error, which causes unexpected system + * error finally. + * + * Luckily, it seems a simple save/restore of these regs during the bus reset + * can fix the issues. + */ +static void quirk_save_yitian710_regs(struct pci_dev *dev) +{ + dev->broken_bus_reset = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ALIBABA, 0x8000, + PCI_CLASS_BRIDGE_PCI, 8, quirk_save_yitian710_regs); diff --git a/include/linux/pci.h b/include/linux/pci.h index f9a04587e4a9..94b9fca514a4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -312,6 +312,16 @@ struct pci_vpd { u8 cap; }; +/* The structure describes the regs to be saved for yitian710 SoC. */ +struct pci_saved_regs { + u8 mps; + u8 mrrs; + u8 comp_timeout_val; + u8 comp_timeout_dis; + u32 acs_cap_ctrl; + u32 slot_ctrl_status; /* should be the last register to restore */ +}; + struct irq_affinity; struct pcie_link_state; struct pci_sriov; @@ -465,6 +475,7 @@ struct pci_dev { unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */ + unsigned int broken_bus_reset:1; /* Abnormal bus reset */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ -- Gitee From cdf182dc118f7e20928f7432dd50dd98f0b48f44 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 1 Mar 2023 13:27:13 +0800 Subject: [PATCH 848/953] anolis: pci: fix quirk for Yitian 710 to support AER fatal error recovery ANBZ: #8642 When a fatal error has been detected, the AER driver will reset the PCIe physical link to recover the unreliable link. It is a hot reset triggered by setting secondary bus reset (Bit 6) in the bridge control register, resulting in a link down. (NOTE: this type of link down will not be reported as a surprise link down error) However, on the Alibaba Yitian 710 SoC, the hardware clears the PCIe config space and some key registers after a link down, preventing the OS from recovering the error and resulting in an unexpected SError. Commit ae314b6cdbc3 ("anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer") adds a quirk to address the SError problem, but fatal errors are still not recovered. The root cause is that the cleared registers are not saved/restored correctly. To fix this issue, pcie_capability_read_word() and similar interfaces are used to perform the save/restore procedures. In addition, the Root Error Command register (offset 2Ch in the Advanced Error Reporting Extended Capability) is also cleared, so the save/restore procedure is applied to it as well.
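The distinction matters because PCI_EXP_DEVCTL and friends are offsets relative to the PCI Express capability, not absolute config-space offsets; a sketch of the broken versus fixed read:

    u16 ctrl;

    /* Broken: treats the capability-relative offset as an absolute one. */
    pci_read_config_word(dev, PCI_EXP_DEVCTL, &ctrl);

    /* Fixed: resolves the capability base before reading. */
    pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctrl);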
Fixes: f89afed29adb ("anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer") Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1508Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/1384 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/pci/pci.c | 44 +++++++++++++++++++++----------------------- include/linux/pci.h | 10 +++++----- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a057967bcf27..9bb054ae6094 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5198,7 +5198,6 @@ static void pci_save_yitian710_regs(struct pci_dev *dev, struct pci_saved_regs *saved) { int i; - u16 ctrl, ctrl2; /* if not yitian 710, should return here */ if (!dev->broken_bus_reset) @@ -5208,49 +5207,48 @@ static void pci_save_yitian710_regs(struct pci_dev *dev, for (i = 0; i < 16; i++) pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); - pci_read_config_word(dev, PCI_EXP_DEVCTL, &ctrl); - pci_read_config_word(dev, PCI_EXP_DEVCTL2, &ctrl2); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &saved->dev_ctrl); + pcie_capability_read_word(dev, PCI_EXP_RTCTL, &saved->root_ctrl); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &saved->dev_ctrl2); - saved->mps = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; - saved->mrrs = (ctrl & PCI_EXP_DEVCTL_READRQ) >> 12; - saved->comp_timeout_val = ctrl2 & PCI_EXP_DEVCTL2_COMP_TIMEOUT; - saved->comp_timeout_dis = (ctrl2 & PCI_EXP_DEVCTL2_COMP_TMOUT_DIS) >> 4; if (dev->acs_cap) pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, &saved->acs_cap_ctrl); + if (dev->aer_cap) + pci_read_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + &saved->root_err_cmd); - pci_read_config_dword(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl_status); + pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl); } static void pci_restore_yitian710_regs(struct pci_dev *dev, struct pci_saved_regs *saved) { - u16 regval; - if (!dev->broken_bus_reset) return; /* restore pcie type1 config space header */ pci_restore_config_space_range(dev, 0, 15, 0, false); - regval = (saved->mps) << 5; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_PAYLOAD, regval); - regval = (saved->mrrs) << 12; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_READRQ, regval); - regval = saved->comp_timeout_val; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_COMP_TIMEOUT, regval); - regval = (saved->comp_timeout_dis) << 4; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, regval); + /* + * restore Device Control, Root Control Register and Device Control 2 + * in PCI Express Capability + */ + pcie_capability_write_word(dev, PCI_EXP_DEVCTL, saved->dev_ctrl); + pcie_capability_write_word(dev, PCI_EXP_RTCTL, saved->root_ctrl); + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, saved->dev_ctrl2); + /* restore ACS Capability Register */ if (dev->acs_cap) pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, saved->acs_cap_ctrl); + /* restore AER Root Error Command Register */ + if (dev->aer_cap) + pci_write_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + saved->root_err_cmd); - pci_write_config_dword(dev, PCI_EXP_SLTCTL, saved->slot_ctrl_status); + /* restore Slot Control Register */ + pcie_capability_write_word(dev, PCI_EXP_SLTCTL, saved->slot_ctrl); } void pci_reset_secondary_bus(struct pci_dev 
*dev) diff --git a/include/linux/pci.h b/include/linux/pci.h index 94b9fca514a4..a15cf1de3937 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -314,12 +314,12 @@ struct pci_vpd { /* The structure describes the regs to be saved for yitian710 SoC. */ struct pci_saved_regs { - u8 mps; - u8 mrrs; - u8 comp_timeout_val; - u8 comp_timeout_dis; + u16 dev_ctrl; + u16 dev_ctrl2; u32 acs_cap_ctrl; - u32 slot_ctrl_status; /* should be the last register to restore */ + u32 root_err_cmd; + u16 root_ctrl; + u16 slot_ctrl; /* should be the last register to restore */ }; struct irq_affinity; -- Gitee From 1132b26cfd2eeba42dc66271f6c8c9012722b5ab Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:32 +0800 Subject: [PATCH 849/953] uaccess: add generic fallback version of copy_mc_to_user() ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. x86/powerpc already have their own implementations of copy_mc_to_user(); add a generic fallback in include/linux/uaccess.h to prepare for other architectures to enable CONFIG_ARCH_HAS_COPY_MC. Signed-off-by: Tong Tiangen Acked-by: Michael Ellerman Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/x86/include/asm/uaccess.h | 1 + include/linux/uaccess.h | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 237dc8cdd12b..75d860c4dc3b 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -497,6 +497,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len); unsigned long __must_check copy_mc_to_user(void __user *to, const void *from, unsigned len); +#define copy_mc_to_user copy_mc_to_user #endif /* diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 3064314f4832..550287c92990 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -205,6 +205,15 @@ copy_mc_to_kernel(void *dst, const void *src, size_t cnt) } #endif +#ifndef copy_mc_to_user +static inline unsigned long __must_check +copy_mc_to_user(void *dst, const void *src, size_t cnt) +{ + check_object_size(src, cnt, true); + return raw_copy_to_user(dst, src, cnt); +} +#endif + static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; -- Gitee From 6381b5e525d4ad0aab95b66058cc860879950504 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:33 +0800 Subject: [PATCH 850/953] arm64: add support for machine check error safe ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. For the arm64 kernel, when it processes hardware memory errors for synchronous notifications (do_sea()), if the error is consumed within the kernel, the current handling is to panic, which is not optimal. Take uaccess for example: if the uaccess operation fails due to a memory error, only the user process is affected, so killing the user process and isolating the corrupt page is a better choice. This patch only enables the machine check error framework and adds an exception fixup before the kernel panic in do_sea().
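In pseudo-flow, the do_sea() change amounts to the following (a condensed sketch; the full arm64_do_kernel_sea() in the diff also sets the thread ESR and returns early for kthreads):

    if (!user_mode(regs) && apei_claim_sea(regs) >= 0 &&
        fixup_exception_mc(regs)) {
            /* Recoverable: signal the task instead of panicking. */
            arm64_force_sig_fault(inf->sig, inf->code, siaddr,
                    "Uncorrected memory error on access to user memory\n");
            return 0;
    }
    arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);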
Signed-off-by: Tong Tiangen Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/extable.h | 1 + arch/arm64/mm/extable.c | 16 ++++++++++++++++ arch/arm64/mm/fault.c | 29 ++++++++++++++++++++++++++++- 4 files changed, 46 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 8d631f7ba66d..bf872dd6b9c4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -21,6 +21,7 @@ config ARM64 select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CURRENT_STACK_POINTER + select ARCH_HAS_COPY_MC if ACPI_APEI_GHES select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 72b0e71cc3de..f80ebd0addfd 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex, #endif /* !CONFIG_BPF_JIT */ bool fixup_exception(struct pt_regs *regs); +bool fixup_exception_mc(struct pt_regs *regs); #endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 228d681a8715..478e639f8680 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -76,3 +76,19 @@ bool fixup_exception(struct pt_regs *regs) BUG(); } + +bool fixup_exception_mc(struct pt_regs *regs) +{ + const struct exception_table_entry *ex; + + ex = search_exception_tables(instruction_pointer(regs)); + if (!ex) + return false; + + /* + * This is not complete, More Machine check safe extable type can + * be processed here. + */ + + return false; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2e5d1e238af9..a6d1c333719f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -728,6 +728,31 @@ static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) return 1; /* "fault" */ } +static bool arm64_do_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, int code) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC)) + return false; + + if (user_mode(regs)) + return false; + + if (apei_claim_sea(regs) < 0) + return false; + + if (!fixup_exception_mc(regs)) + return false; + + if (current->flags & PF_KTHREAD) + return true; + + set_thread_esr(0, esr); + arm64_force_sig_fault(sig, code, addr, + "Uncorrected memory error on access to user memory\n"); + + return true; +} + static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -753,7 +778,9 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) */ siaddr = untagged_addr(far); } - arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code)) + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } -- Gitee From 7398d01b5b254198eeb5c35f61fdf800e7b77ddd Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:34 +0800 Subject: [PATCH 851/953] arm64: add uaccess to machine check safe ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. 
If a user process's memory access fails due to a hardware memory error, only the relevant processes are affected, so it is more reasonable to kill the user process and isolate the corrupt page than to panic the kernel. Signed-off-by: Tong Tiangen Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/arm64/mm/extable.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 478e639f8680..28ec35e3d210 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -85,10 +85,10 @@ bool fixup_exception_mc(struct pt_regs *regs) if (!ex) return false; - /* - * This is not complete, More Machine check safe extable type can - * be processed here. - */ + switch (ex->type) { + case EX_TYPE_UACCESS_ERR_ZERO: + return ex_handler_uaccess_err_zero(ex, regs); + } return false; } -- Gitee From 2055a51b9cb14c0758a093dd33abdd691cdc48f2 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:36 +0800 Subject: [PATCH 852/953] arm64: support copy_mc_[user]_highpage() ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. Currently, many scenarios that can tolerate memory errors when copying a page have been supported in the kernel [1][2][3], all of which are implemented by copy_mc_[user]_highpage(); arm64 should also support this mechanism. Due to MTE, arm64 needs its own copy_mc_[user]_highpage() architecture implementation; the macros __HAVE_ARCH_COPY_MC_HIGHPAGE and __HAVE_ARCH_COPY_MC_USER_HIGHPAGE have been added to control it. Add a new helper, copy_mc_page(), which provides a machine-check-safe page copy implementation. The copy_mc_page() in copy_mc_page.S largely borrows from copy_page() in copy_page.S; the main difference is that copy_mc_page() adds an extable entry to every load/store instruction to support machine check safety. Add a new extable type, EX_TYPE_COPY_MC_PAGE_ERR_ZERO, which is used in copy_mc_page().
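To a caller, the machine-check outcome surfaces as a plain error code; a usage sketch consistent with the arm64 implementation below (dst and src stand for a hypothetical caller's pages, and the recovery policy belongs to the caller, as in the scenarios cited in [1][2][3]):

    if (copy_mc_highpage(dst, src))
            return -EFAULT;    /* poison consumed while copying, dst is unusable */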
[1]a873dfe1032a ("mm, hwpoison: try to recover from copy-on write faults") [2]5f2500b93cc9 ("mm/khugepaged: recover from poisoned anonymous memory") [3]6b970599e807 ("mm: hwpoison: support recovery from ksm_might_need_to_copy()") Signed-off-by: Tong Tiangen Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/arm64/include/asm/asm-extable.h | 15 +++++ arch/arm64/include/asm/assembler.h | 4 ++ arch/arm64/include/asm/mte.h | 5 ++ arch/arm64/include/asm/page.h | 10 ++++ arch/arm64/lib/Makefile | 2 + arch/arm64/lib/copy_mc_page.S | 89 ++++++++++++++++++++++++++++ arch/arm64/lib/mte.S | 27 +++++++++ arch/arm64/mm/copypage.c | 59 ++++++++++++++++-- arch/arm64/mm/extable.c | 7 ++- include/linux/highmem.h | 4 ++ 10 files changed, 213 insertions(+), 9 deletions(-) create mode 100644 arch/arm64/lib/copy_mc_page.S diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h index 980d1dd8e1a3..819044fefbe7 100644 --- a/arch/arm64/include/asm/asm-extable.h +++ b/arch/arm64/include/asm/asm-extable.h @@ -10,6 +10,7 @@ #define EX_TYPE_UACCESS_ERR_ZERO 2 #define EX_TYPE_KACCESS_ERR_ZERO 3 #define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4 +#define EX_TYPE_COPY_MC_PAGE_ERR_ZERO 5 /* Data fields for EX_TYPE_UACCESS_ERR_ZERO */ #define EX_DATA_REG_ERR_SHIFT 0 @@ -51,6 +52,16 @@ #define _ASM_EXTABLE_UACCESS(insn, fixup) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr) +#define _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, err, zero) \ + __ASM_EXTABLE_RAW(insn, fixup, \ + EX_TYPE_COPY_MC_PAGE_ERR_ZERO, \ + ( \ + EX_DATA_REG(ERR, err) | \ + EX_DATA_REG(ZERO, zero) \ + )) + +#define _ASM_EXTABLE_COPY_MC_PAGE(insn, fixup) \ + _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, wzr, wzr) /* * Create an exception table entry for uaccess `insn`, which will branch to `fixup` * when an unhandled fault is taken. @@ -59,6 +70,10 @@ _ASM_EXTABLE_UACCESS(\insn, \fixup) .endm + .macro _asm_extable_copy_mc_page, insn, fixup + _ASM_EXTABLE_COPY_MC_PAGE(\insn, \fixup) + .endm + /* * Create an exception table entry for `insn` if `fixup` is provided. Otherwise * do nothing. diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 376a980f2bad..547ab2f85888 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -154,6 +154,10 @@ lr .req x30 // link register #define CPU_LE(code...) code #endif +#define CPY_MC(l, x...) \ +9999: x; \ + _asm_extable_copy_mc_page 9999b, l + /* * Define a macro that constructs a 64-bit value by concatenating two * 32-bit registers. 
Note that on big endian systems the order of the diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 4cedbaa16f41..79474232d413 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -93,6 +93,7 @@ void mte_zero_clear_page_tags(void *addr); void mte_sync_tags(pte_t pte); void mte_copy_page_tags(void *kto, const void *kfrom); void mte_thread_init_user(void); +int mte_copy_mc_page_tags(void *kto, const void *kfrom); void mte_thread_switch(struct task_struct *next); void mte_cpu_setup(void); void mte_suspend_enter(void); @@ -131,6 +132,10 @@ static inline void mte_copy_page_tags(void *kto, const void *kfrom) static inline void mte_thread_init_user(void) { } +static inline int mte_copy_mc_page_tags(void *kto, const void *kfrom) +{ + return 0; +} static inline void mte_thread_switch(struct task_struct *next) { } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 2312e6ee595f..62bdc843e3e7 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -36,6 +36,16 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, void tag_clear_highpage(struct page *to); #define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE +#ifdef CONFIG_ARCH_HAS_COPY_MC +int copy_mc_page(void *to, const void *from); +int copy_mc_highpage(struct page *to, struct page *from); +#define __HAVE_ARCH_COPY_MC_HIGHPAGE + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_MC_USER_HIGHPAGE +#endif + #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 29490be2546b..a2fd865b816d 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -15,6 +15,8 @@ endif lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o +lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc_page.o + obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/copy_mc_page.S b/arch/arm64/lib/copy_mc_page.S new file mode 100644 index 000000000000..656d831ef4b8 --- /dev/null +++ b/arch/arm64/lib/copy_mc_page.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Copy a page from src to dest (both are page aligned) with machine check + * + * Parameters: + * x0 - dest + * x1 - src + * Returns: + * x0 - Return 0 if copy success, or -EFAULT if anything goes wrong + * while copying. + */ +SYM_FUNC_START(__pi_copy_mc_page) +alternative_if ARM64_HAS_NO_HW_PREFETCH + // Prefetch three cache lines ahead. 
+ prfm pldl1strm, [x1, #128] + prfm pldl1strm, [x1, #256] + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #256 + add x1, x1, #128 +1: + tst x0, #(PAGE_SIZE - 1) + +alternative_if ARM64_HAS_NO_HW_PREFETCH + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #128 + add x1, x1, #128 + + b.ne 1b + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) + + mov x0, #0 + ret + +9998: mov x0, #-EFAULT + ret + +SYM_FUNC_END(__pi_copy_mc_page) +SYM_FUNC_ALIAS(copy_mc_page, __pi_copy_mc_page) +EXPORT_SYMBOL(copy_mc_page) diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index 5018ac03b6bf..2b748e83f6cf 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -80,6 +80,33 @@ SYM_FUNC_START(mte_copy_page_tags) ret SYM_FUNC_END(mte_copy_page_tags) +/* + * Copy the tags from the source page to the destination one wiht machine check safe + * x0 - address of the destination page + * x1 - address of the source page + * Returns: + * x0 - Return 0 if copy success, or + * -EFAULT if anything goes wrong while copying. + */ +SYM_FUNC_START(mte_copy_mc_page_tags) + mov x2, x0 + mov x3, x1 + multitag_transfer_size x5, x6 +1: +CPY_MC(2f, ldgm x4, [x3]) +CPY_MC(2f, stgm x4, [x2]) + add x2, x2, x5 + add x3, x3, x5 + tst x2, #(PAGE_SIZE - 1) + b.ne 1b + + mov x0, #0 + ret + +2: mov x0, #-EFAULT + ret +SYM_FUNC_END(mte_copy_mc_page_tags) + /* * Read tags from a user buffer (one tag per byte) and set the corresponding * tags at the given kernel address. Used by PTRACE_POKEMTETAGS. 
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index a7bb20055ce0..b062c925daa4 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -14,6 +14,21 @@ #include #include +static int do_mte(struct page *to, struct page *from, void *kto, void *kfrom, bool mc) +{ + int ret = 0; + + if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { + set_bit(PG_mte_tagged, &to->flags); + if (mc) + ret = mte_copy_mc_page_tags(kto, kfrom); + else + mte_copy_page_tags(kto, kfrom); + } + + return ret; +} + void copy_highpage(struct page *to, struct page *from) { void *kto = page_address(to); @@ -24,12 +39,7 @@ void copy_highpage(struct page *to, struct page *from) if (kasan_hw_tags_enabled()) page_kasan_tag_reset(to); - if (system_supports_mte() && page_mte_tagged(from)) { - /* It's a new page, shouldn't have been tagged yet */ - WARN_ON_ONCE(!try_page_mte_tagging(to)); - mte_copy_page_tags(kto, kfrom); - set_page_mte_tagged(to); - } + do_mte(to, from, kto, kfrom, false); } EXPORT_SYMBOL(copy_highpage); @@ -40,3 +50,40 @@ void copy_user_highpage(struct page *to, struct page *from, flush_dcache_page(to); } EXPORT_SYMBOL_GPL(copy_user_highpage); + +#ifdef CONFIG_ARCH_HAS_COPY_MC +/* + * Return -EFAULT if anything goes wrong while copying page or mte. + */ +int copy_mc_highpage(struct page *to, struct page *from) +{ + void *kto = page_address(to); + void *kfrom = page_address(from); + int ret; + + ret = copy_mc_page(kto, kfrom); + if (ret) + return -EFAULT; + + ret = do_mte(to, from, kto, kfrom, true); + if (ret) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(copy_mc_highpage); + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + int ret; + + ret = copy_mc_highpage(to, from); + + if (!ret) + flush_dcache_page(to); + + return ret; +} +EXPORT_SYMBOL_GPL(copy_mc_user_highpage); +#endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 28ec35e3d210..bdc81518d207 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -16,7 +16,7 @@ get_ex_fixup(const struct exception_table_entry *ex) return ((unsigned long)&ex->fixup + ex->fixup); } -static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, +static bool ex_handler_fixup_err_zero(const struct exception_table_entry *ex, struct pt_regs *regs) { int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); @@ -69,7 +69,7 @@ bool fixup_exception(struct pt_regs *regs) return ex_handler_bpf(ex, regs); case EX_TYPE_UACCESS_ERR_ZERO: case EX_TYPE_KACCESS_ERR_ZERO: - return ex_handler_uaccess_err_zero(ex, regs); + return ex_handler_fixup_err_zero(ex, regs); case EX_TYPE_LOAD_UNALIGNED_ZEROPAD: return ex_handler_load_unaligned_zeropad(ex, regs); } @@ -87,7 +87,8 @@ bool fixup_exception_mc(struct pt_regs *regs) switch (ex->type) { case EX_TYPE_UACCESS_ERR_ZERO: - return ex_handler_uaccess_err_zero(ex, regs); + case EX_TYPE_COPY_MC_PAGE_ERR_ZERO: + return ex_handler_fixup_err_zero(ex, regs); } return false; diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 75607d4ba26c..bf4ea9f2f457 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -371,19 +371,23 @@ static inline int copy_mc_highpage(struct page *to, struct page *from) return ret; } #else +#ifndef __HAVE_ARCH_COPY_MC_USER_HIGHPAGE static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { copy_user_highpage(to, from, vaddr, vma); return 0; } +#endif +#ifndef 
__HAVE_ARCH_COPY_MC_HIGHPAGE static inline int copy_mc_highpage(struct page *to, struct page *from) { copy_highpage(to, from); return 0; } #endif +#endif static inline void memcpy_page(struct page *dst_page, size_t dst_off, struct page *src_page, size_t src_off, -- Gitee From 8b9386a2c418cebbc596a05aeafd928a9094575d Mon Sep 17 00:00:00 2001 From: chench00 Date: Tue, 2 Apr 2024 11:01:42 +0800 Subject: [PATCH 853/953] anolis: crypto: command co-processor: Add another mailbox interrupt support for PSP sending command to X86 ANBZ: #8670 The existing kernel supports an interrupt only for the mailbox interface by which X86 sends commands to the PSP and the PSP acks, e.g. the SEV commands. However, some PSP-based security modules in the Hygon CPU, such as TPCM and TDM (Trusted Dynamic Measuring), need to send commands/notifications proactively to the X86 core via an interrupt and a second mailbox interface. Similar to the existing one, the second mailbox consists of a 32-bit command register and two 32-bit data registers. The PSP interrupt handling needs to add support for this interrupt; in addition, to support user-defined command handlers, a callback registration function is also provided. Up to 16 command callbacks are supported, indexed by command ID. Currently, command ID 0 is assigned to TPCM and 1 to TDM, while others are reserved. Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 7 +++ drivers/crypto/ccp/psp-dev.c | 106 ++++++++++++++++++++++++++++++++++- drivers/crypto/ccp/psp-dev.h | 5 ++ drivers/crypto/ccp/sp-dev.h | 5 ++ drivers/crypto/ccp/sp-pci.c | 16 +++++- include/linux/psp-sev.h | 10 ++++ 6 files changed, 147 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 9d5d3312f8e3..d62d628fef20 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -53,6 +53,13 @@ config HYGON_GM help Hygon GM ccp driver +config HYGON_PSP2CPU_CMD + bool "Hygon PSP2CPU Command Interface" + default y + depends on CRYPTO_DEV_SP_PSP + help + Hygon PSP2CPU Command Support + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index c110ae79d93f..47de733084b1 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -137,6 +137,102 @@ static irqreturn_t psp_irq_handler(int irq, void *data) return IRQ_HANDLED; } +#ifdef CONFIG_HYGON_PSP2CPU_CMD +static DEFINE_SPINLOCK(p2c_notifier_lock); +static p2c_notifier_t p2c_notifiers[P2C_NOTIFIERS_MAX] = {NULL}; +int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (cmd_id < P2C_NOTIFIERS_MAX && !p2c_notifiers[cmd_id]) { + p2c_notifiers[cmd_id] = notifier; + ret = 0; + } + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_cmd_notifier); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (cmd_id < P2C_NOTIFIERS_MAX && p2c_notifiers[cmd_id] == notifier) { + p2c_notifiers[cmd_id] = NULL; + ret = 0; + } + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_unregister_cmd_notifier); + +#define PSP2CPU_MAX_LOOP 100
+static irqreturn_t psp_irq_handler_hygon(int irq, void *data) +{ + struct psp_device *psp = data; + struct sev_device *sev = psp->sev_irq_data; + unsigned int status; + int reg; + unsigned long flags; + int count = 0; + uint32_t p2c_cmd; + uint32_t p2c_lo_data; + uint32_t p2c_hi_data; + uint64_t p2c_data; + + /* Read the interrupt status: */ + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + + while (status && (count++ < PSP2CPU_MAX_LOOP)) { + /* Clear the interrupt status by writing the same value we read. */ + iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); + + /* Check if it is command completion: */ + if (status & SEV_CMD_COMPLETE) { + /* Check if it is SEV command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); + if (reg & PSP_CMDRESP_RESP) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } + } + + if (status & PSP_X86_CMD) { + /* Check if it is P2C command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->p2c_cmdresp_reg); + if (!(reg & PSP_CMDRESP_RESP)) { + p2c_lo_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_lo_reg); + p2c_hi_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_hi_reg); + p2c_data = (((uint64_t)(p2c_hi_data) << 32) + + ((uint64_t)(p2c_lo_data))); + p2c_cmd = (uint32_t)(reg & SEV_CMDRESP_IOC); + if (p2c_cmd < P2C_NOTIFIERS_MAX) { + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (p2c_notifiers[p2c_cmd]) + p2c_notifiers[p2c_cmd](p2c_cmd, p2c_data); + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + } + + reg |= PSP_CMDRESP_RESP; + iowrite32(reg, psp->io_regs + psp->vdata->p2c_cmdresp_reg); + } + } + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + } + + return IRQ_HANDLED; +} +#endif + static void hygon_fixup_psp_caps(struct psp_device *psp) { if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) @@ -599,7 +695,15 @@ int psp_dev_init(struct sp_device *sp) } /* Request an irq */ - ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + if (pdev->vendor == PCI_VENDOR_ID_HYGON) { +#ifdef CONFIG_HYGON_PSP2CPU_CMD + ret = sp_request_psp_irq(psp->sp, psp_irq_handler_hygon, psp->name, psp); +#else + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); +#endif + } else { + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + } if (ret) { dev_err(dev, "psp: unable to allocate an IRQ\n"); goto e_err; diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index b0a7bf42e552..694bb3faf8be 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -33,6 +33,11 @@ #define MAX_PSP_NAME_LEN 16 +#ifdef CONFIG_HYGON_PSP2CPU_CMD +#define PSP_X86_CMD BIT(2) +#define P2C_NOTIFIERS_MAX 16 +#endif + extern struct psp_device *psp_master; typedef void (*psp_irq_handler_t)(int, void *, unsigned int); diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 2329ad524b49..d04d9743b680 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -76,6 +76,11 @@ struct psp_vdata { const unsigned int intsts_reg; const unsigned int bootloader_info_reg; const unsigned int platform_features; +#ifdef CONFIG_HYGON_PSP2CPU_CMD + const unsigned int p2c_cmdresp_reg; + const unsigned int p2c_cmdbuff_addr_lo_reg; + const unsigned int p2c_cmdbuff_addr_hi_reg; +#endif }; /* Structure to hold SP device data */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 5185555a74a7..4f6a0507f7cd 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c 
@@ -129,9 +129,13 @@ static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *a if (!psp) return 0; - +#ifdef CONFIG_X86 + if (attr == &dev_attr_bootloader_version.attr && + psp->vdata->bootloader_info_reg && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) +#else if (attr == &dev_attr_bootloader_version.attr && psp->vdata->bootloader_info_reg) +#endif val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg); if (attr == &dev_attr_tee_version.attr && @@ -459,6 +463,11 @@ static const struct psp_vdata pspv1 = { .feature_reg = 0x105fc, /* C2PMSG_63 */ .inten_reg = 0x10610, /* P2CMSG_INTEN */ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif }; static const struct psp_vdata pspv2 = { @@ -509,6 +518,11 @@ static const struct psp_vdata psp_csvv1 = { .feature_reg = 0x105fc, .inten_reg = 0x10670, .intsts_reg = 0x10674, +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif }; #endif diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 1536d0057738..74086c114184 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -677,6 +677,16 @@ struct vpsp_ret { #define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) #define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); + +int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); + +#endif + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); -- Gitee From bf4a118506aac13855e8b28c57436445a9fcc3e8 Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:16:34 +0800 Subject: [PATCH 854/953] anolis: crypto: tdm: Add Hygon TDM driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8670 TDM (Trusted Dynamic Measurement) is a module designed and implemented by HYGON in its X86 CPU's embedded secure processor, providing a dynamic measurement service to the X86 side for memory that needs to be protected, e.g. the memory area where kernel code resides. With this new feature, any specified memory can be protected dynamically at runtime. When the protected memory is modified illegally, TDM detects the event immediately and raises an alarm in the form of an exception; meanwhile, the abnormal information is recorded inside the TDM for subsequent audit or remote attestation. The TDM driver mainly implements the following functions: (1) Send the required memory block information and configuration information to the TDM device for protection; (2) Manage the further distribution of exceptions when TDM detects an illegal memory modification and an exception is triggered; (3) Record abnormal information for subsequent audit or attestation.
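The driver rides on the PSP2CPU mailbox added by the previous patch; a minimal sketch of how a command handler is hooked (the handler name and body here are hypothetical, while the registration API and command ID 1 are as defined earlier):

    static int tdm_p2c_cmd_handler(uint32_t id, uint64_t data)
    {
            /* data carries the payload of the two 32-bit mailbox data regs */
            pr_info("tdm: P2C command %u, data 0x%llx\n", id, data);
            return 0;
    }

    ret = psp_register_cmd_notifier(1, tdm_p2c_cmd_handler); /* ID 1 = TDM */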
Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 8 + drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/tdm_hygon.c | 1549 ++++++++++++++++++++++++++++++++ drivers/crypto/ccp/tdm_hygon.h | 501 +++++++++++ 4 files changed, 2059 insertions(+) create mode 100644 drivers/crypto/ccp/tdm_hygon.c create mode 100644 drivers/crypto/ccp/tdm_hygon.h diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index d62d628fef20..30902232acce 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -60,6 +60,14 @@ config HYGON_PSP2CPU_CMD help Hygon PSP2CPU Command Support +config TDM_HYGON + tristate "Hygon TDM Interface" + default y + depends on CRYPTO_DEV_CCP_DD + depends on HYGON_PSP2CPU_CMD + help + Hygon TDM driver + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 0da504999951..0c66b4d5792d 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -25,6 +25,7 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_HYGON) += tdm_hygon.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/tdm_hygon.c b/drivers/crypto/ccp/tdm_hygon.c new file mode 100644 index 000000000000..56927265841e --- /dev/null +++ b/drivers/crypto/ccp/tdm_hygon.c @@ -0,0 +1,1549 @@ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "tdm_hygon.h"
+
+#define TDM_CMD_ID_MAX 16
+#define TDM2PSP_CMD(id) (0x110 | (id))
+#define TDM_P2C_CMD_ID 1
+#define TDM_C2P_CMD_SIZE (3*PAGE_SIZE)
+#define TDM_KFIFO_SIZE 1024
+
+#define TDM_IOC_TYPE 'D'
+#define TDM_CMD_LEN_LIMIT (1U << 12)
+
+struct context_message {
+	uint32_t flag;
+	uint32_t pid;
+	uint8_t comm[16];
+	uint8_t module_name[64];
+};
+
+struct tdm_task_head {
+	struct list_head head;
+	rwlock_t lock;
+};
+
+struct tdm_task_ctx {
+	uint32_t task_id;
+	uint32_t cmd_ctx_flag;
+	measure_exception_handler_t handler;
+	struct list_head list;
+};
+
+static struct tdm_task_head dyn_head;
+static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID;
+static struct task_struct *kthread;
+static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE);
+static spinlock_t kfifo_lock;
+
+static int list_check_exist(uint32_t task_id)
+{
+	int found = 0;
+	struct list_head *head = NULL;
+	rwlock_t *lock = NULL;
+	struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL;
+
+	head = &dyn_head.head;
+	lock = &dyn_head.lock;
+
+	read_lock(lock);
+	list_for_each_entry_safe(task_node, tmp_node, head, list) {
+		if (task_node->task_id == task_id) {
+			found = 1;
+			break;
+		}
+	}
+	read_unlock(lock);
+
+	return found;
+}
+
+static int list_enqueue(void *entry)
+{
+	int ret = 0;
+	struct list_head *head, *entry_list = NULL;
+	rwlock_t *lock = NULL;
+
+	if (!entry) {
+		ret = -DYN_NULL_POINTER;
+		pr_err("Null pointer\n");
+		goto end;
+	}
+
+	head = &dyn_head.head;
+	lock = &dyn_head.lock;
+	entry_list = &(((struct tdm_task_ctx *)entry)->list);
+
+	write_lock(lock);
+	if (entry_list)
+		list_add_tail(entry_list, head);
+	write_unlock(lock);
+
+end:
+	return ret;
+}
+
+static __maybe_unused int list_print(void)
+{
+	struct list_head *head = NULL;
+	rwlock_t *lock = NULL;
+	struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL;
+
+	head = &dyn_head.head;
+	lock = &dyn_head.lock;
+
+	read_lock(lock);
+	list_for_each_entry_safe(task_node, tmp_node, head, list) {
+		pr_info("id: %d ", task_node->task_id);
+	}
+	read_unlock(lock);
+	pr_info("\n");
+
+	return 0;
+}
+
+static int measure_exception_handling_thread(void *data)
+{
+	int ret = 0;
+	int copied = 0;
+	uint32_t error_task_id = 0xffffffff;
+	struct measure_status task_measure_status;
+	struct list_head *head = NULL;
+	rwlock_t *lock = NULL;
+	struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL;
+
+	head = &dyn_head.head;
+	lock = &dyn_head.lock;
+
+	pr_info("Thread started for measurement exception handler dispatching...\n");
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+
+		while (!kfifo_is_empty(&kfifo_error_task)) {
+			copied = kfifo_out_spinlocked(&kfifo_error_task,
+				(unsigned char *)&error_task_id, sizeof(uint32_t), &kfifo_lock);
+			if (copied != sizeof(uint32_t)) {
+				ret = -DYN_ERR_API;
+				pr_err("kfifo_out exception, return\n");
+				goto end;
+			}
+
+			read_lock(lock);
+			list_for_each_entry_safe(task_node, tmp_node, head, list) {
+				if (task_node->task_id == error_task_id)
+					break;
+			}
+			read_unlock(lock);
+
+			if (!task_node) {
+				ret = -DYN_NULL_POINTER;
+				pr_err("task_node is null, return\n");
+				goto end;
+			}
+
+			if (task_node->task_id == error_task_id) {
+				if (task_node->handler) {
+					pr_info("-----Measurement exception handler dispatching "
+						"thread------\n");
+					pr_info("Measurement exception received for task %d\n",
+						error_task_id);
+
pr_info("Step1: Query PSP for task %d status to confirm " + "the error.\n", error_task_id); + pr_info("Step2: Error confirmed, CALL measurement " + "exception handler.\n"); + ret = psp_query_measure_status(error_task_id, + &task_measure_status); + if (ret) { + pr_err("task_id %d status query failed\n", + error_task_id); + goto end; + } + + if (task_measure_status.error == MER_ERR) { + /*error--1 normal--0 */ + pr_info("Error detected for task %d, " + "action TODO!\n", error_task_id); + pr_info("----Measurement exception handler----\n"); + task_node->handler(error_task_id); + pr_info("Exit measurement exception handler.\n"); + } else { + pr_info("No error detected for task %d, please " + "check it again!\n", error_task_id); + } + } else { + pr_err("task %d's callback function is not registered, " + "please check it\n", error_task_id); + } + } + } + } +end: + return ret; +} + +static int tdm_interrupt_handler(uint32_t id, uint64_t data) +{ + if (kthread) { + kfifo_in_spinlocked(&kfifo_error_task, (unsigned char *)&data, sizeof(uint32_t), + &kfifo_lock); + wake_up_process(kthread); + } + + return 0; +} + +static int tdm_do_cmd(unsigned int cmd_id, void *cmd_data, int *error) +{ + if (cmd_id >= TDM_CMD_ID_MAX) { + pr_err("%s cmd_id %u beyond limit\n", __func__, cmd_id); + return -DYN_BEYOND_MAX; + } + + return psp_do_cmd(TDM2PSP_CMD(cmd_id), cmd_data, error); +} + +static int calc_task_context_hash(struct context_message context_msg, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + shash = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(shash)) { + pr_err("can't alloc hash\n"); + return -DYN_ERR_API; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + ret = crypto_shash_init(sdesc); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_init failed\n"); + goto end; + } + + if (context_msg.flag & CONTEXT_CHECK_PID) { + ret = crypto_shash_update(sdesc, (uint8_t *)&context_msg.pid, + sizeof(context_msg.pid)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_COMM) { + ret = crypto_shash_update(sdesc, context_msg.comm, + strlen(context_msg.comm)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_MODNAME) { + ret = crypto_shash_update(sdesc, context_msg.module_name, + strlen(context_msg.module_name)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_final failed\n"); + goto free_shash; + } + } + +free_shash: + crypto_free_shash(shash); +end: + return ret; +} + +static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) +{ + int ret = 0; + struct context_message ctx_msg = {0}; + unsigned long return_address = 0; +#if IS_BUILTIN(CONFIG_TDM_HYGON) + struct module *p_module = NULL; +#elif IS_ENABLED(CONFIG_KALLSYMS) + char symbol_buf[128] = {0}; + int symbol_len = 0; + char *symbol_begin = NULL; + char *symbol_end = NULL; +#endif + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + ctx_msg.flag = flag; + ctx_msg.pid = current->pid; + memcpy(ctx_msg.comm, current->comm, sizeof(current->comm)); + + return_address = CALLER_ADDR1; + if (return_address) { +#if 
IS_BUILTIN(CONFIG_TDM_HYGON) + p_module = __module_address(return_address); + // caller is module + if (p_module) + memcpy(ctx_msg.module_name, p_module->name, sizeof(p_module->name)); + // caller is build-in + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#elif IS_ENABLED(CONFIG_KALLSYMS) + symbol_len = sprint_symbol((char *)symbol_buf, return_address); + if (!symbol_len) { + ret = -DYN_ERR_API; + pr_err("sprint_symbol failed\n"); + goto end; + } + symbol_begin = strchr((char *)symbol_buf, '['); + if (!symbol_begin) { + ret = -DYN_NULL_POINTER; + pr_err("module name is not exist\n"); + goto end; + } + symbol_end = strchr((char *)symbol_buf, ']'); + if (!symbol_end) { + ret = -DYN_NULL_POINTER; + pr_err("module name is not exist\n"); + goto end; + } + symbol_begin++; + if (symbol_end - symbol_begin) + memcpy(ctx_msg.module_name, symbol_begin, symbol_end - symbol_begin); + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#endif + } else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); + + ret = calc_task_context_hash(ctx_msg, hash); + if (ret) { + pr_err("calc_task_context_hash failed\n"); + goto end; + } + +end: + return ret; +} + +static int tdm_verify_phy_addr_valid(struct addr_range_info *range) +{ + int ret = 0; +#if IS_BUILTIN(CONFIG_TDM_HYGON) + int i; + uint64_t phy_addr_start, phy_addr_end; + + for (i = 0; i < range->count; i++) { + phy_addr_start = __sme_clr(range->addr[i].addr_start); + phy_addr_end = __sme_clr(range->addr[i].addr_start + range->addr[i].length); + + if ((PHYS_PFN(phy_addr_start) >= max_pfn) || (PHYS_PFN(phy_addr_end) >= max_pfn)) { + pr_err("phy_addr or length beyond max_pfn\n"); + ret = -DYN_ERR_MEM; + break; + } + } +#else + pr_warn("TDM: Can't get max_pfn, skip physical address check\n"); +#endif + + return ret; +} + +/* Convert the virtual address to physics address,then judge whether it is + * continuous physics memory + */ +static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uint64_t *left_convert) +{ + int ret = 0; + unsigned int level = 0; + pte_t *pte; + uint64_t local_page_mask = 0; + uint64_t local_page_size = 0; + uint64_t now_base = vaddr; + uint64_t last_phy_addr = 0; + uint64_t last_phy_len = 0; + uint64_t now_phy_addr = 0; + + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + local_page_size = page_level_size(level); + local_page_mask = page_level_mask(level); + + switch (level) { + case PG_LEVEL_4K: + p_addr_info->addr_start = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + p_addr_info->addr_start = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + p_addr_info->addr_start = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + if ((p_addr_info->addr_start & ~local_page_mask) == 0) { + /*|--------------page_size-------------------|*/ + /*|-------*left_convert-------|*/ + if (*left_convert < local_page_size) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|-----*/ + /*|---------------------*left_convert-----------------------|*/ + else { + 
p_addr_info->length = local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + } + } else { + /*|--------------page_size-------------------|------*/ + /* |-------*left_convert---------|*/ + if ((p_addr_info->addr_start + *left_convert) < + ((p_addr_info->addr_start & local_page_mask) + local_page_size)) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|........*/ + /* |-----------------*left_convert-----------------|*/ + else { + p_addr_info->length = (p_addr_info->addr_start & local_page_mask) + + local_page_size - p_addr_info->addr_start; + now_base += p_addr_info->length; + *left_convert -= p_addr_info->length; + } + } + + last_phy_len = p_addr_info->length; + last_phy_addr = p_addr_info->addr_start; + + while (*left_convert) { + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + switch (level) { + case PG_LEVEL_4K: + now_phy_addr = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + now_phy_addr = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + now_phy_addr = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + /*not continuous memory*/ + if ((last_phy_addr + last_phy_len) != now_phy_addr) + break; + + if (*left_convert < local_page_size) { + p_addr_info->length += *left_convert; + *left_convert = 0; + } else { + p_addr_info->length += local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + last_phy_addr = now_phy_addr; + last_phy_len = local_page_size; + } + } + +end: + return ret; +} + +int psp_get_fw_info(struct tdm_version *version) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_fw_cmd *fw_cmd = NULL; + struct tdm_fw_resp *fw_resp = NULL; + + if (!version) { + ret = -DYN_NULL_POINTER; + pr_err("version is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + fw_cmd = (struct tdm_fw_cmd *)tdm_cmdresp_data; + fw_cmd->cmd_type = TDM_FW_VERSION; + + ret = tdm_do_cmd(0, (void *)fw_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + + if (error) { + ret = -error; + pr_err("get_fw_info exception error: 0x%x\n", error); + goto free_cmdresp; + } + + fw_resp = (struct tdm_fw_resp *)tdm_cmdresp_data; + memcpy(version, &fw_resp->version, sizeof(struct tdm_version)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_get_fw_info); + +int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_create_cmd *create_cmd = NULL; + struct tdm_create_resp *create_resp = NULL; + uint32_t addr_range_info_len = 0; + struct addr_range_info *paddr_range_info = NULL; + uint32_t info_index = 0; + uint64_t now_base_vaddr = 0; + uint64_t 
tf_left_size = 0; + uint32_t count = 0; + + if (!range) { + ret = -DYN_NULL_POINTER; + pr_err("range is null pointer\n"); + goto end; + } + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (range->count > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("range->count %d is beyond RANGE_CNT_MAX %d\n", range->count, RANGE_CNT_MAX); + goto end; + } + if (range->count == 0) { + ret = -DYN_ERR_SIZE_SMALL; + pr_err("range->count is zero!\n"); + goto end; + } + + /*create task by vaddr*/ + if (flag & TASK_CREATE_VADDR) { + paddr_range_info = kzalloc(sizeof(struct addr_range_info) + + RANGE_CNT_MAX * sizeof(struct addr_info), GFP_KERNEL); + if (!paddr_range_info) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for paddr_range_info failed\n"); + goto end; + } + + now_base_vaddr = range->addr[0].addr_start; + tf_left_size = range->addr[0].length; + while (tf_left_size && (count++ < RANGE_CNT_MAX + 1)) { + ret = ptable_virt_to_phy(now_base_vaddr, + &paddr_range_info->addr[info_index], &tf_left_size); + if (ret) { + pr_err("address convert failed!\n"); + goto free_paddr_range_info; + } + + now_base_vaddr = now_base_vaddr + + paddr_range_info->addr[info_index++].length; + if (info_index > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("info_index: %d is beyond %d\n", info_index, RANGE_CNT_MAX); + goto free_paddr_range_info; + } + } + + paddr_range_info->count = info_index; + addr_range_info_len = paddr_range_info->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } else { + /*check if physics address valid*/ + ret = tdm_verify_phy_addr_valid(range); + if (ret) { + pr_err("range address is abnormal!\n"); + goto end; + } + addr_range_info_len = range->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto free_paddr_range_info; + } + + create_cmd = (struct tdm_create_cmd *)tdm_cmdresp_data; + create_cmd->cmd_type = TDM_TASK_CREATE; + create_cmd->cmd_ctx_flag = flag; + + memcpy(&create_cmd->m_data, data, sizeof(struct measure_data)); + create_cmd->authcode_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : code->len; + + ret = tdm_get_cmd_context_hash(flag, create_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + if (flag & TASK_CREATE_VADDR) + memcpy(&create_cmd->range_info, paddr_range_info, addr_range_info_len); + else + memcpy(&create_cmd->range_info, range, addr_range_info_len); + + ret = tdm_do_cmd(0, (void *)create_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("create_measure_task exception error: 0x%x\n", error); + goto free_cmdresp; + } + + create_resp = (struct tdm_create_resp *)tdm_cmdresp_data; + code->len = create_resp->authcode_len; + code->len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : code->len; + memcpy(&code->val[0], &create_resp->authcode_val[0], code->len); + + head = &dyn_head.head; + task_node = kzalloc(sizeof(struct tdm_task_ctx), GFP_KERNEL); + if (!task_node) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", sizeof(struct tdm_task_ctx)); + goto free_cmdresp; + } + + task_node->task_id = create_resp->task_id; + task_node->handler = NULL; + task_node->cmd_ctx_flag = flag; + + ret = list_enqueue(task_node); + if (ret) { + pr_err("task %d enqueue failed!!!\n", task_node->task_id); + goto free_task_node; + } + + kfree(tdm_cmdresp_data); + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); + + return task_node->task_id; + +free_task_node: + kfree(task_node); +free_cmdresp: + kfree(tdm_cmdresp_data); +free_paddr_range_info: + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_create_measure_task); + +int psp_query_measure_status(uint32_t task_id, struct measure_status *status) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_query_cmd *query_cmd = NULL; + struct tdm_query_resp *query_resp = NULL; + + if (!status) { + ret = -DYN_NULL_POINTER; + pr_err("status is null pointer\n"); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + query_cmd = (struct tdm_query_cmd *)tdm_cmdresp_data; + query_cmd->cmd_type = TDM_TASK_QUERY; + query_cmd->task_id = task_id; + + ret = tdm_do_cmd(0, query_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + query_resp = (struct tdm_query_resp *)tdm_cmdresp_data; + memcpy(status, &query_resp->m_status, sizeof(struct measure_status)); +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_query_measure_status); + +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_register_cmd *register_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + /* check if task_id is registered already */ + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + if ((handler && task_node->handler)) { + pr_err("task %d is registered already\n", task_id); + read_unlock(lock); + return -DYN_EEXIST; + } + break; + /* task_node will be used for next context */ + } + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = 
-DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + register_cmd = (struct tdm_register_cmd *)tdm_cmdresp_data; + temp_cmd = ®ister_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_VERIFY_AUTH; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, register_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + write_lock(lock); + task_node->handler = handler; + write_unlock(lock); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_measure_exception_handler); + +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_destroy_cmd *destroy_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to destroy!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + destroy_cmd = (struct tdm_destroy_cmd *)tdm_cmdresp_data; + temp_cmd = &destroy_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_DESTROY; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, destroy_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + if (task_node->handler) { + write_lock(lock); + task_node->handler = NULL; + write_unlock(lock); + } + + write_lock(lock); + list_del(&task_node->list); + write_unlock(lock); + + kfree(task_node); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_destroy_measure_task); + +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_update_cmd *update_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to update!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + update_cmd = (struct tdm_update_cmd *)tdm_cmdresp_data; + temp_cmd = &update_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_UPDATE; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + memcpy(&update_cmd->update_data, data, sizeof(struct measure_update_data)); + + ret = tdm_do_cmd(0, tdm_cmdresp_data, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_update_measure_task); + +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_startstop_cmd *startstop_cmd = NULL; + struct tdm_startstop_resp *startstop_resp = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + startstop_cmd = (struct tdm_startstop_cmd *)tdm_cmdresp_data; + temp_cmd = &startstop_cmd->cmd; + temp_cmd->cmd_type = start ? TDM_TASK_START : TDM_TASK_STOP; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + if ((temp_cmd->cmd_type == TDM_TASK_STOP) && (task_node->cmd_ctx_flag & + TASK_ATTR_NO_UPDATE)) { + pr_warn("Task %d is not allowed to stop!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto free_cmdresp; + } + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, startstop_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + startstop_resp = (struct tdm_startstop_resp *)tdm_cmdresp_data; + + kfree(tdm_cmdresp_data); + + return startstop_resp->m_status.status; + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_startstop_measure_task); + +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_export_cert_cmd *cert_cmd = NULL; + struct tdm_export_cert_resp *cert_resp = NULL; + + if (!cert) { + ret = -DYN_NULL_POINTER; + pr_err("cert is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + cert_cmd = (struct tdm_export_cert_cmd *)tdm_cmdresp_data; + cert_cmd->cmd_type = TDM_EXPORT_CERT; + cert_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)cert_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + cert_resp = (struct tdm_export_cert_resp *)tdm_cmdresp_data; + memcpy(cert, &cert_resp->cert, sizeof(struct tdm_cert)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_export_cert); + +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_report *report_resp = NULL; + uint32_t needed_length = 0; + + if (!user_supplied_data) { + ret = -DYN_NULL_POINTER; + pr_err("user_supplied_data is null pointer\n"); + goto end; + } + if (!report_buffer) { + ret = -DYN_NULL_POINTER; + pr_err("report_buffer is null pointer\n"); + goto end; + } + if (!length) { + ret = -DYN_NULL_POINTER; + pr_err("length is null pointer\n"); + goto end; + } + if ((report_type != TDM_REPORT_SUMMARY) && (report_type != TDM_REPORT_DETAIL)) { + ret = -DYN_ERR_REPORT_TYPE; + pr_err("invalid report_type\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + report_cmd = (struct tdm_get_report_cmd *)tdm_cmdresp_data; + + report_cmd->cmd_type = TDM_GET_REPORT; + report_cmd->task_id = task_id; + if (task_id == TDM_TASK_ALL) { + if (!selection) { + ret = -DYN_NULL_POINTER; + 
pr_err("selection is null pointer\n"); + goto end; + } + report_cmd->selection_len = selection->len; + report_cmd->selection_len = (report_cmd->selection_len > TDM_MAX_TASK_BITMAP) ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + memcpy(&report_cmd->selection_bitmap[0], &selection->bitmap[0], + report_cmd->selection_len); + } + + report_cmd->user_data_len = (user_supplied_data->len > TDM_MAX_NONCE_SIZE) ? + TDM_MAX_NONCE_SIZE : user_supplied_data->len; + memcpy(&report_cmd->user_data_val[0], &user_supplied_data->val[0], + report_cmd->user_data_len); + report_cmd->report_type = report_type; + report_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)report_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + report_resp = (struct tdm_report *)tdm_cmdresp_data; + if (report_type == TDM_REPORT_SUMMARY) + needed_length = sizeof(struct tdm_report) + sizeof(struct tdm_report_sig); + else + needed_length = sizeof(struct tdm_report) + + report_resp->task_nums * sizeof(struct tdm_detail_task_status) + + sizeof(struct tdm_report_sig); + + if (needed_length > *length) { + pr_warn("needed_length %d is beyond length %d\n", needed_length, *length); + *length = needed_length; + ret = -DYN_ERR_SIZE_SMALL; + } else { + memcpy(report_buffer, report_resp, needed_length); + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_report); + +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + + if (!digest) { + ret = -DYN_NULL_POINTER; + pr_err("digest is null pointer\n"); + goto end; + } + if (!pcr_values) { + ret = -DYN_NULL_POINTER; + pr_err("pcr_values is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)tdm_cmdresp_data; + + vpcr_cmd->cmd_type = TDM_VPCR_AUDIT; + memcpy(&vpcr_cmd->pcr, &pcr, sizeof(struct pcr_select)); + + ret = tdm_do_cmd(0, (void *)vpcr_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + vpcr_resp = (struct tdm_get_vpcr_resp *)tdm_cmdresp_data; + memcpy(digest, &vpcr_resp->digest, sizeof(struct tpm2b_digest)); + pcr_values->task_nums = vpcr_resp->pcr_values.task_nums; + memcpy(&pcr_values->task_data[0], &vpcr_resp->pcr_values.task_data[0], + pcr_values->task_nums * sizeof(struct tdm_task_data)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_vpcr_audit); + +static long tdm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + int ret = 0; + void __user *argp = (void __user *)arg; + unsigned int tdm_cmd = 0; + unsigned char *temp_cmd_data = NULL; + struct task_selection_2b *selection = NULL; + struct data_2b *data = NULL; + uint32_t data_to_user_len = 0; + uint16_t selection_len = 0; + uint16_t user_data_len = 0; + 
struct tdm_get_report_cmd *report_cmd = NULL;
+	struct tdm_user_report_cmd *user_report_cmd = NULL;
+	uint32_t needed_length = 0;
+	struct tdm_get_vpcr_cmd *vpcr_cmd = NULL;
+	struct tdm_get_vpcr_resp *vpcr_resp = NULL;
+	uint32_t pcr_num = 0;
+
+	if (_IOC_TYPE(ioctl) != TDM_IOC_TYPE) {
+		ret = -EINVAL;
+		pr_err("ioctl 0x%08x is invalid\n", ioctl);
+		goto end;
+	}
+
+	temp_cmd_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL);
+	if (!temp_cmd_data) {
+		ret = -ENOMEM;
+		pr_err("kzalloc for size 0x%lx failed\n", TDM_C2P_CMD_SIZE);
+		goto end;
+	}
+
+	tdm_cmd = _IOC_NR(ioctl);
+
+	switch (tdm_cmd) {
+	case USER_EXPORT_CERT:
+		ret = tdm_export_cert(TDM_AK_USAGE_ID, (struct tdm_cert *)temp_cmd_data);
+		if (ret) {
+			pr_err("Execute tdm export cert command failed!\n");
+			goto free_mem;
+		}
+		data_to_user_len = sizeof(struct tdm_cert);
+		break;
+
+	case USER_GET_REPORT:
+		if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_user_report_cmd))) {
+			pr_err("%s copy from user failed\n", __func__);
+			ret = -EFAULT;
+			goto free_mem;
+		}
+
+		user_report_cmd = (struct tdm_user_report_cmd *)temp_cmd_data;
+		needed_length = user_report_cmd->needed_length;
+		report_cmd = &user_report_cmd->report_cmd;
+		selection_len = report_cmd->selection_len > TDM_MAX_TASK_BITMAP ?
+			TDM_MAX_TASK_BITMAP : report_cmd->selection_len;
+
+		selection = kzalloc(sizeof(struct task_selection_2b) +
+			selection_len * sizeof(uint8_t), GFP_KERNEL);
+		if (!selection) {
+			ret = -ENOMEM;
+			pr_err("kzalloc failed\n");
+			goto free_mem;
+		}
+
+		selection->len = selection_len;
+		memcpy(&selection->bitmap[0], &report_cmd->selection_bitmap[0], selection->len);
+
+		user_data_len = report_cmd->user_data_len > TDM_MAX_NONCE_SIZE ?
+			TDM_MAX_NONCE_SIZE : report_cmd->user_data_len;
+		data = kzalloc(sizeof(struct data_2b) +
+			user_data_len * sizeof(uint8_t), GFP_KERNEL);
+		if (!data) {
+			ret = -ENOMEM;
+			pr_err("kzalloc failed\n");
+			goto free_mem;
+		}
+
+		data->len = user_data_len;
+		memcpy(&data->val[0], &report_cmd->user_data_val[0], data->len);
+
+		ret = tdm_get_report(report_cmd->task_id, selection, data, report_cmd->report_type,
+			report_cmd->key_usage_id, temp_cmd_data, &needed_length);
+		if (ret) {
+			pr_err("Execute tdm report command failed!\n");
+			goto free_mem;
+		}
+
+		data_to_user_len = needed_length;
+		break;
+
+	case USER_VPCR_AUDIT:
+		if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_get_vpcr_cmd))) {
+			pr_err("%s copy from user failed\n", __func__);
+			ret = -EFAULT;
+			goto free_mem;
+		}
+
+		vpcr_cmd = (struct tdm_get_vpcr_cmd *)temp_cmd_data;
+		vpcr_resp = (struct tdm_get_vpcr_resp *)temp_cmd_data;
+		pcr_num = vpcr_cmd->pcr.pcr;
+
+		ret = tdm_get_vpcr_audit(vpcr_cmd->pcr, &vpcr_resp->digest, &vpcr_resp->pcr_values);
+		if (ret) {
+			pr_err("Execute tdm vpcr audit command failed!\n");
+			goto free_mem;
+		}
+
+		vpcr_resp->pcr = pcr_num;
+		data_to_user_len = sizeof(struct tdm_get_vpcr_resp) +
+			vpcr_resp->pcr_values.task_nums * sizeof(struct tdm_task_data);
+		break;
+
+	case USER_SHOW_DEVICE:
+		ret = psp_get_fw_info(&((struct tdm_show_device *)temp_cmd_data)->version);
+		if (ret) {
+			pr_err("firmware version get failed!\n");
+			goto free_mem;
+		}
+
+		data_to_user_len = sizeof(struct tdm_show_device);
+		break;
+
+	default:
+		pr_err("invalid tdm_cmd: %d from user\n", tdm_cmd);
+		ret = -EINVAL;
+		goto free_mem;
+	}
+
+	if (copy_to_user(argp, temp_cmd_data, data_to_user_len)) {
+		pr_err("%s copy to user failed\n", __func__);
+		ret = -EFAULT;
+		goto free_mem;
+	}
+
+free_mem:
+	kfree(temp_cmd_data);
+	kfree(selection);
+	kfree(data);
+end:
+
return ret; +} + +static const struct file_operations tdm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tdm_ioctl, +}; + +static struct miscdevice misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tdm", + .fops = &tdm_fops, +}; + +static int __init hygon_tdm_init(void) +{ + int ret = 0; + + INIT_KFIFO(kfifo_error_task); + INIT_LIST_HEAD(&dyn_head.head); + rwlock_init(&dyn_head.lock); + spin_lock_init(&kfifo_lock); + + ret = psp_register_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + if (ret) { + pr_err("notifier function registration failed\n"); + return ret; + } + + kthread = kthread_create(measure_exception_handling_thread, NULL, + "measure_exception_handling_thread"); + if (IS_ERR(kthread)) { + pr_err("kthread_create fail\n"); + ret = PTR_ERR(kthread); + return ret; + } + + wake_up_process(kthread); + pr_info("TDM driver loaded successfully!\n"); + + return misc_register(&misc); +} + +static void __exit hygon_tdm_exit(void) +{ + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } + + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + misc_deregister(&misc); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.7"); +MODULE_DESCRIPTION("The dynamic measure driver"); + +/* + * hygon_tdm_init must be done after ccp module init. + * That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tdm_init); +module_exit(hygon_tdm_exit); diff --git a/drivers/crypto/ccp/tdm_hygon.h b/drivers/crypto/ccp/tdm_hygon.h new file mode 100644 index 000000000000..ac5638986103 --- /dev/null +++ b/drivers/crypto/ccp/tdm_hygon.h @@ -0,0 +1,501 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* Change log: + * Version: 0.7 (fw version 1.4) + * 1.Adjust the TDM driver to accommodate multiple versions of the kernel. + * Version: 0.6 (fw version 1.4) + * 1.remove psp_get_fw_info from hygon_tdm_init, add tdm show device support to ioctl for hag. + * Version: 0.5 (fw version 1.4) + * 1.add support for hanging machine when task exception with special attribute. + * Version: 0.4 (fw version 1.3) + * 1.add vpcr support. + * 2.add task create by vaddr. + * Version: 0.3 (fw version 1.2) + * 1.add remote authentication support. 
+ */ +#ifndef __TDM_HYGON_H__ +#define __TDM_HYGON_H__ + +#include +#include + +#define MIN_VPCR 10 +#define MAX_VPCR 16 + +/*Macro definition for measurement*/ +#define TDM_MAX_TASK_BITMAP 16 +#define TDM_MAX_NONCE_SIZE 32 + +#define RANGE_CNT_MAX 0x80 +#define MEASURE_TASK_MAX 100 +#define AUTHCODE_MAX 16 +#define AUTH_TRY_DELAY 1 + +#define HASH_ALGO_SM3 0 +#define HASH_ALGO_SHA1 1 +#define HASH_ALGO_SHA256 2 +#define HASH_ALGO_SHA384 3 +#define HASH_ALGO_SHA512 4 + +#define SM3_256_DIGEST_SIZE 32 +#define SHA1_DIGEST_SIZE 20 +#define SHA256_DIGEST_SIZE 32 +#define SHA384_DIGEST_SIZE 48 +#define SHA512_DIGEST_SIZE 64 + +#define CONTEXT_CHECK_PID 0x1 +#define CONTEXT_CHECK_COMM 0x2 +#define CONTEXT_CHECK_MODNAME 0x4 +#define TASK_ATTR_NO_UPDATE 0x10000 +#define TASK_SUPPORT_VPCR 0x20000 +#define TASK_CREATE_VADDR 0x40000 +#define TASK_EXCEPTION_CRASH 0x80000 + +#define MEASURE_UPDATE_ALGO 0x1 +#define MEASURE_UPDATE_EXPECTED_MEASUREMENT 0x2 + +/*Macro definition for tdm certificate*/ +#define TDM_MAX_CHIP_ID_LEN 40 +#define TDM_CURVE_SM2_ID 0x3 +#define TDM_PUBKEY_LEN 32 +#define TDM_MAX_USER_ID_LEN 126 +#define TDM_SIG_LEN 32 +#define TDM_HEADER_AND_PUBKEY_LEN 284 + +/*Macro definition for tdm report*/ +#define TDM_TASK_ALL 0xffffffff +#define TDM_REPORT_SUMMARY 0 +#define TDM_REPORT_DETAIL 1 + +/* CPU to psp command declaration */ +enum C2P_CMD_TYPE { + TDM_TASK_CREATE = 0x0, + TDM_TASK_VERIFY_AUTH, + TDM_TASK_QUERY, + TDM_TASK_DESTROY, + TDM_TASK_UPDATE, + TDM_TASK_STOP, + TDM_TASK_START, + TDM_FW_VERSION, + TDM_EXPORT_CERT, + TDM_GET_REPORT, + TDM_VPCR_AUDIT, + TDM_MAX_CMD +}; + +/* User interaction command declaration */ +enum USER_CMD_TYPE { + USER_EXPORT_CERT = 0x80, + USER_GET_REPORT, + USER_VPCR_AUDIT, + USER_SHOW_DEVICE, + USER_MAX_CMD +}; + +/*Public usage id definition for tdm certificate*/ +enum _tdm_key_usage_id { + TDM_INVALID_USAGE_ID = 0x1000, + TDM_CEK_USAGE_ID = 0x1004, + TDM_AK_USAGE_ID = 0x2001, + TDM_MAX_USAGE_ID +}; + +/*Public status ans type declaration*/ +enum TDM_TASK_STATUS { + DYN_INIT = 0x0, + DYN_TO_RUN, + DYN_RUN, + DYN_TO_STOP, + DYN_STOP +}; + +enum TDM_MEASURE_STATUS { + MER_NORMAL = 0x0, + MER_ERR +}; + +enum DYN_ERROR_TYPE { + DYN_NORMAL = 0x0, + DYN_NOT_EXIST, + DYN_AUTH_FAIL, + DYN_STATUS_NOT_SUIT, + DYN_BEYOND_MAX, + DYN_DA_PERIOD, + DYN_NULL_POINTER, + DYN_ERR_API, + DYN_EEXIST, + DYN_ERR_MEM, + DYN_ERR_AUTH_LEN, + DYN_ERR_KEY_ID, + DYN_NO_ALLOW_UPDATE, + DYN_ERR_HASH_ALGO, + DYN_ERR_REPORT_TYPE, + DYN_ERR_SIZE_SMALL, + DYN_ERR_ADDR_MAPPING, + DYN_ERR_PCR_NUM, + DYN_ERR_ORIG_TPM_PCR, + DYN_MAX_ERR_TYPE +}; + +/*Data structure declaration for measurement*/ +struct addr_info { + uint64_t addr_start; + uint64_t length; +} __packed; + +struct addr_range_info { + uint32_t count; + struct addr_info addr[]; +} __packed; + +struct measure_data { + uint32_t hash_algo; + uint8_t expected_measurement[32]; + uint32_t period_ms; + uint32_t pcr; +} __packed; + +struct authcode_2b { + uint16_t len; + uint8_t val[]; +} __packed; + +struct measure_status { + uint8_t status; + uint8_t error; + uint64_t count; +} __packed; + +struct measure_update_data { + uint32_t update_flag; + uint32_t algo; + uint8_t expected_measurement[32]; +} __packed; + +struct da_status { + uint64_t err_time; + uint16_t interval_time; + uint16_t err_cnt; +} __packed; + +struct tdm_version { + uint8_t api_major; + uint8_t api_minor; + uint32_t buildId; + uint32_t task_max; + uint32_t range_max_per_task; +} __packed; + +struct task_selection_2b { + uint16_t len; + uint8_t bitmap[]; +}; + 
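An editorial aside, not part of the patch: the *_2b types in this header
share one convention — a 16-bit length followed by a C99 flexible array
member — so callers allocate the header plus payload in a single block. A
minimal sketch using struct_size() from <linux/overflow.h> (the zero-filled
authcode is a placeholder value, not a real credential):

  struct authcode_2b *code;

  /* Room for the 16-bit length plus AUTHCODE_MAX payload bytes. */
  code = kzalloc(struct_size(code, val, AUTHCODE_MAX), GFP_KERNEL);
  if (!code)
  	return -ENOMEM;
  code->len = AUTHCODE_MAX;
  /* code->val[0..len-1] now holds the (zeroed) authcode payload. */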
+struct data_2b {
+	uint16_t len;
+	uint8_t val[];
+};
+
+/*Data structure declaration for vpcr*/
+struct pcr_select {
+	uint16_t hash;
+	uint32_t pcr;
+} __packed;
+
+union tpmu_ha {
+	uint8_t sha1[SHA1_DIGEST_SIZE];
+	uint8_t sha256[SHA256_DIGEST_SIZE];
+	uint8_t sha384[SHA384_DIGEST_SIZE];
+	uint8_t sha512[SHA512_DIGEST_SIZE];
+	uint8_t sm3_256[SM3_256_DIGEST_SIZE];
+};
+
+struct tpm2b_digest {
+	uint16_t size;
+	uint8_t buffer[sizeof(union tpmu_ha)];
+} __packed;
+
+struct tdm_task_data {
+	uint32_t task_id;
+	uint8_t hash[32];
+} __packed;
+
+struct tdm_pcr_value_2b {
+	uint32_t task_nums;
+	struct tdm_task_data task_data[];
+} __packed;
+
+/*Data structure declaration for tdm certificate*/
+struct _tdm_ecc_pubkey {
+	uint32_t curve_id;
+	uint8_t pubkey_qx[TDM_PUBKEY_LEN];
+	uint8_t pubkey_qy[TDM_PUBKEY_LEN];
+	uint16_t user_id_len;
+	uint8_t user_id[TDM_MAX_USER_ID_LEN];
+} __packed;
+
+struct _tdm_ecc_signature {
+	uint8_t sig_r[TDM_SIG_LEN];
+	uint8_t sig_s[TDM_SIG_LEN];
+} __packed;
+
+/*
+ ************************ Hygon TDM Certificate - ECC256***************************
+ *|00h |31:0 |VERSION |Certificate version. 0... |
+ *|04h |7:0 |- |Reserved. Set to zero |
+ *|06h |7:0 |CHIP_ID_LEN | |
+ *|08h |319:0 |CHIP_ID |Unique ID of every chip. |
+ *|30h |31:0 |KEY_USAGE_ID |Usage id of the key. |
+ *|34h |63:0 |- |Reserved. Set to zero. |
+ *|3Ch |31:0 |CURVE_ID |ECC curve id |
+ *|40h |255:0 |Qx |Public key Qx |
+ *|60h |255:0 |Qy |Public key Qy |
+ *|80h |7:0 |USER_ID_LEN |GM user id len |
+ *|82h |1007:0 |USER_ID |GM user id |
+ *|100h|223:0 |- |Reserved. Set to zero. |
+ *|11Ch|31:0 |SIG1_KEY_USAGE_ID|Key type for sig1. |
+ *|120h|255:0 |SIG1_R |Signature R of key1. |
+ *|140h|255:0 |SIG1_S |Signature S of key1. |
+ *|160h|223:0 |- |Reserved. Set to zero |
+ *|17Ch|31:0 |SIG2_KEY_USAGE_ID|Key type for sig2. |
+ *|180h|255:0 |SIG2_R |Signature R of key2. |
+ *|1A0h|255:0 |SIG2_S |Signature S of key2. |
+ *************************************************************************************
+ */
+struct tdm_cert {
+	uint32_t version;
+	uint8_t reserved_0[2];
+	uint16_t chip_id_len;
+	uint8_t chip_id[TDM_MAX_CHIP_ID_LEN];
+	uint32_t key_usage_id;
+	uint8_t reserved_1[8];
+	struct _tdm_ecc_pubkey ecc_pubkey;
+	uint8_t reserved_2[28];
+	uint32_t sig1_key_usage_id;
+	struct _tdm_ecc_signature ecc_sig1;
+	uint8_t reserved_3[28];
+	uint32_t sig2_key_usage_id;
+	struct _tdm_ecc_signature ecc_sig2;
+} __packed;
+
+/*Data structure declaration for tdm measurement report*/
+/*
+ ******************** Hygon TDM Report for Single Task - ECC256***********************
+ *|+(00h) |31:0 |TASK_ID |Measured task ID |
+ *|+(04h) |31:0 |PERIOD_MS |Measured period time for the related task |
+ *|+(08h) |63:0 |MEASURED_COUNT |Measured count for the related task |
+ *|+(10h) |31:0 |LAST_MEASURE_ELAPSED_MS|Measured time for last measurement. |
+ *|+(14h) |95:0 |- |Reserved. Set to zero |
+ *|+(20h) |255:0 |MEASURED_HASH |Measured hash for the related task. |
+ *************************************************************************************
+ */
+struct tdm_detail_task_status {
+	uint32_t task_id;
+	uint32_t period_ms;
+	uint64_t measured_count;
+	uint32_t last_measure_elapsed_ms;
+	uint8_t reserved[12];
+	uint8_t measured_hash[32];
+} __packed;
+
+/*
+ ************************ Hygon TDM Report - ECC256***************************
+ *|00h |31:0 |VERSION |Certificate version. 0... |
+ *|04h |31:0 |FW_VERSION |Firmware version, BUILD_ID |
+ *|08h |7:0 |REPORT_TYPE |Summary report:0, Detailed report:1 |
+ *|09h |39:0 |- |Reserved. Set to zero. |
+ *|0Eh |15:0 |TASK_NUMS |ALL task numbers. |
+ *|10h |127:0 |TASK_BITMAP |ALL task bitmap. |
+ *|20h |127:0 |TASK_ERROR_BITMAP |Bitmap for error tasks |
+ *|30h |127:0 |TASK_RUNNING_BITMAP|Bitmap for running tasks |
+ *|40h |239:0 |- |Reserved. Set to zero. |
+ *|5Eh |15:0 |USER_DATA_LEN |User supplied data length. |
+ *|60h |255:0 |USER_DATA |User supplied data. |
+ *|80h |255:0 |AGGREGATE_HASH |Aggregate hash for tasks |
+ *************************************************************************************
+ */
+struct tdm_report {
+	uint32_t version;
+	uint32_t fw_version;
+	uint8_t report_type;
+	uint8_t reserved_0[5];
+	uint16_t task_nums;
+	uint8_t task_bitmap[TDM_MAX_TASK_BITMAP];
+	uint8_t task_error_bitmap[TDM_MAX_TASK_BITMAP];
+	uint8_t task_running_bitmap[TDM_MAX_TASK_BITMAP];
+	uint8_t reserved_1[30];
+	uint16_t user_supplied_data_len;
+	uint8_t user_supplied_data[TDM_MAX_NONCE_SIZE];
+	uint8_t aggregate_hash[32];
+	struct tdm_detail_task_status detailed_task_status[];
+} __packed;
+
+/*
+ ************************ Hygon TDM Report Signature - ECC256*************************
+ *|A0h |223:0 |- |Reserved. Set to zero |
+ *|BCh |31:0 |SIG_KEY_USAGE_ID |Key type for sig. |
+ *|C0h |255:0 |SIG_R |Signature R of key. |
+ *|E0h |255:0 |SIG_S |Signature S of key. |
+ *************************************************************************************
+ */
+struct tdm_report_sig {
+	uint8_t reserved[28];
+	uint32_t sig_key_usage_id;
+	uint8_t sig_r[TDM_SIG_LEN];
+	uint8_t sig_s[TDM_SIG_LEN];
+} __packed;
+
+/*Data structure declaration for tdm command/response interface*/
+/*
+ * The following commands use this structure:
+ * psp_register_measure_exception_handler
+ * psp_destroy_measure_task
+ * psp_update_measure_task
+ * psp_startstop_measure_task
+ */
+struct tdm_common_cmd {
+	uint32_t cmd_type;
+	uint32_t task_id;
+	uint16_t code_len;
+	uint8_t code_val[AUTHCODE_MAX];
+	uint8_t context_hash[32];
+} __packed;
+
+/*TASK_CREATE*/
+struct tdm_create_cmd {
+	uint32_t cmd_type;
+	uint32_t cmd_ctx_flag;
+	struct measure_data m_data;
+	uint16_t authcode_len;
+	uint8_t context_hash[32];
+	struct addr_range_info range_info;
+} __packed;
+
+struct tdm_create_resp {
+	uint32_t task_id;
+	uint16_t authcode_len;
+	uint8_t authcode_val[AUTHCODE_MAX];
+} __packed;
+
+/*TASK_VERIFY_AUTH*/
+struct tdm_register_cmd {
+	struct tdm_common_cmd cmd;
+} __packed;
+
+/*TASK_QUERY*/
+struct tdm_query_cmd {
+	uint32_t cmd_type;
+	uint32_t task_id;
+} __packed;
+
+struct tdm_query_resp {
+	struct measure_status m_status;
+} __packed;
+
+/*TASK_DESTROY*/
+struct tdm_destroy_cmd {
+	struct tdm_common_cmd cmd;
+} __packed;
+
+/*TASK_UPDATE*/
+struct tdm_update_cmd {
+	struct tdm_common_cmd cmd;
+	struct measure_update_data update_data;
+} __packed;
+
+/*TASK_STOP,TASK_START*/
+struct tdm_startstop_cmd {
+	struct tdm_common_cmd cmd;
+} __packed;
+
+struct tdm_startstop_resp {
+	struct measure_status m_status;
+} __packed;
+
+/*TDM_VERSION*/
+struct tdm_fw_cmd {
+	uint32_t cmd_type;
+} __packed;
+
+struct tdm_fw_resp {
+	struct tdm_version version;
+} __packed;
+
+/*TDM_EXPORT_CERT*/
+struct tdm_export_cert_cmd {
+	uint32_t cmd_type;
+	uint32_t key_usage_id;
+} __packed;
+
+struct tdm_export_cert_resp {
+	struct tdm_cert cert;
+} __packed;
+
+/*TDM_GET_REPORT*/
+struct tdm_get_report_cmd {
+	uint32_t cmd_type;
+	uint32_t task_id;
+	uint16_t selection_len;
+	uint8_t selection_bitmap[TDM_MAX_TASK_BITMAP];
+	uint16_t user_data_len;
+	uint8_t user_data_val[TDM_MAX_NONCE_SIZE];
+	uint8_t report_type;
+	uint32_t key_usage_id;
+} __packed;
+
+/* Response:
+ * struct tdm_report measure_report;
+ * struct tdm_report_sig measure_report_sig;
+ */
+
+struct tdm_user_report_cmd {
+	struct tdm_get_report_cmd report_cmd;
+	uint32_t needed_length;
+} __packed;
+
+/*TDM_VPCR_AUDIT*/
+struct tdm_get_vpcr_cmd {
+	uint32_t cmd_type;
+	struct pcr_select pcr;
+} __packed;
+
+struct tdm_get_vpcr_resp {
+	uint32_t pcr;
+	struct tpm2b_digest digest;
+	struct tdm_pcr_value_2b pcr_values;
+} __packed;
+
+struct tdm_show_device {
+	struct tdm_version version;
+} __packed;
+
+/*Public api definition for tdm*/
+typedef int (*measure_exception_handler_t)(uint32_t task_id);
+
+int psp_get_fw_info(struct tdm_version *version);
+int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data,
+		uint32_t flag, struct authcode_2b *code);
+int psp_query_measure_status(uint32_t task_id, struct measure_status *status);
+int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code,
+		measure_exception_handler_t handler);
+int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code);
+int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code,
+		struct measure_update_data *data);
+int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start);
+int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert);
+int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection,
+		struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id,
+		uint8_t *report_buffer, uint32_t *length);
+int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest,
+		struct tdm_pcr_value_2b *pcr_values);
+
+#endif /* __TDM_HYGON_H__*/
-- 
Gitee

From e30926136f72f2972eb2017d6a8fa4614f982bcb Mon Sep 17 00:00:00 2001
From: chench00
Date: Mon, 1 Apr 2024 11:21:07 +0800
Subject: [PATCH 855/953] anolis: alinux: tpm: add Hygon TPM2 driver

ANBZ: #8670

Hygon CPUs implement a firmware-based TPM2 device, which runs on the
internal secure processor named PSP. The device is fully compatible
with the TCG TPM2.0 spec (part 1 ~ 4) at the command level, but
underneath it uses a unique private interface in the form of a hardware
mailbox between the X86 cores and the PSP, which differs from the TIS
and CRB interfaces defined in the PTP spec. As such, supporting this
device requires a specialized driver which handles the basic send and
receive operations needed by the kernel TPM core layer.

ACPI device info passed from the underlying BIOS indicates the device's
presence by setting the _HID field (see TCG ACPI Specification, Family
1.2 and 2.0, Chapter 8 "ACPI Device") to "HYGT0101", which
distinguishes it from the rest of the devices. If the BIOS does not
support this setting, the driver will not be activated and thus has no
impact on the system at all.
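A hedged sketch of that mailbox framing, reconstructed from
tpm_c_send()/tpm_c_recv() in the diff below (the struct name is invented
for illustration; this is not a documented ABI):

  /* Layout of priv->priv_buf as tpm_c_send() packs it: */
  struct hygon_tpm2_frame {
  	__be32 buf_size;	/* total buffer size, i.e. MAX_CMD_BUF_LEN */
  	__be32 cmd_size;	/* length of the TPM2 command that follows */
  	u8 payload[];		/* TPM2 command on send; the PSP overwrites it
  				 * with the TPM2 response read by tpm_c_recv() */
  } __packed;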
Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/char/tpm/Kconfig | 12 +++ drivers/char/tpm/Makefile | 1 + drivers/char/tpm/tpm_hygon.c | 186 +++++++++++++++++++++++++++++++++++ 3 files changed, 199 insertions(+) create mode 100644 drivers/char/tpm/tpm_hygon.c diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3d3..746661ded992 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -210,5 +210,17 @@ config TCG_FTPM_TEE help This driver proxies for firmware TPM running in TEE. +config TCG_HYGON + tristate "Hygon TPM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_hygon. + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb310..ccce74915160 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -42,3 +42,4 @@ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c new file mode 100644 index 000000000000..37e2e1f19c8d --- /dev/null +++ b/drivers/char/tpm/tpm_hygon.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TPM2.0 device driver. + * + * Copyright (C) 2020 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TPM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TPM_BUF_LEN 4096 +#define MAX_CMD_BUF_LEN (MAX_TPM_BUF_LEN + sizeof(u32) + sizeof(u32)) + +struct tpm_hygon_priv { + u8 priv_buf[MAX_CMD_BUF_LEN]; +}; + +/* + * tpm header struct name is different in different kernel versions. + * so redefine it for driver porting. 
+ */ +struct tpm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tpm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tpm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = cpu_to_be32(sizeof(priv->priv_buf)); + u32 cmd_size = cpu_to_be32((u32)count); + u8 *p = priv->priv_buf; + + *(u32 *)p = buf_size; + p += sizeof(buf_size); + *(u32 *)p = cmd_size; + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TPM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: sev do cmd error, %d\n", __func__, error); + ret = -EIO; + } + + return ret; +} + +static const struct tpm_class_ops tpm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_c_recv, + .send = tpm_c_send, +}; + +static int hygon_tpm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tpm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tpm_c_ops); + if (IS_ERR(chip)) { + pr_err("tpmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tpm_chip_register fail\n"); + goto err; + } + + pr_info("Hygon TPM2 detected\n"); + + return 0; + +err: + return ret; +} + +static void hygon_tpm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TPM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tpm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tpm2_device_ids[] = { + {"HYGT0101", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tpm2_device_ids); + +static struct acpi_driver hygon_tpm2_acpi_driver = { + .name = "tpm_hygon", + .ids = hygon_tpm2_device_ids, + .ops = { + .add = hygon_tpm2_acpi_add, + .remove = hygon_tpm2_acpi_remove, + }, + .drv = { + .pm = &tpm_hygon_pm, + }, +}; + +static int __init hygon_tpm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tpm2_acpi_driver); +} + +static void __exit hygon_tpm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tpm2_acpi_driver); +} + +/* + * hygon_tpm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). 
+ */ +device_initcall_sync(hygon_tpm2_init); +module_exit(hygon_tpm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TPM2 device driver for Hygon PSP"); -- Gitee From 45a0a4d1bc5ea485ef19a611afe3efd59e72621a Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:23:54 +0800 Subject: [PATCH 856/953] anolis: crypto: tdm: Compile the tdm driver into ccp.ko ANBZ: #8670 The TDM driver is no longer a separate ko module, we compile it into ccp.ko Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 4 +- drivers/crypto/ccp/Makefile | 2 +- drivers/crypto/ccp/psp-dev.c | 16 ++++ drivers/crypto/ccp/{tdm_hygon.c => tdm-dev.c} | 89 ++++++++++++++----- drivers/crypto/ccp/{tdm_hygon.h => tdm-dev.h} | 9 +- 5 files changed, 92 insertions(+), 28 deletions(-) rename drivers/crypto/ccp/{tdm_hygon.c => tdm-dev.c} (97%) rename drivers/crypto/ccp/{tdm_hygon.h => tdm-dev.h} (98%) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 30902232acce..4d5566936262 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -60,8 +60,8 @@ config HYGON_PSP2CPU_CMD help Hygon PSP2CPU Command Support -config TDM_HYGON - tristate "Hygon TDM Interface" +config TDM_DEV_HYGON + bool "Hygon TDM Interface" default y depends on CRYPTO_DEV_CCP_DD depends on HYGON_PSP2CPU_CMD diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 0c66b4d5792d..4550a22f7c63 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -16,6 +16,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ psp-ringbuf.o \ csv-dev.o +ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ @@ -25,7 +26,6 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o -obj-$(CONFIG_TDM_HYGON) += tdm_hygon.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 47de733084b1..1566b955730e 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -21,6 +21,9 @@ #include "tee-dev.h" #include "platform-access.h" #include "dbc.h" +#ifdef CONFIG_TDM_DEV_HYGON +#include "tdm-dev.h" +#endif struct psp_device *psp_master; @@ -330,6 +333,14 @@ static int psp_init(struct psp_device *psp) if (psp->vdata->platform_access) psp_init_platform_access(psp); +#ifdef CONFIG_TDM_DEV_HYGON + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = tdm_dev_init(); + if (ret) + return ret; + } +#endif + return 0; } @@ -749,6 +760,11 @@ void psp_dev_destroy(struct sp_device *sp) if (!psp) return; +#ifdef CONFIG_TDM_DEV_HYGON + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + tdm_dev_destroy(); +#endif + sev_dev_destroy(psp); tee_dev_destroy(psp); diff --git a/drivers/crypto/ccp/tdm_hygon.c b/drivers/crypto/ccp/tdm-dev.c similarity index 97% rename from drivers/crypto/ccp/tdm_hygon.c rename to drivers/crypto/ccp/tdm-dev.c index 56927265841e..99f6e8f7416d 100644 --- a/drivers/crypto/ccp/tdm_hygon.c +++ b/drivers/crypto/ccp/tdm-dev.c @@ -22,7 +22,12 @@ #include #include #include -#include "tdm_hygon.h" +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "tdm: " fmt #define TDM_CMD_ID_MAX 16 
#define TDM2PSP_CMD(id) (0x110 | (id)) @@ -57,6 +62,9 @@ static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID; static struct task_struct *kthread; static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE); static spinlock_t kfifo_lock; +static int tdm_support; +static int tdm_init_flag; +static int tdm_destroy_flag; static int list_check_exist(uint32_t task_id) { @@ -303,7 +311,7 @@ static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) int ret = 0; struct context_message ctx_msg = {0}; unsigned long return_address = 0; -#if IS_BUILTIN(CONFIG_TDM_HYGON) +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) struct module *p_module = NULL; #elif IS_ENABLED(CONFIG_KALLSYMS) char symbol_buf[128] = {0}; @@ -324,7 +332,7 @@ static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) return_address = CALLER_ADDR1; if (return_address) { -#if IS_BUILTIN(CONFIG_TDM_HYGON) +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) p_module = __module_address(return_address); // caller is module if (p_module) @@ -375,7 +383,7 @@ static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) static int tdm_verify_phy_addr_valid(struct addr_range_info *range) { int ret = 0; -#if IS_BUILTIN(CONFIG_TDM_HYGON) +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) int i; uint64_t phy_addr_start, phy_addr_end; @@ -520,6 +528,29 @@ static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uin return ret; } +int psp_check_tdm_support(void) +{ + int ret = 0; + struct tdm_version version; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (tdm_support) + goto end; + + ret = psp_get_fw_info(&version); + if (ret) { + tdm_support = 0; + goto end; + } + + tdm_support = 1; + } + +end: + return tdm_support; +} +EXPORT_SYMBOL_GPL(psp_check_tdm_support); + int psp_get_fw_info(struct tdm_version *version) { int ret = 0; @@ -552,7 +583,7 @@ int psp_get_fw_info(struct tdm_version *version) if (error) { ret = -error; - pr_err("get_fw_info exception error: 0x%x\n", error); + pr_warn("get_fw_info exception: 0x%x\n", error); goto free_cmdresp; } @@ -1493,10 +1524,13 @@ static struct miscdevice misc = { .fops = &tdm_fops, }; -static int __init hygon_tdm_init(void) +int tdm_dev_init(void) { int ret = 0; + if (tdm_init_flag) + return 0; + INIT_KFIFO(kfifo_error_task); INIT_LIST_HEAD(&dyn_head.head); rwlock_init(&dyn_head.lock); @@ -1513,17 +1547,38 @@ static int __init hygon_tdm_init(void) if (IS_ERR(kthread)) { pr_err("kthread_create fail\n"); ret = PTR_ERR(kthread); - return ret; + goto unreg; } wake_up_process(kthread); + + ret = misc_register(&misc); + if (ret) { + pr_err("misc_register for tdm failed\n"); + goto stop_kthread; + } + + tdm_init_flag = 1; pr_info("TDM driver loaded successfully!\n"); - return misc_register(&misc); + return ret; + +stop_kthread: + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } +unreg: + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + return ret; } -static void __exit hygon_tdm_exit(void) +int tdm_dev_destroy(void) { + if (tdm_destroy_flag) + goto end; + if (kthread) { kthread_stop(kthread); kthread = NULL; @@ -1532,18 +1587,8 @@ static void __exit hygon_tdm_exit(void) psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); misc_deregister(&misc); + tdm_destroy_flag = 1; +end: + return 0; } -MODULE_AUTHOR("niuyongwen@hygon.cn"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("0.7"); -MODULE_DESCRIPTION("The dynamic measure driver"); - -/* - * hygon_tdm_init must be done after ccp module init. 
- * That's why we use a device_initcall_sync which is - * called after all the device_initcall(includes ccp) but before the - * late_initcall(includes ima). - */ -device_initcall_sync(hygon_tdm_init); -module_exit(hygon_tdm_exit); diff --git a/drivers/crypto/ccp/tdm_hygon.h b/drivers/crypto/ccp/tdm-dev.h similarity index 98% rename from drivers/crypto/ccp/tdm_hygon.h rename to drivers/crypto/ccp/tdm-dev.h index ac5638986103..afc4761a7e81 100644 --- a/drivers/crypto/ccp/tdm_hygon.h +++ b/drivers/crypto/ccp/tdm-dev.h @@ -22,8 +22,8 @@ * Version: 0.3 (fw version 1.2) * 1.add remote authentication support. */ -#ifndef __TDM_HYGON_H__ -#define __TDM_HYGON_H__ +#ifndef __TDM_DEV_H__ +#define __TDM_DEV_H__ #include #include @@ -481,6 +481,7 @@ struct tdm_show_device { /*Public api definition for tdm*/ typedef int (*measure_exception_handler_t)(uint32_t task_id); +int psp_check_tdm_support(void); int psp_get_fw_info(struct tdm_version *version); int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, uint32_t flag, struct authcode_2b *code); @@ -498,4 +499,6 @@ int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, struct tdm_pcr_value_2b *pcr_values); -#endif /* __TDM_HYGON_H__*/ +int tdm_dev_init(void); +int tdm_dev_destroy(void); +#endif /* __TDM_DEV_H__*/ -- Gitee From 68adcd7497e491e1ddc4c284c0d457c2aa50d8a0 Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:28:24 +0800 Subject: [PATCH 857/953] anolis: crypto: tdm: Support dynamic protection for SCT and IDT by HYGON TDM ANBZ: #8670 tdm_kernel_guard is an application that uses HYGON TDM technology to protect important data in the kernel. Through this application, the dynamic protection of SCT and IDT is completed in the system. In the future, more protection objects can be expanded based on this application Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 11 + drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/tdm-kernel-guard.c | 352 ++++++++++++++++++++++++++ 3 files changed, 364 insertions(+) create mode 100644 drivers/crypto/ccp/tdm-kernel-guard.c diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 4d5566936262..702b4c6761fd 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -68,6 +68,17 @@ config TDM_DEV_HYGON help Hygon TDM driver +config TDM_KERNEL_GUARD + tristate "Hygon TDM kernel guard" + default y + depends on TDM_DEV_HYGON + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_SM3 + help + The key part of kernel is protected by TDM technology, SCT and IDT + are protected by default, and others are added later according to the + requirements. 
+ config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 4550a22f7c63..088d53009824 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -26,6 +26,7 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_KERNEL_GUARD) += tdm-kernel-guard.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/tdm-kernel-guard.c b/drivers/crypto/ccp/tdm-kernel-guard.c new file mode 100644 index 000000000000..c3afe888ea04 --- /dev/null +++ b/drivers/crypto/ccp/tdm-kernel-guard.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The Hygon TDM KERNEL GUARD module driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +static int eh_obj = -1; +module_param(eh_obj, int, 0644); +MODULE_PARM_DESC(eh_obj, "security enhance object for TDM"); + +/* Objects are protected by TDM now + * SCT: 0 + * IDT: 1 + */ +enum ENHANCE_OBJS { + SCT = 0, + IDT, + MAX_OBJ +}; + +static char *obj_names[MAX_OBJ] = { + "SCT", + "IDT", +}; + +struct tdm_security_enhance { + uint64_t vaddr; + uint32_t size; + struct addr_range_info *mem_range; + struct authcode_2b *authcode; + struct measure_data mdata; + uint32_t context; + uint32_t task_id; + char *obj_name; +} __packed; + +static struct tdm_security_enhance eh_objs[MAX_OBJ]; + +static int tdm_regi_callback_handler(uint32_t task_id) +{ + int i = 0; + int ret = 0; + + for (i = 0; i < MAX_OBJ; i++) { + if (task_id == eh_objs[i].task_id) { + pr_warn("Obj: %s, Task:%d, corruption detected!\n", eh_objs[i].obj_name, + task_id); + pr_warn("Please check if it's intended, or your machine may be on danger!\n"); + break; + } + } + return ret; +} + +static int calc_expected_hash(uint8_t *base_addr, uint32_t size, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + return ret; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret) { + pr_err("crypto_shash_init failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_update(sdesc, base_addr, size); + if (ret) { + pr_err("crypto_shash_update failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + pr_err("crypto_shash_final failed\n"); + ret = -1; + goto out; + } + } + +out: + crypto_free_shash(shash); + return ret; +} + +static int tdm_task_create_and_run(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + data->task_id = psp_create_measure_task(data->mem_range, &data->mdata, data->context, + data->authcode); + if (data->task_id < 0) { + ret = data->task_id < 0; + pr_err("create measurement task failed with 0x%x!\n", data->task_id); + goto end; + } + + ret = psp_register_measure_exception_handler(data->task_id, data->authcode, + tdm_regi_callback_handler); + if (ret < 0) { + pr_err("task_id %d callback function 
register failed with 0x%x\n", data->task_id, + ret); + goto release_task; + } + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, true); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d start failed with 0x%x\n", data->task_id, ret); + goto release_task; + } + + return ret; + +release_task: + psp_destroy_measure_task(data->task_id, data->authcode); +end: + return ret; +} + +int tdm_service_run(struct tdm_security_enhance *data) +{ + int ret = 0; + struct addr_range_info *addr_range = NULL; + + // Allocate memory for addr_range + addr_range = kzalloc(sizeof(struct addr_range_info) + sizeof(struct addr_info), GFP_KERNEL); + if (!addr_range) { + ret = -DYN_ERR_MEM; + pr_err("addr_range kzalloc memory failed\n"); + goto end; + } + + // Fill in addr_range + addr_range->count = 1; + addr_range->addr[0].addr_start = data->vaddr; + addr_range->addr[0].length = data->size; + data->mem_range = addr_range; + + // Context configuration + data->context |= TASK_CREATE_VADDR; + + // Allocate memory for authcode + data->authcode = kzalloc(sizeof(struct authcode_2b) + AUTHCODE_MAX, GFP_KERNEL); + if (!data->authcode) { + ret = -DYN_ERR_MEM; + pr_err("authcode_2b kzalloc memory failed\n"); + goto free_addr_range_info; + } + + data->authcode->len = AUTHCODE_MAX; + + // Measurement data configuration + data->mdata.hash_algo = HASH_ALGO_SM3; + data->mdata.period_ms = 0; + ret = calc_expected_hash((uint8_t *)data->vaddr, data->size, + data->mdata.expected_measurement); + if (ret) { + pr_err("calculate expected hash failed!\n"); + goto free_authcode; + } + + // Create and start tdm task + ret = tdm_task_create_and_run(data); + if (ret) { + pr_err("tdm_task_create_and_run failed!\n"); + goto free_authcode; + } + + return ret; + +free_authcode: + kfree(data->authcode); + data->authcode = NULL; +free_addr_range_info: + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +int tdm_service_exit(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, false); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d stop failed with 0x%x\n", data->task_id, ret); + goto end; + } + + // Waiting for the task to end + msleep(40); + + psp_destroy_measure_task(data->task_id, data->authcode); + + kfree(data->authcode); + data->authcode = NULL; + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) +static int p_tmp_kprobe_handler(struct kprobe *p_ri, struct pt_regs *p_regs) +{ + return 0; +} + +unsigned long kprobe_symbol_address_byname(const char *name) +{ + int p_ret; + struct kprobe p_kprobe; + unsigned long addr = 0; + + memset(&p_kprobe, 0, sizeof(p_kprobe)); + + p_kprobe.pre_handler = p_tmp_kprobe_handler; + p_kprobe.symbol_name = name; + + p_ret = register_kprobe(&p_kprobe); + if (p_ret < 0) { + pr_err("register_kprobe error [%d] :(\n", p_ret); + return 0; + } + + addr = (unsigned long)p_kprobe.addr; + unregister_kprobe(&p_kprobe); + + return addr; +} +#endif + +static int __init kernel_security_enhance_init(void) +{ + int i = 0; + int ret = 0; + unsigned long *sct_addr; + struct desc_ptr idtr; +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) + unsigned long (*f_kallsyms_lookup_name)(const char *); + + f_kallsyms_lookup_name = (unsigned long (*)(const char *))kprobe_symbol_address_byname( + "kallsyms_lookup_name"); + if (!f_kallsyms_lookup_name) { + ret = -DYN_ERR_API; + 
pr_err("kprobe_symbol_address_byname failed!"); + goto end; + } + + sct_addr = (unsigned long *)f_kallsyms_lookup_name("sys_call_table"); +#else + + sct_addr = (unsigned long *)kallsyms_lookup_name("sys_call_table"); +#endif + if (!sct_addr) { + ret = -DYN_ERR_API; + pr_err("kallsyms_lookup_name for sys_call_table failed!"); + goto end; + } + + asm("sidt %0":"=m"(idtr)); + + if (!psp_check_tdm_support()) + return 0; + + for (i = 0; i < MAX_OBJ; i++) { + memset(&eh_objs[i], 0, sizeof(eh_objs[i])); + eh_objs[i].context = CONTEXT_CHECK_MODNAME; + eh_objs[i].obj_name = obj_names[i]; + } + + if ((eh_obj == -1) || (eh_obj & (1 << SCT))) { + eh_objs[SCT].vaddr = (uint64_t)sct_addr; + eh_objs[SCT].size = NR_syscalls * sizeof(char *); + } + if ((eh_obj == -1) || (eh_obj & (1 << IDT))) { + eh_objs[IDT].vaddr = idtr.address; + eh_objs[IDT].size = idtr.size; + } + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_run(&eh_objs[i]); + } + + pr_info("Hygon TDM guard load successfully!\n"); + +end: + return ret; +} + +static void __exit kernel_security_enhance_exit(void) +{ + int i = 0; + + if (!psp_check_tdm_support()) + return; + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_exit(&eh_objs[i]); + } + pr_info("Hygon TDM guard unload successfully!\n"); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("Kernel security enhancement module by TDM"); + +/* + * kernel_security_enhance_init must be done after ccp module init. + * That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(kernel_security_enhance_init); +module_exit(kernel_security_enhance_exit); -- Gitee From 9c1186c58415c1ad5fcce0b3ed76901d7eabc4bd Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:31:45 +0800 Subject: [PATCH 858/953] anolis: alinux: tcm: add Hygon TCM2 driver ANBZ: #8670 Hygon CPU implemented a firmware-based TCM2 device, which runs on its internal secure processor named PSP. The device underlying uses an unique private interface in the form of some hardware mailbox between X86 cores and PSP, which is for sure different from the TIS or CRB interfaces defined in the PTP spec. As such, to support this device we need a specialized driver which handles the basic send and receive operations required by the kernel TPM core layer. ACPI device info passed from underlying BIOS indicates the device presence by setting the _HID field to "HYGT0201", which distinguishes it from the rest of devices. If the BIOS does not support this setting, the driver will not be activated and thus has no impact to the system at all. Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/char/tpm/Kconfig | 12 ++ drivers/char/tpm/Makefile | 1 + drivers/char/tpm/tcm_hygon.c | 243 +++++++++++++++++++++++++++++++++++ 3 files changed, 256 insertions(+) create mode 100644 drivers/char/tpm/tcm_hygon.c diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 746661ded992..301284e07603 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -222,5 +222,17 @@ config TCG_HYGON driver as a module, choose M here; the module will be called tpm_hygon. 
+config TCM_HYGON + tristate "Hygon TCM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TCM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tcm_hygon. + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index ccce74915160..8f868c9b9ce7 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -43,3 +43,4 @@ obj-$(CONFIG_TCG_CRB) += tpm_crb.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o +obj-$(CONFIG_TCM_HYGON) += tcm_hygon.o diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c new file mode 100644 index 000000000000..ef63d1a0a902 --- /dev/null +++ b/drivers/char/tpm/tcm_hygon.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TCM2.0 device driver. + * + * Copyright (C) 2023 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TCM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TCM_BUF_LEN 4096 + +struct tcm_hygon_priv { + u8 priv_buf[MAX_TCM_BUF_LEN]; +}; + +struct tcm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tcm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tcm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = sizeof(priv->priv_buf); + u32 cmd_size = (u32)count; + u8 *p = priv->priv_buf; + + if (buf_size - sizeof(u32) - sizeof(u32) < count) { + ret = -E2BIG; + goto out; + } + + *(u32 *)p = cpu_to_be32(buf_size); + p += sizeof(buf_size); + *(u32 *)p = cpu_to_be32(cmd_size); + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TCM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, error); + ret = -EIO; + } + +out: + return ret; +} + +static const struct tpm_class_ops tcm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tcm_c_recv, + .send = tcm_c_send, +}; + +static void tcm_bios_log_teardown(struct tpm_chip *chip) +{ + int i; + struct inode *inode; + + /* securityfs_remove currently doesn't take care of handling sync + * between removal and opening of pseudo files. To handle this, a + * workaround is added by making i_private = NULL here during removal + * and to check it during open(), both within inode_lock()/unlock(). + * This design ensures that open() either safely gets kref or fails. 
+ */ + for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { + if (chip->bios_dir[i]) { + inode = d_inode(chip->bios_dir[i]); + inode_lock(inode); + inode->i_private = NULL; + inode_unlock(inode); + securityfs_remove(chip->bios_dir[i]); + } + } +} + +static void tcm_chip_unregister(struct tpm_chip *chip) +{ + if (IS_ENABLED(CONFIG_HW_RANDOM_TPM)) + hwrng_unregister(&chip->hwrng); + tcm_bios_log_teardown(chip); + cdev_del(&chip->cdevs); + put_device(&chip->devs); + cdev_device_del(&chip->cdev, &chip->dev); +} + +static int hygon_tcm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tcm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tcm_c_ops); + if (IS_ERR(chip)) { + pr_err("tcmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + ret = dev_set_name(&chip->dev, "tcm%d", chip->dev_num); + if (ret) { + pr_err("tcm device set name fail\n"); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tcm chip_register fail\n"); + goto err; + } + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + device_del(&chip->devs); + ret = dev_set_name(&chip->devs, "tcmrm%d", chip->dev_num); + if (ret) { + pr_err("tcmrm device set name fail\n"); + goto err_dev; + } + ret = device_add(&chip->devs); + if (ret) { + pr_err("devs add fail\n"); + goto err_dev; + } + } + + pr_info("Hygon TCM2 detected\n"); + + return 0; + +err_dev: + tcm_chip_unregister(chip); + +err: + return ret; +} + +static void hygon_tcm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TCM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tcm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tcm2_device_ids[] = { + {"HYGT0201", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tcm2_device_ids); + +static struct acpi_driver hygon_tcm2_acpi_driver = { + .name = "tcm_hygon", + .ids = hygon_tcm2_device_ids, + .ops = { + .add = hygon_tcm2_acpi_add, + .remove = hygon_tcm2_acpi_remove, + }, + .drv = { + .pm = &tcm_hygon_pm, + }, +}; + +static int __init hygon_tcm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tcm2_acpi_driver); +} + +static void __exit hygon_tcm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tcm2_acpi_driver); +} + +/* + * hygon_tcm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). 
+ */ +device_initcall_sync(hygon_tcm2_init); +module_exit(hygon_tcm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TCM2 device driver for Hygon PSP"); -- Gitee From 4b5c6cc11d2d9d152d80fcc58994dc53685298d1 Mon Sep 17 00:00:00 2001 From: chench00 Date: Sun, 7 Apr 2024 14:14:44 +0800 Subject: [PATCH 859/953] anolis: crypto: command co-processor: Add config to anolis_defconfig ANBZ: #8670 Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- arch/x86/configs/anolis-debug_defconfig | 5 +++++ arch/x86/configs/anolis_defconfig | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 6f4e977693dd..2619e84b4914 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7430,6 +7430,11 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_PSP2CPU_CMD=y +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m +CONFIG_TDM_DEV_HYGON=y +CONFIG_TDM_KERNEL_GUARD=m CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 47269a2f1c76..ee0afda89125 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7421,6 +7421,11 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_PSP2CPU_CMD=y +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m +CONFIG_TDM_DEV_HYGON=y +CONFIG_TDM_KERNEL_GUARD=m CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m -- Gitee From fc7582a7c7a0f7fc05a39cf4b5826211958e2ff9 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 12:06:54 +0800 Subject: [PATCH 860/953] anolis: KVM: Define CSV3 key management command id ANBZ: #8683 Define Hygon CSV3 key management command id and structure. The command definition is available in Hygon CSV3 spec. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- include/uapi/linux/kvm.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 61c5c6990801..6d8833ac456e 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2311,4 +2311,25 @@ struct kvm_s390_zpci_op { /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) +/* CSV3 command */ +enum csv3_cmd_id { + KVM_CSV3_NR_MIN = 0xc0, + + KVM_CSV3_INIT = KVM_CSV3_NR_MIN, + KVM_CSV3_LAUNCH_ENCRYPT_DATA, + KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + + KVM_CSV3_NR_MAX, +}; + +struct kvm_csv3_init_data { + __u64 nodemask; +}; + +struct kvm_csv3_launch_encrypt_data { + __u64 gpa; + __u64 uaddr; + __u32 len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From 5c07353e656fcbb9f8c481bdeadfddd4a68c2e47 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 12:13:58 +0800 Subject: [PATCH 861/953] anolis: KVM: SVM: CSV: Add KVM_CSV3_INIT command ANBZ: #8683 The command initializes the CSV3 guest's context. The firmware should be initialized before we issue any CSV3 guest commands. 
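
For reference, userspace is expected to reach this handler through the
regular KVM_MEMORY_ENCRYPT_OP ioctl, reusing struct kvm_sev_cmd as the
envelope (the csv_mem_enc_op added below copies exactly that structure
from userspace). A hedged sketch of the call, assuming vm_fd and sev_fd
were set up by the usual KVM/SEV-ES launch path; this helper is
illustrative and not part of the patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: issue KVM_CSV3_INIT against a VM fd. The VM must
 * already be an SEV-ES guest, since csv3_guest_init() rejects the
 * command when es_active is not set.
 */
static int csv3_init(int vm_fd, int sev_fd, uint64_t nodemask)
{
	struct kvm_csv3_init_data data = { .nodemask = nodemask };
	struct kvm_sev_cmd cmd = {
		.id = KVM_CSV3_INIT,
		.data = (uintptr_t)&data,
		.sev_fd = (uint32_t)sev_fd,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}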
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/Makefile | 2 + arch/x86/kvm/svm/csv.c | 110 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 23 +++++++++ arch/x86/kvm/svm/svm.c | 4 ++ 4 files changed, 139 insertions(+) create mode 100644 arch/x86/kvm/svm/csv.c create mode 100644 arch/x86/kvm/svm/csv.h diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 20fa8b2de8a4..16b463703387 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -37,6 +37,8 @@ obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM_INTEL) += kvm-intel.o obj-$(CONFIG_KVM_AMD) += kvm-amd.o +kvm-amd-$(CONFIG_HYGON_CSV) += svm/csv.o + AFLAGS_svm/vmenter.o := -iquote $(obj) $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c new file mode 100644 index 000000000000..7944d84e49bb --- /dev/null +++ b/arch/x86/kvm/svm/csv.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kvm_cache_regs.h" +#include "svm.h" +#include "csv.h" +#include "x86.h" + +#undef pr_fmt +#define pr_fmt(fmt) "CSV: " fmt + +struct kvm_csv_info { + struct kvm_sev_info *sev; + + bool csv3_active; /* CSV3 enabled guest */ + unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ +}; + +struct kvm_svm_csv { + struct kvm_svm kvm_svm; + struct kvm_csv_info csv_info; +}; + +static struct kvm_x86_ops csv_x86_ops; + +static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) +{ + return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); +} + +static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_init_data params; + + if (unlikely(csv->csv3_active)) + return -EINVAL; + + if (unlikely(!sev->es_active)) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + csv->csv3_active = true; + csv->sev = sev; + csv->nodemask = (unsigned long)params.nodemask; + + return 0; +} + +static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) +{ + struct kvm_sev_cmd sev_cmd; + int r = -EINVAL; + + if (!argp) + return 0; + + if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) + return -EFAULT; + + mutex_lock(&kvm->lock); + + switch (sev_cmd.id) { + case KVM_CSV3_INIT: + r = csv3_guest_init(kvm, &sev_cmd); + break; + default: + mutex_unlock(&kvm->lock); + if (likely(csv_x86_ops.mem_enc_ioctl)) + r = csv_x86_ops.mem_enc_ioctl(kvm, argp); + goto out; + } + + mutex_unlock(&kvm->lock); + + if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) + r = -EFAULT; + +out: + return r; +} + +void __init csv_init(struct kvm_x86_ops *ops) +{ + if (boot_cpu_has(X86_FEATURE_CSV3)) { + memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + + ops->mem_enc_ioctl = csv_mem_enc_op; + ops->vm_size = sizeof(struct kvm_svm_csv); + } +} diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h new file mode 100644 index 000000000000..df5cf9ea9422 --- /dev/null +++ b/arch/x86/kvm/svm/csv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon 
Info Technologies Ltd. + */ + +#ifndef __SVM_CSV_H +#define __SVM_CSV_H + +#ifdef CONFIG_HYGON_CSV + +void __init csv_init(struct kvm_x86_ops *ops); + +#else /* !CONFIG_HYGON_CSV */ + +static inline void __init csv_init(struct kvm_x86_ops *ops) { } + +#endif /* CONFIG_HYGON_CSV */ + +#endif diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 820208772b4d..e50e7fe1a86c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -48,6 +48,7 @@ #include "svm.h" #include "svm_ops.h" +#include "csv.h" #include "kvm_onhyperv.h" #include "svm_onhyperv.h" @@ -5455,6 +5456,9 @@ static int __init svm_init(void) if (!kvm_is_svm_supported()) return -EOPNOTSUPP; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + csv_init(&svm_x86_ops); + r = kvm_x86_vendor_init(&svm_init_ops); if (r) return r; -- Gitee From 81a31d0bd1f9218bde63882b6014a4920f0879fa Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 14:54:45 +0800 Subject: [PATCH 862/953] anolis: KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_DATA command ANBZ: #8683 The command is used to load and encrypt data in CSV3 guest's private memory. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/svm/csv.c | 288 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 288 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 7944d84e49bb..3b6f98e05651 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -24,10 +24,19 @@ #undef pr_fmt #define pr_fmt(fmt) "CSV: " fmt +struct encrypt_data_block { + struct { + u64 npages: 12; + u64 pfn: 52; + } entry[512]; +}; + struct kvm_csv_info { struct kvm_sev_info *sev; bool csv3_active; /* CSV3 enabled guest */ + + struct list_head smr_list; /* List of guest secure memory regions */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ }; @@ -36,6 +45,12 @@ struct kvm_svm_csv { struct kvm_csv_info csv_info; }; +struct secure_memory_region { + struct list_head list; + u64 npages; + u64 hpa; +}; + static struct kvm_x86_ops csv_x86_ops; static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) @@ -43,6 +58,35 @@ static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); } +static bool csv3_guest(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + + return sev_es_guest(kvm) && csv->csv3_active; +} + +static int __csv_issue_cmd(int fd, int id, void *data, int *error) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = sev_issue_cmd_external_user(f.file, id, data, error); + + fdput(f); + return ret; +} + +static int csv_issue_cmd(struct kvm *kvm, int id, void *data, int *error) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_cmd(sev->fd, id, data, error); +} + static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -66,6 +110,247 @@ static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) return 0; } +static int csv3_set_guest_private_memory(struct kvm *kvm) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + struct secure_memory_region *smr; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_set_guest_private_memory *set_guest_private_memory; + 
struct csv3_data_memory_region *regions; + nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + LIST_HEAD(tmp_list); + struct list_head *pos, *q; + u32 i = 0, count = 0, remainder; + int ret = 0, error; + u64 size = 0, nr_smr = 0, nr_pages = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + int npages; + struct page *page; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); + + nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map; + + set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory), + GFP_KERNEL_ACCOUNT); + if (!set_guest_private_memory) + return -ENOMEM; + + regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!regions) { + kfree(set_guest_private_memory); + return -ENOMEM; + } + + /* Get guest secure memory size */ + kvm_for_each_memslot(memslot, bkt, slots) { + npages = get_user_pages_unlocked(memslot->userspace_addr, 1, + &page, flags); + if (npages != 1) + continue; + + nr_pages += memslot->npages; + + put_page(page); + } + + /* + * NPT secure memory size + * + * PTEs_entries = nr_pages + * PDEs_entries = nr_pages / 512 + * PDPEs_entries = nr_pages / (512 * 512) + * PML4Es_entries = nr_pages / (512 * 512 * 512) + * + * Totals_entries = nr_pages + nr_pages / 512 + nr_pages / (512 * 512) + + * nr_pages / (512 * 512 * 512) <= nr_pages + nr_pages / 256 + * + * Total_NPT_size = (Totals_entries / 512) * PAGE_SIZE = ((nr_pages + + * nr_pages / 256) / 512) * PAGE_SIZE = nr_pages * 8 + nr_pages / 32 + * <= nr_pages * 9 + * + */ + smr_entry_shift = csv_get_smr_entry_shift(); + size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) + + ALIGN(nr_pages * 9, 1UL << smr_entry_shift); + nr_smr = size >> smr_entry_shift; + remainder = nr_smr; + for (i = 0; i < nr_smr; i++) { + smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); + if (!smr) { + ret = -ENOMEM; + goto e_free_smr; + } + + smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift), + nodemask_ptr, + get_order(1 << smr_entry_shift)); + if (!smr->hpa) { + kfree(smr); + ret = -ENOMEM; + goto e_free_smr; + } + + smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT); + list_add_tail(&smr->list, &tmp_list); + + regions[count].size = (1UL << smr_entry_shift); + regions[count].base_address = smr->hpa; + count++; + + if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) { + set_guest_private_memory->nregions = count; + set_guest_private_memory->handle = sev->handle; + set_guest_private_memory->regions_paddr = __sme_pa(regions); + + /* set secury memory region for launch enrypt data */ + ret = csv_issue_cmd(kvm, CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, + set_guest_private_memory, &error); + if (ret) + goto e_free_smr; + + memset(regions, 0, PAGE_SIZE); + remainder -= count; + count = 0; + } + } + + list_splice(&tmp_list, &csv->smr_list); + + goto done; + +e_free_smr: + if (!list_empty(&tmp_list)) { + list_for_each_safe(pos, q, &tmp_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, + smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } + } +done: + kfree(set_guest_private_memory); + kfree(regions); + return ret; +} + +static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_launch_encrypt_data params; + struct csv3_data_launch_encrypt_data *encrypt_data = NULL; + 
struct encrypt_data_block *blocks = NULL; + u8 *data = NULL; + u32 offset; + u32 num_entries, num_entries_in_block; + u32 num_blocks, num_blocks_max; + u32 i, n; + unsigned long pfn, pfn_sme_mask; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) { + ret = -EFAULT; + goto exit; + } + + if ((params.len & ~PAGE_MASK) || !params.len || !params.uaddr) { + ret = -EINVAL; + goto exit; + } + + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + + num_entries = params.len / PAGE_SIZE; + num_entries_in_block = ARRAY_SIZE(blocks->entry); + num_blocks = (num_entries + num_entries_in_block - 1) / num_entries_in_block; + num_blocks_max = ARRAY_SIZE(encrypt_data->data_blocks); + + if (num_blocks >= num_blocks_max) { + ret = -EINVAL; + goto exit; + } + + data = vzalloc(params.len); + if (!data) { + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(data, (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto data_free; + } + + blocks = vzalloc(num_blocks * sizeof(*blocks)); + if (!blocks) { + ret = -ENOMEM; + goto data_free; + } + + for (offset = 0, i = 0, n = 0; offset < params.len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + data); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + if (offset && ((blocks[n].entry[i].pfn + 1) == pfn_sme_mask)) + blocks[n].entry[i].npages += 1; + else { + if (offset) { + i = (i + 1) % num_entries_in_block; + n = (i == 0) ? (n + 1) : n; + } + blocks[n].entry[i].pfn = pfn_sme_mask; + blocks[n].entry[i].npages = 1; + } + } + + encrypt_data = kzalloc(sizeof(*encrypt_data), GFP_KERNEL); + if (!encrypt_data) { + ret = -ENOMEM; + goto block_free; + } + + encrypt_data->handle = csv->sev->handle; + encrypt_data->length = params.len; + encrypt_data->gpa = params.gpa; + for (i = 0; i <= n; i++) { + encrypt_data->data_blocks[i] = + __sme_set(vmalloc_to_pfn((void *)blocks + i * sizeof(*blocks)) << PAGE_SHIFT); + } + + clflush_cache_range(data, params.len); + ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + encrypt_data, &argp->error); + + kfree(encrypt_data); +block_free: + vfree(blocks); +data_free: + vfree(data); +exit: + return ret; +} + static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -83,6 +368,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_INIT: r = csv3_guest_init(kvm, &sev_cmd); break; + case KVM_CSV3_LAUNCH_ENCRYPT_DATA: + r = csv3_launch_encrypt_data(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) -- Gitee From 168a4bb33a9bc2558ab04430457d582705292e31 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 15:00:59 +0800 Subject: [PATCH 863/953] anolis: KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_VMCB command ANBZ: #8683 The command is used to get secure VMCB physical address which is allocated in private memory by firmware. Besides, shadow VMCB physical address will be updated in secure VMCB. Also the firmware creates a new private page for guest's VMSA per vcpu. 
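
Given the ordering of the KVM_CSV3_* enum, the expected launch sequence
is KVM_CSV3_INIT, then KVM_CSV3_LAUNCH_ENCRYPT_DATA for the guest
payload, then this command as the final step. A hedged userspace sketch
of that last step (no payload is passed; as the code below shows, the
kernel walks every vCPU itself to sync the VMSA and switch to the
secure VMCB):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: final CSV3 launch step. All vCPUs must have been
 * created before this call, because the handler iterates over them.
 */
static int csv3_encrypt_vmcb(int vm_fd, int sev_fd)
{
	struct kvm_sev_cmd cmd = {
		.id = KVM_CSV3_LAUNCH_ENCRYPT_VMCB,
		.sev_fd = (uint32_t)sev_fd,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}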
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/svm/csv.c | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 3b6f98e05651..8cdf01a2f128 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -65,6 +65,26 @@ static bool csv3_guest(struct kvm *kvm) return sev_es_guest(kvm) && csv->csv3_active; } +static int csv_sync_vmsa(struct vcpu_svm *svm) +{ + struct sev_es_save_area *save = svm->sev_es.vmsa; + + /* Check some debug related fields before encrypting the VMSA */ + if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) + return -EINVAL; + + memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); + + /* Sync registgers per spec. */ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + save->xcr0 = svm->vcpu.arch.xcr0; + save->xss = svm->vcpu.arch.ia32_xss; + + return 0; +} + static int __csv_issue_cmd(int fd, int id, void *data, int *error) { struct fd f; @@ -351,6 +371,52 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_vmcb *encrypt_vmcb = NULL; + struct kvm_vcpu *vcpu; + int ret = 0; + unsigned long i = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + encrypt_vmcb = kzalloc(sizeof(*encrypt_vmcb), GFP_KERNEL); + if (!encrypt_vmcb) { + ret = -ENOMEM; + goto exit; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = csv_sync_vmsa(svm); + if (ret) + goto e_free; + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + clflush_cache_range(svm->vmcb, PAGE_SIZE); + encrypt_vmcb->handle = csv->sev->handle; + encrypt_vmcb->vcpu_id = i; + encrypt_vmcb->vmsa_addr = __sme_pa(svm->sev_es.vmsa); + encrypt_vmcb->vmsa_len = PAGE_SIZE; + encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); + encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; + ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); + if (ret) + goto e_free; + + svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free: + kfree(encrypt_vmcb); +exit: + return ret; +} + static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -371,6 +437,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_LAUNCH_ENCRYPT_DATA: r = csv3_launch_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: + r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) -- Gitee From eefc8e7ad12cf45ef7820930b976299582a6eefe Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 15:07:10 +0800 Subject: [PATCH 864/953] anolis: KVM: SVM: CSV: Manage CSV3 guest's nested page table ANBZ: #8683 CSV3 guest's nested page table is managed by firmware. All the guest memory is private by default. The firmware maps guest's private memory in nested page table in advance. CSV3 guest may declare some memory regions as shared. It needs to send secure call command with specified memory region to firmware, then firmware frees the private pages which is mapped to the memory region. 
When guest access the specified memory region by then, nested page fault happens. When nested page fault happens, host needs to issue an external command UPDATE_NPT to firmware. Then firmware helps to map the specified shared pages in nested page table. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/svm/csv.c | 482 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 482 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 8cdf01a2f128..b64619528726 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -31,11 +31,41 @@ struct encrypt_data_block { } entry[512]; }; +union csv3_page_attr { + struct { + u64 reserved: 1; + u64 rw: 1; + u64 reserved1: 49; + u64 mmio: 1; + u64 reserved2: 12; + }; + u64 val; +}; + +enum csv3_pg_level { + CSV3_PG_LEVEL_NONE, + CSV3_PG_LEVEL_4K, + CSV3_PG_LEVEL_2M, + CSV3_PG_LEVEL_NUM +}; + +struct shared_page_block { + struct list_head list; + struct page **pages; + u64 count; +}; + struct kvm_csv_info { struct kvm_sev_info *sev; bool csv3_active; /* CSV3 enabled guest */ + /* List of shared pages */ + u64 total_shared_page_count; + struct list_head shared_pages_list; + void *cached_shared_page_block; + struct mutex shared_page_block_lock; + struct list_head smr_list; /* List of guest secure memory regions */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ }; @@ -58,6 +88,24 @@ static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); } +static int to_csv3_pg_level(int level) +{ + int ret; + + switch (level) { + case PG_LEVEL_4K: + ret = CSV3_PG_LEVEL_4K; + break; + case PG_LEVEL_2M: + ret = CSV3_PG_LEVEL_2M; + break; + default: + ret = CSV3_PG_LEVEL_NONE; + } + + return ret; +} + static bool csv3_guest(struct kvm *kvm) { struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; @@ -107,6 +155,16 @@ static int csv_issue_cmd(struct kvm *kvm, int id, void *data, int *error) return __csv_issue_cmd(sev->fd, id, data, error); } +static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt, + gpa_t gpa, u32 error, u32 handle) +{ + memset(update_npt, 0x00, sizeof(*update_npt)); + + update_npt->gpa = gpa & PAGE_MASK; + update_npt->error_code = error; + update_npt->handle = handle; +} + static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -127,9 +185,20 @@ static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) csv->sev = sev; csv->nodemask = (unsigned long)params.nodemask; + INIT_LIST_HEAD(&csv->shared_pages_list); + INIT_LIST_HEAD(&csv->smr_list); + mutex_init(&csv->shared_page_block_lock); + return 0; } +static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) +{ + return !e820__mapped_raw_any(pfn_to_hpa(pfn), + pfn_to_hpa(pfn + 1) - 1, + E820_TYPE_RAM); +} + static int csv3_set_guest_private_memory(struct kvm *kvm) { struct kvm_memslots *slots = kvm_memslots(kvm); @@ -417,6 +486,416 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, + unsigned long npages) +{ + gfn_t gfn; + gfn_t gfn_end; + + gfn = gpa >> PAGE_SHIFT; + gfn_end = gfn + npages; +#ifdef KVM_HAVE_MMU_RWLOCK + write_lock(&vcpu->kvm->mmu_lock); +#else + spin_lock(&vcpu->kvm->mmu_lock); +#endif + for (; gfn < gfn_end; gfn++) + kvm_vcpu_mark_page_dirty(vcpu, 
gfn); +#ifdef KVM_HAVE_MMU_RWLOCK + write_unlock(&vcpu->kvm->mmu_lock); +#else + spin_unlock(&vcpu->kvm->mmu_lock); +#endif +} + +static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code) +{ + int r = 0; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + union csv3_page_attr page_attr = {.mmio = 1}; + union csv3_page_attr page_attr_mask = {.mmio = 1}; + struct csv3_data_update_npt *update_npt; + int psp_ret; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + update_npt->page_attr = page_attr.val; + update_npt->page_attr_mask = page_attr_mask.val; + update_npt->level = CSV3_PG_LEVEL_4K; + + r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + + if (psp_ret != SEV_RET_SUCCESS) + r = -EFAULT; + + kfree(update_npt); +exit: + return r; +} + +static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, + u32 error_code, struct kvm_memory_slot *slot, + int *psp_ret_ptr, kvm_pfn_t pfn, u32 level) +{ + int r = 0; + struct csv3_data_update_npt *update_npt; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + int psp_ret = 0; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + + update_npt->spa = pfn << PAGE_SHIFT; + update_npt->level = level; + + if (!csv3_is_mmio_pfn(pfn)) + update_npt->spa |= sme_me_mask; + + r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + + csv3_mark_page_dirty(vcpu, update_npt->gpa, update_npt->npages); + + if (psp_ret_ptr) + *psp_ret_ptr = psp_ret; + + kfree(update_npt); +exit: + return r; +} + +static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn) +{ + struct page **pages, *page; + u64 hva; + int npinned; + kvm_pfn_t tmp_pfn; + struct kvm *kvm = vcpu->kvm; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page_block *shared_page_block = NULL; + u64 npages = PAGE_SIZE / sizeof(struct page *); + bool write = !(slot->flags & KVM_MEM_READONLY); + + tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write, + NULL, NULL); + if (unlikely(is_error_pfn(tmp_pfn))) + return -ENOMEM; + + if (csv3_is_mmio_pfn(tmp_pfn)) { + *pfn = tmp_pfn; + return 0; + } + + if (!page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) { + kvm_release_pfn_clean(tmp_pfn); + if (csv->total_shared_page_count % npages == 0) { + shared_page_block = kzalloc(sizeof(*shared_page_block), + GFP_KERNEL_ACCOUNT); + if (!shared_page_block) + return -ENOMEM; + + pages = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!pages) { + kfree(shared_page_block); + return -ENOMEM; + } + + shared_page_block->pages = pages; + list_add_tail(&shared_page_block->list, + &csv->shared_pages_list); + csv->cached_shared_page_block = shared_page_block; + } else { + shared_page_block = csv->cached_shared_page_block; + pages = shared_page_block->pages; + } + + hva = __gfn_to_hva_memslot(slot, gfn); + npinned = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, + &page); + if (npinned != 1) { + if (shared_page_block->count == 0) { + list_del(&shared_page_block->list); + kfree(pages); + kfree(shared_page_block); + } + return -ENOMEM; + } + + pages[csv->total_shared_page_count % npages] = page; + 
shared_page_block->count++; + csv->total_shared_page_count++; + *pfn = page_to_pfn(page); + } else { + kvm_release_pfn_clean(tmp_pfn); + *pfn = tmp_pfn; + } + + return 0; +} + +static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = PG_LEVEL_4K; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeks, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). + */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + if (pud_large(pud)) { + level = PG_LEVEL_1G; + goto out; + } + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (pmd_large(pmd)) + level = PG_LEVEL_2M; + +out: + local_irq_restore(flags); + return level; +} + +static int csv3_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn, + struct kvm_memory_slot *slot) +{ + int level; + int page_num; + gfn_t gfn_base; + + if (csv3_is_mmio_pfn(pfn)) { + level = PG_LEVEL_4K; + goto end; + } + + if (!PageCompound(pfn_to_page(pfn))) { + level = PG_LEVEL_4K; + goto end; + } + + level = PG_LEVEL_2M; + page_num = KVM_PAGES_PER_HPAGE(level); + gfn_base = gfn & ~(page_num - 1); + + /* + * 2M aligned guest address in memslot. + */ + if ((gfn_base < slot->base_gfn) || + (gfn_base + page_num > slot->base_gfn + slot->npages)) { + level = PG_LEVEL_4K; + goto end; + } + + /* + * hva in memslot is 2M aligned. + */ + if (__gfn_to_hva_memslot(slot, gfn_base) & ~PMD_MASK) { + level = PG_LEVEL_4K; + goto end; + } + + level = __pfn_mapping_level(vcpu->kvm, gfn, slot); + + /* + * Firmware supports 2M/4K level. + */ + level = level > PG_LEVEL_2M ? 
PG_LEVEL_2M : level; + +end: + return to_csv3_pg_level(level); +} + +static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, u32 error_code) +{ + int ret = 0; + int psp_ret = 0; + int level; + kvm_pfn_t pfn; + struct kvm_csv_info *csv = &to_kvm_svm_csv(vcpu->kvm)->csv_info; + + if (error_code & PFERR_PRESENT_MASK) + level = CSV3_PG_LEVEL_4K; + else { + mutex_lock(&csv->shared_page_block_lock); + ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn); + mutex_unlock(&csv->shared_page_block_lock); + if (ret) + goto exit; + + level = csv3_mapping_level(vcpu, gfn, pfn, slot); + } + + ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot, + &psp_ret, pfn, level); + + if (psp_ret != SEV_RET_SUCCESS) + ret = -EFAULT; +exit: + return ret; +} + +static void csv_vm_destroy(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct list_head *head = &csv->shared_pages_list; + struct list_head *pos, *q; + struct shared_page_block *shared_page_block; + struct kvm_vcpu *vcpu; + unsigned long i = 0; + + struct list_head *smr_head = &csv->smr_list; + struct secure_memory_region *smr; + + if (csv3_guest(kvm)) { + mutex_lock(&csv->shared_page_block_lock); + if (!list_empty(head)) { + list_for_each_safe(pos, q, head) { + shared_page_block = list_entry(pos, + struct shared_page_block, list); + unpin_user_pages(shared_page_block->pages, + shared_page_block->count); + kfree(shared_page_block->pages); + csv->total_shared_page_count -= + shared_page_block->count; + list_del(&shared_page_block->list); + kfree(shared_page_block); + } + } + mutex_unlock(&csv->shared_page_block_lock); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + svm->current_vmcb->pa = __sme_pa(svm->vmcb); + } + } + + if (likely(csv_x86_ops.vm_destroy)) + csv_x86_ops.vm_destroy(kvm); + + if (!csv3_guest(kvm)) + return; + + /* free secure memory region */ + if (!list_empty(smr_head)) { + list_for_each_safe(pos, q, smr_head) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } + } +} + +static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, + u32 error_code) +{ + gfn_t gfn = gpa_to_gfn(gpa); + struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn); + int ret; + int r = -EIO; + + if (kvm_is_visible_memslot(slot)) + ret = csv3_page_fault(vcpu, slot, gfn, error_code); + else + ret = csv3_mmio_page_fault(vcpu, gpa, error_code); + + if (!ret) + r = 1; + + return r; +} + +static int csv_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 exit_code = svm->vmcb->control.exit_code; + int ret = -EIO; + + /* + * NPF for csv3 is dedicated. 
+ */ + if (csv3_guest(vcpu->kvm) && exit_code == SVM_EXIT_NPF) { + gpa_t gpa = __sme_clr(svm->vmcb->control.exit_info_2); + u64 error_code = svm->vmcb->control.exit_info_1; + + ret = csv3_handle_page_fault(vcpu, gpa, error_code); + } else { + if (likely(csv_x86_ops.handle_exit)) + ret = csv_x86_ops.handle_exit(vcpu, exit_fastpath); + } + + return ret; +} + +static void csv_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!csv3_guest(kvm)) { + if (likely(csv_x86_ops.guest_memory_reclaimed)) + csv_x86_ops.guest_memory_reclaimed(kvm); + } +} + static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -462,6 +941,9 @@ void __init csv_init(struct kvm_x86_ops *ops) memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); ops->mem_enc_ioctl = csv_mem_enc_op; + ops->vm_destroy = csv_vm_destroy; ops->vm_size = sizeof(struct kvm_svm_csv); + ops->handle_exit = csv_handle_exit; + ops->guest_memory_reclaimed = csv_guest_memory_reclaimed; } } -- Gitee From 5024716f9ecf64175565f7f86a217189e48aa57b Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 15:27:59 +0800 Subject: [PATCH 865/953] anolis: x86/boot/compressed/64: Add CSV3 guest detection ANBZ: #8683 Check if CSV3 guest is active at boot compressed stage. It checks HYGON hardware with CPUID 0x00000000 and bit30 of MSR 0xc0010131. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/boot/compressed/Makefile | 1 + arch/x86/boot/compressed/csv.c | 37 ++++++++++++++++++++++++++++++ arch/x86/boot/compressed/csv.h | 23 +++++++++++++++++++ arch/x86/boot/compressed/head_64.S | 10 ++++++++ arch/x86/boot/compressed/misc.h | 1 + arch/x86/kernel/csv-shared.c | 16 +++++++++++++ 6 files changed, 88 insertions(+) create mode 100644 arch/x86/boot/compressed/csv.c create mode 100644 arch/x86/boot/compressed/csv.h create mode 100644 arch/x86/kernel/csv-shared.c diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 71fc531b95b4..a0c5dab165d9 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -108,6 +108,7 @@ ifdef CONFIG_X86_64 vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o vmlinux-objs-y += $(obj)/pgtable_64.o vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o + vmlinux-objs-$(CONFIG_HYGON_CSV) += $(obj)/csv.o endif vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c new file mode 100644 index 000000000000..79ffb8746d17 --- /dev/null +++ b/arch/x86/boot/compressed/csv.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV Support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#include "misc.h" + +#include "../../kernel/csv-shared.c" + +static unsigned int csv3_enabled __section(".data"); + +void csv_set_status(void) +{ + unsigned int eax; + unsigned int ebx; + unsigned int ecx; + unsigned int edx; + + eax = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* HygonGenuine */ + if (ebx == CPUID_VENDOR_HygonGenuine_ebx && + ecx == CPUID_VENDOR_HygonGenuine_ecx && + edx == CPUID_VENDOR_HygonGenuine_edx && + sme_me_mask) { + unsigned long low, high; + + asm volatile("rdmsr\n" : "=a" (low), "=d" (high) : + "c" (MSR_AMD64_SEV)); + + if (low & MSR_CSV3_ENABLED) + csv3_enabled = 1; + } +} diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h new file mode 100644 index 000000000000..2331d4ade97f --- /dev/null +++ b/arch/x86/boot/compressed/csv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon CSV header for early boot related functions. + * + * Copyright (C) Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef BOOT_COMPRESSED_CSV_H +#define BOOT_COMPRESSED_CSV_H + +#ifdef CONFIG_HYGON_CSV + +void csv_set_status(void); + +#else + +static inline void csv_set_status(void) { } + +#endif + +#endif /* BOOT_COMPRESSED_CSV_H */ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index bf4a10a5794f..3eebf2e647f9 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -397,6 +397,16 @@ SYM_CODE_START(startup_64) movq %r15, %rdi call sev_enable #endif +#ifdef CONFIG_HYGON_CSV + /* + * Check CSV active status. The CSV and CSV2 guest are indicated by + * MSR_AMD64_SEV_ENABLED_BIT and MSR_AMD64_SEV_ES_ENABLED_BIT in MSR + * register 0xc0010131, respectively. + * The CSV3 guest is indicated by MSR_CSV3_ENABLED in MSR register + * 0xc0010131. + */ + call csv_set_status +#endif /* * configure_5level_paging() updates the number of paging levels using diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index aae1a2db4251..674433c522ed 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -37,6 +37,7 @@ #include #include "tdx.h" +#include "csv.h" #define BOOT_CTYPE_H #include diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c new file mode 100644 index 000000000000..e46f873fd69d --- /dev/null +++ b/arch/x86/kernel/csv-shared.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV support + * + * This file is shared between decompression boot code and running + * linux kernel. + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +#define MSR_CSV3_ENABLED_BIT 30 +#define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) -- Gitee From 0aadb0173d8fc0d45a975cb12bc3feea88c82c63 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 15:39:50 +0800 Subject: [PATCH 866/953] anolis: x86/boot/compressed/64: Init CSV3 secure call pages ANBZ: #8683 CSV3 secure call is a method to communicate with the dedicated secure processor that host cannot tamper with. We declare two dedicated pages named secure call pages to hold the command which guest wants to send to the secure processor. The secure processor always sets only one page of the two as present in nested page table. Read/write action on the two pages will triger NPF then host must issue an external command to the secure processor. 
The secure processor gets the guest's command if the fault address is secure call page. CSV3 secure call command is used to set specified memory as shared or private in usual. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/boot/compressed/csv.c | 30 +++++ arch/x86/boot/compressed/csv.h | 2 + arch/x86/boot/compressed/head_64.S | 10 ++ arch/x86/kernel/csv-shared.c | 199 +++++++++++++++++++++++++++++ 4 files changed, 241 insertions(+) diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c index 79ffb8746d17..cad545271cc3 100644 --- a/arch/x86/boot/compressed/csv.c +++ b/arch/x86/boot/compressed/csv.c @@ -7,9 +7,39 @@ #include "misc.h" +#undef __init +#undef __initdata +#undef __pa +#define __init +#define __initdata +#define __pa(x) ((unsigned long)(x)) + #include "../../kernel/csv-shared.c" static unsigned int csv3_enabled __section(".data"); +static unsigned int csv3_secure_call_init; + +/* Invoke it before jump to real kernel in case secure call pages are not mapped + * in the identity page table. + * + * If no #VC happens, there is no identity mapping in page table for secure call + * pages. And page fault is not supported in the early stage when real kernel is + * running. As a result, CSV3 guest will shutdown when access secure call pages + * by then. + */ +void csv_init_secure_call_pages(void *boot_params) +{ + if (!csv3_enabled || csv3_secure_call_init) + return; + + /* + * boot_params may be not sanitized, but it's OK to access e820_table + * field. + */ + csv3_scan_secure_call_pages(boot_params); + csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET); + csv3_secure_call_init = 1; +} void csv_set_status(void) { diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h index 2331d4ade97f..3a2196b328c6 100644 --- a/arch/x86/boot/compressed/csv.h +++ b/arch/x86/boot/compressed/csv.h @@ -13,10 +13,12 @@ #ifdef CONFIG_HYGON_CSV void csv_set_status(void); +void csv_init_secure_call_pages(void *boot_params); #else static inline void csv_set_status(void) { } +static inline void csv_init_secure_call_pages(void *boot_params) { } #endif diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 3eebf2e647f9..d01f9431cee1 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -473,6 +473,16 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) movq %r15, %rdi call initialize_identity_maps +#ifdef CONFIG_HYGON_CSV + /* + * If running as a CSV3 guest, secure call pages must be mapped in + * the identity page table before jumping to the decompressed kernel. + * Scan secure call pages here in safe. + */ + movq %r15, %rdi + call csv_init_secure_call_pages +#endif + /* * Do the extraction, and jump to the new kernel.. */ diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c index e46f873fd69d..fd55e570bbbb 100644 --- a/arch/x86/kernel/csv-shared.c +++ b/arch/x86/kernel/csv-shared.c @@ -8,9 +8,208 @@ * Copyright (C) Hygon Info Technologies Ltd. */ +#include + #define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 #define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 #define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e #define MSR_CSV3_ENABLED_BIT 30 #define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) + +/* + ****************************** CSV3 secure call ******************************* + * + * CSV3 guest is based on hygon secure isolated virualization feature. 
An secure + * processor which resides in hygon SOC manages guest's private memory. The + * secure processor allocates or frees private memory for CSV3 guest and manages + * CSV3 guest's nested page table. + * + * As the secure processor is considered as a PCI device in host, CSV3 guest can + * not communicate with it directly. Howerver, CSV3 guest must request the secure + * processor to change its physical memory between private memory and shared + * memory. CSV3 secure call command is a method used to communicate with secure + * processor that host cannot tamper with the data in CSV3 guest. Host can only + * perform an external command to notify the secure processor to handle the + * pending guest's command. + * + * CSV3 secure call pages: + * Secure call pages are two dedicated pages that reserved by BIOS. We define + * secure call pages as page A and page B. During guest launch stage, the secure + * processor will parse the address of secure call pages. The secure processor + * maps the two pages with same private memory page in NPT. The secure processor + * always set one page as present and another page as non-present in NPT. + + * CSV3 secure call main work flow: + * If we write the guest's commands in one page then read them from another page, + * nested page fault happens and the guest exits to host. Then host will perform + * an external command with the gpa(page A or page B) to the secure processor. + * The secure processor checks that the gpa in NPF belongs to secure call pages, + * read the guest's command to handle, then switch the present bit between the + * two pages. + * + * guest page A guest page B + * | | + * ____|______________|____ + * | | + * | nested page table | + * |______________________| + * \ / + * \ / + * \ / + * \ / + * \ / + * secure memory page + * + * CSV3_SECURE_CMD_ENC: + * CSV3 guest declares a specifid memory range as secure. By default, all of + * CSV3 guest's memory mapped as secure. + * The secure processor allocate a block of secure memory and map the memory + * in CSV3 guest's NPT with the specified guest physical memory range in CSV3 + * secure call. + * + * CSV3_SECURE_CMD_DEC: + * CSV3 guest declares a specified memory range as shared. + * The secure processor save the guest physical memory range in its own ram + * and free the range in CSV3 guest's NPT. When CSV3 guest access the memory, + * a new nested page fault happens. + * + * CSV3_SECURE_CMD_RESET: + * CSV3 guest switches all of the shared memory to secure. + * The secure processor resets all the shared memory in CSV3 guest's NPT and + * clears the saved shared memory range. Then the secure process allocates + * secure memory to map in CSV3 guest's NPT. + * + * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE: + * CSV3 guest wants to change the secure call pages. + * The secure processor re-init the secure call context. + */ +enum csv3_secure_command_type { + CSV3_SECURE_CMD_ENC = 1, + CSV3_SECURE_CMD_DEC, + CSV3_SECURE_CMD_RESET, + CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE, +}; + +/* + * Secure call page fields. + * Secure call page size is 4KB always. We define CSV3 secure call page structure + * as below. + * guid: Must be in the first 128 bytes of the page. Its value should be + * (0xceba2fa59a5d926ful, 0xa556555d276b21abul) always. + * cmd_type: Command to be issued to the secure processor. + * nums: number of entries in the command. + * base_address:Start address of the memory range. + * size: Size of the memory range. 
+ */ +#define SECURE_CALL_ENTRY_MAX (254) + +/* size of secure call cmd is 4KB. */ +struct csv3_secure_call_cmd { + union { + u8 guid[16]; + u64 guid_64[2]; + }; + u32 cmd_type; + u32 nums; + u64 unused; + struct { + u64 base_address; + u64 size; + } entry[SECURE_CALL_ENTRY_MAX]; +}; + +/* csv3 secure call guid, do not change the value. */ +#define CSV3_SECURE_CALL_GUID_LOW 0xceba2fa59a5d926ful +#define CSV3_SECURE_CALL_GUID_HIGH 0xa556555d276b21abul + +static u64 csv3_boot_sc_page_a __initdata = -1ul; +static u64 csv3_boot_sc_page_b __initdata = -1ul; +static u32 early_page_idx __initdata; + +/** + * csv3_scan_secure_call_pages - try to find the secure call pages. + * @boot_params: boot parameters where e820_table resides. + * + * The secure call pages are reserved by BIOS. We scan all the reserved pages + * to check the CSV3 secure call guid bytes. + */ +void __init csv3_scan_secure_call_pages(struct boot_params *boot_params) +{ + struct boot_e820_entry *entry; + struct csv3_secure_call_cmd *sc_page; + u64 offset; + u64 addr; + u8 i; + u8 table_num; + int count = 0; + + if (!boot_params) + return; + + if (csv3_boot_sc_page_a != -1ul && csv3_boot_sc_page_b != -1ul) + return; + + table_num = min_t(u8, boot_params->e820_entries, + E820_MAX_ENTRIES_ZEROPAGE); + entry = &boot_params->e820_table[0]; + for (i = 0; i < table_num; i++) { + if (entry[i].type != E820_TYPE_RESERVED) + continue; + + addr = entry[i].addr & PAGE_MASK; + for (offset = 0; offset < entry[i].size; offset += PAGE_SIZE) { + sc_page = (void *)(addr + offset); + if (sc_page->guid_64[0] == CSV3_SECURE_CALL_GUID_LOW && + sc_page->guid_64[1] == CSV3_SECURE_CALL_GUID_HIGH) { + if (count == 0) + csv3_boot_sc_page_a = addr + offset; + else if (count == 1) + csv3_boot_sc_page_b = addr + offset; + count++; + } + if (count >= 2) + return; + } + } +} + +/** + * csv3_early_secure_call - issue early secure call command at the stage where + * identity page table is created. + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. + */ +void __init csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + /* identity mapping at the stage. */ + page_rd = (void *)(early_page_idx ? csv3_boot_sc_page_a : csv3_boot_sc_page_b); + page_wr = (void *)(early_page_idx ? csv3_boot_sc_page_b : csv3_boot_sc_page_a); + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + early_page_idx ^= 1; +} -- Gitee From 91ca5cf787e9dd67be301e4170a1476b12b8e85f Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:26:46 +0800 Subject: [PATCH 867/953] anolis: x86/boot/compressed/64: Add CSV3 update page attr(private/shared) ANBZ: #8683 The function is needed to set encrypted page as private or set decrypted page as shared at the stage where identity page table is created. By default, all memory is set as private. CSV3 guest's NPT is managed by the secure processor. 
The secure processor must perform the correct action for private/shared
memory, as it manages the guest's secure isolated memory, which cannot be
accessed by other guests or by the host. As a CSV3 feature, the guest's
encrypted memory maps to secure isolated memory, while decrypted memory,
which is shared with the host, maps to normal memory.

At the kernel decompression stage, only the GHCB page is set as shared.

Signed-off-by: Xin Jiang
Signed-off-by: hanliyang
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2996
---
 arch/x86/boot/compressed/csv.c          | 14 ++++++++++++++
 arch/x86/boot/compressed/csv.h          |  5 +++++
 arch/x86/boot/compressed/ident_map_64.c |  3 +++
 3 files changed, 22 insertions(+)

diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c
index cad545271cc3..ae6ca1484048 100644
--- a/arch/x86/boot/compressed/csv.c
+++ b/arch/x86/boot/compressed/csv.c
@@ -19,6 +19,20 @@ static unsigned int csv3_enabled __section(".data");
 
 static unsigned int csv3_secure_call_init;
 
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr)
+{
+	if (!csv3_enabled)
+		return;
+
+	if ((set | clr) & _PAGE_ENC) {
+		if (set & _PAGE_ENC)
+			csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_ENC);
+
+		if (clr & _PAGE_ENC)
+			csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_DEC);
+	}
+}
+
 /* Invoke it before jump to real kernel in case secure call pages are not mapped
  * in the identity page table.
  *
diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h
index 3a2196b328c6..8b8a33551895 100644
--- a/arch/x86/boot/compressed/csv.h
+++ b/arch/x86/boot/compressed/csv.h
@@ -15,11 +15,16 @@
 
 void csv_set_status(void);
 void csv_init_secure_call_pages(void *boot_params);
 
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr);
+
 #else
 
 static inline void csv_set_status(void) { }
 
 static inline void csv_init_secure_call_pages(void *boot_params) { }
 
+static inline void csv_update_page_attr(unsigned long address,
+					pteval_t set, pteval_t clr) { }
+
 #endif
 
 #endif /* BOOT_COMPRESSED_CSV_H */
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index aead80ec70a0..a7b4148a943f 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -298,6 +298,9 @@ static int set_clr_page_flags(struct x86_mapping_info *info,
 	if ((set | clr) & _PAGE_ENC) {
 		clflush_page(address);
 
+		/* On CSV3, notify secure processor to manage page attr changes */
+		csv_update_page_attr(address, set, clr);
+
 		/*
 		 * If the encryption attribute is being cleared, change the page state
 		 * to shared in the RMP table.
-- Gitee

From dadc0cd7a4351ed3f431ead351f4ee050c1c8d03 Mon Sep 17 00:00:00 2001
From: Xin Jiang
Date: Fri, 15 Mar 2024 09:32:56 +0800
Subject: [PATCH 868/953] anolis: x86/kernel: Add CSV3 early update(enc/dec)/reset memory helpers

ANBZ: #8683

These functions are needed to set memory as private/shared, or to reset
all memory as private, at the stage where the identity mapping page table
is available.

Generally, at early runtime of the decompressed kernel, it needs to obtain
the CSV3 secure call pages and then reset all memory as private before
switching to the new kernel page table. Otherwise, stale shared memory
regions would be wrongly reused and private data in the guest could be
accessed maliciously.
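For illustration, a minimal sketch of the intended call order (the
example_* function is hypothetical; only the csv_early_* helpers added
below are real, and __start_bss_decrypted is used the same way by the
follow-on head64.c patch):

	/* Hypothetical early-boot caller, run while the identity mapping
	 * is still live and before the new kernel page table is switched in.
	 */
	static void __init example_csv3_early_setup(struct boot_params *bp)
	{
		/* Re-locate the secure call pages, make all memory private. */
		csv_early_reset_memory(bp);

		/* Then re-share only ranges the host must see, e.g. one page: */
		csv_early_update_memory_dec((u64)__start_bss_decrypted, 1);
	}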
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/include/asm/csv.h | 12 +++++++ arch/x86/kernel/Makefile | 2 ++ arch/x86/kernel/csv.c | 68 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 arch/x86/kernel/csv.c diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 68f55e1b857b..1fa9b4396c68 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -32,6 +32,12 @@ void csv_release_to_contiguous(phys_addr_t pa, size_t size); uint32_t csv_get_smr_entry_shift(void); +bool csv3_active(void); + +void __init csv_early_reset_memory(struct boot_params *bp); +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); + #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL @@ -46,6 +52,12 @@ static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } +static inline bool csv3_active(void) { return false; } + +static inline void __init csv_early_reset_memory(struct boot_params *bp) { } +static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } +static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0000325ab98f..c25d40cbbdbe 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -160,3 +160,5 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o endif + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c new file mode 100644 index 000000000000..a90246e31ae5 --- /dev/null +++ b/arch/x86/kernel/csv.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#include +#include +#include +#include +#include + +#include "../mm/mm_internal.h" +#include "csv-shared.c" + +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; + +struct secure_call_pages { + struct csv3_secure_call_cmd page_a; + struct csv3_secure_call_cmd page_b; +}; + +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} + +void __init csv_early_reset_memory(struct boot_params *bp) +{ + if (!csv3_active()) + return; + + csv3_scan_secure_call_pages(bp); + csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET); +} + +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_DEC); +} + +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_ENC); +} -- Gitee From 3bad69dc43eda1addc15b6456207d08d5eecc68a Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:36:33 +0800 Subject: [PATCH 869/953] anolis: x86/kernel: Set bss decrypted memory as shared in CSV3 guest ANBZ: #8683 Guest kernel declares bss decrypted memory section to share data with host. In CSV3 guest, the decrypted memory must be set as shared. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kernel/head64.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 08dd5d8932a9..612c649743b0 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -41,6 +41,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -161,6 +162,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv i = pmd_index(vaddr); pmd[i] -= sme_get_me_mask(); } + + /* On CSV3, move the shared pages out of isolated memory region. */ + if (csv3_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + csv_early_reset_memory(bp); + csv_early_update_memory_dec((unsigned long)vaddr, + (vaddr_end - vaddr) >> PAGE_SHIFT); + } } /* -- Gitee From 5e42de4a62700153a4343e93ffbf3947f0ad0fb7 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:42:09 +0800 Subject: [PATCH 870/953] anolis: x86: Update memory shared/private attribute in early boot for CSV3 guest ANBZ: #8683 Add functions to change the memory shared/private attribute in early boot code. When CSV3 is active, the decrypted memory must be mapped to normal (non-isolated) memory in nested page table so that hypervisor and guest can access shared data. But in-place encrypt/decrypt action on the memory is not applicable in CSV3 as CSV3 guest's private page will not be changed to shared page until the secure processor update NPT. Also new secure call pages should be initialized for per cpu to support multiple cpu secure call commands simultaneously. 
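As an aside, the per-cpu handshake these helpers rely on can be sketched
as follows (field names are those of struct csv3_secure_call_cmd; page_wr
and page_rd stand for the two pages of the current CPU's pair, and cmd,
base and npages are contextual):

	/* One secure call round trip, as a sketch. Reading page_rd faults
	 * into the host, which lets the secure processor consume the
	 * command and flip which page is present; a changed cmd_type acks.
	 */
	do {
		page_wr->cmd_type = cmd;
		page_wr->nums = 1;
		page_wr->entry[0].base_address = base;
		page_wr->entry[0].size = npages << PAGE_SHIFT;
		mb();	/* publish the command before polling the ack */
	} while (page_rd->cmd_type == cmd);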
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/include/asm/csv.h | 5 + arch/x86/kernel/csv.c | 186 ++++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_amd.c | 14 +++ 3 files changed, 205 insertions(+) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 1fa9b4396c68..0cc4e48b9ade 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -38,6 +38,8 @@ void __init csv_early_reset_memory(struct boot_params *bp); void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); + #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL @@ -58,6 +60,9 @@ static inline void __init csv_early_reset_memory(struct boot_params *bp) { } static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } +static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, + bool enc) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index a90246e31ae5..9f3b0f3d3cd9 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -23,6 +23,15 @@ struct secure_call_pages { struct csv3_secure_call_cmd page_b; }; +static u32 csv3_percpu_secure_call_init __initdata; +static u32 early_secure_call_page_idx __initdata; + +static DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data); +static DEFINE_PER_CPU(int, secure_call_page_idx); + +typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type); + bool noinstr csv3_active(void) { if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { @@ -66,3 +75,180 @@ void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) if (pages) csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_ENC); } + +static void __init csv3_alloc_secure_call_data(int cpu) +{ + struct secure_call_pages *data; + + data = memblock_alloc(sizeof(*data), PAGE_SIZE); + if (!data) + panic("Can't allocate CSV3 secure all data"); + + per_cpu(secure_call_data, cpu) = data; +} + +static void __init csv3_secure_call_update_table(void) +{ + int cpu; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (!csv3_active()) + return; + + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE); + + while (1) { + page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE; + page_wr->nums = 0; + + /* initialize per-cpu secure call pages */ + for_each_possible_cpu(cpu) { + if (cpu >= SECURE_CALL_ENTRY_MAX) + panic("csv does not support cpus > %d\n", + SECURE_CALL_ENTRY_MAX); + csv3_alloc_secure_call_data(cpu); + data = per_cpu(secure_call_data, cpu); + per_cpu(secure_call_page_idx, cpu) = 0; + page_wr->entry[cpu].base_address = __pa(data); + page_wr->entry[cpu].size = PAGE_SIZE * 2; + page_wr->nums++; + } + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. 
+ */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); +} + +/** + * __csv3_early_secure_call - issue secure call command at the stage where new + * kernel page table is created and early identity page + * table is deprecated . + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. + */ +static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + if (!csv3_percpu_secure_call_init) { + csv3_secure_call_update_table(); + csv3_percpu_secure_call_init = 1; + } + + if (early_secure_call_page_idx == 0) { + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } else { + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); + + early_secure_call_page_idx ^= 1; +} + + +static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, + u64 pages, bool enc) +{ + u64 vaddr_end, vaddr_next; + u64 psize, pmask; + u64 last_paddr, paddr; + u64 last_psize = 0; + pte_t *kpte; + int level; + enum csv3_secure_command_type cmd_type; + + cmd_type = enc ? CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC; + vaddr_next = vaddr; + vaddr_end = vaddr + (pages << PAGE_SHIFT); + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + panic("invalid pte, vaddr 0x%llx\n", vaddr); + goto out; + } + + psize = page_level_size(level); + pmask = page_level_mask(level); + + vaddr_next = (vaddr & pmask) + psize; + paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) + + (vaddr & ~pmask); + psize -= (vaddr & ~pmask); + + if (vaddr_end - vaddr < psize) + psize = vaddr_end - vaddr; + if (last_psize == 0 || (last_paddr + last_psize) == paddr) { + last_paddr = (last_psize == 0 ? 
paddr : last_paddr);
+			last_psize += psize;
+		} else {
+			secure_call(last_paddr, last_psize >> PAGE_SHIFT,
+				    cmd_type);
+			last_paddr = paddr;
+			last_psize = psize;
+		}
+	}
+
+	if (last_psize)
+		secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type);
+
+out:
+	return;
+}
+
+void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc)
+{
+	u64 npages;
+
+	if (!csv3_active())
+		return;
+
+	npages = (size + (vaddr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	__csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK,
+			      npages, enc);
+}
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 1873a65b5655..9645bf5d6f95 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include
 
 #include "mm_internal.h"
 
@@ -377,6 +378,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 	 */
 	clflush_cache_range(__va(pa), size);
 
+	if (csv3_active())
+		goto skip_in_place_enc_dec;
+
 	/* Encrypt/decrypt the contents in-place */
 	if (enc) {
 		sme_early_encrypt(pa, size);
@@ -390,6 +394,7 @@
 		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
 	}
 
+skip_in_place_enc_dec:
 	/* Change the page encryption mask. */
 	new_pte = pfn_pte(pfn, new_prot);
 	set_pte_atomic(kpte, new_pte);
@@ -469,6 +474,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 		early_set_mem_enc_dec_hypercall(start, size, enc);
 out:
 	__flush_tlb_all();
+
+	/*
+	 * On CSV3, the shared and private page attr changes should be managed
+	 * by secure processor. Private pages live in isolated memory region,
+	 * while shared pages live out of isolated memory region.
+	 */
+	if (csv3_active())
+		csv_early_memory_enc_dec(vaddr_end - size, size, enc);
+
 	return ret;
 }
-- Gitee

From 9d20743664420d3575dea09251abda83ebc1b6c8 Mon Sep 17 00:00:00 2001
From: Xin Jiang
Date: Fri, 15 Mar 2024 09:47:23 +0800
Subject: [PATCH 871/953] anolis: x86: Add support for changing the memory attribute for CSV3 guest

ANBZ: #8683

Add support for changing memory between private and shared for multiple
pages when CSV3 is active. When a CSV3 guest wants to share data with the
host (e.g. SWIOTLB buffers) or turn unused shared memory back into private
memory, it must issue a secure call command to the secure processor to
update the mapping in the nested page table.
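After this patch, callers should not need any CSV3-specific code; the
generic attribute-change path is expected to reach the secure processor
on its own. A hedged sketch (the example function is hypothetical):

	/* Sharing a DMA bounce buffer with the host. On CSV3 this ends up
	 * in amd_enc_status_change_finish(), which forwards the range via
	 * csv_memory_enc_dec(vaddr, npages, false).
	 */
	static int example_share_bounce_buffer(void *buf, int npages)
	{
		return set_memory_decrypted((unsigned long)buf, npages);
	}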
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/include/asm/csv.h | 4 +++ arch/x86/kernel/csv.c | 52 +++++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_amd.c | 8 ++++++ 3 files changed, 64 insertions(+) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 0cc4e48b9ade..30e8a89ce8c0 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -40,6 +40,8 @@ void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); + #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL @@ -63,6 +65,8 @@ static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) { } +static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index 9f3b0f3d3cd9..c0ad12aa94f3 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -191,6 +191,50 @@ static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, early_secure_call_page_idx ^= 1; } +static void csv3_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + u32 cmd_ack; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + int page_idx; + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + data = per_cpu(secure_call_data, cpu); + page_idx = per_cpu(secure_call_page_idx, cpu); + + if (page_idx == 0) { + page_rd = &data->page_a; + page_wr = &data->page_b; + } else { + page_rd = &data->page_b; + page_wr = &data->page_a; + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the smp_mb below. + */ + smp_mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + per_cpu(secure_call_page_idx, cpu) ^= 1; + preempt_enable(); +} static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, u64 pages, bool enc) @@ -252,3 +296,11 @@ void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, npages, enc); } + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) +{ + if (!csv3_active()) + return; + + __csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc); +} diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 9645bf5d6f95..f7d88ad030b9 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -345,6 +345,14 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc); + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. 
+	 */
+	if (csv3_active())
+		csv_memory_enc_dec(vaddr, npages, enc);
+
 	return true;
 }
-- Gitee

From 98dfcc4fb389b70df027c1256a5abf5a5b11c3bd Mon Sep 17 00:00:00 2001
From: Xin Jiang
Date: Fri, 15 Mar 2024 13:20:27 +0800
Subject: [PATCH 872/953] anolis: x86/mm: Print CSV3 info into kernel log

ANBZ: #8683

Print the Hygon secure virtualization features. Add CSV3 to the feature
list if CSV3 is active.

Signed-off-by: Xin Jiang
Signed-off-by: hanliyang
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2996
---
 arch/x86/mm/mem_encrypt.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index b97261dfd13d..054f6113be67 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
@@ -58,6 +59,9 @@ static void print_hygon_cc_feature_info(void)
 	/* Encrypted Register State */
 	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		pr_info("  HYGON CSV2");
+
+	if (csv3_active())
+		pr_info("  HYGON CSV3");
 }
 
 static void print_mem_encrypt_feature_info(void)
-- Gitee

From 974de9f517f55ed751253c1325f60cd9c5a915c9 Mon Sep 17 00:00:00 2001
From: Xin Jiang
Date: Fri, 15 Mar 2024 13:25:38 +0800
Subject: [PATCH 873/953] anolis: mm/cma: add API to enable concurrent allocation from the CMA

ANBZ: #8687

The mutex prevents allocating CMA memory concurrently, and it has been
removed and reverted back and forth in mainline (see, e.g., commit
60a60e32cf91). To resolve this dilemma, an API to enable concurrency is
added; it is up to the user to decide whether their CMA can handle
concurrent allocations.

Signed-off-by: Yangwencheng
Signed-off-by: Xin Jiang
Signed-off-by: hanliyang
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/2997
---
 include/linux/cma.h |  1 +
 mm/cma.c            | 14 ++++++++++++--
 mm/cma.h            |  1 +
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 4dadf9a05752..326ec54b8efa 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -57,4 +57,5 @@ extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 
 extern void cma_reserve_pages_on_error(struct cma *cma);
 extern int __init cma_alloc_areas(unsigned int max_cma_size);
+extern void cma_enable_concurrency(struct cma *cma);
 #endif
diff --git a/mm/cma.c b/mm/cma.c
index 721316622bca..0238fc625127 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -496,10 +496,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		mutex_lock(&cma_mutex);
+		if (!cma->no_mutex)
+			mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 GFP_KERNEL | (no_warn ?
__GFP_NOWARN : 0)); - mutex_unlock(&cma_mutex); + if (!cma->no_mutex) + mutex_unlock(&cma_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; @@ -613,3 +615,11 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) return 0; } + +void cma_enable_concurrency(struct cma *cma) +{ + if (!cma) + return; + + cma->no_mutex = true; +} diff --git a/mm/cma.h b/mm/cma.h index 12aba820969c..50275c1d98cc 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -16,6 +16,7 @@ struct cma { unsigned long *bitmap; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; + bool no_mutex; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; -- Gitee From c11e54acf0647ea6a4b4a2e9d3b68ffa3e03119e Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 13:28:39 +0800 Subject: [PATCH 874/953] anolis: x86/mm: CSV allows CMA allocation concurrently ANBZ: #8687 CSV allows CMA allocation concurrently. Signed-off-by: Yangwencheng Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2997 --- arch/x86/mm/csv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c index fe5ca7ed4493..09f2cb7b358a 100644 --- a/arch/x86/mm/csv.c +++ b/arch/x86/mm/csv.c @@ -168,6 +168,7 @@ static void __init csv_cma_reserve_mem(void) 1 << CSV_CMA_SHIFT, node); break; } + cma_enable_concurrency(csv_cma->cma); if (start > cma_get_base(csv_cma->cma) || !start) start = cma_get_base(csv_cma->cma); -- Gitee From 4c66a8667dbd4868de7d7b22a40bdbb2e06819ac Mon Sep 17 00:00:00 2001 From: Wang Yinfeng Date: Tue, 22 Feb 2022 22:13:07 +0800 Subject: [PATCH 875/953] anolis: ipmi_si: Phytium S2500 workaround for MMIO-based IPMI ANBZ: #8712 phytium inclusion category: bugfix CVE: NA -------------------------------- The system would hang up when the Phytium S2500 communicates with some BMCs after several rounds of transactions, unless we reset the controller timeout counter manually by calling firmware through SMC. 
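For orientation, a sketch of the call flow this workaround adds (names
taken from the diff below):

	/*
	 *   ipmi_si_mem_setup()
	 *     -> ipmi_check_phytium_workaround()  // match DSDT OEM ID "KPSVVJ"
	 *   intf_mem_inb()/inw()/inl()/mem_inq()
	 *     -> ipmi_phytium_workaround()
	 *       -> ctl_timeout_reset()            // two SMCs, func id 0xC2000011
	 */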
Signed-off-by: Wang Yinfeng Signed-off-by: Chen Baozi Signed-off-by: Jiakun Shuai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3016 --- drivers/char/ipmi/ipmi_si_mem_io.c | 76 ++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c index 86b92e93a70d..dc6cf7d89fea 100644 --- a/drivers/char/ipmi/ipmi_si_mem_io.c +++ b/drivers/char/ipmi/ipmi_si_mem_io.c @@ -3,9 +3,77 @@ #include #include "ipmi_si.h" +#ifdef CONFIG_ARCH_PHYTIUM +#include + +#define CTL_RST_FUNC_ID 0xC2000011 + +static bool apply_phytium2500_workaround; + +struct ipmi_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; +}; + +#ifdef CONFIG_ACPI +static struct ipmi_workaround_oem_info wa_info[] = { + { + .oem_id = "KPSVVJ", + } +}; +#endif + +static void ipmi_check_phytium_workaround(void) +{ +#ifdef CONFIG_ACPI + struct acpi_table_header tbl; + int i; + + if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &tbl))) + return; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (strncmp(wa_info[i].oem_id, tbl.oem_id, ACPI_OEM_ID_SIZE)) + continue; + + apply_phytium2500_workaround = true; + break; + } +#endif +} + +static void ctl_smc(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) +{ + struct arm_smccc_res res; + + arm_smccc_smc(arg0, arg1, arg2, arg3, 0, 0, 0, 0, &res); + if (res.a0 != 0) + pr_err("Error: Firmware call SMC reset Failed: %d, addr: 0x%lx\n", + (int)res.a0, arg2); +} + +static void ctl_timeout_reset(void) +{ + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1); + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1); +} + +static inline void ipmi_phytium_workaround(void) +{ + if (apply_phytium2500_workaround) + ctl_timeout_reset(); +} + +#else +static inline void ipmi_check_phytium_workaround(void) {} +static inline void ipmi_phytium_workaround(void) {} +#endif + static unsigned char intf_mem_inb(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return readb((io->addr)+(offset * io->regspacing)); } @@ -18,6 +86,8 @@ static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inw(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -31,6 +101,8 @@ static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -44,6 +116,8 @@ static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset, #ifdef readq static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -81,6 +155,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io) if (!addr) return -ENODEV; + ipmi_check_phytium_workaround(); + /* * Figure out the actual readb/readw/readl/etc routine to use based * upon the register size. -- Gitee From 80a3f509e45e4c75861e2cfecc6aa7b9d6fea6be Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 14:28:48 +0800 Subject: [PATCH 876/953] anolis: crypto: ccp: Define CSV3 migration command id ANBZ: #8688 Define CSV3 migration command id and structure. The command definition is available in CSV3 spec. 
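For reference, a hedged sketch of the length-query convention, mirroring
how the follow-on KVM patches drive these commands (handle, kvm and error
are contextual; this fragment is not part of the patch):

	struct csv3_data_send_encrypt_data data = { .handle = handle };

	/* Issuing the command with zeroed lengths asks the firmware for
	 * the required buffer sizes instead of performing a migration.
	 */
	ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &error);
	/* On return, data.hdr_len and data.trans_len hold the sizes. */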
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- drivers/crypto/ccp/sev-dev.c | 8 +++ include/linux/psp-csv.h | 97 ++++++++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 80a733d208ce..ab0fadb2ed90 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -204,6 +204,14 @@ static int sev_cmd_buffer_len(int cmd) return sizeof(struct csv3_data_dbg_read_vmsa); case CSV3_CMD_DBG_READ_MEM: return sizeof(struct csv3_data_dbg_read_mem); + case CSV3_CMD_SEND_ENCRYPT_DATA: + return sizeof(struct csv3_data_send_encrypt_data); + case CSV3_CMD_SEND_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_send_encrypt_context); + case CSV3_CMD_RECEIVE_ENCRYPT_DATA: + return sizeof(struct csv3_data_receive_encrypt_data); + case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_receive_encrypt_context); default: break; } diff --git a/include/linux/psp-csv.h b/include/linux/psp-csv.h index 960459375cd6..2da1adea8d33 100644 --- a/include/linux/psp-csv.h +++ b/include/linux/psp-csv.h @@ -188,4 +188,101 @@ struct csv3_data_dbg_read_mem { u32 size; /* In */ } __packed; +/** + * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @guest_block: physical address containing multiple guest address + * @guest_len: len of guest block + * @flag: flag of send encrypt data + * 0x00000000: migrate pages in guest block + * 0x00000001: set readonly of pages in guest block + * others: invalid + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 flag; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_block: system physical address containing multiple guest address + * @guest_len: len of guest block memory region + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_receive_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 reserved2; /* In */ + u64 
trans_block; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + * @shadow_vmcb_block: physical address of a page containing multiple shadow vmcb address + * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address + * @vmcb_block_len: len of shadow/secure vmcb block + */ +struct csv3_data_receive_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_block; /* In */ + u64 secure_vmcb_block; /* In */ + u32 vmcb_block_len; /* In */ +} __packed; + #endif -- Gitee From d53fe2bb6df8a40b3b08b05b741111a3af09e8b8 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:01:57 +0800 Subject: [PATCH 877/953] anolis: KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_DATA command ANBZ: #8688 The command is used for encrypting the guest memory page using the encryption context. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 171 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 10 +++ 2 files changed, 181 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index b64619528726..454ba28f7702 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -42,6 +43,18 @@ union csv3_page_attr { u64 val; }; +struct guest_paddr_block { + struct { + u64 share: 1; + u64 reserved: 11; + u64 gfn: 52; + } entry[512]; +}; + +struct trans_paddr_block { + u64 trans_paddr[512]; +}; + enum csv3_pg_level { CSV3_PG_LEVEL_NONE, CSV3_PG_LEVEL_4K, @@ -486,6 +499,161 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +csv3_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +#define CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE 0x00000000 +#define CSV3_SEND_ENCRYPT_DATA_SET_READONLY 0x00000001 +static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + struct kvm_csv3_send_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + u32 offset; + int ret = 0; + int i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_data_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.guest_addr_data || + !params.guest_addr_len || !params.hdr_uaddr) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + if ((params.trans_len & PAGE_MASK) == 0 || + (params.trans_len & ~PAGE_MASK) != 0) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + + data.flag = CSV3_SEND_ENCRYPT_DATA_SET_READONLY; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + if (ret) + goto e_free_trans_data; + + kvm_flush_remote_tlbs(kvm); + + data.flag = CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + if (ret) + 
goto e_free_trans_data; + + ret = -EFAULT; + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) + goto e_free_trans_data; + + /* copy guest address block to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.guest_addr_data, + guest_block, params.guest_addr_len)) + goto e_free_trans_data; + + /* copy packet header to userspace. */ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + goto e_free_trans_data; + + ret = 0; +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -919,6 +1087,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); break; + case KVM_CSV3_SEND_ENCRYPT_DATA: + r = csv3_send_encrypt_data(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 6d8833ac456e..ec36a2314b6b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2318,6 +2318,7 @@ enum csv3_cmd_id { KVM_CSV3_INIT = KVM_CSV3_NR_MIN, KVM_CSV3_LAUNCH_ENCRYPT_DATA, KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_NR_MAX, }; @@ -2332,4 +2333,13 @@ struct kvm_csv3_launch_encrypt_data { __u32 len; }; +struct kvm_csv3_send_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From e8c994d17b8cf2e558ad920afec4ab24ef2a9904 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:05:56 +0800 Subject: [PATCH 878/953] anolis: KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_CONTEXT command ANBZ: #8688 The command is used for encrypting the guest cpu context using the encryption context. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 118 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 126 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 454ba28f7702..755918d74fb3 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -654,6 +654,121 @@ static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +csv3_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_context *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + struct kvm_csv3_send_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + unsigned long pfn; + unsigned long i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_context_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.handle = sev->handle; + + /* flush hdr, trans data, trans block, secure VMSAs */ + wbinvd_on_all_cpus(); + + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + + if (ret) + goto e_free_trans_data; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + /* copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1090,6 +1205,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_SEND_ENCRYPT_DATA: r = csv3_send_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_SEND_ENCRYPT_CONTEXT: + r = csv3_send_encrypt_context(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ec36a2314b6b..665f2d68a59b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2319,6 +2319,7 @@ enum csv3_cmd_id { KVM_CSV3_LAUNCH_ENCRYPT_DATA, KVM_CSV3_LAUNCH_ENCRYPT_VMCB, KVM_CSV3_SEND_ENCRYPT_DATA, + KVM_CSV3_SEND_ENCRYPT_CONTEXT, KVM_CSV3_NR_MAX, }; @@ -2342,4 +2343,11 @@ struct kvm_csv3_send_encrypt_data { __u32 trans_len; }; +struct kvm_csv3_send_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From 90da17ff716d08dc838ee83ddbc8a8688bd2d064 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:24:11 +0800 Subject: [PATCH 879/953] anolis: KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_DATA command ANBZ: #8688 The command is used for copying the incoming buffer into the CSV3 guest's private memory. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 122 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 10 ++++ 2 files changed, 132 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 755918d74fb3..d5620606cde2 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -769,6 +769,125 @@ static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_receive_encrypt_data data; + struct kvm_csv3_receive_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + int i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (unlikely(list_empty(&csv->smr_list))) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + } + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_addr_data || !params.guest_addr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto 
e_free_hdr; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, &data, + &argp->error); + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1208,6 +1327,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_SEND_ENCRYPT_CONTEXT: r = csv3_send_encrypt_context(kvm, &sev_cmd); break; + case KVM_CSV3_RECEIVE_ENCRYPT_DATA: + r = csv3_receive_encrypt_data(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 665f2d68a59b..a79c56e49d18 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2320,6 +2320,7 @@ enum csv3_cmd_id { KVM_CSV3_LAUNCH_ENCRYPT_VMCB, KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_SEND_ENCRYPT_CONTEXT, + KVM_CSV3_RECEIVE_ENCRYPT_DATA, KVM_CSV3_NR_MAX, }; @@ -2350,4 +2351,13 @@ struct kvm_csv3_send_encrypt_context { __u32 trans_len; }; +struct kvm_csv3_receive_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From ceb869341f12be494e40489691de66338e22d05c Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:29:50 +0800 Subject: [PATCH 880/953] anolis: KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT command ANBZ: #8688 The command is used for copying the incoming context into the CSV3 guest's private memory. 
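For illustration, the receiving VMM might drive this command roughly as below (a hypothetical user-space sketch, not part of this patch: it assumes the command reaches csv_mem_enc_op() through the KVM_MEMORY_ENCRYPT_OP ioctl like the other CSV3 commands, and that hdr_buf/trans_buf already hold the header and transport blobs read from the migration stream; device-fd plumbing and cleanup are elided):

	struct kvm_csv3_receive_encrypt_context params = {
		.hdr_uaddr   = (__u64)(uintptr_t)hdr_buf,
		.hdr_len     = hdr_len,
		.trans_uaddr = (__u64)(uintptr_t)trans_buf,
		.trans_len   = trans_len,
	};
	struct kvm_sev_cmd cmd = {
		.id   = KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT,
		.data = (__u64)(uintptr_t)&params,
	};

	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
		fprintf(stderr, "receive context: fw error %u\n", cmd.error);

On success each vCPU's VMCB pointer is switched to the secure VMCB returned by the firmware and the guest state becomes protected.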
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 147 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 155 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index d5620606cde2..9b9d86169537 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -55,6 +55,10 @@ struct trans_paddr_block { u64 trans_paddr[512]; }; +struct vmcb_paddr_block { + u64 vmcb_paddr[512]; +}; + enum csv3_pg_level { CSV3_PG_LEVEL_NONE, CSV3_PG_LEVEL_4K, @@ -888,6 +892,146 @@ static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_receive_encrypt_context data; + struct kvm_csv3_receive_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct vmcb_paddr_block *shadow_vmcb_block; + struct vmcb_paddr_block *secure_vmcb_block; + unsigned long pfn; + u32 offset; + int ret = 0; + struct kvm_vcpu *vcpu; + unsigned long i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.trans_uaddr || !params.trans_len || + !params.hdr_uaddr || !params.hdr_len) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + secure_vmcb_block = kzalloc(sizeof(*secure_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!secure_vmcb_block) { + ret = -ENOMEM; + goto e_free_trans_data; + } + + shadow_vmcb_block = kzalloc(sizeof(*shadow_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!shadow_vmcb_block) { + ret = -ENOMEM; + goto e_free_secure_vmcb_block; + } + + memset(&data, 0, sizeof(data)); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(shadow_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + shadow_vmcb_block->vmcb_paddr[i] = __sme_pa(svm->vmcb); + data.vmcb_block_len += sizeof(shadow_vmcb_block->vmcb_paddr[0]); + } + + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.shadow_vmcb_block = __psp_pa(shadow_vmcb_block); + data.secure_vmcb_block = __psp_pa(secure_vmcb_block); + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + 
clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(shadow_vmcb_block, PAGE_SIZE); + clflush_cache_range(secure_vmcb_block, PAGE_SIZE); + + ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, &data, + &argp->error); + if (ret) + goto e_free_shadow_vmcb_block; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(secure_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + + svm->current_vmcb->pa = secure_vmcb_block->vmcb_paddr[i]; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free_shadow_vmcb_block: + kfree(shadow_vmcb_block); +e_free_secure_vmcb_block: + kfree(secure_vmcb_block); +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1330,6 +1474,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_RECEIVE_ENCRYPT_DATA: r = csv3_receive_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT: + r = csv3_receive_encrypt_context(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a79c56e49d18..def7a0ee9717 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2321,6 +2321,7 @@ enum csv3_cmd_id { KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_SEND_ENCRYPT_CONTEXT, KVM_CSV3_RECEIVE_ENCRYPT_DATA, + KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, KVM_CSV3_NR_MAX, }; @@ -2360,4 +2361,11 @@ struct kvm_csv3_receive_encrypt_data { __u32 trans_len; }; +struct kvm_csv3_receive_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From abffe297ab6767f70563aea9e5e013c020dfba7f Mon Sep 17 00:00:00 2001 From: Zhao Chen Date: Tue, 9 Jan 2024 17:24:42 +0800 Subject: [PATCH 881/953] fuse: Introduce a new notification type for resend pending requests ANBZ: #8703 commit 760eac73f9f69aa28fcb3050b4946c2dcc656d12 upstream. When a FUSE daemon panics and fails over, we aim to minimize the impact on applications by reusing the existing FUSE connection. During this process, another daemon is employed to preserve the FUSE connection's file descriptor. The newly started FUSE daemon will take over the fd and continue to provide service. However, it is possible for some inflight requests to be lost and never returned. As a result, applications awaiting replies would become stuck forever. To address this, we can resend these pending requests to the newly started FUSE daemon. This patch introduces a new notification type "FUSE_NOTIFY_RESEND", which can trigger resending of the pending requests, ensuring they are properly processed again. Signed-off-by: Zhao Chen Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/dev.c | 56 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/fuse.h | 2 ++ 2 files changed, 58 insertions(+) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 1a8f82f478cb..d3f6a24475f2 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1775,6 +1775,59 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, return err; } +/* + * Resending all processing queue requests. 
+ * + * During a FUSE daemon panics and failover, it is possible for some inflight + * requests to be lost and never returned. As a result, applications awaiting + * replies would become stuck forever. To address this, we can use notification + * to trigger resending of these pending requests to the FUSE daemon, ensuring + * they are properly processed again. + * + * Please note that this strategy is applicable only to idempotent requests or + * if the FUSE daemon takes careful measures to avoid processing duplicated + * non-idempotent requests. + */ +static void fuse_resend(struct fuse_conn *fc) +{ + struct fuse_dev *fud; + struct fuse_req *req, *next; + struct fuse_iqueue *fiq = &fc->iq; + LIST_HEAD(to_queue); + unsigned int i; + + spin_lock(&fc->lock); + if (!fc->connected) { + spin_unlock(&fc->lock); + return; + } + + list_for_each_entry(fud, &fc->devices, entry) { + struct fuse_pqueue *fpq = &fud->pq; + + spin_lock(&fpq->lock); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + list_splice_tail_init(&fpq->processing[i], &to_queue); + spin_unlock(&fpq->lock); + } + spin_unlock(&fc->lock); + + list_for_each_entry_safe(req, next, &to_queue, list) { + __set_bit(FR_PENDING, &req->flags); + } + + spin_lock(&fiq->lock); + /* iq and pq requests are both oldest to newest */ + list_splice(&to_queue, &fiq->pending); + fiq->ops->wake_pending_and_unlock(fiq); +} + +static int fuse_notify_resend(struct fuse_conn *fc) +{ + fuse_resend(fc); + return 0; +} + static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { @@ -1800,6 +1853,9 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, case FUSE_NOTIFY_DELETE: return fuse_notify_delete(fc, size, cs); + case FUSE_NOTIFY_RESEND: + return fuse_notify_resend(fc); + default: fuse_copy_finish(cs); return -EINVAL; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 33b56d9e4803..aeeecf0fba63 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -214,6 +214,7 @@ * * 7.40 * - add FUSE_NO_EXPORT_SUPPORT init flag + * - add FUSE_NOTIFY_RESEND */ #ifndef _LINUX_FUSE_H @@ -640,6 +641,7 @@ enum fuse_notify_code { FUSE_NOTIFY_STORE = 4, FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, FUSE_NOTIFY_CODE_MAX, }; -- Gitee From 4268dbf60046c5431f2a09849a829b682fdac448 Mon Sep 17 00:00:00 2001 From: Zhao Chen Date: Tue, 9 Jan 2024 17:24:43 +0800 Subject: [PATCH 882/953] fuse: Use the high bit of request ID for indicating resend requests ANBZ: #8703 commit 9e7f5296f475ba5ab887ae3e55b922e17e99752b upstream. Some FUSE daemons want to know if the received request is a resend request. The high bit of the fuse request ID is utilized for indicating this, enabling the receiver to perform appropriate handling. The init flag "FUSE_HAS_RESEND" is added to indicate this feature. 
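For example, a daemon's request loop could detect a resent request like this (a hypothetical daemon-side sketch; handle_resend() and handle_request() are made-up helpers):

	struct fuse_in_header *in = (struct fuse_in_header *)read_buf;

	if (in->unique & FUSE_UNIQUE_RESEND) {
		/*
		 * Already seen before the failover: safe to process again
		 * only for idempotent opcodes, otherwise deduplicate first.
		 */
		handle_resend(in);
	} else {
		handle_request(in);
	}

The reply carries in->unique back unchanged, as usual.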
Signed-off-by: Zhao Chen Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/dev.c | 2 ++ fs/fuse/inode.c | 2 +- include/uapi/linux/fuse.h | 13 ++++++++++++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index d3f6a24475f2..2a2ba223f6ed 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1814,6 +1814,8 @@ static void fuse_resend(struct fuse_conn *fc) list_for_each_entry_safe(req, next, &to_queue, list) { __set_bit(FR_PENDING, &req->flags); + /* mark the request as resend request */ + req->in.h.unique |= FUSE_UNIQUE_RESEND; } spin_lock(&fiq->lock); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index aaad201d68d8..0a07e977e7c1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1346,7 +1346,7 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | - FUSE_NO_EXPORT_SUPPORT; + FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index aeeecf0fba63..8da8e72ed313 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -214,7 +214,7 @@ * * 7.40 * - add FUSE_NO_EXPORT_SUPPORT init flag - - add FUSE_NOTIFY_RESEND + - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag */ #ifndef _LINUX_FUSE_H @@ -415,6 +415,8 @@ struct fuse_file_lock { * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support + * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit + * of the request ID indicates resend requests */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -455,6 +457,7 @@ struct fuse_file_lock { #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) #define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) #define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) +#define FUSE_HAS_RESEND (1ULL << 39) /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP @@ -967,6 +970,14 @@ struct fuse_fallocate_in { uint32_t padding; }; +/** + * FUSE request unique ID flag + * + * Indicates whether this is a resend request. The receiver should handle this + * request accordingly. + */ +#define FUSE_UNIQUE_RESEND (1ULL << 63) + struct fuse_in_header { uint32_t len; uint32_t opcode; -- Gitee From 34a2431dff8e8f05f84c1621a2672f82c14d2138 Mon Sep 17 00:00:00 2001 From: Xu Ji Date: Thu, 12 Oct 2023 11:46:20 +0800 Subject: [PATCH 883/953] anolis: fuse: increase FUSE_MAX_MAX_PAGES limit ANBZ: #8703 Set FUSE_MAX_MAX_PAGES to 1024 so that we can send read/write requests with a max size of 4MB. 
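For reference, with 4 KiB pages the new ceiling works out to:

	1024 pages * 4096 bytes/page = 4194304 bytes = 4 MiB

A daemon still only gets the max_pages value it negotiates in the FUSE_INIT reply; this change merely raises the cap on that value from 256 pages (1 MiB) to 1024.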
Signed-off-by: Xu Ji Reviewed-by: Jingbo Xu Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/fuse_i.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index fe775e4f91ef..525c98353947 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -37,7 +37,7 @@ #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 /** Maximum of max_pages received in init_out */ -#define FUSE_MAX_MAX_PAGES 256 +#define FUSE_MAX_MAX_PAGES 1024 /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN -- Gitee From ca7ce4498bc79d71c59e7d9927000eb93114af81 Mon Sep 17 00:00:00 2001 From: Yifei Zhang Date: Sat, 21 Oct 2023 23:26:09 +0800 Subject: [PATCH 884/953] anolis: fuse: make fuse support configuring delete_stale feature ANBZ: #8703 By default, fuse always caches dentries for performance, which results in the fuse daemon always holding the fd created by lookup. If the file is then deleted without going through the fuse file system, the fd in the fuse daemon keeps referencing the deleted file and can never be released. Our system can avoid this problem with delete_stale, re-looking up every time instead of caching the dentry. virtiofs already supports this, but fuse doesn't enable it by default. This PR makes delete_stale configurable. Signed-off-by: Yifei Zhang Reviewed-by: Jingbo Xu Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/inode.c | 5 ++++- include/uapi/linux/fuse.h | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 0a07e977e7c1..674894b819bf 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1299,6 +1299,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, fc->direct_io_allow_mmap = 1; if (flags & FUSE_NO_EXPORT_SUPPORT) fm->sb->s_export_op = &fuse_export_fid_operations; + if (flags & FUSE_DELETE_STALE) + fc->delete_stale = 1; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1346,7 +1348,8 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | - FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND; + FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | + FUSE_DELETE_STALE; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 8da8e72ed313..730e620286a7 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -462,6 +462,9 @@ struct fuse_file_lock { /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP +#define FUSE_DELETE_STALE (1ULL << 58) +/* The 59th bit is left to FUSE_DIO_SHARED_MMAP */ + /** * CUSE INIT request/reply flags * -- Gitee From 618b95f57cd52ec291c5963e07713f0f7b45e7d4 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 19:39:55 +0800 Subject: [PATCH 885/953] anolis: x86/delay: add support for Zhaoxin ZXPAUSE instruction ANBZ: #7809 ZXPAUSE instructs the processor to enter an implementation-dependent optimized state. The instruction execution wakes up when the time-stamp counter delta, measured from instruction entry, reaches or exceeds the implicit EDX:EAX 64-bit input value. The instruction execution also wakes up due to the expiration of the operating system time-limit or by an external interrupt. 
ZXPAUSE is available on processors with X86_FEATURE_ZXPAUSE. ZXPAUSE allows the processor to enter a light-weight power/performance optimized state (C0.1 state) for a period specified by the instruction or until the system time limit. MSR_ZX_PAUSE_CONTROL MSR register allows the OS to enable/disable C0.2 on the processor and to set the maximum time the processor can reside in C0.1 or C0.2. By default C0.2 is disabled. A sysfs interface to adjust the time and the C0.2 enablement is provided in a follow up change. Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2710 --- MAINTAINERS | 5 + arch/x86/include/asm/cpufeature.h | 7 +- arch/x86/include/asm/cpufeatures.h | 5 +- arch/x86/include/asm/delay.h | 1 + arch/x86/include/asm/disabled-features.h | 3 +- arch/x86/include/asm/msr-index.h | 11 + arch/x86/include/asm/mwait.h | 15 ++ arch/x86/include/asm/required-features.h | 3 +- arch/x86/kernel/cpu/Makefile | 1 + arch/x86/kernel/cpu/centaur.c | 3 + arch/x86/kernel/cpu/zhaoxin.c | 3 + arch/x86/kernel/cpu/zxpause.c | 238 ++++++++++++++++++ arch/x86/kernel/time.c | 2 + arch/x86/lib/delay.c | 27 ++ tools/arch/x86/include/asm/cpufeatures.h | 5 +- .../arch/x86/include/asm/disabled-features.h | 4 +- tools/arch/x86/include/asm/msr-index.h | 11 + .../arch/x86/include/asm/required-features.h | 4 +- 18 files changed, 340 insertions(+), 8 deletions(-) create mode 100644 arch/x86/kernel/cpu/zxpause.c diff --git a/MAINTAINERS b/MAINTAINERS index 357621e9a72b..139691fb4b8b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23893,6 +23893,11 @@ L: linux-hwmon@vger.kernel.org S: Maintained F: drivers/hwmon/zhaoxin-cputemp.c +ZHAOXIN ZXPAUSE INSTRUCTION SUPPORT +M: LeoLiu-oc +S: Maintained +F: arch/x86/kernel/cpu/zxpause.c + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index c97522b47019..1442caced09b 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -34,6 +34,7 @@ enum cpuid_leafs CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, CPUID_8C86_0000_EDX, + CPUID_C000_0006_EAX, }; #define X86_CAP_FMT_NUM "%d:%d" @@ -93,8 +94,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 22, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 22)) + BUILD_BUG_ON_ZERO(NCAPINTS != 23)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -119,8 +121,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 22, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 22)) + BUILD_BUG_ON_ZERO(NCAPINTS != 23)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index ca055890773b..ce10cb8646d8 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 22 /* N 32-bit words worth of info */ +#define NCAPINTS 23 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -480,6 +480,9 @@ #define X86_FEATURE_SM3 (21*32 + 1) /* SM3 instructions */ #define X86_FEATURE_SM4 (21*32 + 2) /* SM4 instructions */ +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 22 */ +#define X86_FEATURE_ZXPAUSE (22*32 + 0) /* ZHAOXIN ZXPAUSE */ + /* * BUG word(s) */ diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 630891d25819..4dbb3fea67fb 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -7,6 +7,7 @@ void __init use_tsc_delay(void); void __init use_tpause_delay(void); +void __init use_zxpause_delay(void); void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 88fcf08458d9..b108e656fa5b 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -144,6 +144,7 @@ #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 #define DISABLED_MASK21 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) +#define DISABLED_MASK22 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 0e72ec1d6e25..b7df4d09b467 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -72,12 +72,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index bae83810505b..3aa7f98683e3 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -26,6 +26,8 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 +#define ZXPAUSE_C01_STATE 1 + static __always_inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { @@ -148,4 +150,17 @@ static inline void __tpause(u32 ecx, u32 edx, u32 eax) #endif } +/* + * Caller can specify whether to enter C0.1 (low latency, less + * power saving) or C0.2 state (saves more power, but longer wakeup + * latency). This may be overridden by the ZX_PAUSE_CONTROL MSR + * which can force requests for C0.2 to be downgraded to C0.1. 
+ */ +static inline void __zxpause(u32 ecx, u32 edx, u32 eax) +{ + /* "zxpause %ecx, %edx, %eax;" */ + asm volatile(".byte 0xf2, 0x0f, 0xa6, 0xd0\t\n" + : + : "c"(ecx), "d"(edx), "a"(eax)); +} #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index e9187ddd3d1f..76953f757f3c 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -100,6 +100,7 @@ #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 #define REQUIRED_MASK21 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 4350f6bfc064..dec6b0d9e711 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -25,6 +25,7 @@ obj-y += bugs.o obj-y += aperfmperf.o obj-y += cpuid-deps.o obj-y += umwait.o +obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zxpause.o obj-$(CONFIG_PROC_FS) += proc.o obj-y += capflags.o powerflags.o diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index ad6982391bc9..b15bcf21ac7b 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -117,6 +117,9 @@ static void early_init_centaur(struct cpuinfo_x86 *c) if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index f9a65b57a6bd..8e4201ad1d23 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -86,6 +86,9 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } diff --git a/arch/x86/kernel/cpu/zxpause.c b/arch/x86/kernel/cpu/zxpause.c new file mode 100644 index 000000000000..7f55f5d9e8c0 --- /dev/null +++ b/arch/x86/kernel/cpu/zxpause.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#define ZXPAUSE_C02_ENABLE 0 + +#define ZXPAUSE_CTRL_VAL(max_time, c02_disable) \ + (((max_time) & MSR_ZX_PAUSE_CONTROL_TIME_MASK) | \ + ((c02_disable) & MSR_ZX_PAUSE_CONTROL_C02_DISABLE)) + +/* + * Cache ZX_PAUSE_CONTROL MSR. This is a systemwide control. By default, + * zxpause max time is 100000 in TSC-quanta and C0.2 is enabled + */ +static u32 zxpause_control_cached = ZXPAUSE_CTRL_VAL(100000, ZXPAUSE_C02_ENABLE); + +/* + * Cache the original ZX_PAUSE_CONTROL MSR value which is configured by + * hardware or BIOS before kernel boot. + */ +static u32 orig_zxpause_control_cached __ro_after_init; + +/* + * Serialize access to zxpause_control_cached and ZX_PAUSE_CONTROL MSR in + * the sysfs write functions. + */ +static DEFINE_MUTEX(zxpause_lock); + +static void zxpause_update_control_msr(void *unused) +{ + lockdep_assert_irqs_disabled(); + wrmsr(MSR_ZX_PAUSE_CONTROL, READ_ONCE(zxpause_control_cached), 0); +} + +/* + * The CPU hotplug callback sets the control MSR to the global control + * value. 
+ * + * Disable interrupts so the read of zxpause_control_cached and the WRMSR + * are protected against a concurrent sysfs write. Otherwise the sysfs + * write could update the cached value after it had been read on this CPU + * and issue the IPI before the old value had been written. The IPI would + * interrupt, write the new value and after return from IPI the previous + * value would be written by this CPU. + * + * With interrupts disabled the upcoming CPU either sees the new control + * value or the IPI is updating this CPU to the new control value after + * interrupts have been reenabled. + */ +static int zxpause_cpu_online(unsigned int cpu) +{ + local_irq_disable(); + zxpause_update_control_msr(NULL); + local_irq_enable(); + return 0; +} + +/* + * The CPU hotplug callback sets the control MSR to the original control + * value. + */ +static int zxpause_cpu_offline(unsigned int cpu) +{ + /* + * This code is protected by the CPU hotplug already and + * orig_zxpause_control_cached is never changed after it caches + * the original control MSR value in zxpause_init(). So there + * is no race condition here. + */ + wrmsr(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached, 0); + + return 0; +} + +/* + * On resume, restore ZX_PAUSE_CONTROL MSR on the boot processor which + * is the only active CPU at this time. The MSR is set up on the APs via the + * CPU hotplug callback. + * + * This function is invoked on resume from suspend and hibernation. On + * resume from suspend the restore should be not required, but we neither + * trust the firmware nor does it matter if the same value is written + * again. + */ +static void zxpause_syscore_resume(void) +{ + zxpause_update_control_msr(NULL); +} + +static struct syscore_ops zxpause_syscore_ops = { + .resume = zxpause_syscore_resume, +}; + +/* sysfs interface */ + +/* + * When bit 0 in ZX_PAUSE_CONTROL MSR is 1, C0.2 is disabled. + * Otherwise, C0.2 is enabled. 
+ */ +static inline bool zxpause_ctrl_c02_enabled(u32 ctrl) +{ + return !(ctrl & MSR_ZX_PAUSE_CONTROL_C02_DISABLE); +} + +static inline u32 zxpause_ctrl_max_time(u32 ctrl) +{ + return ctrl & MSR_ZX_PAUSE_CONTROL_TIME_MASK; +} + +static inline void zxpause_update_control(u32 maxtime, bool c02_enable) +{ + u32 ctrl = maxtime & MSR_ZX_PAUSE_CONTROL_TIME_MASK; + + if (!c02_enable) + ctrl |= MSR_ZX_PAUSE_CONTROL_C02_DISABLE; + + WRITE_ONCE(zxpause_control_cached, ctrl); + /* Propagate to all CPUs */ + on_each_cpu(zxpause_update_control_msr, NULL, 1); +} + +static ssize_t +enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%d\n", zxpause_ctrl_c02_enabled(ctrl)); +} + +static ssize_t enable_c02_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + bool c02_enable; + u32 ctrl; + int ret; + + ret = kstrtobool(buf, &c02_enable); + if (ret) + return ret; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (c02_enable != zxpause_ctrl_c02_enabled(ctrl)) + zxpause_update_control(ctrl, c02_enable); + + mutex_unlock(&zxpause_lock); + + return count; +} +static DEVICE_ATTR_RW(enable_c02); + +static ssize_t +max_time_show(struct device *kobj, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%u\n", zxpause_ctrl_max_time(ctrl)); +} + +static ssize_t max_time_store(struct device *kobj, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 max_time, ctrl; + int ret; + + ret = kstrtou32(buf, 0, &max_time); + if (ret) + return ret; + + /* bits[1:0] must be zero */ + if (max_time & ~MSR_ZX_PAUSE_CONTROL_TIME_MASK) + return -EINVAL; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (max_time != zxpause_ctrl_max_time(ctrl)) + zxpause_update_control(max_time, zxpause_ctrl_c02_enabled(ctrl)); + + mutex_unlock(&zxpause_lock); + + return count; +} +static DEVICE_ATTR_RW(max_time); + +static struct attribute *zxpause_attrs[] = { + &dev_attr_enable_c02.attr, + &dev_attr_max_time.attr, + NULL +}; + +static struct attribute_group zxpause_attr_group = { + .attrs = zxpause_attrs, + .name = "zxpause_control", +}; + +static int __init zxpause_init(void) +{ + struct device *dev; + int ret; + + if (!boot_cpu_has(X86_FEATURE_ZXPAUSE)) + return -ENODEV; + + /* + * Cache the original control MSR value before the control MSR is + * changed. This is the only place where orig_zxpause_control_cached + * is modified. + */ + rdmsrl(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached); + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "zxpause:online", + zxpause_cpu_online, zxpause_cpu_offline); + if (ret < 0) { + /* + * On failure, the control MSR on all CPUs has the + * original control value. + */ + return ret; + } + + register_syscore_ops(&zxpause_syscore_ops); + + /* + * Add zxpause control interface. Ignore failure, so at least the + * default values are set up in case the machine manages to boot. 
+ */ + dev = bus_get_dev_root(&cpu_subsys); + return sysfs_create_group(&dev->kobj, &zxpause_attr_group); +} +device_initcall(zxpause_init); diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index e42faa792c07..4b790925b3e6 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -102,6 +102,8 @@ static __init void x86_late_time_init(void) if (static_cpu_has(X86_FEATURE_WAITPKG)) use_tpause_delay(); + else if (static_cpu_has(X86_FEATURE_ZXPAUSE)) + use_zxpause_delay(); } /* diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 0e65d00e2339..3946badbd78f 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -117,6 +117,27 @@ static void delay_halt_tpause(u64 start, u64 cycles) __tpause(TPAUSE_C02_STATE, edx, eax); } +/* + * On ZHAOXIN the ZXPAUSE instruction waits until any of: + * 1) the delta of TSC counter exceeds the value provided in EDX:EAX + * 2) global timeout in ZX_PAUSE_CONTROL is exceeded + * 3) an external interrupt occurs + */ +static void delay_halt_zxpause(u64 unused, u64 cycles) +{ + u64 until = cycles; + u32 eax, edx; + + eax = lower_32_bits(until); + edx = upper_32_bits(until); + + /* + * Hard code the deeper (C0.1) sleep state because exit latency is + * small compared to the "microseconds" that usleep() will delay. + */ + __zxpause(ZXPAUSE_C01_STATE, edx, eax); +} + /* * On some AMD platforms, MWAITX has a configurable 32-bit timer, that * counts with TSC frequency. The input value is the number of TSC cycles @@ -183,6 +204,12 @@ void __init use_tpause_delay(void) delay_fn = delay_halt; } +void __init use_zxpause_delay(void) +{ + delay_halt_fn = delay_halt_zxpause; + delay_fn = delay_halt; +} + void use_mwaitx_delay(void) { delay_halt_fn = delay_halt_mwaitx; diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 798e60b5454b..dd31390524b7 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 21 /* N 32-bit words worth of info */ +#define NCAPINTS 23 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -443,6 +443,9 @@ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 22 */ +#define X86_FEATURE_ZXPAUSE (22*32 + 0) /* ZHAOXIN ZXPAUSE */ + /* * BUG word(s) */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index fafe9be7a6f4..cbf8c3ea37fe 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -131,6 +131,8 @@ #define DISABLED_MASK18 0 #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define DISABLED_MASK21 0 +#define DISABLED_MASK22 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 1d111350197f..06c4386a620b 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -72,12 +72,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define 
MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index 7ba1726b71c7..76953f757f3c 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -99,6 +99,8 @@ #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define REQUIRED_MASK21 0 +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ -- Gitee From 92179d55bc738ec743519ce35c7006b882bd3023 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 17:25:52 +0800 Subject: [PATCH 886/953] anolis: KVM: x86: Introduce support for Zhaoxin ZXPAUSE instruction ANBZ: #7809 This patch introduces support for the ZXPAUSE instruction, a new addition akin to Intel's TPAUSE. Two primary distinctions set apart ZXPAUSE from TPAUSE: 1. ZXPAUSE utilizes a delta tsc, determined from the lesser value between (MSR_ZX_PAUSE_CONTROL[31:2] << 2) and the EDX:EAX input to the ZXPAUSE instruction, subtracted from the current tsc value. In contrast, TPAUSE employs a target tsc, computed from the lesser value between (MSR_IA32_UMWAIT_CONTROL[31:2] << 2) and the EDX:EAX input to the TPAUSE instruction. 2. As of now, ZXPAUSE exclusively supports the C0.1 optimization state, whereas TPAUSE potentially extends support to both C0.1 and C0.2. Successful integration of this patch hinges on QEMU's backing for ZXPAUSE, a contribution we're currently forwarding to QEMU. It also requires the preceding patch in this patchset, which offers Linux kernel support for ZXPAUSE. The choice of the name "vmx->msr_ia32_umwait_control" is deliberate. In patches for other Linux versions (e.g., 5.5), a "vmx->msr_ia32_umwait_control" already exists. By sharing this variable name with Intel, it ensures compatibility. The difference is merely software-based and poses no real-world conflicts. Currently, if the Guest writes to the ZXPAUSE/TPAUSE CONTROL MSR, we simply bypass the WRMSR instruction. If the Guest attempts to use ZXPAUSE/TPAUSE to transition the vCPU into an optimized state, it will succeed, with the duration of the optimized state being the value passed in EDX:EAX. Of course, this state can be interrupted by external interrupts and other events specified in the specification. 
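Distinction 1 restated as a rough pseudo-C sketch (illustrative only; edx_eax is the instruction operand, msr the respective control MSR value, whose time field occupies bits [31:2] with bits [1:0] zero):

	/* TPAUSE: operand and MSR time limit are absolute TSC targets */
	u64 target = min(msr & ~0x3ULL, edx_eax);  /* wake: rdtsc() >= target */

	/* ZXPAUSE: operand and MSR time limit are deltas from entry */
	u64 delta = min(msr & ~0x3ULL, edx_eax);   /* wake: rdtsc() - entry_tsc >= delta */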
Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2710 --- arch/x86/include/asm/msr-index.h | 7 +++ arch/x86/include/asm/vmx.h | 7 +++ arch/x86/include/asm/vmxfeatures.h | 6 ++- arch/x86/kernel/cpu/feat_ctl.c | 10 ++++ arch/x86/kvm/cpuid.c | 12 ++++- arch/x86/kvm/reverse_cpuid.h | 1 + arch/x86/kvm/vmx/capabilities.h | 7 +++ arch/x86/kvm/vmx/vmcs.h | 2 + arch/x86/kvm/vmx/vmx.c | 68 +++++++++++++++++++++++++- arch/x86/kvm/vmx/vmx.h | 19 +++++++ arch/x86/kvm/x86.c | 6 ++- tools/arch/x86/include/asm/msr-index.h | 7 +++ 12 files changed, 147 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index b7df4d09b467..d4e1504c6f23 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -778,6 +778,13 @@ #define MSR_VIA_RNG 0x0000110b #define MSR_VIA_BCR2 0x00001147 +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 0e73616b82f3..3a4f60f19de3 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -84,6 +84,11 @@ */ #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT) +/* + * Definitions of Zhaoxin Tertiary Processor-Based VM-Execution Controls. + */ +#define ZX_TERTIARY_EXEC_GUEST_ZXPAUSE VMCS_CONTROL_BIT(GUEST_ZXPAUSE) + #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS) @@ -235,6 +240,7 @@ enum vmcs_field { TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035, PID_POINTER_TABLE = 0x00002042, PID_POINTER_TABLE_HIGH = 0x00002043, + ZXPAUSE_VMEXIT_TSC = 0x00002200, GUEST_PHYSICAL_ADDRESS = 0x00002400, GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, @@ -284,6 +290,7 @@ enum vmcs_field { PLE_GAP = 0x00004020, PLE_WINDOW = 0x00004022, NOTIFY_WINDOW = 0x00004024, + ZX_TERTIARY_VM_EXEC_CONTROL = 0x00004200, VM_INSTRUCTION_ERROR = 0x00004400, VM_EXIT_REASON = 0x00004402, VM_EXIT_INTR_INFO = 0x00004404, diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index c6a7eed03914..ba209bdf57d9 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -5,7 +5,7 @@ /* * Defines VMX CPU feature bits */ -#define NVMXINTS 5 /* N 32-bit words worth of info */ +#define NVMXINTS 6 /* N 32-bit words worth of info */ /* * Note: If the comment begins with a quoted string, that string is used @@ -89,4 +89,8 @@ /* Tertiary Processor-Based VM-Execution Controls, word 3 */ #define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */ + +/* Zhaoxin Tertiary Processor-Based VM-Execution Controls, word 4 */ +#define VMX_FEATURE_GUEST_ZXPAUSE (4*32 + 0) /* zxpause instruction in guest mode */ + #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 03851240c3e3..3e0fbf510f1c 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -17,6 +17,7 @@ enum vmx_feature_leafs { SECONDARY_CTLS, TERTIARY_CTLS_LOW, TERTIARY_CTLS_HIGH, + ZX_TERTIARY_CTLS, NR_VMX_FEATURE_WORDS, }; @@ -97,6 +98,15 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c) set_cpu_cap(c, 
X86_FEATURE_EPT_AD); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VPID)) set_cpu_cap(c, X86_FEATURE_VPID); + /* + * Initialize Zhaoxin Tertiary Exec Control feature flags. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &supported, &ign); + if (supported & MSR_ZX_VMCS_EXEC_CTL3) + c->vmx_capability[ZX_TERTIARY_CTLS] |= VMX_F(GUEST_ZXPAUSE); + } } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ca52b05c1f4f..60019654f650 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -790,6 +790,9 @@ void kvm_set_cpu_caps(void) F(PMM) | F(PMM_EN) ); + /* Zhaoxin 0xC0000006 leaf */ + kvm_cpu_cap_mask(CPUID_C000_0006_EAX, 0 /* bit0: zxpause */ | 0 /* bit1 HMAC */); + /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed. This is purely a sanity check and @@ -1304,17 +1307,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) } /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: - /*Just support up to 0xC0000004 now*/ - entry->eax = min(entry->eax, 0xC0000004); + /* Extended to 0xC0000006 */ + entry->eax = min(entry->eax, 0xC0000006); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; + case 0xC0000006: + cpuid_entry_override(entry, CPUID_C000_0006_EAX); + break; + case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: + case 0xC0000005: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index aadefcaa9561..5d8e47371c7c 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -90,6 +90,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, + [CPUID_C000_0006_EAX] = {0xc0000006, 0, CPUID_EAX}, }; /* diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 41a4533f9989..631e65a21228 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -60,6 +60,7 @@ struct vmcs_config { u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; + u32 zx_cpu_based_3rd_exec_ctrl; u64 cpu_based_3rd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; @@ -255,6 +256,12 @@ static inline bool cpu_has_vmx_xsaves(void) SECONDARY_EXEC_ENABLE_XSAVES; } +static inline bool cpu_has_vmx_zxpause(void) +{ + return vmcs_config.zx_cpu_based_3rd_exec_ctrl & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool cpu_has_vmx_waitpkg(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 7c1996b433e2..4eabed8e5813 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -50,7 +50,9 @@ struct vmcs_controls_shadow { u32 pin; u32 exec; u32 secondary_exec; + u32 zx_tertiary_exec; u64 tertiary_exec; + u64 zx_vmexit_tsc; }; /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f54929487b84..5c2464e7b0d9 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -214,6 +214,8 @@ module_param(ple_window_max, uint, 0444); int __read_mostly pt_mode = PT_MODE_SYSTEM; module_param(pt_mode, int, S_IRUGO); +static u32 zx_ext_vmcs_cap; + static 
DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); static DEFINE_MUTEX(vmx_l1d_flush_mutex); @@ -2015,7 +2017,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UMWAIT_CONTROL: if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) return 1; - + msr_info->data = vmx->msr_ia32_umwait_control; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) + return 1; msr_info->data = vmx->msr_ia32_umwait_control; break; case MSR_IA32_SPEC_CTRL: @@ -2275,7 +2281,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* The reserved bit 1 and non-32 bit [63:32] should be zero */ if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) return 1; + vmx->msr_ia32_umwait_control = data; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) + return 1; + /* The reserved bit 1 and non-32 bit [63:32] should be zero */ + if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) + return 1; vmx->msr_ia32_umwait_control = data; break; case MSR_IA32_SPEC_CTRL: @@ -2733,6 +2747,10 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, vmcs_conf->vmentry_ctrl = _vmentry_control; vmcs_conf->misc = misc_msr; + /* Setup Zhaoxin exec-cntl3 VMCS field. */ + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) + vmcs_conf->zx_cpu_based_3rd_exec_ctrl |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; + #if IS_ENABLED(CONFIG_HYPERV) if (enlightened_vmcs) evmcs_sanitize_exec_ctrls(vmcs_conf); @@ -4526,6 +4544,28 @@ static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx) return exec_control; } +static u32 vmx_zx_tertiary_exec_control(struct vcpu_vmx *vmx) +{ + struct kvm_vcpu *vcpu = &vmx->vcpu; + u32 exec_control = vmcs_config.zx_cpu_based_3rd_exec_ctrl; + + /* + * Show errors if Qemu wants to enable guest_zxpause while + * vmx not support it. + */ + if (guest_cpuid_has(vcpu, X86_FEATURE_ZXPAUSE)) { + if (!cpu_has_vmx_zxpause()) + pr_err("VMX not support guest_zxpause!\n"); + else + exec_control |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; + } else + exec_control &= ~ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; + + /* enable other features here */ + + return exec_control; +} + /* * Adjust a single secondary execution control bit to intercept/allow an * instruction in the guest. 
This is usually done based on whether or not a @@ -4732,6 +4772,11 @@ static void init_vmcs(struct vcpu_vmx *vmx) if (cpu_has_secondary_exec_ctrls()) secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx)); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); + zx_vmexit_tsc_controls_set(vmx, 0); + } + if (cpu_has_tertiary_exec_ctrls()) tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx)); @@ -6261,6 +6306,13 @@ void dump_vmcs(struct kvm_vcpu *vcpu) else tertiary_exec_control = 0; + pr_err("*** Zhaoxin Specific Fields ***\n"); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + pr_err("Zhaoxin TertiaryExec Cntl = 0x%016x\n", + vmcs_read32(ZX_TERTIARY_VM_EXEC_CONTROL)); + pr_err("ZXPAUSE Saved TSC = 0x%016llx\n", vmcs_read64(ZXPAUSE_VMEXIT_TSC)); + } + pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); pr_err("*** Guest State ***\n"); @@ -7788,6 +7840,11 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmcs_set_secondary_exec_control(vmx, vmx_secondary_exec_control(vmx)); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); + zx_vmexit_tsc_controls_set(vmx, 0); + } + if (guest_can_use(vcpu, X86_FEATURE_VMX)) vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX | @@ -7918,6 +7975,9 @@ static __init void vmx_set_cpu_caps(void) if (cpu_has_vmx_waitpkg()) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); + + if (cpu_has_vmx_zxpause()) + kvm_cpu_cap_check_and_set(X86_FEATURE_ZXPAUSE); } static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) @@ -8487,6 +8547,12 @@ static __init int hardware_setup(void) unsigned long host_bndcfgs; struct desc_ptr dt; int r; + u32 ign; + + /* Caches Zhaoxin extend VMCS capabilities. */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &zx_ext_vmcs_cap, &ign); store_idt(&dt); host_idt_base = dt.address; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 45cee1a8bc0a..f566b9984f19 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -580,6 +580,17 @@ static inline u8 vmx_get_rvi(void) #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \ (TERTIARY_EXEC_IPI_VIRT) +#define KVM_REQUIRED_VMX_ZX_TERTIARY_VM_EXEC_CONTROL 0 +#define KVM_OPTIONAL_VMX_ZX_TERTIARY_VM_EXEC_CONTROL \ + (ZX_TERTIARY_EXEC_GUEST_ZXPAUSE) + +/* + * We shouldn't rw zxpause_vmexit_tsc vmcs field in this + * way, try to use another way in the future. 
+ */ +#define KVM_REQUIRED_VMX_ZXPAUSE_VMEXIT_TSC 0 +#define KVM_OPTIONAL_VMX_ZXPAUSE_VMEXIT_TSC 1 + #define BUILD_CONTROLS_SHADOW(lname, uname, bits) \ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \ { \ @@ -612,6 +623,8 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64) +BUILD_CONTROLS_SHADOW(zx_tertiary_exec, ZX_TERTIARY_VM_EXEC_CONTROL, 32) +BUILD_CONTROLS_SHADOW(zx_vmexit_tsc, ZXPAUSE_VMEXIT_TSC, 64) /* * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the @@ -714,6 +727,12 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; } +static inline bool vmx_guest_zxpause_enabled(struct vcpu_vmx *vmx) +{ + return zx_tertiary_exec_controls_get(vmx) & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) { if (!enable_ept) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8e4981e06ede..db86f36c38a0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1462,8 +1462,8 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, MSR_IA32_UMWAIT_CONTROL, - MSR_IA32_XFD, MSR_IA32_XFD_ERR, + MSR_ZX_PAUSE_CONTROL, }; static const u32 msrs_to_save_pmu[] = { @@ -7171,6 +7171,10 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) return; break; + case MSR_ZX_PAUSE_CONTROL: + if (!kvm_cpu_cap_has(X86_FEATURE_ZXPAUSE)) + return; + break; case MSR_IA32_RTIT_CTL: case MSR_IA32_RTIT_STATUS: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 06c4386a620b..3456f6deca51 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -764,6 +764,13 @@ #define MSR_TMTA_LRTI_READOUT 0x80868018 #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Intel defined MSRs. */ #define MSR_IA32_P5_MC_ADDR 0x00000000 #define MSR_IA32_P5_MC_TYPE 0x00000001 -- Gitee From 06b583a4c8bac969016033a6e821c4267139bb9d Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Tue, 14 Nov 2023 15:23:06 +0800 Subject: [PATCH 887/953] anolis: arm64: reposition mapping flag to make them visible ANBZ: #8540 If splitting linear mapping is enabled, mapping flags can be reused in other places. Thus they should be repositioned to support dynamicly splitting kernel page table. 
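As a concrete (illustrative) consumer, the splitting code added later in
this series builds its flag mask directly from the now-visible
definitions in <asm/mmu.h>:

	/* request pte-granular (re)mapping: no block or contiguous entries */
	int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;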
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/mmu.h | 4 ++++ arch/arm64/mm/mmu.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 94b68850cb9f..41e393e5c7fd 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -12,6 +12,10 @@ #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) #define TTBR_ASID_MASK (UL(0xffff) << 48) +#define NO_BLOCK_MAPPINGS BIT(0) +#define NO_CONT_MAPPINGS BIT(1) +#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3e26d444569e..1e637c1e65eb 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -41,10 +41,6 @@ #include #include -#define NO_BLOCK_MAPPINGS BIT(0) -#define NO_CONT_MAPPINGS BIT(1) -#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ - int idmap_t0sz __ro_after_init; #if VA_BITS > 48 -- Gitee From f3692f2ef170f418df7cb9d1eade59b76031c825 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Tue, 14 Nov 2023 15:30:29 +0800 Subject: [PATCH 888/953] anolis: arm64: add page table entry tlb flush helper ANBZ: #8540 Current tlb flush helper __flush_tlb_kernel_pgtable() would flush all entries including non-leaf PUD/PMD, which was not needed. Thus add new tlbflush helper that only flushes the leaf PUD/PMD/PTE entry. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/tlbflush.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b149cf9f91bc..e54f4343543a 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -223,6 +223,10 @@ static inline unsigned long get_trans_granule(void) * determined by 'stride' and only affect any walk-cache entries * if 'last_level' is equal to false. * + * __flush_tlb_kernel_pgtable_entry(addr) + * Invalidate a single kernel mapping for address "addr" on all + * CPUs. Must be called if the corresponding page table is + * last_level entry. * * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented * on top of these routines, since that is our interface to the mmu_gather @@ -479,6 +483,20 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ish); isb(); } + +/* + * Used to invalidate the TLB entries to the last level page table + * (pud/pmd/pte). + */ +static inline void __flush_tlb_kernel_pgtable_entry(unsigned long kaddr) +{ + unsigned long addr = __TLBI_VADDR(kaddr, 0); + + dsb(ishst); + __tlbi(vaale1is, addr); + dsb(ish); + isb(); +} #endif #endif -- Gitee From be9e72c858e12010aeb13f51cde7e3093a1f5cb1 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 15 Nov 2023 15:14:56 +0800 Subject: [PATCH 889/953] anolis: arm64: support splitting the block mapping kernel page table ANBZ: #8540 When the linear mapping range adopts block or contiguous mapping, the kernel can no longer support to modify the attribute of kernel page table in PTE granularity. Scenarios like RODATA/KFENCE/Crash kernel/Memory failure needs the capability to modify the page table attribute in PTE granularity to ensure the correctness and reliability. 
However, if the linear mapping range is mapped by PTE level, the ratio of TLB miss would increase obviously and the performance of programs would decrease when accessing the linear mapping areas. Currently, the performance and the attribute control of PTE level are incompatible. In practice, changing the page table attributes of PTE granularity is a low-frequency and occasional behavior, which gives us the opportunity to selectively split the block/contiguous mapping into PTE granularity to achieve both requirements above. This patch provides splitting the block/contiguous mapping dynamicly. To avoid the behavior of TLB prefetch, the process of splitting follows break-before-make principle to avoid TLB conflict. Meanwhile, if cleared the kernel page table entry, it is necessary to avoid accessing corresponding address that might be cleared before. In the system boot stage, only one CPU is working. There is no need to consider other CPUs access the cleared range simultaneously. In addition, when initializing the kernel page table, we should avoid the situation that the physical range that the cleared page table entry pointed to contains the physical address of the page table entry. It would cause a kernel page fault if dynamically splitting the page table entry later. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/mmu.h | 1 + arch/arm64/include/asm/pgtable.h | 9 + arch/arm64/mm/mmu.c | 353 ++++++++++++++++++++++++++++++- 3 files changed, 359 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 41e393e5c7fd..b290d04db44b 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -76,6 +76,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); extern bool kaslr_requires_kpti(void); +extern void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 07bdf5dd8ebe..a0a07932d6e6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -256,6 +256,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) return __pmd(pmd_val(pmd) | PMD_SECT_CONT); } +static inline pmd_t pmd_mknoncont(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT); +} + static inline pte_t pte_mkdevmap(pte_t pte) { return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); @@ -491,6 +496,7 @@ static inline int pmd_trans_huge(pmd_t pmd) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_exec(pmd) (!(pmd_val(pmd) & PMD_TABLE_PXN)) static inline pmd_t pmd_mkinvalid(pmd_t pmd) { @@ -685,6 +691,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) #define pud_valid(pud) pte_valid(pud_pte(pud)) #define pud_user(pud) pte_user(pud_pte(pud)) #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) +#define pud_exec(pud) (!(pud_val(pud) & PUD_TABLE_PXN)) static inline void set_pud(pud_t *pudp, pud_t pud) { @@ -752,6 +759,7 @@ static inline pmd_t *pud_pgtable(pud_t pud) #define p4d_none(p4d) (!p4d_val(p4d)) #define p4d_bad(p4d) (!(p4d_val(p4d) & 2)) #define p4d_present(p4d) (p4d_val(p4d)) +#define 
p4d_exec(p4d) (!(p4d_val(p4d) & P4D_TABLE_PXN)) static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) { @@ -798,6 +806,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d) #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;}) /* Match pud_offset folding in */ +#define pud_offset_phys(dir, addr) NULL #define pud_set_fixmap(addr) NULL #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) #define pud_clear_fixmap() diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1e637c1e65eb..6474a2945f8a 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -165,6 +165,49 @@ bool pgattr_change_is_safe(u64 old, u64 new) return ((old ^ new) & ~mask) == 0; } +/* + * If the physical address of block-mapping pud/pmd or contiguous mapping pmd/pte + * entry is located in the physical range it points to, clearing the entry would + * cause the corresponding physcial range can not be accessed any longer. The + * remapping process of this range can not be done because of inaccessible. + * For this case, it should be mapped with PTE level when initializing the page + * table. + */ +static bool should_clear_cont_pte(pmd_t *pmdp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pte_offset_phys(pmdp, addr); + + return (pa >> CONT_PTE_SHIFT) == (phys >> CONT_PTE_SHIFT); +} + +static bool should_clear_cont_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> CONT_PMD_SHIFT) == (phys >> CONT_PMD_SHIFT); +} + +static bool should_split_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> PMD_SHIFT) == (phys >> PMD_SHIFT); +} + +#ifndef __PAGETABLE_PUD_FOLDED +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pud_offset_phys(p4dp, addr); + + return (pa >> PUD_SHIFT) == (phys >> PUD_SHIFT); +} +#else +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + return false; +} +#endif + static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot) { @@ -219,7 +262,8 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, /* use a contiguous mapping if the range is suitably aligned */ if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) && - (flags & NO_CONT_MAPPINGS) == 0) + (flags & NO_CONT_MAPPINGS) == 0 && + !should_clear_cont_pte(pmdp, addr, phys)) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); init_pte(pmdp, addr, next, phys, __prot); @@ -236,6 +280,14 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, pmd_t *pmdp; pmdp = pmd_set_fixmap_offset(pudp, addr); + /* + * the physical address of PMDs with contiguous flag might locate in the + * physical range they point to. Thus clear the CONT flag earlier to + * avoid inaccessiable situation. 
+ */ + if (should_clear_cont_pmd(pudp, addr, phys)) + prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); + do { pmd_t old_pmd = READ_ONCE(*pmdp); @@ -243,7 +295,8 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, /* try section mapping first */ if (((addr | next | phys) & ~PMD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pmd(pudp, addr, phys)) { pmd_set_huge(pmdp, phys, prot); /* @@ -336,11 +389,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); /* - * For 4K granule only, attempt to put down a 1GB block + * For 4K granule only, attempt to put down a 1GB block. If the + * physical address of pudp is included in the range where + * itself points to, split the block of pudp earlier. */ if (pud_sect_supported() && ((addr | next | phys) & ~PUD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pud(p4dp, addr, phys)) { pud_set_huge(pudp, phys, prot); /* @@ -1486,3 +1542,292 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte { set_pte_at(vma->vm_mm, addr, ptep, pte); } + +static void clear_cont_pte_mapping(pmd_t *pmdp, unsigned long addr, + unsigned long end) +{ + pte_t *ptep, *sptep, pte; + unsigned long saddr, next; + int i; + + /* + * Clear the CONT flag of ptes at the input range. CONT flag should be + * cleared at the granularity of CONT_PTE. + */ + addr &= CONT_PTE_MASK; + if (end & ~CONT_PTE_MASK) + end = (end + CONT_PTE_SIZE) & CONT_PTE_MASK; + + do { + pgprot_t prot; + unsigned long pfn; + + saddr = addr; + next = pte_cont_addr_end(addr, end); + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + + if (pte_none(pte)) + continue; + + if (pte_cont(READ_ONCE(*ptep))) { + sptep = ptep; + prot = pte_pgprot(pte_mknoncont(pte)); + pfn = pte_pfn(pte); + + /* + * Changing the bit of contiguous entries requires to + * follow Break-Before-Make approach. See ARM DDI + * 0487A.k_iss10775, "Misprogramming of the Contiguous bit", + * page D4-1762. 
+ */ + for (i = 0; i < CONT_PTES; i++, ptep++) + pte_clear(&init_mm, addr, ptep); + + for (i = 0; i < CONT_PTES; i++, saddr += PAGE_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PTES; i++, sptep++, pfn++) + set_pte(sptep, pfn_pte(pfn, prot)); + } + } while (addr = next, addr < end); +} + +static void clear_cont_pmd_mapping(pud_t *pudp, unsigned long addr, + unsigned long end) +{ + pmd_t *pmdp, *spmdp, pmd; + unsigned long saddr, next; + int i; + + addr &= CONT_PMD_MASK; + if (end & ~CONT_PMD_MASK) + end = (end + CONT_PMD_SIZE) & CONT_PMD_MASK; + + do { + pgprot_t prot; + unsigned long pfn, pfn_offset = PMD_SIZE >> PAGE_SHIFT; + + saddr = addr; + next = pmd_cont_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (pte_cont(pmd_pte(pmd))) { + spmdp = pmdp; + prot = pte_pgprot(pmd_pte(pmd_mknoncont(pmd))); + pfn = pmd_pfn(pmd); + + for (i = 0; i < CONT_PMDS; i++, pmdp++) + pmd_clear(pmdp); + + for (i = 0; i < CONT_PMDS; i++, saddr += PMD_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PMDS; i++, spmdp++, pfn += pfn_offset) + set_pmd(spmdp, pfn_pmd(pfn, prot)); + } + } while (addr = next, addr < end); +} + +static void split_pmd_mapping(pud_t *pudp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + pmd_t *pmdp, pmd, split_pmd; + unsigned long next; + int new_flags = 0; + + /* + * Clear the contiguous pmd if there is any splitting request located in + * the corresponding range. + */ + if (flags & NO_CONT_MAPPINGS) + clear_cont_pmd_mapping(pudp, addr, end); + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (!pmd_exec(pmd)) + flags |= NO_EXEC_MAPPINGS; + + if (pmd_sect(pmd)) { + phys_addr_t phys, pte_phys; + pgprot_t orig_prot; + + phys = __virt_to_phys(addr); + + /* + * Get the original protections except PMD_SECT. + */ + orig_prot = __pgprot(pgprot_val(pte_pgprot(pmd_pte(pmd))) | + PMD_TYPE_TABLE); + + /* + * Allocate a new pmd page to re-initialize + * corresponding ptes. + */ + pte_phys = pgd_pgtable_alloc(PAGE_SHIFT); + split_pmd = pfn_pmd(__phys_to_pfn(pte_phys), orig_prot); + + /* + * If addr/next is not PMD aligned, create contiguous + * mapping at the rest of specific split range. + */ + if (addr & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, addr & PMD_MASK, addr, + phys & PMD_MASK, prot, + pgd_pgtable_alloc, new_flags); + if (next & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, next, + (next + PMD_SIZE) & PMD_MASK, + phys + next - addr, prot, + pgd_pgtable_alloc, new_flags); + + alloc_init_cont_pte(&split_pmd, addr, next, phys, prot, + pgd_pgtable_alloc, flags); + + /* + * Obey the break-before-make rule to split the page + * table, otherwise it might trigger CONSTRAINED + * UNPREDICTABLE behaviors because TLB conflict. Thus + * clear the original pmd entry and flush it, then set + * the newly allocated pmd page. 
+ */ + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable_entry(addr); + set_pmd(pmdp, split_pmd); + } else { + clear_cont_pte_mapping(pmdp, addr, next); + } + } while (addr = next, addr < end); +} + +static void split_pud_mapping(p4d_t *p4dp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + pud_t *pudp, pud, split_pud; + unsigned long next; + int new_flags = 0; + + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud)); + + if (!pud_exec(pud)) + flags |= NO_EXEC_MAPPINGS; + + if (pud_sect(pud)) { + phys_addr_t phys, pmd_phys; + pgprot_t orig_prot; + + phys = __virt_to_phys(addr); + + orig_prot = __pgprot(pgprot_val(pte_pgprot(pud_pte(pud))) | + PUD_TYPE_TABLE); + + pmd_phys = pgd_pgtable_alloc(PMD_SHIFT); + split_pud = pfn_pud(__phys_to_pfn(pmd_phys), orig_prot); + + /* + * If addr/next is not PUD aligned, create block and + * contiguous mapping at the rest of specific split range. + */ + if (addr & ~PUD_MASK) + alloc_init_cont_pmd(&split_pud, addr & PUD_MASK, + addr, phys & PUD_MASK, + prot, pgd_pgtable_alloc, new_flags); + if (next & ~PUD_MASK) + alloc_init_cont_pmd(&split_pud, next, + (next + PUD_SIZE) & PUD_MASK, + phys + next - addr, + prot, pgd_pgtable_alloc, new_flags); + + alloc_init_cont_pmd(&split_pud, addr, next, phys, prot, + pgd_pgtable_alloc, flags); + + /* + * Obey the break-before-make rule to split the page + * table, otherwise it might trigger CONSTRAINED + * UNPREDICTABLE behaviors because TLB conflict. Thus + * clear the original pud entry and flush it, then set + * the newly allocated pud page. + */ + pud_clear(pudp); + __flush_tlb_kernel_pgtable_entry(addr); + set_pud(pudp, split_pud); + } else { + split_pmd_mapping(pudp, addr, next, prot, flags); + } + } while (addr = next, addr < end); +} + +static void split_p4d_mapping(pgd_t *pgdp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + p4d_t *p4dp, p4d; + unsigned long next; + + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + +#if CONFIG_PGTABLE_LEVELS > 3 + /* + * If the original p4d mapping is not executable, remain it even + * splitting. + */ + if (!p4d_exec(p4d)) + flags |= NO_EXEC_MAPPINGS; +#endif + + split_pud_mapping(p4dp, addr, next, prot, flags); + } while (addr = next, addr < end); +} + +void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot) +{ + pgd_t *pgdp, pgd; + unsigned long addr, next, end; + int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + addr = virt & PAGE_MASK; + end = PAGE_ALIGN(virt + size); + prot = pgprot_tagged(prot); + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + + split_p4d_mapping(pgdp, addr, next, prot, flags); + } while (addr = next, addr < end); +} -- Gitee From 17b417cee13c3c38f7273d39d01250689275e929 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 15 Nov 2023 15:33:52 +0800 Subject: [PATCH 890/953] anolis: arm64: support splitting page tables after system init ANBZ: #8540 RODATA, KFENCE and memory failure require dynamically splitting of page tables after system boot. After system boot stage, other CPUs may access the cleared page table while splitting the page table, and the stop-machine mechanism needs to be used to avoid the access of other CPUs. 
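A typical call site, as used by later patches in this series, splits a
single page of the linear map before its attributes are changed (sketch
only; "page" stands for an arbitrary struct page backing the linear map):

	split_linear_mapping_after_init((unsigned long)page_address(page),
					PAGE_SIZE, PAGE_KERNEL);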
Signed-off-by: Kaihao Bai
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2885
---
 arch/arm64/include/asm/mmu.h |  3 ++-
 arch/arm64/mm/mmu.c          | 46 ++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index b290d04db44b..bad28b274467 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -77,7 +77,8 @@ extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
 extern bool kaslr_requires_kpti(void);
 extern void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot);
-
+extern void split_linear_mapping_after_init(unsigned long virt, phys_addr_t size,
+					    pgprot_t prot);
 #define INIT_MM_CONTEXT(name)	\
 	.pgd = init_pg_dir,

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6474a2945f8a..8cda2a3d25cf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <linux/stop_machine.h>

 #include
 #include
@@ -71,6 +72,15 @@ EXPORT_SYMBOL(empty_zero_page);

 static DEFINE_SPINLOCK(swapper_pgdir_lock);
 static DEFINE_MUTEX(fixmap_lock);
+static DEFINE_MUTEX(split_linear_mapping_lock);
+
+static struct split_memory_params {
+	unsigned long virt;
+	phys_addr_t size;
+	pgprot_t prot;
+
+	atomic_t cpu_count;
+} split_memory_param;

 void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
 {
@@ -1831,3 +1841,39 @@ void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot)
 		split_p4d_mapping(pgdp, addr, next, prot, flags);
 	} while (addr = next, addr < end);
 }
+
+static int __split_linear_mapping_after_init(void *data)
+{
+	struct split_memory_params *param = data;
+
+	if (atomic_inc_return(&param->cpu_count) == 1) {
+		split_linear_mapping(param->virt, param->size, param->prot);
+		atomic_inc(&param->cpu_count);
+	} else {
+		while (atomic_read(&param->cpu_count) <= num_online_cpus())
+			cpu_relax();
+	}
+	return 0;
+}
+
+/*
+ * When splitting the kernel page table following the Break-Before-Make
+ * principle, other CPUs might access an address mapped by a cleared entry
+ * before it is remapped. Thus stop_machine() is used to avoid the kernel
+ * page faults that would result from the lack of inter-CPU synchronization.
+ */
+void split_linear_mapping_after_init(unsigned long virt, phys_addr_t size,
+				     pgprot_t prot)
+{
+	mutex_lock(&split_linear_mapping_lock);
+
+	split_memory_param.virt = virt;
+	split_memory_param.size = size;
+	split_memory_param.prot = prot;
+	atomic_set(&split_memory_param.cpu_count, 0);
+
+	stop_machine(__split_linear_mapping_after_init, &split_memory_param, cpu_online_mask);
+
+	mutex_unlock(&split_linear_mapping_lock);
+}
-- 
Gitee

From a7081fc2e0433e97be69a4293c7481ea1d617664 Mon Sep 17 00:00:00 2001
From: Kaihao Bai
Date: Wed, 15 Nov 2023 15:48:55 +0800
Subject: [PATCH 891/953] anolis: arm64: support splitting page table of
 rodata_full enabled

ANBZ: #8540

If rodata_full is enabled, every page in the linear mapping range can
have its attribute set to read-only at PTE granularity. To support that,
the whole linear mapping range used to be mapped at PTE level. However,
changing an attribute to read-only is not a frequent event, so splitting
just the affected pages on demand is more suitable and preserves the
performance of the rest of the linear map.
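For example, with this change a read-only transition such as the
following no longer requires a fully PTE-mapped linear map; only the
linear alias of the affected page is split on demand (illustrative
sequence, no new API involved):

	void *p = vmalloc(PAGE_SIZE);

	/*
	 * Marks the vmalloc alias read-only; with rodata_full, the linear
	 * alias of the backing page is first split to pte level and then
	 * write-protected by the hunk below.
	 */
	set_memory_ro((unsigned long)p, 1);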
Signed-off-by: Kaihao Bai
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2885
---
 arch/arm64/include/asm/set_memory.h |  2 ++
 arch/arm64/mm/mmu.c                 |  4 ++--
 arch/arm64/mm/pageattr.c            | 19 +++++++++++++++++++
 3 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
index 0f740b781187..20fb7b1d5423 100644
--- a/arch/arm64/include/asm/set_memory.h
+++ b/arch/arm64/include/asm/set_memory.h
@@ -8,6 +8,8 @@
 bool can_set_direct_map(void);
 #define can_set_direct_map can_set_direct_map

+bool can_set_block_and_cont_map(void);
+
 int set_memory_valid(unsigned long addr, int numpages, int enable);

 int set_direct_map_invalid_noflush(struct page *page);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8cda2a3d25cf..633cc234d921 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -649,7 +649,7 @@ static void __init map_mem(pgd_t *pgdp)

 	early_kfence_pool = arm64_kfence_alloc_pool();

-	if (can_set_direct_map())
+	if (!can_set_block_and_cont_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

 	/*
@@ -1375,7 +1375,7 @@ int arch_add_memory(int nid, u64 start, u64 size,

 	VM_BUG_ON(!mhp_range_allowed(start, size, true));

-	if (can_set_direct_map())
+	if (!can_set_block_and_cont_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 924843f1f661..be87ffb54153 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -33,6 +33,15 @@ bool can_set_direct_map(void)
 	       arm64_kfence_can_set_direct_map();
 }

+/*
+ * Even if rodata_full is enabled, the linear mapping range can still use
+ * block & cont mappings: this decouples rodata_full from debug_pagealloc.
+ */
+bool can_set_block_and_cont_map(void)
+{
+	return !debug_pagealloc_enabled() && !arm64_kfence_can_set_direct_map();
+}
+
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 {
 	struct page_change_data *cdata = data;
@@ -108,6 +117,16 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
 			    pgprot_val(clear_mask) == PTE_RDONLY)) {
 		for (i = 0; i < area->nr_pages; i++) {
+			unsigned long virt = (unsigned long)page_address(area->pages[i]);
+
+			/*
+			 * Only split the linear mapping when the attribute is
+			 * changed to read-only. Other transitions do not
+			 * depend on the mapping type.
+			 */
+			if (pgprot_val(set_mask) == PTE_RDONLY && can_set_block_and_cont_map())
+				split_linear_mapping_after_init(virt, PAGE_SIZE, PAGE_KERNEL);
+
 			__change_memory_common((u64)page_address(area->pages[i]),
 					       PAGE_SIZE, set_mask, clear_mask);
 		}
-- 
Gitee

From 800cc08f1ff765f49472c33aff3ffc3ec6e671ed Mon Sep 17 00:00:00 2001
From: Kaihao Bai
Date: Thu, 16 Nov 2023 10:44:25 +0800
Subject: [PATCH 892/953] anolis: arm64: support splitting the dynamic
 allocation range of kfence

ANBZ: #8540

Kfence supports dynamically allocating the kfence pool. On arm64,
however, this was only supported when the whole linear mapping range was
mapped at PTE level. With dynamic splitting, the kfence pool allocation
no longer relies on a fully PTE-mapped linear mapping range; only the
range it allocated is split down to PTE level.
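In essence, the reworked arch hook reduces to the following shape
(condensed from the kfence.h hunk below):

	if (!can_set_block_and_cont_map())
		return false;	/* debug_pagealloc still needs pte-level maps */

	/* split only the pool's own range down to pte level */
	split_linear_mapping_after_init((unsigned long)kpa->addr,
					kpa->pool_size, PAGE_KERNEL);
	return true;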
Signed-off-by: Kaihao Bai
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2885
---
 arch/arm64/include/asm/kfence.h | 13 ++++++++++++-
 arch/arm64/mm/pageattr.c        |  2 +-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index e5f86bbf4348..df7ebc9fc416 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -14,7 +14,18 @@
 static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa)
 {
-	return can_set_direct_map();
+	unsigned long addr = (unsigned long)kpa->addr;
+
+	if (!can_set_block_and_cont_map())
+		return false;
+
+	/*
+	 * If the allocated range is block or contiguous mapped, split it
+	 * to pte level before re-initializing the kfence pages.
+	 */
+	split_linear_mapping_after_init(addr, kpa->pool_size, PAGE_KERNEL);
+
+	return true;
 }

 static inline bool kfence_protect_page(unsigned long addr, bool protect)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index be87ffb54153..eedea185a2fd 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -39,7 +39,7 @@ bool can_set_direct_map(void)
  */
 bool can_set_block_and_cont_map(void)
 {
-	return !debug_pagealloc_enabled() && !arm64_kfence_can_set_direct_map();
+	return !debug_pagealloc_enabled();
 }
-- 
Gitee

From 7ea638598253ad58ab6b1d9167ae99e16eb0112c Mon Sep 17 00:00:00 2001
From: Kaihao Bai
Date: Thu, 16 Nov 2023 11:46:16 +0800
Subject: [PATCH 893/953] anolis: arm64: replace can_set_direct_map by
 splitting linear mapping

ANBZ: #8540

Currently, can_set_direct_map() has no callers any more. Replace it with
another function that indicates whether the linear region can use block
and contiguous mappings and thus be split on demand.
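The predicate change, condensed from the previous patch and the hunks
below (comments added here for explanation):

	/*
	 * Removed: can_set_direct_map(). A true result meant the whole
	 * linear map had to be mapped at pte granularity up front.
	 */
	return rodata_full || debug_pagealloc_enabled() ||
	       arm64_kfence_can_set_direct_map();

	/*
	 * Kept: can_set_block_and_cont_map(). A true result means block and
	 * contiguous mappings are allowed; every user other than
	 * debug_pagealloc now splits on demand.
	 */
	return !debug_pagealloc_enabled();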
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/kfence.h | 11 +++-------- arch/arm64/include/asm/set_memory.h | 3 --- arch/arm64/mm/pageattr.c | 28 +++++++--------------------- include/linux/set_memory.h | 12 ------------ 4 files changed, 10 insertions(+), 44 deletions(-) diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index df7ebc9fc416..44994e2a6d88 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,10 +8,13 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H +#ifdef CONFIG_KFENCE #include #include +extern bool kfence_early_init; + static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { unsigned long addr = (unsigned long)kpa->addr; @@ -37,14 +40,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) static inline bool arch_kfence_free_pool(unsigned long addr) { return false; } -#ifdef CONFIG_KFENCE -extern bool kfence_early_init; -static inline bool arm64_kfence_can_set_direct_map(void) -{ - return !kfence_early_init; -} -#else /* CONFIG_KFENCE */ -static inline bool arm64_kfence_can_set_direct_map(void) { return false; } #endif /* CONFIG_KFENCE */ #endif /* __ASM_KFENCE_H */ diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 20fb7b1d5423..3f5d866b98d0 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -5,9 +5,6 @@ #include -bool can_set_direct_map(void); -#define can_set_direct_map can_set_direct_map - bool can_set_block_and_cont_map(void); int set_memory_valid(unsigned long addr, int numpages, int enable); diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index eedea185a2fd..d20986e457a7 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -20,19 +20,6 @@ struct page_change_data { bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED); -bool can_set_direct_map(void) -{ - /* - * rodata_full and DEBUG_PAGEALLOC require linear map to be - * mapped at page granularity, so that it is possible to - * protect/unprotect single pages. - * - * KFENCE pool requires page-granular mapping if initialized late. - */ - return rodata_full || debug_pagealloc_enabled() || - arm64_kfence_can_set_direct_map(); -} - /* * If rodata_full is enabled, the mapping of linear mapping range can also be * block & cont mapping, here decouples the rodata_full and debug_pagealloc. 
@@ -188,8 +175,9 @@ int set_direct_map_invalid_noflush(struct page *page) .clear_mask = __pgprot(PTE_VALID), }; - if (!can_set_direct_map()) - return 0; + if (can_set_block_and_cont_map()) + split_linear_mapping_after_init((unsigned long)page_address(page), + PAGE_SIZE, PAGE_KERNEL); return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -203,8 +191,9 @@ int set_direct_map_default_noflush(struct page *page) .clear_mask = __pgprot(PTE_RDONLY), }; - if (!can_set_direct_map()) - return 0; + if (can_set_block_and_cont_map()) + split_linear_mapping_after_init((unsigned long)page_address(page), + PAGE_SIZE, PAGE_KERNEL); return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -214,7 +203,7 @@ int set_direct_map_default_noflush(struct page *page) #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { - if (!can_set_direct_map()) + if (can_set_block_and_cont_map()) return; set_memory_valid((unsigned long)page_address(page), numpages, enable); @@ -238,9 +227,6 @@ bool kernel_page_present(struct page *page) pte_t *ptep; unsigned long addr = (unsigned long)page_address(page); - if (!can_set_direct_map()) - return true; - pgdp = pgd_offset_k(addr); if (pgd_none(READ_ONCE(*pgdp))) return false; diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 95ac8398ee72..7ca06b42672d 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -38,18 +38,6 @@ static inline bool kernel_page_present(struct page *page) { return true; } -#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ -/* - * Some architectures, e.g. ARM64 can disable direct map modifications at - * boot time. Let them overrive this query. - */ -#ifndef can_set_direct_map -static inline bool can_set_direct_map(void) -{ - return true; -} -#define can_set_direct_map can_set_direct_map -#endif #endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifdef CONFIG_X86_64 -- Gitee From b3c1cafb7e4f41f1711b1fe60e4868961809600e Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 9 Nov 2022 11:17:05 +0800 Subject: [PATCH 894/953] anolis: arm64: add the helper to set no-present page table ANBZ: #8540 Add a helper set_memory_np to make the page table corresponding to the virtual address not present. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/mm/pageattr.c | 12 ++++++++++++ include/asm-generic/set_memory.h | 2 +- include/linux/set_memory.h | 1 + 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index d20986e457a7..801ac339298a 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -168,6 +168,18 @@ int set_memory_valid(unsigned long addr, int numpages, int enable) __pgprot(PTE_VALID)); } +int set_memory_np(unsigned long addr, int numpages) +{ + /* + * If the addr belongs to linear mapping range, split it to pte level + * before changing the attribute of the page table. 
+ */ + if (can_set_block_and_cont_map() && __is_lm_address(addr)) + split_linear_mapping_after_init(addr, PAGE_SIZE * numpages, PAGE_KERNEL); + + return set_memory_valid(addr, numpages, 0); +} + int set_direct_map_invalid_noflush(struct page *page) { struct page_change_data data = { diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h index c86abf6bc7ba..caad5193913c 100644 --- a/include/asm-generic/set_memory.h +++ b/include/asm-generic/set_memory.h @@ -9,5 +9,5 @@ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); - +int set_memory_np(unsigned long addr, int numpages); #endif diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 7ca06b42672d..e77a3345d20b 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -12,6 +12,7 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_np(unsigned long addr, int numpages) { return 0; } #endif #ifndef set_memory_rox -- Gitee From 098532d6d274948160913bb427df10ba003cb5e6 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Thu, 16 Nov 2023 17:46:50 +0800 Subject: [PATCH 895/953] anolis: mm: avoid speculative access after memory failure ANBZ: #8540 Memory failure would pass an interrupt when the hardware detects a memory page corrupted, then the kernel needs to unmap uncorrectable pages to avoid access and allocation. However, in the implementation like arm64, the uncorrectable page is unmapped only in the page table of user-space process, the page table of the linear mapping range is not considered. It incurs the cpu will continue to accept errors through interrupts because of speculative access. For that, the uncorrectable page needs to be set to invalid to avoid speculative access. If the linear mapping is block mapping, we should firstly split it to pte level. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- mm/memory-failure.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 455093f73a70..ca738ab7c787 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -60,6 +60,7 @@ #include #include #include +#include #include "swap.h" #include "internal.h" #include "ras/ras_event.h" @@ -2440,10 +2441,17 @@ static void memory_failure_work_func(struct work_struct *work) spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); if (!gotten) break; - if (entry.flags & MF_SOFT_OFFLINE) + if (entry.flags & MF_SOFT_OFFLINE) { soft_offline_page(entry.pfn, entry.flags); - else - memory_failure(entry.pfn, entry.flags); + } else if (!memory_failure(entry.pfn, entry.flags)) { + /* + * If the pfn reported by ghes can not be recovered, set + * the corresponding page table of linear mapping range + * to be non-present, which avoids the speculative + * access of corrupted memory. 
+ */ + set_memory_np((unsigned long)page_to_virt(pfn_to_page(entry.pfn)), 1); + } } } -- Gitee From 181ae64c25de2bf3d818d7fd6d99b7a73a38e883 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Thu, 7 Dec 2023 10:21:13 +0800 Subject: [PATCH 896/953] anolis: arm64: use XN table mapping attributes for the kfence region ANBZ: #8540 Kfence region is located in the linear region, thus we also need to set PXN attributes for all table entries in the kfence region. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/mm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 633cc234d921..1884c70f359b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -617,7 +617,7 @@ static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) /* KFENCE pool needs page-level mapping. */ __map_memblock(pgdp, kfence_pool, kfence_pool + kfence_pool_size, pgprot_tagged(PAGE_KERNEL), - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS | NO_EXEC_MAPPINGS); memblock_clear_nomap(kfence_pool, kfence_pool_size); __kfence_pool_early_init = phys_to_virt(kfence_pool); } -- Gitee From 743ab9475a44947506d25b762f48c67faba74aba Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 14:23:31 +0800 Subject: [PATCH 897/953] anolis: Add early quirk to identify kh-40000 ANBZ: #8663 Identify kh-40000 platforms by specific PCI device's version number. Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2966 --- arch/x86/kernel/early-quirks.c | 19 +++++++++++++++++++ include/linux/dma-map-ops.h | 10 ++++++++++ 2 files changed, 29 insertions(+) diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index a6c1867fc7aa..ddb857d94ed9 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include static void __init fix_hypertransport_config(int num, int slot, int func) { @@ -685,6 +687,19 @@ static void __init apple_airport_reset(int bus, int slot, int func) early_iounmap(mmio, BCM4331_MMIO_SIZE); } +bool is_zhaoxin_kh40000; + +static void quirk_zhaoxin_dma_patch(int num, int slot, int func) +{ + u8 revision; + + revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); + if (revision == 0x10) { + is_zhaoxin_kh40000 = true; + pr_info("zhaoxin direct dma patch enabled\n"); + } +} + #define QFLAG_APPLY_ONCE 0x1 #define QFLAG_APPLIED 0x2 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) @@ -728,6 +743,10 @@ static struct chipset early_qrk[] __initdata = { PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_BROADCOM, 0x4331, PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, + { PCI_VENDOR_ID_ZHAOXIN, 0x1001, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, + { PCI_VENDOR_ID_ZHAOXIN, 0x345B, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, {} }; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index f2fc203fb8a1..f8451912178a 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -509,4 +509,14 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, } #endif /* CONFIG_PCI_P2PDMA */ +#if defined CONFIG_PCI && defined CONFIG_X86 + +extern bool 
is_zhaoxin_kh40000; + +#else + +bool __weak is_zhaoxin_kh40000; + +#endif + #endif /* _LINUX_DMA_MAP_OPS_H */ -- Gitee From 4e7425bd8a7c97883949ee4504a29a8cf986656a Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 20:35:27 +0800 Subject: [PATCH 898/953] anolis: Add kh40000_direct_dma_ops for KH-40000 platform ANBZ: #8663 Add 'kh40000_direct_dma_ops' to replace 'direct_dma_ops' for KH-40000 platform. For coherent DMA access, memory can be allocated only from the memory node of the node where the device resides. For streaming DMA access, add a PCI read operation at the end of DMA access. Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2966 --- .../admin-guide/kernel-parameters.txt | 5 + arch/x86/kernel/Makefile | 1 + arch/x86/kernel/early-quirks.c | 1 + arch/x86/kernel/zhaoxin_kh40000.c | 176 ++++++++++++++++++ include/linux/dma-map-ops.h | 1 + kernel/dma/contiguous.c | 3 + 6 files changed, 187 insertions(+) create mode 100644 arch/x86/kernel/zhaoxin_kh40000.c diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index ee22d18ca8ab..18277efe37a1 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2322,6 +2322,11 @@ isapnp= [ISAPNP] Format: ,,, + zhaoxin_patch_bitmask= + [X86] Bitmask for Zhaoxin Platform's patch. + bit 0: enable KH-40000 dma patch's node check function + + isolcpus= [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance. [Deprecated - use cpusets instead] Format: [flag-list,] diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index c25d40cbbdbe..2b433325ca8f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -159,6 +159,7 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o + obj-$(CONFIG_PCI) += zhaoxin_kh40000.o endif obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index ddb857d94ed9..b5f5e0916894 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -696,6 +696,7 @@ static void quirk_zhaoxin_dma_patch(int num, int slot, int func) revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); if (revision == 0x10) { is_zhaoxin_kh40000 = true; + dma_ops = &kh40000_dma_direct_ops; pr_info("zhaoxin direct dma patch enabled\n"); } } diff --git a/arch/x86/kernel/zhaoxin_kh40000.c b/arch/x86/kernel/zhaoxin_kh40000.c new file mode 100644 index 000000000000..c477b18892fa --- /dev/null +++ b/arch/x86/kernel/zhaoxin_kh40000.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../../../kernel/dma/direct.h" + +/*** + * usage: + * set "zhaoxin_patch_bitmask=" in cmdline + * value description: + * bit 0: enable(1) node check or not(0). 
default 1 + */ +enum { + ZHAOXIN_P2CW_NODE_CHECK = BIT(0), + ZHAOXIN_PATCH_CODE_MAX = ZHAOXIN_P2CW_NODE_CHECK, +}; + +#define ZHAOXIN_PATCH_CODE_DEFAULT ZHAOXIN_P2CW_NODE_CHECK + +unsigned long zhaoxin_patch_code = ZHAOXIN_PATCH_CODE_DEFAULT; + +static int __init zhaoxin_patch_code_setup(char *str) +{ + int err = kstrtoul(str, 0, &zhaoxin_patch_code); + + if (err || (zhaoxin_patch_code > ZHAOXIN_PATCH_CODE_MAX)) { + pr_err("cmdline 'zhaoxin_patch_bitmask=%s' inappropriate\n", + str); + return err; + } + + if (ZHAOXIN_P2CW_NODE_CHECK | zhaoxin_patch_code) + pr_info("zhaoxin dma patch node check is enabled\n"); + + return 0; +} +__setup("zhaoxin_patch_bitmask=", zhaoxin_patch_code_setup); + +static struct pci_dev *kh40000_get_pci_dev(struct device *dev) +{ + if (dev_is_pci(dev)) + return to_pci_dev(dev); + + if (dev->parent) + return kh40000_get_pci_dev(dev->parent); + + return NULL; +} + +static void kh40000_sync_single_dma_for_cpu(struct device *dev, dma_addr_t paddr, + enum dma_data_direction dir, bool is_iommu) +{ + u8 vid; + struct pci_dev *pci; + u64 dma_mask = *dev->dma_mask; + + /* check direction */ + if ((dir != DMA_FROM_DEVICE) && (dir != DMA_BIDIRECTIONAL)) + return; + + /* check dma capability */ + if (dma_mask <= DMA_BIT_MASK(32)) + return; + + /* check device type */ + pci = kh40000_get_pci_dev(dev); + if (pci == NULL) + return; + + /* get real physical address */ + if (is_iommu) { + struct iommu_domain *domain = iommu_get_dma_domain(dev); + + paddr = iommu_iova_to_phys(domain, paddr); + if (!paddr) + return; + } + + /* check node or not */ + if ((zhaoxin_patch_code & ZHAOXIN_P2CW_NODE_CHECK) + && pfn_to_nid(PFN_DOWN(paddr)) == dev_to_node(dev)) + return; + + /* flush data by one pci read cycle */ + pci_read_config_byte(pci, PCI_VENDOR_ID, &vid); +} + +/* zhaoxin kh-40000 direct dma ops */ +static void *kh40000_dma_direct_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + if (dev->coherent_dma_mask > DMA_BIT_MASK(32)) + gfp |= __GFP_THISNODE; + + return dma_direct_alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_unmap_page(dev, addr, size, dir, attrs); +} + +static void kh40000_dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); +} + +static void kh40000_dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_sync_single_for_cpu(dev, addr, size, dir); +} + +static void kh40000_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_unmap_sg(dev, sgl, nents, dir, attrs); +} + +static void kh40000_dma_direct_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); +} + +const struct dma_map_ops kh40000_dma_direct_ops 
= {
+	.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
+	.alloc = kh40000_dma_direct_alloc,
+	.sync_sg_for_cpu = kh40000_dma_direct_sync_sg_for_cpu,
+	.unmap_page = kh40000_dma_direct_unmap_page,
+	.sync_single_for_cpu = kh40000_dma_direct_sync_single_for_cpu,
+	.unmap_sg = kh40000_dma_direct_unmap_sg,
+	.unmap_resource = kh40000_dma_direct_unmap_resource,
+	.dma_supported = dma_direct_supported,
+	.free = dma_direct_free,
+	.alloc_pages = dma_direct_alloc_pages,
+	.free_pages = dma_direct_free_pages,
+	.sync_single_for_device = dma_direct_sync_single_for_device,
+	.sync_sg_for_device = dma_direct_sync_sg_for_device,
+	.get_required_mask = dma_direct_get_required_mask,
+	.max_mapping_size = dma_direct_max_mapping_size,
+	.mmap = dma_direct_mmap,
+	.get_sgtable = dma_direct_get_sgtable,
+	.map_page = dma_direct_map_page,
+	.map_sg = dma_direct_map_sg,
+	.map_resource = dma_direct_map_resource,
+};
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index f8451912178a..aa2255e1b9a1 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -512,6 +512,7 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
 #if defined CONFIG_PCI && defined CONFIG_X86
 
 extern bool is_zhaoxin_kh40000;
+extern const struct dma_map_ops kh40000_dma_direct_ops;
 
 #else
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index f005c66f378c..8b860c7ecabc 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -224,6 +224,9 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 
 	dma_numa_cma_reserve();
 
+	if (is_zhaoxin_kh40000)
+		return;
+
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
 	if (size_cmdline != -1) {
-- 
Gitee

From e665ce72526ba46c3dc57923499777f190254e84 Mon Sep 17 00:00:00 2001
From: leoliu-oc
Date: Mon, 8 Apr 2024 14:25:45 +0800
Subject: [PATCH 899/953] anolis: Add kh40000_iommu_dma_ops for KH-40000
 platform

ANBZ: #8663

Add 'kh40000_iommu_dma_ops' to replace 'intel_dma_ops' for the KH-40000
platform.

For coherent DMA access, memory is allocated only from the NUMA node
where the device resides. For streaming DMA access, a PCI read is issued
at the end of the access to flush the data.
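A minimal sketch of the resulting pattern (illustrative only, not taken
verbatim from this patch; 'saved_iommu_ops' and 'example_sync_single_for_cpu'
are placeholder names): each "for CPU" callback first performs the KH-40000
flush through a dummy PCI config read, then delegates to the saved generic
IOMMU ops.

	/* The generic IOMMU dma_map_ops being wrapped. */
	static const struct dma_map_ops *saved_iommu_ops;

	static void example_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
			size_t size, enum dma_data_direction dir)
	{
		/* Flush posted DMA writes with one PCI read cycle ... */
		kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1);
		/* ... then hand off to the original IOMMU implementation. */
		saved_iommu_ops->sync_single_for_cpu(dev, addr, size, dir);
	}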
Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2966 --- arch/x86/kernel/zhaoxin_kh40000.c | 175 ++++++++++++++++++++++++++++++ drivers/iommu/intel/iommu.c | 3 + include/linux/dma-map-ops.h | 6 + 3 files changed, 184 insertions(+) diff --git a/arch/x86/kernel/zhaoxin_kh40000.c b/arch/x86/kernel/zhaoxin_kh40000.c index c477b18892fa..e8dd3bd43e72 100644 --- a/arch/x86/kernel/zhaoxin_kh40000.c +++ b/arch/x86/kernel/zhaoxin_kh40000.c @@ -174,3 +174,178 @@ const struct dma_map_ops kh40000_dma_direct_ops = { .map_sg = dma_direct_map_sg, .map_resource = dma_direct_map_resource, }; + +/* zhaoxin kh-40000 iommu dma ops */ +static const struct dma_map_ops *iommu_dma_ops; + +static void *kh40000_iommu_dma_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + gfp |= __GFP_THISNODE; + + return iommu_dma_ops->alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, unsigned long attrs) +{ + iommu_dma_ops->free(dev, size, cpu_addr, handle, attrs); +} + +static struct page *kh40000_dma_common_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) +{ + return iommu_dma_ops->alloc_pages(dev, size, dma_handle, dir, gfp); +} + +static void kh40000_dma_common_free_pages(struct device *dev, size_t size, struct page *page, + dma_addr_t dma_handle, enum dma_data_direction dir) +{ + iommu_dma_ops->free_pages(dev, size, page, dma_handle, dir); +} + +static struct sg_table *kh40000_iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return iommu_dma_ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); +} + +static void kh40000_iommu_dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + return iommu_dma_ops->free_noncontiguous(dev, size, sgt, dir); +} + +static int kh40000_iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); +} + +static void kh40000_iommu_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs); +} + +static int kh40000_iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); +} + +static dma_addr_t kh40000_iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs); +} + +static int kh40000_iommu_dma_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs); +} + +static void kh40000_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + 
iommu_dma_ops->unmap_sg(dev, sgl, nelems, dir, attrs); +} + +static void kh40000_iommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->sync_single_for_cpu(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + iommu_dma_ops->sync_single_for_device(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + iommu_dma_ops->sync_sg_for_cpu(dev, sgl, nelems, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + iommu_dma_ops->sync_sg_for_device(dev, sgl, nelems, dir); +} + +static dma_addr_t kh40000_iommu_dma_map_resource(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_resource(dev, phys, size, dir, attrs); +} + +static void kh40000_iommu_dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_resource(dev, addr, size, dir, attrs); +} + +static unsigned long kh40000_iommu_dma_get_merge_boundary(struct device *dev) +{ + return iommu_dma_ops->get_merge_boundary(dev); +} + +static size_t kh40000_iommu_dma_opt_mapping_size(void) +{ + return iommu_dma_ops->opt_mapping_size(); +} + +const struct dma_map_ops kh40000_dma_iommu_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = kh40000_iommu_dma_alloc, + .free = kh40000_iommu_dma_free, + .unmap_page = kh40000_iommu_dma_unmap_page, + .alloc_pages = kh40000_dma_common_alloc_pages, + .free_pages = kh40000_dma_common_free_pages, + .alloc_noncontiguous = kh40000_iommu_dma_alloc_noncontiguous, + .free_noncontiguous = kh40000_iommu_dma_free_noncontiguous, + .mmap = kh40000_iommu_dma_mmap, + .get_sgtable = kh40000_iommu_dma_get_sgtable, + .map_page = kh40000_iommu_dma_map_page, + .map_sg = kh40000_iommu_dma_map_sg, + .unmap_sg = kh40000_iommu_dma_unmap_sg, + .sync_single_for_cpu = kh40000_iommu_dma_sync_single_for_cpu, + .sync_single_for_device = kh40000_iommu_dma_sync_single_for_device, + .sync_sg_for_cpu = kh40000_iommu_dma_sync_sg_for_cpu, + .sync_sg_for_device = kh40000_iommu_dma_sync_sg_for_device, + .map_resource = kh40000_iommu_dma_map_resource, + .unmap_resource = kh40000_iommu_dma_unmap_resource, + .get_merge_boundary = kh40000_iommu_dma_get_merge_boundary, + .opt_mapping_size = kh40000_iommu_dma_opt_mapping_size, +}; + +void kh40000_set_iommu_dma_ops(struct device *dev) +{ + if (dev->dma_ops) { + iommu_dma_ops = dev->dma_ops; + set_dma_ops(dev, &kh40000_dma_iommu_ops); + pr_info_once("zhaoxin iommu dma patch enabled\n"); + } +} diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 744e4e6b8d72..de16a2a71757 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4431,6 +4431,9 @@ static void intel_iommu_probe_finalize(struct device *dev) { set_dma_ops(dev, NULL); iommu_setup_dma_ops(dev, 0, U64_MAX); + + if (is_zhaoxin_kh40000) + kh40000_set_iommu_dma_ops(dev); } static void 
intel_iommu_get_resv_regions(struct device *device,
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index aa2255e1b9a1..0ce2ae6c944d 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -513,10 +513,16 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
 
 extern bool is_zhaoxin_kh40000;
 extern const struct dma_map_ops kh40000_dma_direct_ops;
+void kh40000_set_iommu_dma_ops(struct device *dev);
 
 #else
 
 bool __weak is_zhaoxin_kh40000;
 
+static inline void kh40000_set_iommu_dma_ops(struct device *dev)
+{
+
+}
+
 #endif
-- 
Gitee

From 8b5d57d8c60f3463caeaddd2f3348ba4989d8894 Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Mon, 22 May 2023 16:29:33 +0800
Subject: [PATCH 900/953] anolis: add AER config dependency sanity check to
 fix build error

ANBZ: #8642

The AER cached capability position is defined only when CONFIG_PCIEAER
is enabled. Commit ce6b528f9aee ("anolis: pci: fix quirk for Yitian 710
to support AER fatal error recovery") added a quirk to save/restore the
PCIe AER Capability register without a CONFIG_PCIEAER sanity check. As a
result, the kernel build fails when the configuration is based on
'x86_64_defconfig'.

To fix it, add an AER config dependency sanity check.

Fixes: 92154a20416b ("anolis: pci: fix quirk for Yitian 710 to support AER fatal error recovery")
Fixes: c172093138d6 ("anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer")
Signed-off-by: Shuai Xue
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2086
Signed-off-by: Ruidong Tian
Link: https://gitee.com/anolis/cloud-kernel/pulls/3031
---
 drivers/pci/pci.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9bb054ae6094..17e67b42e2d5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5214,9 +5214,12 @@ static void pci_save_yitian710_regs(struct pci_dev *dev,
 	if (dev->acs_cap)
 		pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP,
 				      &saved->acs_cap_ctrl);
+
+#ifdef CONFIG_PCIEAER
 	if (dev->aer_cap)
 		pci_read_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND,
 				      &saved->root_err_cmd);
+#endif
 
 	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl);
 }
@@ -5242,10 +5245,13 @@ static void pci_restore_yitian710_regs(struct pci_dev *dev,
 	if (dev->acs_cap)
 		pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP,
 				       saved->acs_cap_ctrl);
+
+#ifdef CONFIG_PCIEAER
 	/* restore AER Root Error Command Register */
 	if (dev->aer_cap)
 		pci_write_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND,
 				       saved->root_err_cmd);
+#endif
 
 	/* restore Slot Control Register */
 	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, saved->slot_ctrl);
-- 
Gitee

From 97fa8ad23eb5dd997666e6cebbf3d4f853de7fe4 Mon Sep 17 00:00:00 2001
From: Qiao Ma
Date: Wed, 10 Apr 2024 15:24:57 +0800
Subject: [PATCH 901/953] anolis: configs: adjust some driver related kconfigs

ANBZ: #8598

Adjust the following kconfigs:
For arm64: CONFIG_I2C=y CONFIG_DRM_PHYTIUM=m For x86: CONFIG_I2C=y CONFIG_VIRTIO_MMIO=m Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3039 --- arch/arm64/configs/anolis-debug_defconfig | 65 ++++++++++++++++++----- arch/arm64/configs/anolis_defconfig | 65 ++++++++++++++++++----- arch/x86/configs/anolis-debug_defconfig | 56 ++++++++++++------- arch/x86/configs/anolis_defconfig | 56 ++++++++++++------- 4 files changed, 178 insertions(+), 64 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 65ebc3694ef2..13db02910f8b 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 6.6.7 Kernel Configuration +# Linux/arm64 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -171,7 +171,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -399,7 +399,9 @@ CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_ARM64_ERRATUM_3117295=y CONFIG_CAVIUM_ERRATUM_22375=y CONFIG_CAVIUM_ERRATUM_23144=y CONFIG_CAVIUM_ERRATUM_23154=y @@ -679,6 +681,7 @@ CONFIG_ACPI_IORT=y CONFIG_ACPI_GTDT=y CONFIG_ACPI_AGDI=y CONFIG_ACPI_APMT=y +CONFIG_ACPI_MPAM=y CONFIG_ACPI_PPTT=y CONFIG_ACPI_PCC=y # CONFIG_ACPI_FFH is not set @@ -807,6 +810,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_COMPILER_H=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y @@ -2066,6 +2070,7 @@ CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y CONFIG_UEFI_CPER=y CONFIG_UEFI_CPER_ARM=y +# CONFIG_YITIAN_CPER_RAWDATA is not set CONFIG_ARM_PSCI_FW=y # CONFIG_ARM_PSCI_CHECKER is not set CONFIG_HAVE_ARM_SMCCC=y @@ -2320,10 +2325,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline # CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module (requires I2C) -# # CONFIG_ALTERA_STAPL is not set # CONFIG_VMWARE_VMCI is not set # CONFIG_GENWQE is not set @@ -3356,7 +3357,8 @@ CONFIG_TCG_CRB=y # # I2C support # -CONFIG_I2C=m +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m @@ -3509,7 +3511,6 @@ CONFIG_SPI_QUP=y # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set CONFIG_SPI_XLP=m -# CONFIG_SPI_ZYNQMP_GQSPI is not set # CONFIG_SPI_AMD is not set # @@ -3567,6 +3568,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_OCELOT is not set # CONFIG_PINCTRL_SINGLE is not set # CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_IPQ5018 is not set # CONFIG_PINCTRL_IPQ5332 is not set @@ -4069,7 +4071,11 @@ CONFIG_BCMA_DRIVER_GPIO=y # CONFIG_MFD_CORE=m # CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # 
CONFIG_MFD_ATMEL_FLEXCOM is not set # CONFIG_MFD_ATMEL_HLCDC is not set # CONFIG_MFD_BCM590XX is not set @@ -4078,7 +4084,10 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set # CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4096,12 +4105,19 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set # CONFIG_MFD_MAX77650 is not set # CONFIG_MFD_MAX77686 is not set # CONFIG_MFD_MAX77693 is not set # CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4119,38 +4135,55 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_RK8XX_I2C is not set # CONFIG_MFD_RK8XX_SPI is not set # CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set # CONFIG_MFD_SI476X_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_STMPE is not set CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TPS65217 is not set # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TI_LP87565 is not set # CONFIG_MFD_TPS65218 is not set # CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set # CONFIG_MFD_TQMX86 is not set # CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set # CONFIG_MFD_STMFX is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_QCOM_PM8008 is not set @@ -4440,8 +4473,10 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_TIDSS is not set # CONFIG_DRM_GUD is not set # CONFIG_DRM_SSD130X is not set +CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -5002,8 +5037,6 @@ CONFIG_MMC_CQHCI=m CONFIG_MMC_TOSHIBA_PCI=m CONFIG_MMC_MTK=m # CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_MMC_SDHCI_OMAP is not set -# CONFIG_MMC_SDHCI_AM654 is not set # CONFIG_SCSI_UFSHCD 
is not set CONFIG_MEMSTICK=m # CONFIG_MEMSTICK_DEBUG is not set @@ -5230,7 +5263,7 @@ CONFIG_RTC_DRV_RS5C348=m CONFIG_RTC_DRV_MAX6902=m CONFIG_RTC_DRV_PCF2123=m CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5435,6 +5468,7 @@ CONFIG_CHROME_PLATFORMS=y # CONFIG_CROS_HPS_I2C is not set # CONFIG_CHROMEOS_PRIVACY_SCREEN is not set # CONFIG_MELLANOX_PLATFORM is not set +CONFIG_ARM_CPU_RESCTRL=y CONFIG_SURFACE_PLATFORMS=y # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set @@ -5981,6 +6015,7 @@ CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_PROC_SYSCTL=y CONFIG_PROC_PAGE_MONITOR=y CONFIG_PROC_CHILDREN=y +CONFIG_PROC_CPU_RESCTRL=y CONFIG_KERNFS=y CONFIG_SYSFS=y CONFIG_TMPFS=y @@ -6034,6 +6069,8 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y # CONFIG_ROMFS_FS is not set CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -6531,6 +6568,7 @@ CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set # CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_420XX is not set # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set @@ -6746,6 +6784,7 @@ CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y CONFIG_ARCH_STACKWALK=y CONFIG_STACKDEPOT=y CONFIG_STACKDEPOT_ALWAYS_INIT=y @@ -7009,8 +7048,6 @@ CONFIG_DEBUG_NOTIFIERS=y # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -CONFIG_DEBUG_CREDENTIALS=y - # # RCU Debugging # diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index a563537ca907..0c4270891c76 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 6.6.7 Kernel Configuration +# Linux/arm64 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -170,7 +170,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -397,7 +397,9 @@ CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_ARM64_ERRATUM_3117295=y CONFIG_CAVIUM_ERRATUM_22375=y CONFIG_CAVIUM_ERRATUM_23144=y CONFIG_CAVIUM_ERRATUM_23154=y @@ -676,6 +678,7 @@ CONFIG_ACPI_IORT=y CONFIG_ACPI_GTDT=y CONFIG_ACPI_AGDI=y CONFIG_ACPI_APMT=y +CONFIG_ACPI_MPAM=y CONFIG_ACPI_PPTT=y CONFIG_ACPI_PCC=y # CONFIG_ACPI_FFH is not set @@ -804,6 +807,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_COMPILER_H=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y @@ -2063,6 +2067,7 @@ CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y CONFIG_UEFI_CPER=y CONFIG_UEFI_CPER_ARM=y +# CONFIG_YITIAN_CPER_RAWDATA is not set CONFIG_ARM_PSCI_FW=y # CONFIG_ARM_PSCI_CHECKER is not set CONFIG_HAVE_ARM_SMCCC=y @@ -2317,10 +2322,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline # CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module (requires I2C) -# # CONFIG_ALTERA_STAPL is not set # CONFIG_VMWARE_VMCI is not set # CONFIG_GENWQE is not set @@ -3353,7 +3354,8 @@ CONFIG_TCG_CRB=y # # I2C support # -CONFIG_I2C=m +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m @@ -3506,7 +3508,6 @@ CONFIG_SPI_QUP=y # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set CONFIG_SPI_XLP=m -# CONFIG_SPI_ZYNQMP_GQSPI is not set # CONFIG_SPI_AMD is not set # @@ -3564,6 +3565,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_OCELOT is not set # CONFIG_PINCTRL_SINGLE is not set # CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_IPQ5018 is not set # CONFIG_PINCTRL_IPQ5332 is not set @@ -4066,7 +4068,11 @@ CONFIG_BCMA_DRIVER_GPIO=y # CONFIG_MFD_CORE=m # CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_ATMEL_FLEXCOM is not set # CONFIG_MFD_ATMEL_HLCDC is not set # CONFIG_MFD_BCM590XX is not set @@ -4075,7 +4081,10 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set # CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4093,12 +4102,19 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set # CONFIG_MFD_MAX77650 is not set # CONFIG_MFD_MAX77686 is not set # 
CONFIG_MFD_MAX77693 is not set # CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4116,38 +4132,55 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_RK8XX_I2C is not set # CONFIG_MFD_RK8XX_SPI is not set # CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set # CONFIG_MFD_SI476X_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_STMPE is not set CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TPS65217 is not set # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TI_LP87565 is not set # CONFIG_MFD_TPS65218 is not set # CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set # CONFIG_MFD_TQMX86 is not set # CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set # CONFIG_MFD_STMFX is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_QCOM_PM8008 is not set @@ -4437,8 +4470,10 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_TIDSS is not set # CONFIG_DRM_GUD is not set # CONFIG_DRM_SSD130X is not set +CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -4999,8 +5034,6 @@ CONFIG_MMC_CQHCI=m CONFIG_MMC_TOSHIBA_PCI=m CONFIG_MMC_MTK=m # CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_MMC_SDHCI_OMAP is not set -# CONFIG_MMC_SDHCI_AM654 is not set # CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m # CONFIG_MEMSTICK_DEBUG is not set @@ -5227,7 +5260,7 @@ CONFIG_RTC_DRV_RS5C348=m CONFIG_RTC_DRV_MAX6902=m CONFIG_RTC_DRV_PCF2123=m CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5431,6 +5464,7 @@ CONFIG_CHROME_PLATFORMS=y # CONFIG_CROS_HPS_I2C is not set # CONFIG_CHROMEOS_PRIVACY_SCREEN is not set # CONFIG_MELLANOX_PLATFORM is not set +CONFIG_ARM_CPU_RESCTRL=y CONFIG_SURFACE_PLATFORMS=y # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set @@ -5977,6 +6011,7 @@ CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_PROC_SYSCTL=y CONFIG_PROC_PAGE_MONITOR=y CONFIG_PROC_CHILDREN=y +CONFIG_PROC_CPU_RESCTRL=y CONFIG_KERNFS=y CONFIG_SYSFS=y CONFIG_TMPFS=y @@ -6030,6 +6065,8 @@ 
CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y # CONFIG_ROMFS_FS is not set CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -6527,6 +6564,7 @@ CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set # CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_420XX is not set # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set @@ -6741,6 +6779,7 @@ CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y CONFIG_ARCH_STACKWALK=y CONFIG_STACKDEPOT=y CONFIG_SBITMAP=y @@ -6958,8 +6997,6 @@ CONFIG_DEBUG_LIST=y # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -# CONFIG_DEBUG_CREDENTIALS is not set - # # RCU Debugging # diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 2619e84b4914..b62162d8c262 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 6.6.7 Kernel Configuration +# Linux/x86 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -190,7 +190,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -454,7 +454,6 @@ CONFIG_X86_DIRECT_GBPAGES=y CONFIG_X86_CPA_STATISTICS=y CONFIG_X86_MEM_ENCRYPT=y CONFIG_AMD_MEM_ENCRYPT=y -# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set CONFIG_NUMA=y CONFIG_AMD_NUMA=y CONFIG_X86_64_ACPI_NUMA=y @@ -549,6 +548,7 @@ CONFIG_CPU_IBRS_ENTRY=y CONFIG_CPU_SRSO=y # CONFIG_SLS is not set # CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_MITIGATION_RFDS=y CONFIG_ARCH_HAS_ADD_PAGES=y # @@ -882,6 +882,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y CONFIG_LOCK_EVENT_COUNTS=y @@ -1869,7 +1870,6 @@ CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m -CONFIG_BT_HS=y CONFIG_BT_LE=y CONFIG_BT_LE_L2CAP_ECRED=y # CONFIG_BT_6LOWPAN is not set @@ -2415,10 +2415,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline CONFIG_SENSORS_LIS3_I2C=m - -# -# Altera FPGA firmware download module (requires I2C) -# CONFIG_ALTERA_STAPL=m CONFIG_INTEL_MEI=m CONFIG_INTEL_MEI_ME=m @@ -3784,6 +3780,8 @@ CONFIG_TCG_INFINEON=m # CONFIG_TCG_XEN is not set CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m # CONFIG_TCG_TIS_ST33ZP24_SPI is not set @@ -3795,7 +3793,8 @@ CONFIG_TELCLOCK=m # # I2C support # -CONFIG_I2C=m +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m @@ -3922,7 +3921,6 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_MXIC is not set # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set -# CONFIG_SPI_ZYNQMP_GQSPI is 
not set # CONFIG_SPI_AMD is not set # @@ -3978,6 +3976,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_AMD is not set # CONFIG_PINCTRL_CY8C95X0 is not set # CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SX150X is not set # # Intel pinctrl drivers @@ -4514,13 +4513,19 @@ CONFIG_BCMA_DRIVER_GPIO=y # Multifunction device drivers # CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_BCM590XX is not set # CONFIG_MFD_BD9571MWV is not set # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4540,9 +4545,15 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set # CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4557,30 +4568,40 @@ CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_INTEL_M10_BMC_SPI is not set @@ -4821,6 +4842,7 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y CONFIG_DRM_PRIVACY_SCREEN=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -5621,7 +5643,7 @@ CONFIG_RTC_DRV_EM3027=m # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_PCF2123 is not set # CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5764,7 +5786,7 @@ CONFIG_VIRTIO_PMEM=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_MEM=m CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO=m CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_VIRTIO_DMA_SHARED_BUFFER=m # CONFIG_VDPA is not set @@ -6919,6 +6941,8 @@ 
CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_FS_PSEUDO_LOCK=y # CONFIG_ROMFS_FS is not set CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -7430,12 +7454,10 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y -CONFIG_TCG_HYGON=m -CONFIG_TCM_HYGON=m CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m -CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7906,8 +7928,6 @@ CONFIG_DEBUG_NOTIFIERS=y # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -CONFIG_DEBUG_CREDENTIALS=y - # # RCU Debugging # diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index ee0afda89125..52662a6020b9 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 6.6.7 Kernel Configuration +# Linux/x86 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -189,7 +189,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -451,7 +451,6 @@ CONFIG_X86_DIRECT_GBPAGES=y CONFIG_X86_CPA_STATISTICS=y CONFIG_X86_MEM_ENCRYPT=y CONFIG_AMD_MEM_ENCRYPT=y -# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set CONFIG_NUMA=y CONFIG_AMD_NUMA=y CONFIG_X86_64_ACPI_NUMA=y @@ -546,6 +545,7 @@ CONFIG_CPU_IBRS_ENTRY=y CONFIG_CPU_SRSO=y # CONFIG_SLS is not set # CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_MITIGATION_RFDS=y CONFIG_ARCH_HAS_ADD_PAGES=y # @@ -877,6 +877,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y # CONFIG_LOCK_EVENT_COUNTS is not set @@ -1864,7 +1865,6 @@ CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m -CONFIG_BT_HS=y CONFIG_BT_LE=y CONFIG_BT_LE_L2CAP_ECRED=y # CONFIG_BT_6LOWPAN is not set @@ -2410,10 +2410,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline CONFIG_SENSORS_LIS3_I2C=m - -# -# Altera FPGA firmware download module (requires I2C) -# CONFIG_ALTERA_STAPL=m CONFIG_INTEL_MEI=m CONFIG_INTEL_MEI_ME=m @@ -3778,6 +3774,8 @@ CONFIG_TCG_INFINEON=m # CONFIG_TCG_XEN is not set CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m # CONFIG_TCG_TIS_ST33ZP24_SPI is not set @@ -3789,7 +3787,8 @@ CONFIG_TELCLOCK=m # # I2C support # -CONFIG_I2C=m +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m @@ -3916,7 +3915,6 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_MXIC is not set # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set -# CONFIG_SPI_ZYNQMP_GQSPI is not set # CONFIG_SPI_AMD is not set # @@ -3972,6 +3970,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_AMD is not set # CONFIG_PINCTRL_CY8C95X0 is not set # CONFIG_PINCTRL_MCP23S08 is not set +# 
CONFIG_PINCTRL_SX150X is not set # # Intel pinctrl drivers @@ -4508,13 +4507,19 @@ CONFIG_BCMA_DRIVER_GPIO=y # Multifunction device drivers # CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_BCM590XX is not set # CONFIG_MFD_BD9571MWV is not set # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4534,9 +4539,15 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set # CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4551,30 +4562,40 @@ CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_INTEL_M10_BMC_SPI is not set @@ -4815,6 +4836,7 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y CONFIG_DRM_PRIVACY_SCREEN=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -5615,7 +5637,7 @@ CONFIG_RTC_DRV_EM3027=m # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_PCF2123 is not set # CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5757,7 +5779,7 @@ CONFIG_VIRTIO_PMEM=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_MEM=m CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO=m CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_VIRTIO_DMA_SHARED_BUFFER=m # CONFIG_VDPA is not set @@ -6908,6 +6930,8 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_FS_PSEUDO_LOCK=y # CONFIG_ROMFS_FS is not set 
CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -7421,12 +7445,10 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y -CONFIG_TCG_HYGON=m -CONFIG_TCM_HYGON=m CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m -CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7850,8 +7872,6 @@ CONFIG_DEBUG_LIST=y # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -# CONFIG_DEBUG_CREDENTIALS is not set - # # RCU Debugging # -- Gitee From 281e8cfc89dc11f0cfacdb3f189e7393caf9ac24 Mon Sep 17 00:00:00 2001 From: wangkaiyuan Date: Tue, 12 Mar 2024 17:33:30 +0800 Subject: [PATCH 902/953] anolis: drm/inspur/inspur-drm: Add inspur drm driver Add Inspur DRM driver for Inspur BMC SoC. ANBZ: #8520 Signed-off-by: wangkaiyuan Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3046 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/loongarch/configs/anolis-debug_defconfig | 1 + arch/loongarch/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/inspur/Kconfig | 5 + drivers/gpu/drm/inspur/Makefile | 4 + drivers/gpu/drm/inspur/inspur-drm/Kconfig | 8 + drivers/gpu/drm/inspur/inspur-drm/Makefile | 3 + .../gpu/drm/inspur/inspur-drm/inspur_drm_de.c | 484 ++++++++++++++++++ .../drm/inspur/inspur-drm/inspur_drm_drv.c | 404 +++++++++++++++ .../drm/inspur/inspur-drm/inspur_drm_drv.h | 86 ++++ .../drm/inspur/inspur-drm/inspur_drm_regs.h | 209 ++++++++ .../drm/inspur/inspur-drm/inspur_drm_vdac.c | 105 ++++ .../gpu/drm/inspur/inspur-drm/inspur_ttm.c | 19 + 18 files changed, 1336 insertions(+) create mode 100644 drivers/gpu/drm/inspur/Kconfig create mode 100644 drivers/gpu/drm/inspur/Makefile create mode 100644 drivers/gpu/drm/inspur/inspur-drm/Kconfig create mode 100644 drivers/gpu/drm/inspur/inspur-drm/Makefile create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 13db02910f8b..aeb6bc8aa483 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -4477,6 +4477,7 @@ CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 0c4270891c76..2940c4369081 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -4474,6 +4474,7 @@ CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig index 365f27c124b4..e13de1212610 100644 --- 
a/arch/loongarch/configs/anolis-debug_defconfig
+++ b/arch/loongarch/configs/anolis-debug_defconfig
@@ -5345,6 +5345,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
 # CONFIG_DRM_LEGACY is not set
 CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
 # CONFIG_HYDCU_FIXUP_HEADER is not set
+CONFIG_DRM_INSPUR=m
 
 #
 # Frame buffer Devices
diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig
index 365f27c124b4..e13de1212610 100644
--- a/arch/loongarch/configs/anolis_defconfig
+++ b/arch/loongarch/configs/anolis_defconfig
@@ -5345,6 +5345,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
 # CONFIG_DRM_LEGACY is not set
 CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
 # CONFIG_HYDCU_FIXUP_HEADER is not set
+CONFIG_DRM_INSPUR=m
 
 #
 # Frame buffer Devices
diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig
index b62162d8c262..ff275d4fdbc1 100644
--- a/arch/x86/configs/anolis-debug_defconfig
+++ b/arch/x86/configs/anolis-debug_defconfig
@@ -4843,6 +4843,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
 CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
 CONFIG_DRM_PRIVACY_SCREEN=y
 # CONFIG_HYDCU_FIXUP_HEADER is not set
+CONFIG_DRM_INSPUR=m
 
 #
 # Frame buffer Devices
diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig
index 52662a6020b9..753475d33452 100644
--- a/arch/x86/configs/anolis_defconfig
+++ b/arch/x86/configs/anolis_defconfig
@@ -4837,6 +4837,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
 CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
 CONFIG_DRM_PRIVACY_SCREEN=y
 # CONFIG_HYDCU_FIXUP_HEADER is not set
+CONFIG_DRM_INSPUR=m
 
 #
 # Frame buffer Devices
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2a89adbbf9fa..1c17d051c98f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -390,6 +390,8 @@
 
 source "drivers/gpu/drm/phytium/Kconfig"
 
+source "drivers/gpu/drm/inspur/Kconfig"
+
 config DRM_HYPERV
 	tristate "DRM Support for Hyper-V synthetic video device"
 	depends on DRM && PCI && MMU && HYPERV
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 017ff5a6ebe2..68d2eefe2c25 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -200,3 +200,4 @@ obj-$(CONFIG_DRM_SPRD) += sprd/
 obj-$(CONFIG_DRM_LOONGSON) += loongson/
 obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/
 obj-$(CONFIG_DRM_PHYTIUM) += phytium/
+obj-$(CONFIG_DRM_INSPUR) += inspur/
diff --git a/drivers/gpu/drm/inspur/Kconfig b/drivers/gpu/drm/inspur/Kconfig
new file mode 100644
index 000000000000..9ee949fc6936
--- /dev/null
+++ b/drivers/gpu/drm/inspur/Kconfig
@@ -0,0 +1,5 @@
+# License: GPL-2.0
+#
+# inspur drm device configuration.
+
+source "drivers/gpu/drm/inspur/inspur-drm/Kconfig"
diff --git a/drivers/gpu/drm/inspur/Makefile b/drivers/gpu/drm/inspur/Makefile
new file mode 100644
index 000000000000..9fd0eb7a1035
--- /dev/null
+++ b/drivers/gpu/drm/inspur/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for inspur drm drivers.
+
+obj-$(CONFIG_DRM_INSPUR) += inspur-drm/
diff --git a/drivers/gpu/drm/inspur/inspur-drm/Kconfig b/drivers/gpu/drm/inspur/inspur-drm/Kconfig
new file mode 100644
index 000000000000..c060825d6116
--- /dev/null
+++ b/drivers/gpu/drm/inspur/inspur-drm/Kconfig
@@ -0,0 +1,8 @@
+config DRM_INSPUR
+	tristate "DRM Support for Inspur BMC"
+	depends on DRM && PCI && MMU
+	select DRM_KMS_HELPER
+	select DRM_VRAM_HELPER
+	help
+	  Choose this option if you have an Inspur SoC chipset. If M is
+	  selected, the module will be called inspur-drm.
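The DRM_VRAM_HELPER selected above supplies the buffer-object pin/offset
lifecycle that this driver's scanout path relies on. A rough sketch of that
pattern (illustrative only; 'example_pin_framebuffer' is a placeholder name
and 'fb' stands for any VRAM-backed struct drm_framebuffer):

	static int example_pin_framebuffer(struct drm_framebuffer *fb, s64 *gpu_addr)
	{
		struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
		int ret;

		/* Pin the buffer into VRAM so the display engine can scan it out. */
		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
		if (ret)
			return ret;

		/* The VRAM offset is what gets programmed as the framebuffer address. */
		*gpu_addr = drm_gem_vram_offset(gbo);
		if (*gpu_addr < 0) {
			drm_gem_vram_unpin(gbo);
			return (int)*gpu_addr;
		}

		return 0;
	}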
diff --git a/drivers/gpu/drm/inspur/inspur-drm/Makefile b/drivers/gpu/drm/inspur/inspur-drm/Makefile new file mode 100644 index 000000000000..be54bb9e51d0 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/Makefile @@ -0,0 +1,3 @@ +inspur-drm-y := inspur_drm_drv.o inspur_drm_de.o inspur_drm_vdac.o inspur_ttm.o + +obj-$(CONFIG_DRM_INSPUR) += inspur-drm.o diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c new file mode 100644 index 000000000000..fae1014e5d59 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include + +#include +#include + + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +struct inspur_dislay_pll_config { + unsigned long hdisplay; + unsigned long vdisplay; + u32 pll1_config_value; + u32 pll2_config_value; +}; + +static const struct inspur_dislay_pll_config inspur_pll_table[] = { + { 640, 480, CRT_PLL1_NS_25MHZ, CRT_PLL2_NS_25MHZ }, + { 800, 600, CRT_PLL1_NS_40MHZ, CRT_PLL2_NS_40MHZ }, + { 1024, 768, CRT_PLL1_NS_65MHZ, CRT_PLL2_NS_65MHZ }, + { 1280, 1024, CRT_PLL1_NS_108MHZ, CRT_PLL2_NS_108MHZ }, + { 1920, 1080, CRT_PLL1_NS_148MHZ, CRT_PLL2_NS_148MHZ }, +}; + +#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1))) + +static int inspur_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *atom_state) +{ + struct drm_plane_state *state = drm_atomic_get_new_plane_state(atom_state, plane); + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + u32 src_w = state->src_w >> 16; + u32 src_h = state->src_h >> 16; + + if (!crtc || !fb) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_DEBUG_ATOMIC("scale not support\n"); + return -EINVAL; + } + + if (state->crtc_x < 0 || state->crtc_y < 0) { + DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n"); + return -EINVAL; + } + + if (!crtc_state->enable) + return 0; + + if (state->crtc_x + state->crtc_w > + crtc_state->adjusted_mode.hdisplay || + state->crtc_y + state->crtc_h > + crtc_state->adjusted_mode.vdisplay) { + DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n"); + return -EINVAL; + } + + if (state->fb->pitches[0] % 16 != 0) { + DRM_DEBUG_ATOMIC("wrong stride with 16-byte aligned\n"); + return -EINVAL; + } + + return 0; +} + +static void inspur_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *old_state) +{ + struct drm_plane_state *state = plane->state; + u32 reg; + int ret; + s64 gpu_addr = 0; + unsigned int line_l; + struct inspur_drm_private *priv = plane->dev->dev_private; + struct drm_gem_vram_object *gbo; + + if (!state->fb) + return; + + gbo = drm_gem_vram_of_gem(state->fb->obj[0]); + + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); + if (ret) { + DRM_ERROR("failed to pin bo: %d", ret); + return; + } + gpu_addr = drm_gem_vram_offset(gbo); + if (gpu_addr < 0) { + drm_gem_vram_unpin(gbo); + return; + } + + writel(gpu_addr, priv->mmio + INSPUR_CRT_FB_ADDRESS); + + reg = state->fb->width * (state->fb->format->cpp[0]); + + line_l = state->fb->pitches[0]; + writel(INSPUR_FIELD(INSPUR_CRT_FB_WIDTH_WIDTH, reg) | + INSPUR_FIELD(INSPUR_CRT_FB_WIDTH_OFFS, line_l), + priv->mmio + INSPUR_CRT_FB_WIDTH); + + /* SET PIXEL FORMAT */ + reg = readl(priv->mmio + 
INSPUR_CRT_DISP_CTL); + reg &= ~INSPUR_CRT_DISP_CTL_FORMAT_MASK; + reg |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_FORMAT, + state->fb->format->cpp[0] * 8 / 16); + writel(reg, priv->mmio + INSPUR_CRT_DISP_CTL); +} + +static const u32 channel_formats1[] = { + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888 +}; + +static struct drm_plane_funcs inspur_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static const struct drm_plane_helper_funcs inspur_plane_helper_funcs = { + .atomic_check = inspur_plane_atomic_check, + .atomic_update = inspur_plane_atomic_update, +}; + +static struct drm_plane *inspur_plane_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_plane *plane; + int ret = 0; + + plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL); + if (!plane) { + DRM_ERROR("failed to alloc memory when init plane\n"); + return ERR_PTR(-ENOMEM); + } + ret = drm_universal_plane_init(dev, plane, 1, &inspur_plane_funcs, + channel_formats1, + ARRAY_SIZE(channel_formats1), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + DRM_ERROR("failed to init plane: %d\n", ret); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, &inspur_plane_helper_funcs); + return plane; +} + +static void inspur_crtc_dpms(struct drm_crtc *crtc, int dpms) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + unsigned int reg; + + reg = readl(priv->mmio + INSPUR_CRT_DISP_CTL); + reg &= ~INSPUR_CRT_DISP_CTL_DPMS_MASK; + reg |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_DPMS, dpms); + reg &= ~INSPUR_CRT_DISP_CTL_TIMING_MASK; + if (dpms == INSPUR_CRT_DPMS_ON) + reg |= INSPUR_CRT_DISP_CTL_TIMING(1); + writel(reg, priv->mmio + INSPUR_CRT_DISP_CTL); +} + +static void inspur_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct inspur_drm_private *priv = crtc->dev->dev_private; + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg |= INSPUR_CURR_GATE_LOCALMEM(1); + reg |= INSPUR_CURR_GATE_DISPLAY(1); + inspur_set_current_gate(priv, reg); + inspur_crtc_dpms(crtc, INSPUR_CRT_DPMS_ON); +} + +static void inspur_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct inspur_drm_private *priv = crtc->dev->dev_private; + + inspur_crtc_dpms(crtc, INSPUR_CRT_DPMS_OFF); + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_SLEEP); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg |= INSPUR_CURR_GATE_LOCALMEM(0); + reg |= INSPUR_CURR_GATE_DISPLAY(0); + inspur_set_current_gate(priv, reg); +} + +static enum drm_mode_status +inspur_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + int i = 0; + int vrefresh = drm_mode_vrefresh(mode); + + if (vrefresh < 59 || vrefresh > 61) + 
return MODE_NOCLOCK; + + for (i = 0; i < ARRAY_SIZE(inspur_pll_table); i++) { + if (inspur_pll_table[i].hdisplay == mode->hdisplay && + inspur_pll_table[i].vdisplay == mode->vdisplay) + return MODE_OK; + } + + return MODE_BAD; +} + +static void set_vclock_inspur(struct drm_device *dev, unsigned long pll) +{ + u32 val; + struct inspur_drm_private *priv = dev->dev_private; + + val = readl(priv->mmio + CRT_PLL1_NS); + val &= ~(CRT_PLL1_NS_OUTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + val = CRT_PLL1_NS_INTER_BYPASS(1) | CRT_PLL1_NS_POWERON(1); + writel(val, priv->mmio + CRT_PLL1_NS); + + writel(pll, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val = pll & ~(CRT_PLL1_NS_POWERON(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val &= ~(CRT_PLL1_NS_INTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val |= CRT_PLL1_NS_OUTER_BYPASS(1); + writel(val, priv->mmio + CRT_PLL1_NS); +} + +static void get_pll_config(unsigned long x, unsigned long y, + u32 *pll1, u32 *pll2) +{ + int i; + int count = ARRAY_SIZE(inspur_pll_table); + + for (i = 0; i < count; i++) { + if (inspur_pll_table[i].hdisplay == x && + inspur_pll_table[i].vdisplay == y) { + *pll1 = inspur_pll_table[i].pll1_config_value; + *pll2 = inspur_pll_table[i].pll2_config_value; + return; + } + } + + /* if found none, we use default value */ + *pll1 = CRT_PLL1_NS_25MHZ; + *pll2 = CRT_PLL2_NS_25MHZ; +} + +/* + * This function takes care the extra registers and bit fields required to + * setup a mode in board. + * Explanation about Display Control register: + * FPGA only supports 7 predefined pixel clocks, and clock select is + * in bit 4:0 of new register 0x802a8. + */ +static unsigned int display_ctrl_adjust(struct drm_device *dev, + struct drm_display_mode *mode, + unsigned int ctrl) +{ + unsigned long x, y; + u32 pll1; /* bit[31:0] of PLL */ + u32 pll2; /* bit[63:32] of PLL */ + struct inspur_drm_private *priv = dev->dev_private; + + x = mode->hdisplay; + y = mode->vdisplay; + + get_pll_config(x, y, &pll1, &pll2); + writel(pll2, priv->mmio + CRT_PLL2_NS); + set_vclock_inspur(dev, pll1); + + /* + * inspur has to set up the top-left and bottom-right + * registers as well. + * Note that normal chip only use those two register for + * auto-centering mode. + */ + writel(INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_TL_TOP, 0) | + INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_TL_LEFT, 0), + priv->mmio + INSPUR_CRT_AUTO_CENTERING_TL); + + writel(INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) | + INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_BR_RIGHT, x - 1), + priv->mmio + INSPUR_CRT_AUTO_CENTERING_BR); + + /* + * Assume common fields in ctrl have been properly set before + * calling this function. + * This function only sets the extra fields in ctrl. 
+ */ + + /* Set bit 25 of display controller: Select CRT or VGA clock */ + ctrl &= ~INSPUR_CRT_DISP_CTL_CRTSELECT_MASK; + ctrl &= ~INSPUR_CRT_DISP_CTL_CLOCK_PHASE_MASK; + + ctrl |= INSPUR_CRT_DISP_CTL_CRTSELECT(INSPUR_CRTSELECT_CRT); + + /* clock_phase_polarity is 0 */ + ctrl |= INSPUR_CRT_DISP_CTL_CLOCK_PHASE(0); + + writel(ctrl, priv->mmio + INSPUR_CRT_DISP_CTL); + + return ctrl; +} + +static void inspur_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + unsigned int val; + struct drm_display_mode *mode = &crtc->state->mode; + struct drm_device *dev = crtc->dev; + struct inspur_drm_private *priv = dev->dev_private; + int width = mode->hsync_end - mode->hsync_start; + int height = mode->vsync_end - mode->vsync_start; + + //writel(format_pll_reg(), priv->mmio + INSPUR_CRT_PLL_CTRL); + writel(INSPUR_FIELD(INSPUR_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) | + INSPUR_FIELD(INSPUR_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1), + priv->mmio + INSPUR_CRT_HORZ_TOTAL); + + writel(INSPUR_FIELD(INSPUR_CRT_HORZ_SYNC_WIDTH, width) | + INSPUR_FIELD(INSPUR_CRT_HORZ_SYNC_START, mode->hsync_start - 1), + priv->mmio + INSPUR_CRT_HORZ_SYNC); + + writel(INSPUR_FIELD(INSPUR_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) | + INSPUR_FIELD(INSPUR_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1), + priv->mmio + INSPUR_CRT_VERT_TOTAL); + + writel(INSPUR_FIELD(INSPUR_CRT_VERT_SYNC_HEIGHT, height) | + INSPUR_FIELD(INSPUR_CRT_VERT_SYNC_START, mode->vsync_start - 1), + priv->mmio + INSPUR_CRT_VERT_SYNC); + + val = INSPUR_FIELD(INSPUR_CRT_DISP_CTL_VSYNC_PHASE, 0); + val |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_HSYNC_PHASE, 0); + val |= INSPUR_CRT_DISP_CTL_TIMING(1); + val |= INSPUR_CRT_DISP_CTL_PLANE(1); + + display_ctrl_adjust(dev, mode, val); +} + +static void inspur_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct drm_device *dev = crtc->dev; + struct inspur_drm_private *priv = dev->dev_private; + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg |= INSPUR_CURR_GATE_DISPLAY(1); + reg |= INSPUR_CURR_GATE_LOCALMEM(1); + inspur_set_current_gate(priv, reg); + + /* We can add more initialization as needed. 
*/ +} + +static void inspur_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) + +{ + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (crtc->state->event) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} + +static int inspur_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + + writel(INSPUR_RAW_INTERRUPT_EN_VBLANK(1), + priv->mmio + INSPUR_RAW_INTERRUPT_EN); + + return 0; +} + +static void inspur_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + + writel(INSPUR_RAW_INTERRUPT_EN_VBLANK(0), + priv->mmio + INSPUR_RAW_INTERRUPT_EN); +} + +static const struct drm_crtc_funcs inspur_crtc_funcs = { + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = inspur_crtc_enable_vblank, + .disable_vblank = inspur_crtc_disable_vblank, + +}; + +static const struct drm_crtc_helper_funcs inspur_crtc_helper_funcs = { + .mode_set_nofb = inspur_crtc_mode_set_nofb, + .atomic_begin = inspur_crtc_atomic_begin, + .atomic_flush = inspur_crtc_atomic_flush, + .atomic_enable = inspur_crtc_atomic_enable, + .atomic_disable = inspur_crtc_atomic_disable, + .mode_valid = inspur_crtc_mode_valid, +}; + +int inspur_de_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_crtc *crtc; + struct drm_plane *plane; + int ret; + + plane = inspur_plane_init(priv); + if (IS_ERR(plane)) { + DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane)); + return PTR_ERR(plane); + } + + crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL); + if (!crtc) { + DRM_ERROR("failed to alloc memory when init crtc\n"); + return -ENOMEM; + } + + ret = drm_crtc_init_with_planes(dev, crtc, plane, + NULL, &inspur_crtc_funcs, NULL); + if (ret) { + DRM_ERROR("failed to init crtc: %d\n", ret); + return ret; + } + + ret = drm_mode_crtc_set_gamma_size(crtc, 256); + if (ret) { + DRM_ERROR("failed to set gamma size: %d\n", ret); + return ret; + } + drm_crtc_helper_add(crtc, &inspur_crtc_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c new file mode 100644 index 000000000000..c522ca90b00c --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +#define MEM_SIZE_RESERVE4KVM 0x200000 + +DEFINE_DRM_GEM_FOPS(inspur_fops); +irqreturn_t inspur_drm_interrupt(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct inspur_drm_private *priv = + (struct inspur_drm_private *)dev->dev_private; + u32 status; + + status = readl(priv->mmio + INSPUR_RAW_INTERRUPT); + + if (status & INSPUR_RAW_INTERRUPT_VBLANK(1)) { + writel(INSPUR_RAW_INTERRUPT_VBLANK(1), + priv->mmio + INSPUR_RAW_INTERRUPT); + drm_handle_vblank(dev, 0); + } + + return IRQ_HANDLED; +} + +static struct drm_driver inspur_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | + DRIVER_ATOMIC | DRIVER_HAVE_IRQ, 
+ + .fops = &inspur_fops, + .name = "inspur", + .date = "20240201", + .desc = "inspur drm driver", + .major = 3, + .minor = 0, + .dumb_create = inspur_dumb_create, + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, +}; + +static int __maybe_unused inspur_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct inspur_drm_private *priv = drm_dev->dev_private; + + drm_kms_helper_poll_disable(drm_dev); + priv->suspend_state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(priv->suspend_state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", + PTR_ERR(priv->suspend_state)); + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(priv->suspend_state); + } + + return 0; +} + +static int __maybe_unused inspur_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct inspur_drm_private *priv = drm_dev->dev_private; + + drm_atomic_helper_resume(drm_dev, priv->suspend_state); + drm_kms_helper_poll_enable(drm_dev); + + return 0; +} + +static const struct dev_pm_ops inspur_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(inspur_pm_suspend, + inspur_pm_resume) +}; + +static int inspur_kms_init(struct inspur_drm_private *priv) +{ + int ret; + + drm_mode_config_init(priv->dev); + priv->mode_config_initialized = true; + + priv->dev->mode_config.min_width = 0; + priv->dev->mode_config.min_height = 0; + priv->dev->mode_config.max_width = 1920; + priv->dev->mode_config.max_height = 1200; + priv->dev->mode_config.preferred_depth = 32; + priv->dev->mode_config.prefer_shadow = 1; + priv->dev->mode_config.funcs = (void *)&inspur_mode_funcs; + + ret = inspur_de_init(priv); + if (ret) { + DRM_ERROR("failed to init de: %d\n", ret); + return ret; + } + + ret = inspur_vdac_init(priv); + if (ret) { + DRM_ERROR("failed to init vdac: %d\n", ret); + return ret; + } + + return 0; +} + +static void inspur_kms_fini(struct inspur_drm_private *priv) +{ + if (priv->mode_config_initialized) { + drm_mode_config_cleanup(priv->dev); + priv->mode_config_initialized = false; + } +} + +/* + * It can operate in one of three modes: 0, 1 or Sleep. + */ +void inspur_set_power_mode(struct inspur_drm_private *priv, + unsigned int power_mode) +{ + unsigned int control_value = 0; + void __iomem *mmio = priv->mmio; + unsigned int input = 1; + + if (power_mode > INSPUR_PW_MODE_CTL_MODE_SLEEP) + return; + + if (power_mode == INSPUR_PW_MODE_CTL_MODE_SLEEP) + input = 0; + + control_value = readl(mmio + INSPUR_POWER_MODE_CTRL); + control_value &= ~(INSPUR_PW_MODE_CTL_MODE_MASK | + INSPUR_PW_MODE_CTL_OSC_INPUT_MASK); + control_value |= INSPUR_FIELD(INSPUR_PW_MODE_CTL_MODE, power_mode); + control_value |= INSPUR_FIELD(INSPUR_PW_MODE_CTL_OSC_INPUT, input); + writel(control_value, mmio + INSPUR_POWER_MODE_CTRL); +} + +void inspur_set_current_gate(struct inspur_drm_private *priv, unsigned int gate) +{ + unsigned int gate_reg; + unsigned int mode; + void __iomem *mmio = priv->mmio; + + /* Get current power mode. 
*/
+ mode = (readl(mmio + INSPUR_POWER_MODE_CTRL) &
+ INSPUR_PW_MODE_CTL_MODE_MASK) >> INSPUR_PW_MODE_CTL_MODE_SHIFT;
+
+ switch (mode) {
+ case INSPUR_PW_MODE_CTL_MODE_MODE0:
+ gate_reg = INSPUR_MODE0_GATE;
+ break;
+
+ case INSPUR_PW_MODE_CTL_MODE_MODE1:
+ gate_reg = INSPUR_MODE1_GATE;
+ break;
+
+ default:
+ gate_reg = INSPUR_MODE0_GATE;
+ break;
+ }
+ writel(gate, mmio + gate_reg);
+}
+
+static void inspur_hw_config(struct inspur_drm_private *priv)
+{
+ unsigned int reg;
+
+ /* On hardware reset, power mode 0 is default. */
+ inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0);
+
+ /* Enable display power gate & LOCALMEM power gate */
+ reg = readl(priv->mmio + INSPUR_CURRENT_GATE);
+ reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK;
+ reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK;
+ reg |= INSPUR_CURR_GATE_DISPLAY(1);
+ reg |= INSPUR_CURR_GATE_LOCALMEM(1);
+
+ inspur_set_current_gate(priv, reg);
+
+ /*
+ * Reset the memory controller. If the memory controller
+ * is not reset in the chip, the system might hang when software
+ * accesses the memory. The memory should be reset after
+ * changing the MXCLK.
+ */
+ reg = readl(priv->mmio + INSPUR_MISC_CTRL);
+ reg &= ~INSPUR_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= INSPUR_MSCCTL_LOCALMEM_RESET(0);
+ writel(reg, priv->mmio + INSPUR_MISC_CTRL);
+
+ reg &= ~INSPUR_MSCCTL_LOCALMEM_RESET_MASK;
+ reg |= INSPUR_MSCCTL_LOCALMEM_RESET(1);
+
+ writel(reg, priv->mmio + INSPUR_MISC_CTRL);
+}
+
+static int inspur_hw_map(struct inspur_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ resource_size_t addr, size, ioaddr, iosize;
+
+ ioaddr = pci_resource_start(pdev, 1);
+ iosize = pci_resource_len(pdev, 1);
+ priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
+ if (!priv->mmio) {
+ DRM_ERROR("Cannot map mmio region\n");
+ return -ENOMEM;
+ }
+
+ addr = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ priv->fb_map = devm_ioremap(dev->dev, addr, size);
+ if (!priv->fb_map) {
+ DRM_ERROR("Cannot map framebuffer\n");
+ return -ENOMEM;
+ }
+ priv->fb_base = addr;
+ priv->fb_size = size - MEM_SIZE_RESERVE4KVM;
+
+ return 0;
+}
+
+static void inspur_hw_unmap(struct inspur_drm_private *priv)
+{
+ struct drm_device *dev = priv->dev;
+
+ if (priv->mmio) {
+ devm_iounmap(dev->dev, priv->mmio);
+ priv->mmio = NULL;
+ }
+
+ if (priv->fb_map) {
+ devm_iounmap(dev->dev, priv->fb_map);
+ priv->fb_map = NULL;
+ }
+}
+
+static int inspur_hw_init(struct inspur_drm_private *priv)
+{
+ int ret;
+
+ ret = inspur_hw_map(priv);
+ if (ret)
+ return ret;
+
+ inspur_hw_config(priv);
+
+ return 0;
+}
+
+void inspur_unload(struct drm_device *dev)
+{
+ struct inspur_drm_private *priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ drm_atomic_helper_shutdown(dev);
+
+ free_irq(pdev->irq, dev);
+
+ inspur_kms_fini(priv);
+ inspur_hw_unmap(priv);
+ pci_disable_msi(to_pci_dev(dev->dev));
+ dev->dev_private = NULL;
+}
+
+int inspur_load(struct drm_device *dev, unsigned long flags)
+{
+ struct inspur_drm_private *priv;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int ret;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ DRM_ERROR("no memory to allocate for inspur_drm_private\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = priv;
+ priv->dev = dev;
+
+ ret = inspur_hw_init(priv);
+ if (ret)
+ goto err;
+
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
+ priv->fb_size);
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM: %d\n", ret);
+ goto err;
+ }
+ ret =
inspur_kms_init(priv); + if (ret) + goto err; + + /* reset all the states of crtc/plane/encoder/connector */ + drm_mode_config_reset(dev); + + return 0; + +err: + inspur_unload(dev); + DRM_ERROR("failed to initialize drm driver: %d\n", ret); + return ret; +} + +static int inspur_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret = 0; + struct drm_device *dev; + + ret = + drm_aperture_remove_conflicting_pci_framebuffers(pdev, + &inspur_driver); + if (ret) + return ret; + + dev = drm_dev_alloc(&inspur_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + + pci_set_drvdata(pdev, dev); + ret = pci_enable_device(pdev); + if (ret) { + drm_err(dev, "failed to enable pci device: %d\n", ret); + return ret; + } + ret = inspur_load(dev, ent->driver_data); + if (ret) + goto err_return; + + ret = drm_dev_register(dev, ent->driver_data); + if (ret) + goto err_inspur_driver_unload; + + drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); + + return 0; +err_inspur_driver_unload: + inspur_unload(dev); +err_return: + return ret; +} + +static void inspur_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); + pci_disable_device(pdev); +} + +static void inspur_pci_shutdown(struct pci_dev *pdev) +{ + inspur_pci_remove(pdev); +} + +static struct pci_device_id inspur_pci_table[] = { + { 0x1bd4, 0x0750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0, } +}; + +static struct pci_driver inspur_pci_driver = { + .name = "inspur-drm", + .id_table = inspur_pci_table, + .probe = inspur_pci_probe, + .remove = inspur_pci_remove, + .shutdown = inspur_pci_shutdown, + .driver.pm = &inspur_pm_ops, +}; + +static int __init inspur_init(void) +{ + return pci_register_driver(&inspur_pci_driver); +} + +static void __exit inspur_exit(void) +{ + return pci_unregister_driver(&inspur_pci_driver); +} + +module_init(inspur_init); +module_exit(inspur_exit); + +MODULE_DEVICE_TABLE(pci, inspur_pci_table); +MODULE_AUTHOR(""); +MODULE_DESCRIPTION("DRM Driver for InspurBMC"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("3.0"); diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h new file mode 100644 index 000000000000..d47f1fbc4ad0 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef INSPUR_DRM_DRV_H +#define INSPUR_DRM_DRV_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +struct drm_device; +struct drm_gem_object; + +#define inspur_framebuffer drm_framebuffer +#define BPP16_RED 0x0000f800 +#define BPP16_GREEN 0x000007e0 +#define BPP16_BLUE 0x0000001f +#define BPP16_WHITE 0x0000ffff +#define BPP16_GRAY 0x00008410 +#define BPP16_YELLOW 0x0000ffe0 +#define BPP16_CYAN 0x000007ff +#define BPP16_PINK 0x0000f81f +#define BPP16_BLACK 0x00000000 +struct inspur_fbdev { + struct drm_fb_helper helper; + struct inspur_framebuffer *fb; + int size; +}; + +struct inspur_cursor { + struct drm_gem_vram_object *gbo[2]; + unsigned int next_index; +}; + +struct inspur_drm_private { + /* hw */ + void __iomem *mmio; + void __iomem *fb_map; + unsigned long fb_base; + unsigned long fb_size; + + /* drm */ + struct drm_device *dev; + + bool mode_config_initialized; + struct drm_atomic_state *suspend_state; + + /* fbdev */ + struct inspur_fbdev *fbdev; + + /* hw cursor */ + struct 
inspur_cursor cursor; +}; + +#define to_inspur_framebuffer(x) container_of(x, struct inspur_framebuffer, fb) + +void inspur_set_power_mode(struct inspur_drm_private *priv, + unsigned int power_mode); +void inspur_set_current_gate(struct inspur_drm_private *priv, + unsigned int gate); +int inspur_load(struct drm_device *dev, unsigned long flags); +void inspur_unload(struct drm_device *dev); + +int inspur_de_init(struct inspur_drm_private *priv); +int inspur_vdac_init(struct inspur_drm_private *priv); + +int inspur_gem_create(struct drm_device *dev, u32 size, bool iskernel, + struct drm_gem_object **obj); + +int inspur_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +extern const struct drm_mode_config_funcs inspur_mode_funcs; + +#endif diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h new file mode 100644 index 000000000000..1b845440ba44 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef INSPUR_DRM_HW_H +#define INSPUR_DRM_HW_H + +/* register definition */ +#define INSPUR_MISC_CTRL 0x4 + +#define INSPUR_MSCCTL_LOCALMEM_RESET(x) ((x) << 6) +#define INSPUR_MSCCTL_LOCALMEM_RESET_MASK 0x40 + +#define INSPUR_CURRENT_GATE 0x000040 +#define INSPUR_CURR_GATE_DISPLAY(x) ((x) << 2) +#define INSPUR_CURR_GATE_DISPLAY_MASK 0x4 + +#define INSPUR_CURR_GATE_LOCALMEM(x) ((x) << 1) +#define INSPUR_CURR_GATE_LOCALMEM_MASK 0x2 + +#define INSPUR_MODE0_GATE 0x000044 +#define INSPUR_MODE1_GATE 0x000048 +#define INSPUR_POWER_MODE_CTRL 0x00004C + +#define INSPUR_PW_MODE_CTL_OSC_INPUT(x) ((x) << 3) +#define INSPUR_PW_MODE_CTL_OSC_INPUT_MASK 0x8 + +#define INSPUR_PW_MODE_CTL_MODE(x) ((x) << 0) +#define INSPUR_PW_MODE_CTL_MODE_MASK 0x03 +#define INSPUR_PW_MODE_CTL_MODE_SHIFT 0 + +#define INSPUR_PW_MODE_CTL_MODE_MODE0 0 +#define INSPUR_PW_MODE_CTL_MODE_MODE1 1 +#define INSPUR_PW_MODE_CTL_MODE_SLEEP 2 + +//#define INSPUR_CRT_PLL_CTRL 0x000060 + +#define INSPUR_PLL_CTRL_BYPASS(x) ((x) << 18) +#define INSPUR_PLL_CTRL_BYPASS_MASK 0x40000 + +#define INSPUR_PLL_CTRL_POWER(x) ((x) << 17) +#define INSPUR_PLL_CTRL_POWER_MASK 0x20000 + +#define INSPUR_PLL_CTRL_INPUT(x) ((x) << 16) +#define INSPUR_PLL_CTRL_INPUT_MASK 0x10000 + +#define INSPUR_PLL_CTRL_POD(x) ((x) << 14) +#define INSPUR_PLL_CTRL_POD_MASK 0xC000 + +#define INSPUR_PLL_CTRL_OD(x) ((x) << 12) +#define INSPUR_PLL_CTRL_OD_MASK 0x3000 + +#define INSPUR_PLL_CTRL_N(x) ((x) << 8) +#define INSPUR_PLL_CTRL_N_MASK 0xF00 + +#define INSPUR_PLL_CTRL_M(x) ((x) << 0) +#define INSPUR_PLL_CTRL_M_MASK 0xFF + +#define INSPUR_CRT_DISP_CTL 0x80200 + +#define INSPUR_CRT_DISP_CTL_DPMS(x) ((x) << 30) +#define INSPUR_CRT_DISP_CTL_DPMS_MASK 0xc0000000 + +#define INSPUR_CRT_DPMS_ON 0 +#define INSPUR_CRT_DPMS_OFF 3 + +#define INSPUR_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25) +#define INSPUR_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000 + +#define INSPUR_CRTSELECT_CRT 1 + +#define INSPUR_CRT_DISP_CTL_CLOCK_PHASE(x) ((x) << 14) +#define INSPUR_CRT_DISP_CTL_CLOCK_PHASE_MASK 0x4000 + +#define INSPUR_CRT_DISP_CTL_VSYNC_PHASE(x) ((x) << 13) +#define INSPUR_CRT_DISP_CTL_VSYNC_PHASE_MASK 0x2000 + +#define INSPUR_CRT_DISP_CTL_HSYNC_PHASE(x) ((x) << 12) +#define INSPUR_CRT_DISP_CTL_HSYNC_PHASE_MASK 0x1000 + +#define INSPUR_CRT_DISP_CTL_TIMING(x) ((x) << 8) +#define INSPUR_CRT_DISP_CTL_TIMING_MASK 0x100 + +#define INSPUR_CRT_DISP_CTL_PLANE(x) ((x) << 2) +#define INSPUR_CRT_DISP_CTL_PLANE_MASK 4 + +#define 
INSPUR_CRT_DISP_CTL_FORMAT(x) ((x) << 0) +#define INSPUR_CRT_DISP_CTL_FORMAT_MASK 0x03 + +#define INSPUR_CRT_FB_ADDRESS 0x080204 + +#define INSPUR_CRT_FB_WIDTH 0x080208 +#define INSPUR_CRT_FB_WIDTH_WIDTH(x) ((x) << 16) +#define INSPUR_CRT_FB_WIDTH_WIDTH_MASK 0x3FFF0000 +#define INSPUR_CRT_FB_WIDTH_OFFS(x) ((x) << 0) +#define INSPUR_CRT_FB_WIDTH_OFFS_MASK 0x3FFF + +#define INSPUR_CRT_HORZ_TOTAL 0x08020C +#define INSPUR_CRT_HORZ_TOTAL_TOTAL(x) ((x) << 16) +#define INSPUR_CRT_HORZ_TOTAL_TOTAL_MASK 0xFFF0000 + +#define INSPUR_CRT_HORZ_TOTAL_DISP_END(x) ((x) << 0) +#define INSPUR_CRT_HORZ_TOTAL_DISP_END_MASK 0xFFF + +#define INSPUR_CRT_HORZ_SYNC 0x080210 +#define INSPUR_CRT_HORZ_SYNC_WIDTH(x) ((x) << 16) +#define INSPUR_CRT_HORZ_SYNC_WIDTH_MASK 0xFF0000 + +#define INSPUR_CRT_HORZ_SYNC_START(x) ((x) << 0) +#define INSPUR_CRT_HORZ_SYNC_START_MASK 0xFFF + +#define INSPUR_CRT_VERT_TOTAL 0x080214 +#define INSPUR_CRT_VERT_TOTAL_TOTAL(x) ((x) << 16) +#define INSPUR_CRT_VERT_TOTAL_TOTAL_MASK 0x7FFF0000 + +#define INSPUR_CRT_VERT_TOTAL_DISP_END(x) ((x) << 0) +#define INSPUR_CRT_VERT_TOTAL_DISP_END_MASK 0x7FF + +#define INSPUR_CRT_VERT_SYNC 0x080218 +#define INSPUR_CRT_VERT_SYNC_HEIGHT(x) ((x) << 16) +#define INSPUR_CRT_VERT_SYNC_HEIGHT_MASK 0x3F0000 + +#define INSPUR_CRT_VERT_SYNC_START(x) ((x) << 0) +#define INSPUR_CRT_VERT_SYNC_START_MASK 0x7FF + +/* Hardware Cursor */ +#define INSPUR_HWC_ADDRESS 0x080230 +#define INSPUR_HWC_ADDRESS_ENABLE(x) ((x) << 31) +#define INSPUR_HWC_ADDRESS_ENABLE_MASK 0x80000000 +#define INSPUR_HWC_ADDRESS_ADDRESS(x) ((x) << 0) +#define INSPUR_HWC_ADDRESS_ADDRESS_MASK 0xFFFFFFF + +#define INSPUR_HWC_LOCATION 0x080234 +#define INSPUR_HWC_LOCATION_TOP(x) ((x) << 27) +#define INSPUR_HWC_LOCATION_TOP_MASK 0x8000000 +#define INSPUR_HWC_LOCATION_Y(x) ((x) << 16) +#define INSPUR_HWC_LOCATION_Y_MASK 0x7FF0000 +#define INSPUR_HWC_LOCATION_LEFT(x) ((x) << 11) +#define INSPUR_HWC_LOCATION_LEFT_MASK 0x800 +#define INSPUR_HWC_LOCATION_X(x) ((x) << 0) +#define INSPUR_HWC_LOCATION_X_MASK 0x7FF + +#define INSPUR_HWC_COLOR_12 0x080238 +#define INSPUR_HWC_COLOR_12_2_RGB(x) ((x) << 16) +#define INSPUR_HWC_COLOR_12_2_RGB_MASK 0xFFFF0000 +#define INSPUR_HWC_COLOR_12_1_RGB(x) ((x) << 0) +#define INSPUR_HWC_COLOR_12_1_RGB_MASK 0xFFFF + +#define INSPUR_HWC_COLOR_3 0x08023C +#define INSPUR_HWC_COLOR_3_RGB(x) ((x) << 0) +#define INSPUR_HWC_COLOR_3_RGB_MASK 0xFFFF + +/* Auto Centering */ +#define INSPUR_CRT_AUTO_CENTERING_TL 0x080280 +#define INSPUR_CRT_AUTO_CENTERING_TL_TOP(x) ((x) << 16) +#define INSPUR_CRT_AUTO_CENTERING_TL_TOP_MASK 0x7FF0000 + +#define INSPUR_CRT_AUTO_CENTERING_TL_LEFT(x) ((x) << 0) +#define INSPUR_CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7FF + +#define INSPUR_CRT_AUTO_CENTERING_BR 0x080284 +#define INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM(x) ((x) << 16) +#define INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM_MASK 0x7FF0000 + +#define INSPUR_CRT_AUTO_CENTERING_BR_RIGHT(x) ((x) << 0) +#define INSPUR_CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7FF + +/* register to control panel output */ +#define INSPUR_DISPLAY_CONTROL_HISILE 0x80288 +#define INSPUR_DISPLAY_CONTROL_FPVDDEN(x) ((x) << 0) +#define INSPUR_DISPLAY_CONTROL_PANELDATE(x) ((x) << 1) +#define INSPUR_DISPLAY_CONTROL_FPEN(x) ((x) << 2) +#define INSPUR_DISPLAY_CONTROL_VBIASEN(x) ((x) << 3) + +#define INSPUR_RAW_INTERRUPT 0x80290 +#define INSPUR_RAW_INTERRUPT_VBLANK(x) ((x) << 2) +#define INSPUR_RAW_INTERRUPT_VBLANK_MASK 0x4 + +#define INSPUR_RAW_INTERRUPT_EN 0x80298 +#define INSPUR_RAW_INTERRUPT_EN_VBLANK(x) ((x) << 2) +#define 
INSPUR_RAW_INTERRUPT_EN_VBLANK_MASK 0x4 + +/* register and values for PLL control */ +#define CRT_PLL1_NS 0x802a8 +#define CRT_PLL1_NS_OUTER_BYPASS(x) ((x) << 30) +#define CRT_PLL1_NS_INTER_BYPASS(x) ((x) << 29) +#define CRT_PLL1_NS_POWERON(x) ((x) << 24) + +#define CRT_PLL1_NS_25MHZ 0x00006691 //640x480 +#define CRT_PLL1_NS_40MHZ 0x00004580 //800x600 +#define CRT_PLL1_NS_65MHZ 0x00002568 //1024x768 +#define CRT_PLL1_NS_83MHZ 0x000027bb //1280x800 +#define CRT_PLL1_NS_106MHZ 0x000027ef //1440x900 +#define CRT_PLL1_NS_108MHZ 0x000027f2 //1280x1024 +#define CRT_PLL1_NS_146MHZ 0x00001575 //1680x1050 +#define CRT_PLL1_NS_148MHZ 0x0000145f //1920x1080 +#define CRT_PLL1_NS_193MHZ 0x000018f7 //1920x1200 + +#define CRT_PLL2_NS 0x802ac +#define CRT_PLL2_NS_25MHZ 0x0 +#define CRT_PLL2_NS_40MHZ 0x0 +#define CRT_PLL2_NS_65MHZ 0x0 +#define CRT_PLL2_NS_83MHZ 0x0 +#define CRT_PLL2_NS_106MHZ 0x0 +#define CRT_PLL2_NS_108MHZ 0x0 +#define CRT_PLL2_NS_146MHZ 0x0 +#define CRT_PLL2_NS_148MHZ 0x0 +#define CRT_PLL2_NS_193MHZ 0x0 + +#define INSPUR_FIELD(field, value) (field(value) & field##_MASK) +#endif diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c new file mode 100644 index 000000000000..4b31d82b00f8 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +static int inspur_connector_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, + connector->dev->mode_config.max_width, + connector->dev->mode_config.max_height); + drm_set_preferred_mode(connector, 1024, 768); + return count; +} + +static int inspur_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs inspur_connector_helper_funcs = { + .get_modes = inspur_connector_get_modes, + .mode_valid = inspur_connector_mode_valid, +}; + +static const struct drm_connector_funcs inspur_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static void inspur_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + u32 reg; + struct drm_device *dev = encoder->dev; + struct inspur_drm_private *priv = dev->dev_private; + + reg = readl(priv->mmio + INSPUR_DISPLAY_CONTROL_HISILE); + reg |= INSPUR_DISPLAY_CONTROL_FPVDDEN(1); + reg |= INSPUR_DISPLAY_CONTROL_PANELDATE(1); + reg |= INSPUR_DISPLAY_CONTROL_FPEN(1); + reg |= INSPUR_DISPLAY_CONTROL_VBIASEN(1); + writel(reg, priv->mmio + INSPUR_DISPLAY_CONTROL_HISILE); +} + +static const struct drm_encoder_helper_funcs inspur_encoder_helper_funcs = { + .mode_set = inspur_encoder_mode_set, +}; + +static const struct drm_encoder_funcs inspur_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +int inspur_vdac_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret; + + encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL); + if (!encoder) { + DRM_ERROR("failed to alloc memory when init encoder\n"); + return -ENOMEM; + } + 
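+ /*
+ * Only one CRTC is created in inspur_de_init(), so the encoder can
+ * only ever be attached to CRTC index 0 below.
+ */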
+ encoder->possible_crtcs = 0x1; + ret = drm_encoder_init(dev, encoder, &inspur_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) { + DRM_ERROR("failed to init encoder: %d\n", ret); + return ret; + } + + drm_encoder_helper_add(encoder, &inspur_encoder_helper_funcs); + + connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL); + if (!connector) { + DRM_ERROR("failed to alloc memory when init connector\n"); + return -ENOMEM; + } + + ret = drm_connector_init(dev, connector, + &inspur_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + if (ret) { + DRM_ERROR("failed to init connector: %d\n", ret); + return ret; + } + drm_connector_helper_add(connector, &inspur_connector_helper_funcs); + + drm_connector_register(connector); + drm_connector_attach_encoder(connector, encoder); + return 0; +} diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c new file mode 100644 index 000000000000..1c9acc776102 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include "inspur_drm_drv.h" + +int inspur_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + + return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args); +} + +const struct drm_mode_config_funcs inspur_mode_funcs = { + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, + .fb_create = drm_gem_fb_create, + .mode_valid = drm_vram_helper_mode_valid, +}; -- Gitee From 08141b8d885fa99364bfdff80353dac563b1b093 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 2 Apr 2024 21:54:10 +0800 Subject: [PATCH 903/953] anolis: kvm: add kvm_arch_hypercall hook interface ANBZ: #8699 Signed-off-by: xiongmengbiao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3000 --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/hygon/psp.c | 2 +- arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++ arch/x86/kvm/x86.c | 4 +++- 5 files changed, 23 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 0c540ac3872e..348b78389406 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -137,6 +137,7 @@ KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); KVM_X86_OP_OPTIONAL(get_untagged_addr) KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(arch_hypercall) KVM_X86_OP_OPTIONAL(control_pre_system_reset) KVM_X86_OP_OPTIONAL(control_post_system_reset) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c2442ffe3056..50de2e7f5961 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1757,6 +1757,7 @@ struct kvm_x86_ops { int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); int (*control_pre_system_reset)(struct kvm *kvm); int (*control_post_system_reset)(struct kvm *kvm); + int (*arch_hypercall)(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index 9181ec2406ec..3d33afd4e644 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -629,4 +629,4 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, /* return psp_ret to guest */ kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; -} +} 
EXPORT_SYMBOL_GPL(kvm_pv_psp_op); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e50e7fe1a86c..7ddd69635e41 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5011,6 +5011,22 @@ static int svm_vm_init(struct kvm *kvm) return 0; } +static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) +{ + int ret = 0; + + switch (nr) { + case KVM_HC_PSP_OP: + ret = kvm_pv_psp_op(kvm, a0, a1, a2, a3); + break; + + default: + ret = -KVM_ENOSYS; + break; + } + return ret; +} + static struct kvm_x86_ops svm_x86_ops __initdata = { .name = KBUILD_MODNAME, @@ -5146,6 +5162,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vm_attestation = sev_vm_attestation, .control_pre_system_reset = csv_control_pre_system_reset, .control_post_system_reset = csv_control_post_system_reset, + .arch_hypercall = kvm_hygon_arch_hypercall, }; /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index db86f36c38a0..3d382c0ea5be 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9964,7 +9964,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); break; case KVM_HC_PSP_OP: - ret = kvm_pv_psp_op(vcpu->kvm, a0, a1, a2, a3); + ret = -KVM_ENOSYS; + if (kvm_x86_ops.arch_hypercall) + ret = static_call(kvm_x86_arch_hypercall)(vcpu->kvm, nr, a0, a1, a2, a3); break; default: ret = -KVM_ENOSYS; -- Gitee From 360d0937f78d8401f40511f222c6e18d56570aa4 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Wed, 3 Apr 2024 11:19:18 +0800 Subject: [PATCH 904/953] anolis: crypto: ccp: Eliminate dependence of the kvm module on the ccp module ANBZ: #8699 Because the KVM module calls certain interfaces from the ccp module, such as vpsp_try_do_cmd, it is necessary to load the ccp module before loading kvm. However, on CPUs other than Hygon, the ccp module might not be loaded, which would prevent the kvm module from loading. Therefore, we use function hooks to call functions from the ccp module. 
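For reference, a guest reaches this path through the generic KVM
hypercall mechanism. The sketch below is illustrative only and is not
code from this series: the wrapper name is hypothetical, and the
argument order simply mirrors the nr/a0..a3 layout that
kvm_emulate_hypercall() hands to the arch_hypercall hook.

    #include <linux/kvm_para.h>  /* kvm_hypercall4(), KVM_HC_PSP_OP */

    /* Hypothetical guest-side helper: issue one vPSP request. */
    static long vpsp_issue_cmd(int cmd, unsigned long data_gpa,
                               unsigned long psp_ret_gpa,
                               unsigned long table_gpa)
    {
            /* nr = KVM_HC_PSP_OP, a0 = cmd, a1..a3 = the three gpa args */
            return kvm_hypercall4(KVM_HC_PSP_OP, cmd, data_gpa,
                                  psp_ret_gpa, table_gpa);
    }

On the host side such a call lands in kvm_emulate_hypercall(), which
now forwards it through the optional arch_hypercall hook rather than
calling into the ccp code directly.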
Now the module dependencies are as follows: [root@anolis ~]# lsmod | grep kvm kvm_amd 200704 0 kvm 1339392 1 kvm_amd ccp 352256 1 kvm_amd irqbypass 12288 2 vfio_pci_core,kvm Signed-off-by: xiongmengbiao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3000 --- arch/x86/include/asm/kvm_host.h | 2 - arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/svm/svm.c | 7 ++- drivers/crypto/ccp/Makefile | 3 +- .../hygon/psp.c => drivers/crypto/ccp/vpsp.c | 48 +++++++++---------- include/linux/psp-sev.h | 14 ++++++ 6 files changed, 47 insertions(+), 29 deletions(-) rename arch/x86/kvm/hygon/psp.c => drivers/crypto/ccp/vpsp.c (90%) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 50de2e7f5961..36cd8eee3f77 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2150,8 +2150,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, - gpa_t psp_ret_gpa, gpa_t table_gpa); int kvm_add_user_return_msr(u32 msr); int kvm_find_user_return_msr(u32 msr); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 16b463703387..3a934e9b5778 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -12,7 +12,7 @@ include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ - mmu/spte.o hygon/psp.o + mmu/spte.o ifdef CONFIG_HYPERV kvm-y += kvm_onhyperv.o diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7ddd69635e41..677a89cbc2c6 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5014,10 +5014,15 @@ static int svm_vm_init(struct kvm *kvm) static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) { int ret = 0; + struct kvm_vpsp vpsp = { + .kvm = kvm, + .write_guest = kvm_write_guest, + .read_guest = kvm_read_guest + }; switch (nr) { case KVM_HC_PSP_OP: - ret = kvm_pv_psp_op(kvm, a0, a1, a2, a3); + ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3); break; default: diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 088d53009824..70bab9cbe3d5 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -14,7 +14,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ platform-access.o \ dbc.o \ psp-ringbuf.o \ - csv-dev.o + csv-dev.o \ + vpsp.o ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o diff --git a/arch/x86/kvm/hygon/psp.c b/drivers/crypto/ccp/vpsp.c similarity index 90% rename from arch/x86/kvm/hygon/psp.c rename to drivers/crypto/ccp/vpsp.c index 3d33afd4e644..3f18530c5353 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/drivers/crypto/ccp/vpsp.c @@ -185,7 +185,7 @@ static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) * newly allocated hva(host virtual address) and updates the mapping * relationship in the parent memory */ -static int guest_multiple_level_gpa_replace(struct kvm *kvm, +static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp, struct map_tbl *tbl, struct gpa2hva_tbls *g2h) { int ret = 0; @@ -200,7 +200,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, return -ENOMEM; /* get child gpa from parent gpa */ - if (unlikely(kvm_read_guest(kvm, tbl->parent_pa + tbl->offset, + if 
(unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset, &sub_paddr, sizeof(sub_paddr)))) { pr_err("[%s]: kvm_read_guest for parent gpa failed\n", __func__); @@ -209,7 +209,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, } /* copy child block data from gpa to hva */ - if (unlikely(kvm_read_guest(kvm, sub_paddr, (void *)tbl->hva, + if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva, tbl->size))) { pr_err("[%s]: kvm_read_guest for sub_data failed\n", __func__); @@ -249,7 +249,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, * address) back to the memory corresponding to the gpa, and restores * the mapping relationship in the original parent memory */ -static int guest_multiple_level_gpa_restore(struct kvm *kvm, +static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp, struct map_tbl *tbl, struct gpa2hva_tbls *g2h) { int ret = 0; @@ -266,7 +266,7 @@ static int guest_multiple_level_gpa_restore(struct kvm *kvm, } /* copy child block data from hva to gpa */ - if (unlikely(kvm_write_guest(kvm, sub_gpa, (void *)tbl->hva, + if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva, tbl->size))) { pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", __func__); @@ -300,7 +300,7 @@ static int guest_multiple_level_gpa_restore(struct kvm *kvm, * executes upper-layer abstract interfaces, including replacing and * restoring two sub-processing functions */ -static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, +static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h, struct addr_map_tbls *map_tbls, int op) { int ret = 0; @@ -321,7 +321,7 @@ static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, } /* restore new pa of kva with the gpa from guest */ - if (unlikely(guest_multiple_level_gpa_restore(kvm, + if (unlikely(guest_multiple_level_gpa_restore(vpsp, &map_tbls->tbl[i], g2h))) { pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", __func__); @@ -352,7 +352,7 @@ static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, } /* replace the gpa from guest with the new pa of kva */ - if (unlikely(guest_multiple_level_gpa_replace(kvm, + if (unlikely(guest_multiple_level_gpa_replace(vpsp, &map_tbls->tbl[i], g2h))) { pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", __func__); @@ -390,7 +390,7 @@ static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls * information in the command buffer, the processed data will be * used to interact with the psp device */ -static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, +static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; @@ -402,7 +402,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, struct gpa2hva_tbls *g2h = NULL; uint32_t g2h_tbl_size; - if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, sizeof(struct psp_cmdresp_head)))) return -EFAULT; @@ -411,14 +411,14 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, if (!data) return -ENOMEM; - if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { ret = -EFAULT; goto end; } if (table_gpa) { /* parse address map table from guest */ - if (unlikely(kvm_read_guest(kvm, table_gpa, &map_head, + if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, 
&map_head, sizeof(struct addr_map_tbls)))) { pr_err("[%s]: kvm_read_guest for map_head failed\n", __func__); @@ -434,7 +434,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, goto end; } - if (unlikely(kvm_read_guest(kvm, table_gpa, map_tbls, + if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls, map_tbl_size))) { pr_err("[%s]: kvm_read_guest for map_tbls failed\n", __func__); @@ -460,7 +460,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, goto end; } - if (guest_addr_map_table_op(kvm, g2h, map_tbls, 0)) { + if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) { pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", __func__); ret = -EFAULT; @@ -484,13 +484,13 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, * pointer of the mapping table when the command has finished * interacting with the psp device */ -static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, +static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; if (hbuf->map_tbls) { - if (guest_addr_map_table_op(kvm, hbuf->g2h_tbls, + if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls, hbuf->map_tbls, 1)) { pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", __func__); @@ -500,7 +500,7 @@ static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, } /* restore cmdresp's buffer from context */ - if (unlikely(kvm_write_guest(kvm, data_gpa, hbuf->data, + if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, hbuf->data_size))) { pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", __func__); @@ -526,7 +526,7 @@ static int cmd_type_is_tkm(int cmd) /* * The primary implementation interface of virtual PSP in kernel mode */ -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, +int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, gpa_t table_gpa) { int ret = 0; @@ -540,21 +540,21 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, // only tkm cmd need vid if (cmd_type_is_tkm(vcmd->cmd_id)) { // check the permission to use the default vid when no vid is set - ret = vpsp_get_vid(&vid, kvm->userspace_pid); + ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid); if (ret && !vpsp_get_default_vid_permission()) { pr_err("[%s]: not allowed tkm command without vid\n", __func__); return -EFAULT; } } - if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, + if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) return -EFAULT; switch (psp_ret.status) { case VPSP_INIT: /* multilevel pointer replace*/ - ret = kvm_pv_psp_cmd_pre_op(kvm, data_gpa, table_gpa, &hbuf); + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf); if (unlikely(ret)) { psp_ret.status = VPSP_FINISH; pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", @@ -581,7 +581,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, ret = 0; } else if (psp_ret.status == VPSP_FINISH) { /* restore multilevel pointer data */ - ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, &hbuf); + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); @@ -609,7 +609,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, ret = 0; } else if (psp_ret.status == VPSP_FINISH) { /* restore multilevel pointer data */ - ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, + ret = 
kvm_pv_psp_cmd_post_op(vpsp, data_gpa,
 &g_hbuf_wrap[prio][index]);
 if (unlikely(ret)) {
 pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
@@ -627,6 +627,6 @@
 }
 end:
 /* return psp_ret to guest */
- kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret));
+ vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret));
 return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_pv_psp_op);
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 74086c114184..9a144026f89a 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -13,6 +13,7 @@
 #define __PSP_SEV_H__
 
 #include
+#include
 
 #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
 
@@ -671,6 +672,12 @@ struct vpsp_ret {
 u32 status : 2;
 };
 
+struct kvm_vpsp {
+ struct kvm *kvm;
+ int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len);
+ int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
+};
+
 #define PSP_VID_MASK 0xff
 #define PSP_VID_SHIFT 56
 #define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT))
@@ -827,6 +834,9 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret)
 int vpsp_get_vid(uint32_t *vid, pid_t pid);
 int vpsp_get_default_vid_permission(void);
+
+int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa,
+ gpa_t table_gpa);
 #else /* !CONFIG_CRYPTO_DEV_SP_PSP */
 
 static inline int
@@ -874,6 +884,10 @@ vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { r
 
 static inline int
 vpsp_get_default_vid_permission(void) { return -ENODEV; }
+
+static inline int
+kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa,
+ gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; }
 #endif /* CONFIG_CRYPTO_DEV_SP_PSP */
 
 #endif /* __PSP_SEV_H__ */
-- 
Gitee

From 722dcf74fde6576aa00894169f64e998e61c9afa Mon Sep 17 00:00:00 2001
From: yangdepei
Date: Mon, 8 Apr 2024 17:09:39 +0800
Subject: [PATCH 905/953] anolis: bugfix: crypto: ccp: remove repeated sm4-hs mode

ANBZ: #8582

Remove the repeated sm4-hs mode definitions; otherwise, they cause a
ccp-crypto module load error in later versions of kernel-6.6, e.g.
6.6.20

Signed-off-by: yangdepei
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/3052
---
 drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c
index 0d1c750ff118..2328a9f87218 100644
--- a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c
+++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c
@@ -205,15 +205,6 @@ static struct ccp_sm4_def sm4_algs[] = {
 .ivsize = 0,
 .alg_defaults = &ccp_sm4_defaults,
 },
- {
- .mode = CCP_SM4_ALG_MODE_ECB_HS,
- .version = CCP_VERSION(5, 0),
- .name = "ecb(sm4)",
- .driver_name = "ecb-sm4-hs-ccp",
- .blocksize = SM4_BLOCK_SIZE,
- .ivsize = 0,
- .alg_defaults = &ccp_sm4_defaults,
- },
 {
 .mode = CCP_SM4_ALG_MODE_CBC,
 .version = CCP_VERSION(5, 0),
@@ -232,15 +223,6 @@ static struct ccp_sm4_def sm4_algs[] = {
 .ivsize = SM4_BLOCK_SIZE,
 .alg_defaults = &ccp_sm4_defaults,
 },
- {
- .mode = CCP_SM4_ALG_MODE_CBC_HS,
- .version = CCP_VERSION(5, 0),
- .name = "cbc(sm4)",
- .driver_name = "cbc-sm4-hs-ccp",
- .blocksize = SM4_BLOCK_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .alg_defaults = &ccp_sm4_defaults,
- },
 {
 .mode = CCP_SM4_ALG_MODE_OFB,
 .version = CCP_VERSION(5, 0),
-- 
Gitee

From 2fd544183104584b1dab133dbaf38c124441e6c9 Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Tue, 16 Apr 2024 11:26:43 +0800
Subject: [PATCH 906/953] anolis: fs/resctrl: Remove mbm_Bps features checking from resctrl_is_mbm_{enabled,event}

ANBZ: #8763

A new mbm_Bps monitoring feature is introduced by commit 09d14609221e
("anolis: fs/resctrl: Add a new resctrl monitoring event to get MB in
Bps"). Since it is not a non-decreasing counter, but an instantaneous
value in a hardware window, it should not be treated as true in
function resctrl_is_mbm_{enabled,event}.

Fixes: 09d14609221e ("anolis: fs/resctrl: Add a new resctrl monitoring event to get MB in Bps")
Signed-off-by: Shawn Wang
Reviewed-by: Baolin Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/3060
---
 fs/resctrl/rdtgroup.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index e39f22453d84..ea969ddb1a9d 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -113,14 +113,13 @@ void rdt_staged_configs_clear(void)
 static bool resctrl_is_mbm_enabled(void)
 {
 return (resctrl_arch_is_mbm_total_enabled() ||
- resctrl_arch_is_mbm_local_enabled() ||
- resctrl_arch_is_mbm_bps_enabled());
+ resctrl_arch_is_mbm_local_enabled());
 }
 
 static bool resctrl_is_mbm_event(int e)
 {
 return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
- e <= QOS_MC_MBM_BPS_EVENT_ID);
+ e <= QOS_L3_MBM_LOCAL_EVENT_ID);
 }
 
 /*
-- 
Gitee

From d71acda5080a5f39d4a1a3c2298f790cff859d24 Mon Sep 17 00:00:00 2001
From: Shawn Wang
Date: Tue, 16 Apr 2024 09:45:55 +0800
Subject: [PATCH 907/953] anolis: fs/resctrl: Cancel the delayed checking works when resctrl is umounted

ANBZ: #8763

In the following scenario, an MPAM error interrupt with
MSMONCFG_ID_RANGE error code has been triggered:

 mount -t resctrl resctrl /sys/fs/resctrl
 # create the maximum ctrl&mon groups
 umount /sys/fs/resctrl
 mount -t resctrl resctrl -o cdp /sys/fs/resctrl

To keep all the unused rmids clean, a delayed work cqm_handle_limbo()
periodically checks and recycles the dirty rmids in the background,
even when the resctrl is umounted. When remounting resctrl with cdp
enabled, the `cdp_enabled` will be set true and the actual PARTID will
correspond to twice the closid.
Then the delayed work could access the out-of-range PARTID by resctrl_arch_rmid_read(). Another delayed work mbm_handle_overflow() has the same problem. To alleviate this issue, cancel the delayed works when resctrl is umounted. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3060 --- fs/resctrl/rdtgroup.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index ea969ddb1a9d..643ea199c428 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -2792,6 +2792,9 @@ static void rmdir_all_sub(void) static void rdt_kill_sb(struct super_block *sb) { + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_domain *d; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); @@ -2801,6 +2804,20 @@ static void rdt_kill_sb(struct super_block *sb) resctrl_arch_reset_resources(); rmdir_all_sub(); + + /* + * When resctrl is umounted, forcefully cancel delayed works since the + * new mount option may be changed. + */ + list_for_each_entry(d, &l3->domains, list) { + if (resctrl_is_mbm_enabled()) + cancel_delayed_work(&d->mbm_over); + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { + __check_limbo(d, true); + cancel_delayed_work(&d->cqm_limbo); + } + } + rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); -- Gitee From 8c869b23387b0668fa476f526bceff6fdd9ba064 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 16 Apr 2024 11:40:38 +0800 Subject: [PATCH 908/953] anolis: arm_mpam: Fix the ris field parsing typo in error irq handler ANBZ: #8763 The ris field in MPAMF_ESR should be parsed from MPAMF_ESR_RIS instead of MPAMF_ESR_PMG. Fix this typo. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3060 --- drivers/platform/mpam/mpam_devices.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 906f8a6b6940..0134263c88af 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2055,7 +2055,7 @@ static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc) partid = FIELD_GET(MPAMF_ESR_PARTID_OR_MON, reg); pmg = FIELD_GET(MPAMF_ESR_PMG, reg); - ris = FIELD_GET(MPAMF_ESR_PMG, reg); + ris = FIELD_GET(MPAMF_ESR_RIS, reg); pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n", msc->id, mpam_errcode_names[errcode], partid, pmg, ris); -- Gitee From 609c4a6634d265d77baa72581c22a1bb0e4b5029 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 17 Apr 2024 15:25:43 +0800 Subject: [PATCH 909/953] anolis: loongarch: fix spec to support compiling vmlinuz.efi ANBZ: #8779 The current spec only supports compilation of elf vmlinux, and grub2 can only support the efi stub kernel, so the spec is fixed to compile vmlinuz in efi format to support kernel startup. 
Signed-off-by: Jing Zhang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3071 --- anolis/rpm/kernel.spec.template | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 109e09ae53fe..56a4c7051b6d 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -142,9 +142,9 @@ %ifarch loongarch64 %define all_arch_configs %{name}-%{version}-loongarch64*.config %define asmarch loongarch -%define make_target vmlinux +%define make_target vmlinuz.efi %define hdrarch loongarch -%define kernel_image vmlinux +%define kernel_image arch/loongarch/boot/vmlinuz.efi %endif # To temporarily exclude an architecture from being built, add it to -- Gitee From 3675e6ac64cbb6f9c8c9ec9ad6b7b805c218937d Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 23 Apr 2024 10:43:39 +0800 Subject: [PATCH 910/953] anolis: Fix the warning message when uninstalling the kernel ANBZ: #8620 Remove modules.builtin.alias.bin after built kernel, the same as commit: 375333772562("anolis: spec: remove modules.builtin.alias.bin after built kernel") Then change the files copied to the /boot/ directory to %ghost to avoid warnings about repeatedly deleting these files when the kernel is uninstalled. Signed-off-by: Jing Zhang Reviewed-by: Qiao Ma Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3083 --- anolis/rpm/kernel.spec.template | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 56a4c7051b6d..2e2a4558dcc9 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -976,7 +976,7 @@ BuildKernel() { # remove files that will be auto generated by depmod at rpm -i time pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ - rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + rm -f modules.{alias*,builtin.bin,builtin.alias.bin,dep*,*map,symbols*,devname,softdep} popd # Copy the System.map file for depmod to use, and create a backup of the @@ -1010,7 +1010,7 @@ BuildKernel() { # remove files that will be auto generated by depmod at rpm -i time pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ - rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + rm -f modules.{alias*,builtin.bin,builtin.alias.bin,dep*,*map,symbols*,devname,softdep} popd # Cleanup @@ -1558,12 +1558,12 @@ fi %ghost /%{image_install_path}/dtb-%{KVERREL}%{?2:+%{2}} \ %endif\ %attr(0600, root, root) /lib/modules/%{KVERREL}%{?2:+%{2}}/System.map\ -%attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\ +%ghost %attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\ /lib/modules/%{KVERREL}%{?2:+%{2}}/symvers.gz\ /lib/modules/%{KVERREL}%{?2:+%{2}}/config\ -%attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\ -%attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\ -%attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\ +%ghost %attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\ +%ghost %attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\ +%ghost %attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\ %dir /lib/modules\ %dir /lib/modules/%{KVERREL}%{?2:+%{2}}\ /lib/modules/%{KVERREL}%{?2:+%{2}}/kernel\ -- Gitee From 97bf07111f7a2d2cd6709abe0a9b29abccccf438 Mon Sep 17 00:00:00 2001 From: Lucas Karpinski Date: Tue, 14 Nov 2023 10:11:31 -0500 Subject: [PATCH 911/953] selftests/net: synchronize udpgro 
tests' tx and rx connection ANBZ: #8863 commit 3bdd9fd29cb0f136b307559a19c107210ad5c314 upstream. The sockets used by udpgso_bench_tx aren't always ready when udpgso_bench_tx transmits packets. This issue is more prevalent in -rt kernels, but can occur in both. Replace the hacky sleep calls with a function that checks whether the ports in the namespace are ready for use. Suggested-by: Paolo Abeni Signed-off-by: Lucas Karpinski Reviewed-by: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Heng Qi Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3112 --- tools/testing/selftests/net/net_helper.sh | 22 +++++++++++++++++++ tools/testing/selftests/net/udpgro.sh | 13 +++++------ tools/testing/selftests/net/udpgro_bench.sh | 5 +++-- tools/testing/selftests/net/udpgro_frglist.sh | 5 +++-- 4 files changed, 34 insertions(+), 11 deletions(-) create mode 100755 tools/testing/selftests/net/net_helper.sh diff --git a/tools/testing/selftests/net/net_helper.sh b/tools/testing/selftests/net/net_helper.sh new file mode 100755 index 000000000000..4fe0befa13fb --- /dev/null +++ b/tools/testing/selftests/net/net_helper.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Helper functions + +wait_local_port_listen() +{ + local listener_ns="${1}" + local port="${2}" + local protocol="${3}" + local port_hex + local i + + port_hex="$(printf "%04X" "${port}")" + for i in $(seq 10); do + if ip netns exec "${listener_ns}" cat /proc/net/"${protocol}"* | \ + grep -q "${port_hex}"; then + break + fi + sleep 0.1 + done +} diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh index 3f09ac78f445..8802604148dd 100755 --- a/tools/testing/selftests/net/udpgro.sh +++ b/tools/testing/selftests/net/udpgro.sh @@ -3,6 +3,8 @@ # # Run a series of udpgro functional tests. +source net_helper.sh + readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" BPF_FILE="xdp_dummy.o" @@ -51,8 +53,7 @@ run_one() { echo "ok" || \ echo "failed" & - # Hack: let bg programs complete the startup - sleep 0.2 + wait_local_port_listen ${PEER_NS} 8000 udp ./udpgso_bench_tx ${tx_args} ret=$? wait $(jobs -p) @@ -97,7 +98,7 @@ run_one_nat() { echo "ok" || \ echo "failed"& - sleep 0.1 + wait_local_port_listen "${PEER_NS}" 8000 udp ./udpgso_bench_tx ${tx_args} ret=$? kill -INT $pid @@ -118,11 +119,9 @@ run_one_2sock() { echo "ok" || \ echo "failed" & - # Hack: let bg programs complete the startup - sleep 0.2 + wait_local_port_listen "${PEER_NS}" 12345 udp ./udpgso_bench_tx ${tx_args} -p 12345 - sleep 0.1 - # first UDP GSO socket should be closed at this point + wait_local_port_listen "${PEER_NS}" 8000 udp ./udpgso_bench_tx ${tx_args} ret=$? 
wait $(jobs -p) diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh index 65ff1d424008..7080eae5312b 100755 --- a/tools/testing/selftests/net/udpgro_bench.sh +++ b/tools/testing/selftests/net/udpgro_bench.sh @@ -3,6 +3,8 @@ # # Run a series of udpgro benchmarks +source net_helper.sh + readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" BPF_FILE="xdp_dummy.o" @@ -40,8 +42,7 @@ run_one() { ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r & ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r & - # Hack: let bg programs complete the startup - sleep 0.2 + wait_local_port_listen "${PEER_NS}" 8000 udp ./udpgso_bench_tx ${tx_args} } diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh index bd51d386b52e..e1ff645bd3d1 100755 --- a/tools/testing/selftests/net/udpgro_frglist.sh +++ b/tools/testing/selftests/net/udpgro_frglist.sh @@ -3,6 +3,8 @@ # # Run a series of udpgro benchmarks +source net_helper.sh + readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" BPF_FILE="xdp_dummy.o" @@ -45,8 +47,7 @@ run_one() { echo ${rx_args} ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r & - # Hack: let bg programs complete the startup - sleep 0.2 + wait_local_port_listen "${PEER_NS}" 8000 udp ./udpgso_bench_tx ${tx_args} } -- Gitee From c3ef571c10aa28519a56f5f854168c599d3547cc Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Sat, 2 Dec 2023 10:00:57 +0800 Subject: [PATCH 912/953] selftests/net: add lib.sh ANBZ: #8863 commit 25ae948b447881bf689d459cd5bd4629d9c04b20 upstream. Add a lib.sh for net selftests. This file can be used to define commonly used variables and functions. Some commonly used functions can be moved from forwarding/lib.sh to this lib file. e.g. busywait(). Add function setup_ns() for user to create unique namespaces with given prefix name. Reviewed-by: Petr Machata Signed-off-by: Hangbin Liu Signed-off-by: Paolo Abeni Signed-off-by: Heng Qi Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3112 --- tools/testing/selftests/net/Makefile | 2 +- tools/testing/selftests/net/forwarding/lib.sh | 27 +----- tools/testing/selftests/net/lib.sh | 85 +++++++++++++++++++ 3 files changed, 87 insertions(+), 27 deletions(-) create mode 100644 tools/testing/selftests/net/lib.sh diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index de4506e2a412..7dbdc5ebbb75 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -54,7 +54,7 @@ TEST_PROGS += ip_local_port_range.sh TEST_PROGS += rps_default_mask.sh TEST_PROGS += big_tcp.sh TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh -TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh +TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh lib.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index e37a15eda6c2..8f6ca458af9a 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -4,9 +4,6 @@ ############################################################################## # Defines -# Kselftest framework requirement - SKIP code is 4. -ksft_skip=4 - # Can be overridden by the configuration file. 
PING=${PING:=ping} PING6=${PING6:=ping6} @@ -41,6 +38,7 @@ if [[ -f $relative_path/forwarding.config ]]; then source "$relative_path/forwarding.config" fi +source ../lib.sh ############################################################################## # Sanity checks @@ -395,29 +393,6 @@ log_info() echo "INFO: $msg" } -busywait() -{ - local timeout=$1; shift - - local start_time="$(date -u +%s%3N)" - while true - do - local out - out=$("$@") - local ret=$? - if ((!ret)); then - echo -n "$out" - return 0 - fi - - local current_time="$(date -u +%s%3N)" - if ((current_time - start_time > timeout)); then - echo -n "$out" - return 1 - fi - done -} - not() { "$@" diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh new file mode 100644 index 000000000000..518eca57b815 --- /dev/null +++ b/tools/testing/selftests/net/lib.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +############################################################################## +# Defines + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +############################################################################## +# Helpers +busywait() +{ + local timeout=$1; shift + + local start_time="$(date -u +%s%3N)" + while true + do + local out + out=$("$@") + local ret=$? + if ((!ret)); then + echo -n "$out" + return 0 + fi + + local current_time="$(date -u +%s%3N)" + if ((current_time - start_time > timeout)); then + echo -n "$out" + return 1 + fi + done +} + +cleanup_ns() +{ + local ns="" + local errexit=0 + local ret=0 + + # disable errexit temporary + if [[ $- =~ "e" ]]; then + errexit=1 + set +e + fi + + for ns in "$@"; do + ip netns delete "${ns}" &> /dev/null + if ! busywait 2 ip netns list \| grep -vq "^$ns$" &> /dev/null; then + echo "Warn: Failed to remove namespace $ns" + ret=1 + fi + done + + [ $errexit -eq 1 ] && set -e + return $ret +} + +# setup netns with given names as prefix. e.g +# setup_ns local remote +setup_ns() +{ + local ns="" + local ns_name="" + local ns_list="" + for ns_name in "$@"; do + # Some test may setup/remove same netns multi times + if unset ${ns_name} 2> /dev/null; then + ns="${ns_name,,}-$(mktemp -u XXXXXX)" + eval readonly ${ns_name}="$ns" + else + eval ns='$'${ns_name} + cleanup_ns "$ns" + + fi + + if ! ip netns add "$ns"; then + echo "Failed to create namespace $ns_name" + cleanup_ns "$ns_list" + return $ksft_skip + fi + ip -n "$ns" link set lo up + ns_list="$ns_list $ns" + done +} -- Gitee From 21e10634e5524b91fdf310f08599c84c413ab1f1 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 13 Dec 2023 14:08:44 +0800 Subject: [PATCH 913/953] selftests/net: add variable NS_LIST for lib.sh ANBZ: #8863 commit b6925b4ed57cccf42ca0fb46c7446f0859e7ad4b upstream. Add a global variable NS_LIST to store all the namespaces that setup_ns created, so the caller could call cleanup_all_ns() instead of remember all the netns names when using cleanup_ns(). Signed-off-by: Hangbin Liu Link: https://lore.kernel.org/r/20231213060856.4030084-2-liuhangbin@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Heng Qi Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3112 --- tools/testing/selftests/net/lib.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index 518eca57b815..dca549443801 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -6,6 +6,8 @@ # Kselftest framework requirement - SKIP code is 4. 
ksft_skip=4 +# namespace list created by setup_ns +NS_LIST="" ############################################################################## # Helpers @@ -56,6 +58,11 @@ cleanup_ns() return $ret } +cleanup_all_ns() +{ + cleanup_ns $NS_LIST +} + # setup netns with given names as prefix. e.g # setup_ns local remote setup_ns() @@ -82,4 +89,5 @@ setup_ns() ip -n "$ns" link set lo up ns_list="$ns_list $ns" done + NS_LIST="$NS_LIST $ns_list" } -- Gitee From d9f47dd4e244bb7a228744a94a93994ef86db0be Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Thu, 4 Jan 2024 09:11:09 -0500 Subject: [PATCH 914/953] selftests: forwarding: Avoid failures to source net/lib.sh ANBZ: #8863 commit 2114e83381d3289a88378850f43069e79f848083 upstream. The expression "source ../lib.sh" added to net/forwarding/lib.sh in commit 25ae948b4478 ("selftests/net: add lib.sh") does not work for tests outside net/forwarding which source net/forwarding/lib.sh (1). It also does not work in some cases where only a subset of tests are exported (2). Avoid the problems mentioned above by replacing the faulty expression with a copy of the content from net/lib.sh which is used by files under net/forwarding. A more thorough solution which avoids duplicating content between net/lib.sh and net/forwarding/lib.sh has been posted here: https://lore.kernel.org/netdev/20231222135836.992841-1-bpoirier@nvidia.com/ The approach in the current patch is a stopgap solution to avoid submitting large changes at the eleventh hour of this development cycle. Example of problem 1) tools/testing/selftests/drivers/net/bonding$ ./dev_addr_lists.sh ./net_forwarding_lib.sh: line 41: ../lib.sh: No such file or directory TEST: bonding cleanup mode active-backup [ OK ] TEST: bonding cleanup mode 802.3ad [ OK ] TEST: bonding LACPDU multicast address to slave (from bond down) [ OK ] TEST: bonding LACPDU multicast address to slave (from bond up) [ OK ] An error message is printed but since the test does not use functions from net/lib.sh, the test results are not affected. Example of problem 2) tools/testing/selftests$ make install TARGETS="net/forwarding" tools/testing/selftests$ cd kselftest_install/net/forwarding/ tools/testing/selftests/kselftest_install/net/forwarding$ ./pedit_ip.sh veth{0..3} lib.sh: line 41: ../lib.sh: No such file or directory TEST: ping [ OK ] TEST: ping6 [ OK ] ./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth1 ingress pedit ip src set 198.51.100.1 [FAIL] Expected to get 10 packets, but got . ./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth2 egress pedit ip src set 198.51.100.1 [FAIL] Expected to get 10 packets, but got . ./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth1 ingress pedit ip dst set 198.51.100.1 [FAIL] Expected to get 10 packets, but got . ./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth2 egress pedit ip dst set 198.51.100.1 [FAIL] Expected to get 10 packets, but got . ./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth1 ingress pedit ip6 src set 2001:db8:2::1 [FAIL] Expected to get 10 packets, but got . ./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth2 egress pedit ip6 src set 2001:db8:2::1 [FAIL] Expected to get 10 packets, but got . ./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth1 ingress pedit ip6 dst set 2001:db8:2::1 [FAIL] Expected to get 10 packets, but got . 
./pedit_ip.sh: line 135: busywait: command not found TEST: dev veth2 egress pedit ip6 dst set 2001:db8:2::1 [FAIL] Expected to get 10 packets, but got . In this case, the test results are affected. Fixes: 25ae948b4478 ("selftests/net: add lib.sh") Suggested-by: Ido Schimmel Suggested-by: Petr Machata Reviewed-by: Ido Schimmel Tested-by: Petr Machata Signed-off-by: Benjamin Poirier Reviewed-by: Hangbin Liu Link: https://lore.kernel.org/r/20240104141109.100672-1-bpoirier@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Heng Qi Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3112 --- tools/testing/selftests/net/forwarding/lib.sh | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 8f6ca458af9a..97e7675da04f 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -38,7 +38,32 @@ if [[ -f $relative_path/forwarding.config ]]; then source "$relative_path/forwarding.config" fi -source ../lib.sh +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +busywait() +{ + local timeout=$1; shift + + local start_time="$(date -u +%s%3N)" + while true + do + local out + out=$("$@") + local ret=$? + if ((!ret)); then + echo -n "$out" + return 0 + fi + + local current_time="$(date -u +%s%3N)" + if ((current_time - start_time > timeout)); then + echo -n "$out" + return 1 + fi + done +} + ############################################################################## # Sanity checks -- Gitee From 89fb678cba6871403ccf6b3d9975475699b12136 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 24 Jan 2024 14:13:44 +0800 Subject: [PATCH 915/953] selftests/net/lib: update busywait timeout value ANBZ: #8863 commit fc836129f708407502632107e58d48f54b1caf75 upstream. The busywait timeout value is a millisecond, not a second. So the current setting 2 is too small. On slow/busy host (or VMs) the current timeout can expire even on "correct" execution, causing random failures. Let's copy the WAIT_TIMEOUT from forwarding/lib.sh and set BUSYWAIT_TIMEOUT here. Fixes: 25ae948b4478 ("selftests/net: add lib.sh") Signed-off-by: Hangbin Liu Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20240124061344.1864484-1-liuhangbin@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Heng Qi Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3112 --- tools/testing/selftests/net/lib.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index dca549443801..f9fe182dfbd4 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -4,6 +4,9 @@ ############################################################################## # Defines +WAIT_TIMEOUT=${WAIT_TIMEOUT:=20} +BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms + # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 # namespace list created by setup_ns @@ -48,7 +51,7 @@ cleanup_ns() for ns in "$@"; do ip netns delete "${ns}" &> /dev/null - if ! busywait 2 ip netns list \| grep -vq "^$ns$" &> /dev/null; then + if ! 
busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then echo "Warn: Failed to remove namespace $ns" ret=1 fi -- Gitee From d822bd63b5c7fff8b2bc341f5dbe5430255d5c6c Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 12 Feb 2024 11:19:23 +0100 Subject: [PATCH 916/953] selftests: net: more strict check in net_helper ANBZ: #8863 commit a71d0908e32f3dd41e355d83eeadd44d94811fd6 upstream. The helper waiting for a listener port can match any socket whose hexadecimal representation of source or destination addresses matches that of the given port. Additionally, any socket state is accepted. All the above can let the helper return successfully before the relevant listener is actually ready, with unexpected results. So far I could not find any related failure in the netdev CI, but the next patch is going to make the critical event more easily reproducible. Address the issue matching the port hex only vs the relevant socket field and additionally checking the socket state for TCP sockets. Fixes: 3bdd9fd29cb0 ("selftests/net: synchronize udpgro tests' tx and rx connection") Signed-off-by: Paolo Abeni Link: https://lore.kernel.org/r/192b3dbc443d953be32991d1b0ca432bd4c65008.1707731086.git.pabeni@redhat.com Signed-off-by: Jakub Kicinski Signed-off-by: Heng Qi Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3112 --- tools/testing/selftests/net/net_helper.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/net/net_helper.sh b/tools/testing/selftests/net/net_helper.sh index 4fe0befa13fb..6596fe03c77f 100755 --- a/tools/testing/selftests/net/net_helper.sh +++ b/tools/testing/selftests/net/net_helper.sh @@ -8,13 +8,16 @@ wait_local_port_listen() local listener_ns="${1}" local port="${2}" local protocol="${3}" - local port_hex + local pattern local i - port_hex="$(printf "%04X" "${port}")" + pattern=":$(printf "%04X" "${port}") " + + # for tcp protocol additionally check the socket state + [ ${protocol} = "tcp" ] && pattern="${pattern}0A" for i in $(seq 10); do - if ip netns exec "${listener_ns}" cat /proc/net/"${protocol}"* | \ - grep -q "${port_hex}"; then + if ip netns exec "${listener_ns}" awk '{print $2" "$4}' \ + /proc/net/"${protocol}"* | grep -q "${pattern}"; then break fi sleep 0.1 -- Gitee From 194c88172dfc318b1e7c2080a44b49aa17373a06 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 29 Apr 2024 17:05:14 +0800 Subject: [PATCH 917/953] anolis: kfence: Fix the check about sample_interval ANBZ: #8499 num should be a signed long to prevent the user from changing sample_interval from positive to negative (and vice versa) undetected. Fix it.
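To see the pitfall in isolation (a minimal userspace sketch of ours, not kernel code, with strtol() standing in for kstrtol()): parsing a negative value into an unsigned variable silently discards the sign, so any later "is it negative?" test is dead code, and most compilers even warn that the comparison is always false:

    /* sign-demo.c: why "unsigned long num" breaks the range check */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned long unum = strtol("-100", NULL, 0); /* old, buggy type */
            long num = strtol("-100", NULL, 0);           /* new, fixed type */

            printf("unsigned: %lu, negative? %s\n",
                   unum, unum < 0 ? "yes" : "no");        /* always "no" */
            printf("signed:   %ld, negative? %s\n",
                   num, num < 0 ? "yes" : "no");          /* "yes" */
            return 0;
    }

The first line prints a huge wrapped value and "no"; the second prints -100 and "yes", which is the behavior the fixed kernel check relies on.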
Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6") Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3113 --- mm/kfence/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index d5329b1c560b..0e0219aa3565 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -91,7 +91,7 @@ DEFINE_STATIC_KEY_TRUE(kfence_order0_page); static void kfence_enable_late(void); static int param_set_sample_interval(const char *val, const struct kernel_param *kp) { - unsigned long num; + long num; int ret = kstrtol(val, 0, &num); if (ret < 0) -- Gitee From fa24eca9a865905f214b33851c0b86ace009d08d Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 29 Apr 2024 17:09:55 +0800 Subject: [PATCH 918/953] anolis: kfence: Fix a race condition about slab_want_init_on_free() ANBZ: #8499 When porting kfence from 5.10, we mishandled the new upstream change that shortens the critical section protected by meta->lock. Fix it. Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6") Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3113 --- mm/kfence/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 0e0219aa3565..2fe1b509778a 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -988,7 +988,7 @@ static inline bool __free_meta(void *addr, struct kfence_metadata *meta, bool zo * data is still there, and after a use-after-free is detected, we * unprotect the page, so the data is still accessible. */ - if (!zombie && unlikely(slab_want_init_on_free(meta->cache))) + if (!zombie && unlikely(init)) memzero_explicit(addr, meta->size); } -- Gitee From 4403bbc28b467a38f5f344ad1f2cd22faed20864 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 12 Nov 2023 10:11:25 +0200 Subject: [PATCH 919/953] ovl: fix memory leak in ovl_parse_param() ANBZ: #8848 commit 37f32f52643869131ec01bb69bdf9f404f6109fb upstream. On failure to parse parameters in ovl_parse_param_lowerdir(), it is necessary to update ctx->nr with the correct nr before using ovl_reset_lowerdirs() to release l->name. Reported-and-tested-by: syzbot+26eedf3631650972f17c@syzkaller.appspotmail.com Fixes: c835110b588a ("ovl: remove unused code in lowerdir param parsing") Co-authored-by: Edward Adam Davis Signed-off-by: Amir Goldstein Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3115 --- fs/overlayfs/params.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c index ad3593a41fb5..488f920f79d2 100644 --- a/fs/overlayfs/params.c +++ b/fs/overlayfs/params.c @@ -438,7 +438,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) struct ovl_fs_context *ctx = fc->fs_private; struct ovl_fs_context_layer *l; char *dup = NULL, *iter; - ssize_t nr_lower = 0, nr = 0, nr_data = 0; + ssize_t nr_lower, nr; bool data_layer = false; /* @@ -490,6 +490,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) iter = dup; l = ctx->lower; for (nr = 0; nr < nr_lower; nr++, l++) { + ctx->nr++; memset(l, 0, sizeof(*l)); err = ovl_mount_dir(iter, &l->path); @@ -506,10 +507,10 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) goto out_put; if (data_layer) - nr_data++; + ctx->nr_data++; /* Calling strchr() again would overrun.
*/ - if ((nr + 1) == nr_lower) + if (ctx->nr == nr_lower) break; err = -EINVAL; @@ -519,7 +520,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) * This is a regular layer so we require that * there are no data layers. */ - if ((ctx->nr_data + nr_data) > 0) { + if (ctx->nr_data > 0) { pr_err("regular lower layers cannot follow data lower layers"); goto out_put; } @@ -532,8 +533,6 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc) data_layer = true; iter++; } - ctx->nr = nr_lower; - ctx->nr_data += nr_data; kfree(dup); return 0; -- Gitee From 67c03d2dc81e1e469933d4fe7551cb32badb7bc3 Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Sat, 13 Apr 2024 17:34:31 -0700 Subject: [PATCH 920/953] fuse: fix leaked ENOSYS error on first statx call ANBZ: #8934 commit eb4b691b9115fae4c844f5941418335575cf667f upstream. FUSE attempts to detect server support for statx by trying it once and setting no_statx=1 if it fails with ENOSYS, but consider the following scenario: - Userspace (e.g. sh) calls stat() on a file * succeeds - Userspace (e.g. lsd) calls statx(BTIME) on the same file - request_mask = STATX_BASIC_STATS | STATX_BTIME - first pass: sync=true due to differing cache_mask - statx fails and returns ENOSYS - set no_statx and retry - retry sets mask = STATX_BASIC_STATS - now mask == cache_mask; sync=false (time_before: still valid) - so we take the "else if (stat)" path - "err" is still ENOSYS from the failed statx call Fix this by zeroing "err" before retrying the failed call. Fixes: d3045530bdd2 ("fuse: implement statx") Cc: stable@vger.kernel.org # v6.6 Signed-off-by: Danny Lin Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3118 --- fs/fuse/dir.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 3c11effe88c4..189028edb2ca 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1319,6 +1319,7 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file, err = fuse_do_statx(inode, file, stat); if (err == -ENOSYS) { fc->no_statx = 1; + err = 0; goto retry; } } else { -- Gitee From a09a57432f7046e6ff43559e60db5adaf40937cb Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Wed, 27 Mar 2024 16:17:17 +0500 Subject: [PATCH 921/953] x86/selftests: Skip the tests if prerequisites aren't fulfilled ANBZ: #8853 commit 99c84311e35f9399bdce666f6306a048e2a5b404 linux-next Skip instead of failing when prerequisite conditions aren't fulfilled, such as invalid xstate values etc. Make the tests show as 'SKIP' when run: make -C tools/testing/selftests/ TARGETS=x86 run_tests ... # timeout set to 45 # selftests: x86: amx_64 # # xstate cpuid: invalid tile data size/offset: 0/0 ok 42 selftests: x86: amx_64 # SKIP # timeout set to 45 # selftests: x86: lam_64 # # Unsupported LAM feature! ok 43 selftests: x86: lam_64 # SKIP ... In the AMX test, Move away from check_cpuid_xsave() and start using arch_prctl() to find out if AMX support is present or not. In the kernels where AMX isn't present, arch_prctl() returns -EINVAL, hence it is backward compatible. Signed-off-by: Muhammad Usama Anjum Signed-off-by: Ingo Molnar Reviewed-by: Chang S. Bae Reviewed-by: Binbin Wu Acked-by: Kirill A. 
Shutemov Link: https://lore.kernel.org/r/20240327111720.3509180-1-usama.anjum@collabora.com Signed-off-by: Zelin Deng Reviewed-by: Guanjun Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3125 --- tools/testing/selftests/x86/amx.c | 27 ++++++++++----------------- tools/testing/selftests/x86/lam.c | 2 +- 2 files changed, 11 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c index d884fd69dd51..95aad6d8849b 100644 --- a/tools/testing/selftests/x86/amx.c +++ b/tools/testing/selftests/x86/amx.c @@ -103,21 +103,6 @@ static void clearhandler(int sig) #define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26) #define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27) -static inline void check_cpuid_xsave(void) -{ - uint32_t eax, ebx, ecx, edx; - - /* - * CPUID.1:ECX.XSAVE[bit 26] enumerates general - * support for the XSAVE feature set, including - * XGETBV. - */ - __cpuid_count(1, 0, eax, ebx, ecx, edx); - if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK)) - fatal_error("cpuid: no CPU xsave support"); - if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK)) - fatal_error("cpuid: no OS xsave support"); -} static uint32_t xbuf_size; @@ -350,6 +335,7 @@ enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED }; /* arch_prctl() and sigaltstack() test */ +#define ARCH_GET_XCOMP_SUPP 0x1021 #define ARCH_GET_XCOMP_PERM 0x1022 #define ARCH_REQ_XCOMP_PERM 0x1023 @@ -928,8 +914,15 @@ static void test_ptrace(void) int main(void) { - /* Check hardware availability at first */ - check_cpuid_xsave(); + unsigned long features; + long rc; + + rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features); + if (rc || (features & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE) { + ksft_print_msg("no AMX support\n"); + return KSFT_SKIP; + } + check_cpuid_xtiledata(); init_stashed_xsave(); diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c index 8f9b06d9ce03..edc14b15da34 100644 --- a/tools/testing/selftests/x86/lam.c +++ b/tools/testing/selftests/x86/lam.c @@ -1183,7 +1183,7 @@ int main(int argc, char **argv) if (!cpu_has_lam()) { ksft_print_msg("Unsupported LAM feature!\n"); - return -1; + return KSFT_SKIP; } while ((c = getopt(argc, argv, "ht:")) != -1) { -- Gitee From cac4fcb8353a9c3ec6f607887e144ab7da2bd18e Mon Sep 17 00:00:00 2001 From: Liu Wei Date: Tue, 7 May 2024 11:40:37 +0800 Subject: [PATCH 922/953] anolis: block: use %px to print request in rq_hang ANBZ: #8947 In function blk_mq_debugfs_rq_hang_show, an incorrect format string is used when printing the request.
"%p" is plain pointer, which is hashed to prevent leaking information about the kernel memory layout, so change it to "%px" Signed-off-by: Liu Wei Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3129 --- block/blk-mq-debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 271535f56bd2..f42314c86377 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -354,7 +354,7 @@ static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq) struct bio_vec *bvec; struct bvec_iter_all iter_all; - seq_printf(m, "%p {.op=", rq); + seq_printf(m, "%px {.op=", rq); if (strcmp(op_str, "UNKNOWN") == 0) seq_printf(m, "%u", op); else -- Gitee From e55585a068bba3cb841fc25d4c6553258afd6757 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Mon, 29 Apr 2024 16:05:52 +0800 Subject: [PATCH 923/953] irqchip/loongson-pch-pic: Update interrupt registration policy ANBZ: #8927 commit 234a557e28b9142e07eae21083a04fffef83ee8d upstream The current code is using a fixed mapping between the LS7A interrupt source and the HT interrupt vector. This prevents the utilization of the full interrupt vector space and therefore limits the number of interrupt source in a system. Replace the fixed mapping with a dynamic mapping which allocates a vector when an interrupt source is set up. This avoids that unused sources prevent vectors from being used for other devices. Introduce a mapping table in struct pch_pic, where each interrupt source will allocate an index as a 'hwirq' number from the table in the order of application and set table value as interrupt source number. This hwirq number will be configured as vector in the HT interrupt controller. For an interrupt source, the validity period of the obtained hwirq will last until the system reset. 
Co-developed-by: Biao Dong Signed-off-by: Biao Dong Co-developed-by: Tianyang Zhang Signed-off-by: Tianyang Zhang Signed-off-by: Baoqi Zhang Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240422093830.27212-1-zhangtianyang@loongson.cn Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3136 --- drivers/irqchip/irq-loongson-pch-pic.c | 76 ++++++++++++++++++++------ 1 file changed, 59 insertions(+), 17 deletions(-) diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 3b150b6121fc..1f244e9de9be 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -33,6 +33,7 @@ #define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT) #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +#define PIC_UNDEF_VECTOR 255 #define PIC_COUNT_PER_REG64 64 #define PIC_REG64_COUNT 1 #define PIC_REG64_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG64) @@ -50,12 +51,19 @@ struct pch_pic { u32 saved_vec_en[PIC_REG_COUNT]; u32 saved_vec_pol[PIC_REG_COUNT]; u32 saved_vec_edge[PIC_REG_COUNT]; + u8 table[PIC_COUNT]; + int inuse; }; static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; +static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq) +{ + return priv->table[hirq]; +} + struct irq_domain *get_pchpic_irq_domain(void) { return pch_pic_priv[0]->pic_domain; @@ -89,45 +97,47 @@ static void pch_pic_mask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); - pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq)); irq_chip_mask_parent(d); } static void pch_pic_unmask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - writeq(BIT(PIC_REG64_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); irq_chip_unmask_parent(d); - pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_MASK, bit); } static int pch_pic_set_type(struct irq_data *d, unsigned int type) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); int ret = 0; switch (type) { case IRQ_TYPE_EDGE_RISING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; case IRQ_TYPE_LEVEL_LOW: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; default: @@ -142,11 +152,12 @@ static void pch_pic_ack_irq(struct irq_data *d) { unsigned int reg; struct pch_pic *priv = 
irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4); - if (reg & BIT(PIC_REG_BIT(d->hwirq))) { - writeq(BIT(PIC_REG64_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); + reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4); + if (reg & BIT(PIC_REG_BIT(bit))) { + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); } irq_chip_ack_parent(d); } @@ -168,6 +179,8 @@ static int pch_pic_domain_translate(struct irq_domain *d, { struct pch_pic *priv = d->host_data; struct device_node *of_node = to_of_node(fwspec->fwnode); + unsigned long flags; + int i; if (of_node) { if (fwspec->param_count < 2) @@ -180,12 +193,33 @@ static int pch_pic_domain_translate(struct irq_domain *d, return -EINVAL; *hwirq = fwspec->param[0] - priv->gsi_base; + if (fwspec->param_count > 1) *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; else *type = IRQ_TYPE_NONE; } + raw_spin_lock_irqsave(&priv->pic_lock, flags); + /* Check pic-table to confirm if the hwirq has been assigned */ + for (i = 0; i < priv->inuse; i++) { + if (priv->table[i] == *hwirq) { + *hwirq = i; + break; + } + } + if (i == priv->inuse) { + /* Assign a new hwirq in pic-table */ + if (priv->inuse >= PIC_COUNT) { + pr_err("pch-pic domain has no free vectors\n"); + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return -EINVAL; + } + priv->table[priv->inuse] = *hwirq; + *hwirq = priv->inuse++; + } + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return 0; } @@ -203,6 +237,9 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, if (err) return err; + /* Write vector ID */ + writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq))); + parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 1; parent_fwspec.param[0] = hwirq + priv->ht_vec_base; @@ -231,7 +268,7 @@ static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_COUNT; i++) { /* Write vector ID */ - writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); + writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i))); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); } @@ -295,6 +332,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, u32 gsi_base) { struct pch_pic *priv; + int i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -305,6 +343,10 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, if (!priv->base) goto free_priv; + priv->inuse = 0; + for (i = 0; i < PIC_COUNT; i++) + priv->table[i] = PIC_UNDEF_VECTOR; + priv->ht_vec_base = vec_base; priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1; priv->gsi_base = gsi_base; -- Gitee From a7d98f239781bdf79084112a4e5576746b159bcc Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Tue, 7 May 2024 20:04:25 +0800 Subject: [PATCH 924/953] anolis: io_uring: revert create_io_thread share flag and restrict percpu_sq_thread share scope. ANBZ: #8960 We re-introduced the percpu_sq_thread feature in patch 8aefafb37 (anolis: io_uring: re-add sqthread percpu polling support). However, using the readv/writev opcodes, which need to share the usermode 'mm', caused an error. The reason is that the sq worker thread needs the SHARE_VM flag at its creation time.
In the original patch, we avoid sharing it because we want this feature to share one sqthread between not only 'threads' but also 'processes'. Now it is confirmed that the larger sharing scope cannot be achieved in the current io_uring and io_thread design architecture. Thus, what we need to do is: 1.Just restore create_io_thread's implementation. 2.Forbid percpu_sq_thread sharing between processes. Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Reviewed-by: Yi Tao Link: https://gitee.com/anolis/cloud-kernel/pulls/3138 --- include/linux/sched/task.h | 3 +-- io_uring/io-wq.c | 4 ++-- io_uring/sqpoll.c | 17 ++++++----------- kernel/fork.c | 21 ++++++--------------- 4 files changed, 15 insertions(+), 30 deletions(-) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index d2d46728da3e..a23af225c898 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -94,8 +94,7 @@ extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *copy_process(struct pid *pid, int trace, int node, struct kernel_clone_args *args); -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, - bool unshare); +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name, unsigned long flags); diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index fb7aca49d097..522196dfb0ff 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -771,7 +771,7 @@ static void create_worker_cont(struct callback_head *cb) worker = container_of(cb, struct io_worker, create_work); clear_bit_unlock(0, &worker->create_state); wq = worker->wq; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); io_worker_release(worker); @@ -840,7 +840,7 @@ static bool create_io_worker(struct io_wq *wq, int index) if (index == IO_WQ_ACCT_BOUND) worker->flags |= IO_WORKER_F_BOUND; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); } else if (!io_should_retry_thread(PTR_ERR(tsk))) { diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index de494bebc84c..8aa416e6b289 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -193,6 +193,10 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, mutex_lock(&percpu_sqd_lock); sqd = *per_cpu_ptr(percpu_sqd, p->sq_thread_cpu); if (sqd) { + if (sqd->task_tgid != current->tgid) { + mutex_unlock(&percpu_sqd_lock); + return ERR_PTR(-EPERM); + } refcount_inc(&sqd->refs); mutex_unlock(&percpu_sqd_lock); *percpu_found = true; @@ -265,16 +269,8 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd) if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || signal_pending(current)) { mutex_unlock(&sqd->lock); - if (signal_pending(current)) { + if (signal_pending(current)) did_sig = get_signal(&ksig); - if (did_sig && sqd->sq_cpu != -1 && - refcount_read(&sqd->refs) != 0) { - mutex_lock(&percpu_sqd_lock); - if (*per_cpu_ptr(percpu_sqd, sqd->sq_cpu) == sqd) - did_sig = false; - mutex_unlock(&percpu_sqd_lock); - } - } cond_resched(); mutex_lock(&sqd->lock); sqd->sq_cpu = raw_smp_processor_id(); @@ -496,8 +492,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->task_pid = current->pid; 
sqd->task_tgid = current->tgid; - tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE, - !!(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)); + tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); goto err_sqpoll; diff --git a/kernel/fork.c b/kernel/fork.c index 4474fbb6aa8b..603729ff4c69 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2851,22 +2851,13 @@ struct task_struct * __init fork_idle(int cpu) * The returned task is inactive, and the caller must fire it up through * wake_up_new_task(p). All signals are blocked in the created task. */ -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, - bool unshare) -{ - unsigned long flags = unshare ? 0 : (CLONE_FS|CLONE_FILES| - CLONE_SIGHAND|CLONE_THREAD| - CLONE_IO|CLONE_VM); - /* we use 'unshare' flag to try to create an independent io_thread, - * 'unshare' describes whether child share parent's mm directly (with - * refcount add one), or it should copy mm/files when copy_process(). - * By setting this flag, the io_thread won't share parent's mm - * directly, but can be shared among different tasks, and looks more - * reasonably. - */ +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) +{ + unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO; struct kernel_clone_args args = { - .flags = ((lower_32_bits(flags) | CLONE_UNTRACED) - & ~CSIGNAL), + .flags = ((lower_32_bits(flags) | CLONE_VM | + CLONE_UNTRACED) & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, -- Gitee From b1fd962eb7ad90c8707920f0485bc3d0cbd21145 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:48:59 +0200 Subject: [PATCH 925/953] tools: Sync if_link uapi header ANBZ: #8818 commit 5c1b994de4be8a27afa3281be2ff58b38e8bc50c upstream. Sync if_link uapi header to the latest version as we need the refresher in tooling for netkit device. Given it's been a while since the last sync and the diff is fairly big, it has been done as its own commit. Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-3-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau Signed-off-by: Yuanhe Shu Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3143 --- tools/include/uapi/linux/if_link.h | 141 +++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 39e659c83cfd..a0aa05a28cf2 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -211,6 +211,9 @@ struct rtnl_link_stats { * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). + * + * @rx_otherhost_dropped: Number of packets dropped due to mismatch + * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; @@ -243,6 +246,23 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; + + __u64 rx_otherhost_dropped; +}; + +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; }; /* The struct should be in sync with struct ifmap */ @@ -350,7 +370,13 @@ enum { IFLA_GRO_MAX_SIZE, IFLA_TSO_MAX_SIZE, IFLA_TSO_MAX_SEGS, + IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */ + + IFLA_DEVLINK_PORT, + IFLA_GSO_IPV4_MAX_SIZE, + IFLA_GRO_IPV4_MAX_SIZE, + IFLA_DPLL_PIN, __IFLA_MAX }; @@ -539,6 +565,12 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, + IFLA_BRPORT_MAB, + IFLA_BRPORT_MCAST_N_GROUPS, + IFLA_BRPORT_MCAST_MAX_GROUPS, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + IFLA_BRPORT_BACKUP_NHID, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -716,7 +748,79 @@ enum ipvlan_mode { #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 +/* Tunnel RTM header */ +struct tunnel_msg { + __u8 family; + __u8 flags; + __u16 reserved2; + __u32 ifindex; +}; + +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ + +/* include statistics in the dump */ +#define TUNNEL_MSG_FLAG_STATS 0x01 + +#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS + +/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ +enum { + VNIFILTER_ENTRY_STATS_UNSPEC, + VNIFILTER_ENTRY_STATS_RX_BYTES, + VNIFILTER_ENTRY_STATS_RX_PKTS, + VNIFILTER_ENTRY_STATS_RX_DROPS, + VNIFILTER_ENTRY_STATS_RX_ERRORS, + VNIFILTER_ENTRY_STATS_TX_BYTES, + VNIFILTER_ENTRY_STATS_TX_PKTS, + VNIFILTER_ENTRY_STATS_TX_DROPS, + VNIFILTER_ENTRY_STATS_TX_ERRORS, + VNIFILTER_ENTRY_STATS_PAD, + __VNIFILTER_ENTRY_STATS_MAX +}; +#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) + +enum { + VXLAN_VNIFILTER_ENTRY_UNSPEC, + VXLAN_VNIFILTER_ENTRY_START, + VXLAN_VNIFILTER_ENTRY_END, + VXLAN_VNIFILTER_ENTRY_GROUP, + VXLAN_VNIFILTER_ENTRY_GROUP6, + VXLAN_VNIFILTER_ENTRY_STATS, + __VXLAN_VNIFILTER_ENTRY_MAX +}; +#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) + +enum { + VXLAN_VNIFILTER_UNSPEC, + VXLAN_VNIFILTER_ENTRY, + __VXLAN_VNIFILTER_MAX +}; +#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) + enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, @@ -748,6 +852,8 @@ enum { IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, + IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ + IFLA_VXLAN_LOCALBYPASS, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -781,6 +887,7 @@ enum { IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, + IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -826,6 +933,8 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_CREATE_SOCKETS, + IFLA_GTP_RESTART_COUNT, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -1162,6 +1271,17 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +enum { + IFLA_STATS_GETSET_UNSPEC, + IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with + * a filter mask for the corresponding group. 
+ */ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ + __IFLA_STATS_GETSET_MAX, +}; + +#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) + /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] @@ -1179,10 +1299,21 @@ enum { enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */ + IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, +}; +#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ + (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) @@ -1281,4 +1412,14 @@ enum { #define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) +/* DSA section */ + +enum { + IFLA_DSA_UNSPEC, + IFLA_DSA_MASTER, + __IFLA_DSA_MAX, +}; + +#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ -- Gitee From 49e7e04370e0c472be82065819127ab82db85d7a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:49:03 +0200 Subject: [PATCH 926/953] selftests/bpf: Add netlink helper library ANBZ: #8818 commit 51f1892b5289f0c09745d3bedb36493555d6d90c upstream. Add a minimal netlink helper library for the BPF selftests. This has been taken and cut down and cleaned up from iproute2. This covers basics such as netdevice creation which we need for BPF selftests / BPF CI given iproute2 package cannot cover it yet. Stanislav Fomichev suggested that this could be replaced in future by ynl tool generated C code once it has RTNL support to create devices. Once we get to this point the BPF CI would also need to add libmnl. If no further extensions are needed, a second option could be that we remove this code again once iproute2 package has support. Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-7-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau Signed-off-by: Yuanhe Shu Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3143 --- tools/testing/selftests/bpf/Makefile | 19 +- tools/testing/selftests/bpf/netlink_helpers.c | 358 ++++++++++++++++++ tools/testing/selftests/bpf/netlink_helpers.h | 46 +++ 3 files changed, 418 insertions(+), 5 deletions(-) create mode 100644 tools/testing/selftests/bpf/netlink_helpers.c create mode 100644 tools/testing/selftests/bpf/netlink_helpers.h diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index caede9b574cb..aaaef437723f 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -578,11 +578,20 @@ endef # Define test_progs test runner. 
TRUNNER_TESTS_DIR := prog_tests TRUNNER_BPF_PROGS_DIR := progs -TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ - network_helpers.c testing_helpers.c \ - btf_helpers.c flow_dissector_load.h \ - cap_helpers.c test_loader.c xsk.c disasm.c \ - json_writer.c unpriv_helpers.c \ +TRUNNER_EXTRA_SOURCES := test_progs.c \ + cgroup_helpers.c \ + trace_helpers.c \ + network_helpers.c \ + testing_helpers.c \ + btf_helpers.c \ + cap_helpers.c \ + unpriv_helpers.c \ + netlink_helpers.c \ + test_loader.c \ + xsk.c \ + disasm.c \ + json_writer.c \ + flow_dissector_load.h \ ip_check_defrag_frags.h TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ diff --git a/tools/testing/selftests/bpf/netlink_helpers.c b/tools/testing/selftests/bpf/netlink_helpers.c new file mode 100644 index 000000000000..caf36eb1d032 --- /dev/null +++ b/tools/testing/selftests/bpf/netlink_helpers.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Taken & modified from iproute2's libnetlink.c + * Authors: Alexey Kuznetsov, + */ +#include +#include +#include +#include +#include +#include + +#include "netlink_helpers.h" + +static int rcvbuf = 1024 * 1024; + +void rtnl_close(struct rtnl_handle *rth) +{ + if (rth->fd >= 0) { + close(rth->fd); + rth->fd = -1; + } +} + +int rtnl_open_byproto(struct rtnl_handle *rth, unsigned int subscriptions, + int protocol) +{ + socklen_t addr_len; + int sndbuf = 32768; + int one = 1; + + memset(rth, 0, sizeof(*rth)); + rth->proto = protocol; + rth->fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol); + if (rth->fd < 0) { + perror("Cannot open netlink socket"); + return -1; + } + if (setsockopt(rth->fd, SOL_SOCKET, SO_SNDBUF, + &sndbuf, sizeof(sndbuf)) < 0) { + perror("SO_SNDBUF"); + goto err; + } + if (setsockopt(rth->fd, SOL_SOCKET, SO_RCVBUF, + &rcvbuf, sizeof(rcvbuf)) < 0) { + perror("SO_RCVBUF"); + goto err; + } + + /* Older kernels may no support extended ACK reporting */ + setsockopt(rth->fd, SOL_NETLINK, NETLINK_EXT_ACK, + &one, sizeof(one)); + + memset(&rth->local, 0, sizeof(rth->local)); + rth->local.nl_family = AF_NETLINK; + rth->local.nl_groups = subscriptions; + + if (bind(rth->fd, (struct sockaddr *)&rth->local, + sizeof(rth->local)) < 0) { + perror("Cannot bind netlink socket"); + goto err; + } + addr_len = sizeof(rth->local); + if (getsockname(rth->fd, (struct sockaddr *)&rth->local, + &addr_len) < 0) { + perror("Cannot getsockname"); + goto err; + } + if (addr_len != sizeof(rth->local)) { + fprintf(stderr, "Wrong address length %d\n", addr_len); + goto err; + } + if (rth->local.nl_family != AF_NETLINK) { + fprintf(stderr, "Wrong address family %d\n", + rth->local.nl_family); + goto err; + } + rth->seq = time(NULL); + return 0; +err: + rtnl_close(rth); + return -1; +} + +int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions) +{ + return rtnl_open_byproto(rth, subscriptions, NETLINK_ROUTE); +} + +static int __rtnl_recvmsg(int fd, struct msghdr *msg, int flags) +{ + int len; + + do { + len = recvmsg(fd, msg, flags); + } while (len < 0 && (errno == EINTR || errno == EAGAIN)); + if (len < 0) { + fprintf(stderr, "netlink receive error %s (%d)\n", + strerror(errno), errno); + return -errno; + } + if (len == 0) { + fprintf(stderr, "EOF on netlink\n"); + return -ENODATA; + } + return len; +} + +static int rtnl_recvmsg(int fd, struct msghdr *msg, char **answer) +{ + struct iovec *iov = msg->msg_iov; + char *buf; + int len; + + iov->iov_base = NULL; + iov->iov_len = 0; + + len = 
__rtnl_recvmsg(fd, msg, MSG_PEEK | MSG_TRUNC); + if (len < 0) + return len; + if (len < 32768) + len = 32768; + buf = malloc(len); + if (!buf) { + fprintf(stderr, "malloc error: not enough buffer\n"); + return -ENOMEM; + } + iov->iov_base = buf; + iov->iov_len = len; + len = __rtnl_recvmsg(fd, msg, 0); + if (len < 0) { + free(buf); + return len; + } + if (answer) + *answer = buf; + else + free(buf); + return len; +} + +static void rtnl_talk_error(struct nlmsghdr *h, struct nlmsgerr *err, + nl_ext_ack_fn_t errfn) +{ + fprintf(stderr, "RTNETLINK answers: %s\n", + strerror(-err->error)); +} + +static int __rtnl_talk_iov(struct rtnl_handle *rtnl, struct iovec *iov, + size_t iovlen, struct nlmsghdr **answer, + bool show_rtnl_err, nl_ext_ack_fn_t errfn) +{ + struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK }; + struct iovec riov; + struct msghdr msg = { + .msg_name = &nladdr, + .msg_namelen = sizeof(nladdr), + .msg_iov = iov, + .msg_iovlen = iovlen, + }; + unsigned int seq = 0; + struct nlmsghdr *h; + int i, status; + char *buf; + + for (i = 0; i < iovlen; i++) { + h = iov[i].iov_base; + h->nlmsg_seq = seq = ++rtnl->seq; + if (answer == NULL) + h->nlmsg_flags |= NLM_F_ACK; + } + status = sendmsg(rtnl->fd, &msg, 0); + if (status < 0) { + perror("Cannot talk to rtnetlink"); + return -1; + } + /* change msg to use the response iov */ + msg.msg_iov = &riov; + msg.msg_iovlen = 1; + i = 0; + while (1) { +next: + status = rtnl_recvmsg(rtnl->fd, &msg, &buf); + ++i; + if (status < 0) + return status; + if (msg.msg_namelen != sizeof(nladdr)) { + fprintf(stderr, + "Sender address length == %d!\n", + msg.msg_namelen); + exit(1); + } + for (h = (struct nlmsghdr *)buf; status >= sizeof(*h); ) { + int len = h->nlmsg_len; + int l = len - sizeof(*h); + + if (l < 0 || len > status) { + if (msg.msg_flags & MSG_TRUNC) { + fprintf(stderr, "Truncated message!\n"); + free(buf); + return -1; + } + fprintf(stderr, + "Malformed message: len=%d!\n", + len); + exit(1); + } + if (nladdr.nl_pid != 0 || + h->nlmsg_pid != rtnl->local.nl_pid || + h->nlmsg_seq > seq || h->nlmsg_seq < seq - iovlen) { + /* Don't forget to skip that message. 
*/ + status -= NLMSG_ALIGN(len); + h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len)); + continue; + } + if (h->nlmsg_type == NLMSG_ERROR) { + struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(h); + int error = err->error; + + if (l < sizeof(struct nlmsgerr)) { + fprintf(stderr, "ERROR truncated\n"); + free(buf); + return -1; + } + if (error) { + errno = -error; + if (rtnl->proto != NETLINK_SOCK_DIAG && + show_rtnl_err) + rtnl_talk_error(h, err, errfn); + } + if (i < iovlen) { + free(buf); + goto next; + } + if (error) { + free(buf); + return -i; + } + if (answer) + *answer = (struct nlmsghdr *)buf; + else + free(buf); + return 0; + } + if (answer) { + *answer = (struct nlmsghdr *)buf; + return 0; + } + fprintf(stderr, "Unexpected reply!\n"); + status -= NLMSG_ALIGN(len); + h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len)); + } + free(buf); + if (msg.msg_flags & MSG_TRUNC) { + fprintf(stderr, "Message truncated!\n"); + continue; + } + if (status) { + fprintf(stderr, "Remnant of size %d!\n", status); + exit(1); + } + } +} + +static int __rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, + struct nlmsghdr **answer, bool show_rtnl_err, + nl_ext_ack_fn_t errfn) +{ + struct iovec iov = { + .iov_base = n, + .iov_len = n->nlmsg_len, + }; + + return __rtnl_talk_iov(rtnl, &iov, 1, answer, show_rtnl_err, errfn); +} + +int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, + struct nlmsghdr **answer) +{ + return __rtnl_talk(rtnl, n, answer, true, NULL); +} + +int addattr(struct nlmsghdr *n, int maxlen, int type) +{ + return addattr_l(n, maxlen, type, NULL, 0); +} + +int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u8)); +} + +int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u16)); +} + +int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u32)); +} + +int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u64)); +} + +int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *str) +{ + return addattr_l(n, maxlen, type, str, strlen(str)+1); +} + +int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, + int alen) +{ + int len = RTA_LENGTH(alen); + struct rtattr *rta; + + if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen) { + fprintf(stderr, "%s: Message exceeded bound of %d\n", + __func__, maxlen); + return -1; + } + rta = NLMSG_TAIL(n); + rta->rta_type = type; + rta->rta_len = len; + if (alen) + memcpy(RTA_DATA(rta), data, alen); + n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len); + return 0; +} + +int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len) +{ + if (NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len) > maxlen) { + fprintf(stderr, "%s: Message exceeded bound of %d\n", + __func__, maxlen); + return -1; + } + + memcpy(NLMSG_TAIL(n), data, len); + memset((void *) NLMSG_TAIL(n) + len, 0, NLMSG_ALIGN(len) - len); + n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len); + return 0; +} + +struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type) +{ + struct rtattr *nest = NLMSG_TAIL(n); + + addattr_l(n, maxlen, type, NULL, 0); + return nest; +} + +int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest) +{ + nest->rta_len = (void *)NLMSG_TAIL(n) - (void *)nest; + return n->nlmsg_len; +} diff --git 
a/tools/testing/selftests/bpf/netlink_helpers.h b/tools/testing/selftests/bpf/netlink_helpers.h new file mode 100644 index 000000000000..68116818a47e --- /dev/null +++ b/tools/testing/selftests/bpf/netlink_helpers.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef NETLINK_HELPERS_H +#define NETLINK_HELPERS_H + +#include <string.h> +#include <linux/netlink.h> +#include <linux/rtnetlink.h> + +struct rtnl_handle { + int fd; + struct sockaddr_nl local; + struct sockaddr_nl peer; + __u32 seq; + __u32 dump; + int proto; + FILE *dump_fp; +#define RTNL_HANDLE_F_LISTEN_ALL_NSID 0x01 +#define RTNL_HANDLE_F_SUPPRESS_NLERR 0x02 +#define RTNL_HANDLE_F_STRICT_CHK 0x04 + int flags; +}; + +#define NLMSG_TAIL(nmsg) \ + ((struct rtattr *) (((void *) (nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len))) + +typedef int (*nl_ext_ack_fn_t)(const char *errmsg, uint32_t off, + const struct nlmsghdr *inner_nlh); + +int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions) + __attribute__((warn_unused_result)); +void rtnl_close(struct rtnl_handle *rth); +int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, + struct nlmsghdr **answer) + __attribute__((warn_unused_result)); + +int addattr(struct nlmsghdr *n, int maxlen, int type); +int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data); +int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data); +int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data); +int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data); +int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *data); +int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen); +int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len); +struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type); +int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest); +#endif /* NETLINK_HELPERS_H */ -- Gitee From d098b40721906817a0a1e85becf3d51f174453ab Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 9 May 2024 09:37:33 +0800 Subject: [PATCH 927/953] anolis: check cgroup v1 for memcg_blkcg_tree operations ANBZ: #8973 Currently the parameter 'cgwb_v1' can be set up unconditionally. Take the following abnormal case into consideration: the system administrator configures both 'cgwb_v1' and 'systemd.unified_cgroup_hierarchy=1' on the command line by mistake, so the system in fact runs cgroup v2 after boot. Though we check whether the kernel is running under cgroup v2 in inode_cgwb_enabled(), we still allocate, insert and delete links for memcg_blkcg_tree, since we only check the parameter 'cgwb_v1'. This does no actual harm, but it is entirely unnecessary and wasteful. So restrict these operations to cgroup v1 only. Since bdi initialization happens before the cgroup subsystems are enabled, we'll still create the debug file bdi_wb_link, but without any links, in the above abnormal case.
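The cgroup_writeback_support_v1() helper that replaces the bare 'cgwb_v1' checks is not part of this diff. As a rough sketch only (an assumption, not the actual Anolis implementation), it presumably combines the boot parameter with a check that the memory and io controllers still sit on the cgroup v1 hierarchy, since cgroup_subsys_on_dfl() reports a controller mounted on the v2 (default) hierarchy:

/* Hypothetical sketch, not taken from the Anolis tree. */
static inline bool cgroup_writeback_support_v1(void)
{
        return cgwb_v1 &&
               !cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
               !cgroup_subsys_on_dfl(io_cgrp_subsys);
}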
Signed-off-by: Joseph Qi Reviewed-by: Jingbo Xu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3144 --- mm/backing-dev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/backing-dev.c b/mm/backing-dev.c index f032314fcbf2..67d71ce4472d 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -564,7 +564,7 @@ int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) struct memcg_blkcg_link *link; int i; - if (!cgwb_v1) + if (!cgroup_writeback_support_v1()) return 0; for (i = 0; i < count; i++) { @@ -594,7 +594,7 @@ void insert_memcg_blkcg_link(struct cgroup_subsys *ss, struct cgroup_subsys_state *memcg_css; int err; - if (!cgwb_v1) + if (!cgroup_writeback_support_v1()) return; if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) @@ -682,7 +682,7 @@ static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css) void delete_memcg_blkcg_link(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) { - if (!cgwb_v1) + if (!cgroup_writeback_support_v1()) return; if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) -- Gitee From 03af4a2a6bd530f1bf2f8ffcca8c25024e087baf Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:35 +0000 Subject: [PATCH 928/953] perf evlist: Add perf_evlist__go_system_wide() helper ANBZ: #8889 commit f6ff1c760431be34e4daaa44f242be911becd998 upstream. For dummy events that keep tracking, we may need to modify its cpu_maps. For example, change the cpu_maps to record sideband events for all CPUS. Add perf_evlist__go_system_wide() helper to support this scenario. Signed-off-by: Yang Jihong Acked-by: Adrian Hunter Tested-by: Ravi Bangoria Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-2-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/lib/perf/evlist.c | 9 +++++++++ tools/lib/perf/include/internal/evlist.h | 2 ++ 2 files changed, 11 insertions(+) diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index b8b066d0dc5e..3acbbccc1901 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -738,3 +738,12 @@ int perf_evlist__nr_groups(struct perf_evlist *evlist) } return nr_groups; } + +void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel) +{ + if (!evsel->system_wide) { + evsel->system_wide = true; + if (evlist->needs_map_propagation) + __perf_evlist__propagate_maps(evlist, evsel); + } +} diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h index 3339bc2f1765..d86ffe8ed483 100644 --- a/tools/lib/perf/include/internal/evlist.h +++ b/tools/lib/perf/include/internal/evlist.h @@ -135,4 +135,6 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist, void perf_evlist__reset_id_hash(struct perf_evlist *evlist); void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader); + +void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel); #endif /* __LIBPERF_INTERNAL_EVLIST_H */ -- Gitee From 7a22213eb86283fcee6b462db8805cf075ebba85 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:36 +0000 Subject: [PATCH 929/953] perf evlist: Add evlist__findnew_tracking_event() helper ANBZ: 
#8889 commit 9c95e4ef065723496442898614d09a9a916eab81 upstream. Currently, intel-bts, intel-pt, and arm-spe may add tracking event to the evlist. We may need to search for the tracking event for some settings. Therefore, add evlist__findnew_tracking_event() helper. If system_wide is true, evlist__findnew_tracking_event() set the cpu map of the evsel to all online CPUs. Signed-off-by: Yang Jihong Acked-by: Adrian Hunter Tested-by: Ravi Bangoria Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-3-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/builtin-record.c | 11 +++-------- tools/perf/util/evlist.c | 18 ++++++++++++++++++ tools/perf/util/evlist.h | 1 + 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index c40460e936cc..1fb3cfce87a0 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -1293,14 +1293,9 @@ static int record__open(struct record *rec) */ if (opts->target.initial_delay || target__has_cpu(&opts->target) || perf_pmus__num_core_pmus() > 1) { - pos = evlist__get_tracking_event(evlist); - if (!evsel__is_dummy_event(pos)) { - /* Set up dummy event. */ - if (evlist__add_dummy(evlist)) - return -ENOMEM; - pos = evlist__last(evlist); - evlist__set_tracking_event(evlist, pos); - } + pos = evlist__findnew_tracking_event(evlist, false); + if (!pos) + return -ENOMEM; /* * Enable the dummy event when the process is forked for diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 8bf537a29809..eb1dd29c538d 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1702,6 +1702,24 @@ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_ev tracking_evsel->tracking = true; } +struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide) +{ + struct evsel *evsel; + + evsel = evlist__get_tracking_event(evlist); + if (!evsel__is_dummy_event(evsel)) { + evsel = evlist__add_aux_dummy(evlist, system_wide); + if (!evsel) + return NULL; + + evlist__set_tracking_event(evlist, evsel); + } else if (system_wide) { + perf_evlist__go_system_wide(&evlist->core, &evsel->core); + } + + return evsel; +} + struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str) { struct evsel *evsel; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index d63486261fd2..cb91dc9117a2 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -387,6 +387,7 @@ bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr); struct evsel *evlist__get_tracking_event(struct evlist *evlist); void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel); +struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide); struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str); -- Gitee From b321620873832466fe55a2cec45d8b07311cb5e1 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:37 +0000 Subject: [PATCH 930/953] perf record: Move setting tracking events before record__init_thread_masks() ANBZ: #8889 commit 1285ab300d598ead593b190af65a16f4b0843c68 
upstream. User space tasks can migrate between CPUs, so when tracing selected CPUs, sideband for all CPUs is needed. In this case set the cpu map of the evsel to all online CPUs. This may modify the original cpu map of the evlist. Therefore, need to check whether the preceding scenario exists before record__init_thread_masks(). Dummy tracking has been set in record__open(), move it before record__init_thread_masks() and add a helper for unified processing. The sys_perf_event_open invoked is as follows: # perf --debug verbose=3 record -e cpu-clock -D 100 true Opening: cpu-clock ------------------------------------------------------------ perf_event_attr: type 1 (PERF_TYPE_SOFTWARE) size 136 config 0 (PERF_COUNT_SW_CPU_CLOCK) { sample_period, sample_freq } 4000 sample_type IP|TID|TIME|PERIOD|IDENTIFIER read_format ID|LOST disabled 1 inherit 1 freq 1 sample_id_all 1 exclude_guest 1 ------------------------------------------------------------ sys_perf_event_open: pid 10318 cpu 0 group_fd -1 flags 0x8 = 5 sys_perf_event_open: pid 10318 cpu 1 group_fd -1 flags 0x8 = 6 sys_perf_event_open: pid 10318 cpu 2 group_fd -1 flags 0x8 = 7 sys_perf_event_open: pid 10318 cpu 3 group_fd -1 flags 0x8 = 9 sys_perf_event_open: pid 10318 cpu 4 group_fd -1 flags 0x8 = 10 sys_perf_event_open: pid 10318 cpu 5 group_fd -1 flags 0x8 = 11 sys_perf_event_open: pid 10318 cpu 6 group_fd -1 flags 0x8 = 12 sys_perf_event_open: pid 10318 cpu 7 group_fd -1 flags 0x8 = 13 Opening: dummy:u ------------------------------------------------------------ perf_event_attr: type 1 (PERF_TYPE_SOFTWARE) size 136 config 0x9 (PERF_COUNT_SW_DUMMY) { sample_period, sample_freq } 1 sample_type IP|TID|TIME|IDENTIFIER read_format ID|LOST disabled 1 inherit 1 exclude_kernel 1 exclude_hv 1 mmap 1 comm 1 enable_on_exec 1 task 1 sample_id_all 1 exclude_guest 1 mmap2 1 comm_exec 1 ksymbol 1 bpf_event 1 ------------------------------------------------------------ sys_perf_event_open: pid 10318 cpu 0 group_fd -1 flags 0x8 = 14 sys_perf_event_open: pid 10318 cpu 1 group_fd -1 flags 0x8 = 15 sys_perf_event_open: pid 10318 cpu 2 group_fd -1 flags 0x8 = 16 sys_perf_event_open: pid 10318 cpu 3 group_fd -1 flags 0x8 = 17 sys_perf_event_open: pid 10318 cpu 4 group_fd -1 flags 0x8 = 18 sys_perf_event_open: pid 10318 cpu 5 group_fd -1 flags 0x8 = 19 sys_perf_event_open: pid 10318 cpu 6 group_fd -1 flags 0x8 = 20 sys_perf_event_open: pid 10318 cpu 7 group_fd -1 flags 0x8 = 21 'perf test' needs to update base-record & system-wide-dummy attr expected values for test-record-C0: 1. Because a dummy sideband event is added to the sampling of specified CPUs. When evlist contains evsel of different sample_type, evlist__config() will change the default PERF_SAMPLE_ID bit to PERF_SAMPLE_IDENTIFICATION bit. The attr sample_type expected value of base-record and system-wide-dummy in test-record-C0 needs to be updated. 2. The perf record uses evlist__add_aux_dummy() instead of evlist__add_dummy() to add a dummy event. The expected value of system-wide-dummy attr needs to be updated. 
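For reference, each expected sample_type value is simply the OR of PERF_SAMPLE_* bits from include/uapi/linux/perf_event.h. A minimal user-space sketch (illustration only, not part of the patch) that derives the old and new constants:

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
        /* old expected value: IP|TID|TIME|ID|CPU|PERIOD = 455 */
        printf("%d\n", PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
                       PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
        /* system-wide-dummy after this change: IP|TID|TIME|CPU|IDENTIFIER = 65671 */
        printf("%d\n", PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
                       PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER);
        /* test-record-C0 after this change: IP|TID|TIME|CPU|PERIOD|IDENTIFIER = 65927 */
        printf("%d\n", PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
                       PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | PERF_SAMPLE_IDENTIFIER);
        return 0;
}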
The 'perf test' result is as follows: # ./perf test list 2>&1 | grep 'Setup struct perf_event_attr' 17: Setup struct perf_event_attr # ./perf test 17 17: Setup struct perf_event_attr : Ok Signed-off-by: Yang Jihong Acked-by: Adrian Hunter Tested-by: Ravi Bangoria Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-4-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/builtin-record.c | 59 ++++++++++++++++--------- tools/perf/tests/attr/system-wide-dummy | 14 +++--- tools/perf/tests/attr/test-record-C0 | 4 +- 3 files changed, 47 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 1fb3cfce87a0..99942542267e 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -906,6 +906,37 @@ static int record__config_off_cpu(struct record *rec) return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); } +static int record__config_tracking_events(struct record *rec) +{ + struct record_opts *opts = &rec->opts; + struct evlist *evlist = rec->evlist; + struct evsel *evsel; + + /* + * For initial_delay, system wide or a hybrid system, we need to add + * tracking event so that we can track PERF_RECORD_MMAP to cover the + * delay of waiting or event synthesis. + */ + if (opts->target.initial_delay || target__has_cpu(&opts->target) || + perf_pmus__num_core_pmus() > 1) { + evsel = evlist__findnew_tracking_event(evlist, false); + if (!evsel) + return -ENOMEM; + + /* + * Enable the tracking event when the process is forked for + * initial_delay, immediately for system wide. + */ + if (opts->target.initial_delay && !evsel->immediate && + !target__has_cpu(&opts->target)) + evsel->core.attr.enable_on_exec = 1; + else + evsel->immediate = 1; + } + + return 0; +} + static bool record__kcore_readable(struct machine *machine) { char kcore[PATH_MAX]; @@ -1286,28 +1317,6 @@ static int record__open(struct record *rec) struct record_opts *opts = &rec->opts; int rc = 0; - /* - * For initial_delay, system wide or a hybrid system, we need to add a - * dummy event so that we can track PERF_RECORD_MMAP to cover the delay - * of waiting or event synthesis. - */ - if (opts->target.initial_delay || target__has_cpu(&opts->target) || - perf_pmus__num_core_pmus() > 1) { - pos = evlist__findnew_tracking_event(evlist, false); - if (!pos) - return -ENOMEM; - - /* - * Enable the dummy event when the process is forked for - * initial_delay, immediately for system wide. 
- */ - if (opts->target.initial_delay && !pos->immediate && - !target__has_cpu(&opts->target)) - pos->core.attr.enable_on_exec = 1; - else - pos->immediate = 1; - } - evlist__config(evlist, opts, &callchain_param); evlist__for_each_entry(evlist, pos) { @@ -4169,6 +4178,12 @@ int cmd_record(int argc, const char **argv) goto out; } + err = record__config_tracking_events(rec); + if (err) { + pr_err("record__config_tracking_events failed, error %d\n", err); + goto out; + } + err = record__init_thread_masks(rec); if (err) { pr_err("Failed to initialize parallel data streaming masks\n"); diff --git a/tools/perf/tests/attr/system-wide-dummy b/tools/perf/tests/attr/system-wide-dummy index 2f3e3eb728eb..a1e1d6a263bf 100644 --- a/tools/perf/tests/attr/system-wide-dummy +++ b/tools/perf/tests/attr/system-wide-dummy @@ -9,8 +9,10 @@ flags=8 type=1 size=136 config=9 -sample_period=4000 -sample_type=455 +sample_period=1 +# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | +# PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER +sample_type=65671 read_format=4|20 # Event will be enabled right away. disabled=0 @@ -18,12 +20,12 @@ inherit=1 pinned=0 exclusive=0 exclude_user=0 -exclude_kernel=0 -exclude_hv=0 +exclude_kernel=1 +exclude_hv=1 exclude_idle=0 mmap=1 comm=1 -freq=1 +freq=0 inherit_stat=0 enable_on_exec=0 task=1 @@ -32,7 +34,7 @@ precise_ip=0 mmap_data=0 sample_id_all=1 exclude_host=0 -exclude_guest=0 +exclude_guest=1 exclude_callchain_kernel=0 exclude_callchain_user=0 mmap2=1 diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0 index 317730b906dd..198e8429a1bf 100644 --- a/tools/perf/tests/attr/test-record-C0 +++ b/tools/perf/tests/attr/test-record-C0 @@ -10,9 +10,9 @@ cpu=0 enable_on_exec=0 # PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | -# PERF_SAMPLE_ID | PERF_SAMPLE_PERIOD +# PERF_SAMPLE_PERIOD | PERF_SAMPLE_IDENTIFIER # + PERF_SAMPLE_CPU added by -C 0 -sample_type=455 +sample_type=65927 # Dummy event handles mmaps, comm and task. mmap=0 -- Gitee From 5671b97c7a1511feb5cbf5384e66985df6548336 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:38 +0000 Subject: [PATCH 931/953] perf record: Track sideband events for all CPUs when tracing selected CPUs ANBZ: #8889 commit 74b4f3ecdf64b62446abfb36669b3d40a42d34eb upstream. User space tasks can migrate between CPUs, we need to track side-band events for all CPUs. The specific scenarios are as follows: CPU0 CPU1 perf record -C 0 start taskA starts to be created and executed -> PERF_RECORD_COMM and PERF_RECORD_MMAP events only deliver to CPU1 ...... | migrate to CPU0 | Running on CPU0 <----------/ ... perf record -C 0 stop Now perf samples the PC of taskA. However, perf does not record the PERF_RECORD_COMM and PERF_RECORD_MMAP events of taskA. Therefore, the comm and symbols of taskA cannot be parsed. The solution is to record sideband events for all CPUs when tracing selected CPUs. Because this modifies the default behavior, add related comments to the perf record man page. 
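To make the failure concrete, the scenario can be reproduced by hand with essentially the same commands that the selftest added later in this series automates (the perf.data path here is illustrative):

  # perf record -o perf.data -C 0 -- taskset --cpu-list 1 true
  # perf script -i perf.data --show-mmap-events -C 1 | grep MMAP

Before this change the second command prints nothing, because the PERF_RECORD_MMAP events for the task were delivered on CPU 1 and never recorded; with the change the sideband dummy event covers all CPUs and the MMAP events show up.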
The sys_perf_event_open invoked is as follows: # perf --debug verbose=3 record -e cpu-clock -C 1 true Opening: cpu-clock ------------------------------------------------------------ perf_event_attr: type 1 (PERF_TYPE_SOFTWARE) size 136 config 0 (PERF_COUNT_SW_CPU_CLOCK) { sample_period, sample_freq } 4000 sample_type IP|TID|TIME|CPU|PERIOD|IDENTIFIER read_format ID|LOST disabled 1 inherit 1 freq 1 sample_id_all 1 exclude_guest 1 ------------------------------------------------------------ sys_perf_event_open: pid -1 cpu 1 group_fd -1 flags 0x8 = 5 Opening: dummy:u ------------------------------------------------------------ perf_event_attr: type 1 (PERF_TYPE_SOFTWARE) size 136 config 0x9 (PERF_COUNT_SW_DUMMY) { sample_period, sample_freq } 1 sample_type IP|TID|TIME|CPU|IDENTIFIER read_format ID|LOST inherit 1 exclude_kernel 1 exclude_hv 1 mmap 1 comm 1 task 1 sample_id_all 1 exclude_guest 1 mmap2 1 comm_exec 1 ksymbol 1 bpf_event 1 ------------------------------------------------------------ sys_perf_event_open: pid -1 cpu 0 group_fd -1 flags 0x8 = 6 sys_perf_event_open: pid -1 cpu 1 group_fd -1 flags 0x8 = 7 sys_perf_event_open: pid -1 cpu 2 group_fd -1 flags 0x8 = 9 sys_perf_event_open: pid -1 cpu 3 group_fd -1 flags 0x8 = 10 sys_perf_event_open: pid -1 cpu 4 group_fd -1 flags 0x8 = 11 sys_perf_event_open: pid -1 cpu 5 group_fd -1 flags 0x8 = 12 sys_perf_event_open: pid -1 cpu 6 group_fd -1 flags 0x8 = 13 sys_perf_event_open: pid -1 cpu 7 group_fd -1 flags 0x8 = 14 Signed-off-by: Yang Jihong Tested-by: Ravi Bangoria Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-5-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/Documentation/perf-record.txt | 3 +++ tools/perf/builtin-record.c | 30 +++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index d5217be012d7..1889f66addf2 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -374,6 +374,9 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0- In per-thread mode with inheritance mode on (default), samples are captured only when the thread executes on the designated CPUs. Default is to monitor all CPUs. +User space tasks can migrate between CPUs, so when tracing selected CPUs, +a dummy event is created to track sideband for all CPUs. + -B:: --no-buildid:: Do not save the build ids of binaries in the perf.data files. This skips diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 99942542267e..5d86aa5ff536 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -906,10 +906,30 @@ static int record__config_off_cpu(struct record *rec) return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); } +static bool record__tracking_system_wide(struct record *rec) +{ + struct evlist *evlist = rec->evlist; + struct evsel *evsel; + + /* + * If non-dummy evsel exists, system_wide sideband is need to + * help parse sample information. 
+ * For example, PERF_EVENT_MMAP event to help parse symbol, + * and PERF_EVENT_COMM event to help parse task executable name. + */ + evlist__for_each_entry(evlist, evsel) { + if (!evsel__is_dummy_event(evsel)) + return true; + } + + return false; +} + static int record__config_tracking_events(struct record *rec) { struct record_opts *opts = &rec->opts; struct evlist *evlist = rec->evlist; + bool system_wide = false; struct evsel *evsel; /* @@ -919,7 +939,15 @@ static int record__config_tracking_events(struct record *rec) */ if (opts->target.initial_delay || target__has_cpu(&opts->target) || perf_pmus__num_core_pmus() > 1) { - evsel = evlist__findnew_tracking_event(evlist, false); + + /* + * User space tasks can migrate between CPUs, so when tracing + * selected CPUs, sideband for all CPUs is still needed. + */ + if (!!opts->target.cpu_list && record__tracking_system_wide(rec)) + system_wide = true; + + evsel = evlist__findnew_tracking_event(evlist, system_wide); if (!evsel) return -ENOMEM; -- Gitee From 9ff52ab5559f8cc8106c37b05248f0dcc03c94cc Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:39 +0000 Subject: [PATCH 932/953] perf test: Add test case for record sideband events ANBZ: #8889 commit 23b97c7ee963f1d007c035e76ba7e3a4fd1259e6 upstream. Add a new test case to record sideband events for all CPUs when tracing selected CPUs Test result: # ./perf test list 2>&1 | grep 'perf record sideband tests' 95: perf record sideband tests # ./perf test 95 95: perf record sideband tests : Ok Signed-off-by: Yang Jihong Tested-by: Ravi Bangoria Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-6-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/tests/shell/record_sideband.sh | 58 +++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100755 tools/perf/tests/shell/record_sideband.sh diff --git a/tools/perf/tests/shell/record_sideband.sh b/tools/perf/tests/shell/record_sideband.sh new file mode 100755 index 000000000000..5024a7ce0c51 --- /dev/null +++ b/tools/perf/tests/shell/record_sideband.sh @@ -0,0 +1,58 @@ +#!/bin/sh +# perf record sideband tests +# SPDX-License-Identifier: GPL-2.0 + +set -e + +err=0 +perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) + +cleanup() +{ + rm -rf ${perfdata} + trap - EXIT TERM INT +} + +trap_cleanup() +{ + cleanup + exit 1 +} +trap trap_cleanup EXIT TERM INT + +can_cpu_wide() +{ + if ! 
perf record -o ${perfdata} -BN --no-bpf-event -C $1 true 2>&1 >/dev/null + then + echo "record sideband test [Skipped cannot record cpu$1]" + err=2 + fi + + rm -f ${perfdata} + return $err +} + +test_system_wide_tracking() +{ + # Need CPU 0 and CPU 1 + can_cpu_wide 0 || return 0 + can_cpu_wide 1 || return 0 + + # Record on CPU 0 a task running on CPU 1 + perf record -BN --no-bpf-event -o ${perfdata} -C 0 -- taskset --cpu-list 1 true + + # Should get MMAP events from CPU 1 + mmap_cnt=`perf script -i ${perfdata} --show-mmap-events -C 1 2>/dev/null | grep MMAP | wc -l` + + if [ ${mmap_cnt} -gt 0 ] ; then + return 0 + fi + + echo "Failed to record MMAP events on CPU 1 when tracing CPU 0" + return 1 +} + +test_system_wide_tracking + +cleanup +exit $err -- Gitee From 5ee17856f9dd6e26c60ec2cd75df68c63815c2d8 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:40 +0000 Subject: [PATCH 933/953] perf test: Add perf_event_attr test for record dummy event ANBZ: #8889 commit d50ad02cb39a5fe1d0c02b3b51e8a2a37464c54a upstream. If only dummy event is recorded, tracking event is not needed. Add this test scenario. Test result: # ./perf test list 2>&1 | grep 'Setup struct perf_event_attr' 17: Setup struct perf_event_attr # ./perf test 17 -v 17: Setup struct perf_event_attr : --- start --- test child forked, pid 720198 running './tests/attr/test-record-dummy-C0' test child finished with 0 ---- end ---- Setup struct perf_event_attr: Ok Signed-off-by: Yang Jihong Tested-by: Ravi Bangoria Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-7-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/tests/attr/test-record-dummy-C0 | 55 ++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 tools/perf/tests/attr/test-record-dummy-C0 diff --git a/tools/perf/tests/attr/test-record-dummy-C0 b/tools/perf/tests/attr/test-record-dummy-C0 new file mode 100644 index 000000000000..83ca4e373acd --- /dev/null +++ b/tools/perf/tests/attr/test-record-dummy-C0 @@ -0,0 +1,55 @@ +[config] +command = record +args = --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1 +ret = 1 + +[event] +fd=1 +group_fd=-1 +cpu=0 +pid=-1 +flags=8 +type=1 +size=136 +config=9 +sample_period=4000 +# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | +# PERF_SAMPLE_PERIOD +# + PERF_SAMPLE_CPU added by -C 0 +sample_type=391 +read_format=4 +disabled=0 +inherit=1 +pinned=0 +exclusive=0 +exclude_user=0 +exclude_kernel=0 +exclude_hv=0 +exclude_idle=0 +mmap=1 +comm=1 +freq=1 +inherit_stat=0 +enable_on_exec=0 +task=1 +watermark=0 +precise_ip=0 +mmap_data=0 +sample_id_all=1 +exclude_host=0 +exclude_guest=1 +exclude_callchain_kernel=0 +exclude_callchain_user=0 +mmap2=1 +comm_exec=1 +context_switch=0 +write_backward=0 +namespaces=0 +use_clockid=0 +wakeup_events=0 +bp_type=0 +config1=0 +config2=0 +branch_sample_type=0 +sample_regs_user=0 +sample_stack_user=0 -- Gitee From 1f8dc42ec2c69b4ecc5e169c1f8f6a95d062b764 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Sat, 16 Sep 2023 09:16:41 +0000 Subject: [PATCH 934/953] perf test: Fix test-record-dummy-C0 failure for supported PERF_FORMAT_LOST feature kernel ANBZ: #8889 commit 
a132b784db68b543fd2745973cd8b5edf8e9bde4 upstream. For kernel that supports PERF_FORMAT_LOST, attr->read_format has PERF_FORMAT_LOST bit. Update expected value of attr->read_format of test-record-dummy-C0 for this scenario. Before: # ./perf test 17 -vv 17: Setup struct perf_event_attr : --- start --- test child forked, pid 1609441 running './tests/attr/test-record-dummy-C0' 'PERF_TEST_ATTR=/tmp/tmpm3s60aji ./perf record -o /tmp/tmpm3s60aji/perf.data --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1' ret '1', expected '1' expected read_format=4, got 20 FAILED './tests/attr/test-record-dummy-C0' - match failure test child finished with -1 ---- end ---- Setup struct perf_event_attr: FAILED! After: # ./perf test 17 -vv 17: Setup struct perf_event_attr : --- start --- test child forked, pid 1609441 running './tests/attr/test-record-dummy-C0' 'PERF_TEST_ATTR=/tmp/tmppa9vxcb7 ./perf record -o /tmp/tmppa9vxcb7/perf.data --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1' ret '1', expected '1' test child finished with 0 ---- end ---- Setup struct perf_event_attr: Ok Reported-and-Tested-by: Namhyung Kim Signed-off-by: Yang Jihong Link: https://lore.kernel.org/r/20230916091641.776031-1-yangjihong1@huawei.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3148 --- tools/perf/tests/attr/test-record-dummy-C0 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/attr/test-record-dummy-C0 b/tools/perf/tests/attr/test-record-dummy-C0 index 83ca4e373acd..576ec48b3aaf 100644 --- a/tools/perf/tests/attr/test-record-dummy-C0 +++ b/tools/perf/tests/attr/test-record-dummy-C0 @@ -17,7 +17,7 @@ sample_period=4000 # PERF_SAMPLE_PERIOD # + PERF_SAMPLE_CPU added by -C 0 sample_type=391 -read_format=4 +read_format=4|20 disabled=0 inherit=1 pinned=0 -- Gitee From 6d49f0de8c2f32f405ec970b00d37e249ec560cf Mon Sep 17 00:00:00 2001 From: yangdepei Date: Tue, 16 Apr 2024 20:15:13 +0800 Subject: [PATCH 935/953] anolis:ccp: ccp-crypto support sm2 on Hygon generation 4th CPU ANBZ: #8582 1. support sm2 on 4th cpu 2. create new ccp-dev-v5.c file for hygon ccp only 3. 
restore original ccp-dev-v5.c file Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/Kconfig | 1 + drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-dev-v5.c | 326 +------ drivers/crypto/ccp/ccp-dev.h | 14 +- drivers/crypto/ccp/hygon/ccp-dev-v5.c | 1236 +++++++++++++++++++++++++ drivers/crypto/ccp/sp-pci.c | 27 +- 6 files changed, 1281 insertions(+), 326 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/ccp-dev-v5.c diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 702b4c6761fd..726f1a6025eb 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -50,6 +50,7 @@ config HYGON_GM bool "Hygon GM (sm2/sm3/sm4) Interface" default y depends on CRYPTO_DEV_CCP_CRYPTO && X86_64 + select CRYPTO_SM3_GENERIC help Hygon GM ccp driver diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 70bab9cbe3d5..69534353c8d4 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -5,7 +5,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ ccp-dev-v5.o \ - ccp-dmaengine.o + ccp-dmaengine.o \ + hygon/ccp-dev-v5.o ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o ccp-$(CONFIG_PCI) += sp-pci.o ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index e5c129c3e049..7b73332d6aa1 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,28 +131,6 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; - struct { - u16 rand:1; - u16 rsvd:11; - u16 mode:3; - } sm2; - struct { - u16 rsvd:10; - u16 type:4; - u16 rsvd2:1; - } sm3; - struct { - u16 rsvd:7; - u16 encrypt:1; - u16 mode:4; - u16 select:1; - u16 rsvd2:2; - } sm4; - struct { - u16 size:7; - u16 encrypt:1; - u16 step:7; - } sm4_ctr; u16 raw; }; @@ -173,15 +151,6 @@ union ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) -#define CCP_SM2_RAND(p) ((p)->sm2.rand) -#define CCP_SM2_MODE(p) ((p)->sm2.mode) -#define CCP_SM3_TYPE(p) ((p)->sm3.type) -#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) -#define CCP_SM4_MODE(p) ((p)->sm4.mode) -#define CCP_SM4_SELECT(p) ((p)->sm4.select) -#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) -#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) -#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -217,8 +186,6 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) -#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) -#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -227,17 +194,6 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) -static inline unsigned int command_per_queue(void) -{ -#ifdef CONFIG_HYGON_GM - return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
- HYGON_COMMANDS_PER_QUEUE : - COMMANDS_PER_QUEUE; -#else - return COMMANDS_PER_QUEUE; -#endif -} - static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; @@ -251,86 +207,15 @@ static inline u32 high_address(unsigned long addr) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; - u32 head_lo, queue_start, command_per_q; + u32 head_lo, queue_start; - command_per_q = command_per_queue(); queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); - n = head_idx + command_per_q - cmd_q->qidx - 1; + n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; - return n % command_per_q; /* Always one unused spot */ -} - -static int ccp5_do_multi_cmds(struct ccp5_desc *desc, - struct ccp_cmd_queue *cmd_q) -{ - u32 *mP; - __le32 *dP; - int i; - u32 command_per_q; - - command_per_q = command_per_queue(); - - cmd_q->total_ops++; - - if (CCP5_CMD_SOC(desc)) { - CCP5_CMD_IOC(desc) = 1; - CCP5_CMD_SOC(desc) = 0; - } - - mutex_lock(&cmd_q->q_mutex); - - mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; - dP = (__le32 *) desc; - for (i = 0; i < 8; i++) - mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - - cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; - - mutex_unlock(&cmd_q->q_mutex); - - return 0; -} - -static int ccp5_do_run_cmd(struct ccp_op *op) -{ - struct ccp_cmd_queue *cmd_q = op->cmd_q; - u32 tail; - int ret = 0; - - mutex_lock(&cmd_q->q_mutex); - - /* The data used by this command must be flushed to memory */ - wmb(); - - /* Write the new tail address back to the queue register */ - tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); - iowrite32(tail, cmd_q->reg_tail_lo); - - /* Turn the queue back on using our cached control register */ - iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); - mutex_unlock(&cmd_q->q_mutex); - - if (op->ioc) { - /* Wait for the job to complete */ - ret = wait_event_interruptible(cmd_q->int_queue, - cmd_q->int_rcvd); - if (ret || cmd_q->cmd_error) { - /* Log the error and flush the queue by - * moving the head pointer - */ - if (cmd_q->cmd_error) - ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); - iowrite32(tail, cmd_q->reg_head_lo); - if (!ret) - ret = -EIO; - } - cmd_q->int_rcvd = 0; - } - - return ret; + return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ } static int ccp5_do_cmd(struct ccp5_desc *desc, @@ -338,11 +223,10 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, { __le32 *mP; u32 *dP; - u32 tail, command_per_q; + u32 tail; int i; int ret = 0; - command_per_q = command_per_queue(); cmd_q->total_ops++; if (CCP5_CMD_SOC(desc)) { @@ -356,7 +240,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; /* The data used by this command must be flushed to memory */ wmb(); @@ -700,163 +584,6 @@ static int ccp5_perform_ecc(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } -static int ccp5_perform_sm2(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - struct ccp_dma_info *saddr = &op->src.u.dma; - struct ccp_dma_info *daddr = &op->dst.u.dma; - - op->cmd_q->total_sm2_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; - - CCP5_CMD_SOC(&desc) = 0; - CCP5_CMD_IOC(&desc) = 1; - CCP5_CMD_INIT(&desc) = 1; - CCP5_CMD_EOM(&desc) = 1; - 
CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM2_RAND(&function) = op->u.sm2.rand; - CCP_SM2_MODE(&function) = op->u.sm2.mode; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - /* Length of source data must match with mode */ - CCP5_CMD_LEN(&desc) = saddr->length; - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - return ccp5_do_cmd(&desc, op->cmd_q); -} - -static int ccp5_perform_sm3(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - - op->cmd_q->total_sm3_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM3_TYPE(&function) = op->u.sm3.type; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; - - if (op->eom) { - CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); - CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); - } - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - -static int ccp5_perform_sm4(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; - - op->cmd_q->total_sm4_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; - CCP_SM4_MODE(&function) = op->u.sm4.mode; - CCP_SM4_SELECT(&function) = op->u.sm4.select; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; - - CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - -static int ccp5_perform_sm4_ctr(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; - - op->cmd_q->total_sm4_ctr_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; - CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; - CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = 
ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; - - CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -866,7 +593,6 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. */ - status >>= LSB_REGION_WIDTH; for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); @@ -1018,7 +744,7 @@ static void ccp5_irq_bh(unsigned long data) status = ioread32(cmd_q->reg_interrupt_status); - if (status & SUPPORTED_INTERRUPTS) { + if (status) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); @@ -1027,9 +753,10 @@ static void ccp5_irq_bh(unsigned long data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + cmd_q->int_rcvd = 1; + /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); - cmd_q->int_rcvd = 1; wake_up_interruptible(&cmd_q->int_queue); } } @@ -1057,7 +784,7 @@ static int ccp5_init(struct ccp_device *ccp) char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; - u32 status_lo, status_hi, command_per_q, queue_size_val; + u32 status_lo, status_hi; int ret; /* Find available queues */ @@ -1074,9 +801,6 @@ static int ccp5_init(struct ccp_device *ccp) return 1; } - command_per_q = command_per_queue(); - queue_size_val = QUEUE_SIZE_VAL(command_per_q); - for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; @@ -1103,7 +827,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); - cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE); + cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); @@ -1190,7 +914,7 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); - cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT; + cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); @@ -1338,26 +1062,6 @@ static void ccp5_destroy(struct ccp_device *ccp) } } -static int ccp5_get_trng_mask_param(void) -{ - /* According to spec description for SM4 high secure module, - * which need 64 bytes data, so the initialize times of writing - * mask register must be 16 or a multiple of 16. - * - * The AES algorithem need 48 bytes, so the initialize times will - * be 12 or a multiple of 12. 
- */ - -#ifdef CONFIG_HYGON_GM - /* for sm4 HS */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - return 16; -#endif - - /* for AES HS */ - return 12; -} - static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1368,13 +1072,12 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; - int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < len; i++) { + for (i = 0; i < 12; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1400,11 +1103,6 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, - .sm2 = ccp5_perform_sm2, - .sm3 = ccp5_perform_sm3, - .sm4 = ccp5_perform_sm4, - .sm4_ctr = ccp5_perform_sm4_ctr, - .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index e1aa68f4044c..46518c80f8ca 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -101,13 +101,12 @@ #define CMD5_Q_SHIFT 3 #define COMMANDS_PER_QUEUE 16 -#define HYGON_COMMANDS_PER_QUEUE 8192 +#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) #define Q_DESC_SIZE sizeof(struct ccp5_desc) - -#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) -#define Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) -#define Q_SIZE(c, n) ((c)*(n)) +#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 @@ -362,6 +361,9 @@ struct ccp_device { bool use_tasklet; struct tasklet_struct irq_tasklet; + /* This flag mark if the ccp support both sm2 and ecc function */ + uint32_t support_sm2_ecc; + /* I/O area used for device communication. The register mapping * starts at an offset into the mapped bar. * The CMD_REQx registers and the Delete_Cmd_Queue_Job register @@ -709,5 +711,7 @@ extern const struct ccp_vdata ccpv3_platform; extern const struct ccp_vdata ccpv3; extern const struct ccp_vdata ccpv5a; extern const struct ccp_vdata ccpv5b; +extern const struct ccp_vdata ccpv5a_hygon; +extern const struct ccp_vdata ccpv5b_hygon; #endif diff --git a/drivers/crypto/ccp/hygon/ccp-dev-v5.c b/drivers/crypto/ccp/hygon/ccp-dev-v5.c new file mode 100644 index 000000000000..35e9fc5135d0 --- /dev/null +++ b/drivers/crypto/ccp/hygon/ccp-dev-v5.c @@ -0,0 +1,1236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Secure Processor interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Depei Yang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "../ccp-dev.h" + +/* Allocate the requested number of contiguous LSB slots + * from the LSB bitmap. Look in the private range for this + * queue first; failing that, check the public area. + * If no space is available, wait around. 
+ * Return: first slot number + */ +static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) +{ + struct ccp_device *ccp; + int start; + + /* First look at the map for the queue */ + if (cmd_q->lsb >= 0) { + start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, + LSB_SIZE, + 0, count, 0); + if (start < LSB_SIZE) { + bitmap_set(cmd_q->lsbmap, start, count); + return start + cmd_q->lsb * LSB_SIZE; + } + } + + /* No joy; try to get an entry from the shared blocks */ + ccp = cmd_q->ccp; + for (;;) { + mutex_lock(&ccp->sb_mutex); + + start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, + MAX_LSB_CNT * LSB_SIZE, + 0, + count, 0); + if (start <= MAX_LSB_CNT * LSB_SIZE) { + bitmap_set(ccp->lsbmap, start, count); + + mutex_unlock(&ccp->sb_mutex); + return start; + } + + ccp->sb_avail = 0; + + mutex_unlock(&ccp->sb_mutex); + + /* Wait for KSB entries to become available */ + if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) + return 0; + } +} + + +/* Free a number of LSB slots from the bitmap, starting at + * the indicated starting slot number. + */ +static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, + unsigned int count) +{ + if (!start) + return; + + if (cmd_q->lsb == start) { + /* An entry from the private LSB */ + bitmap_clear(cmd_q->lsbmap, start, count); + } else { + /* From the shared LSBs */ + struct ccp_device *ccp = cmd_q->ccp; + + mutex_lock(&ccp->sb_mutex); + bitmap_clear(ccp->lsbmap, start, count); + ccp->sb_avail = 1; + mutex_unlock(&ccp->sb_mutex); + wake_up_interruptible_all(&ccp->sb_queue); + } +} + +/* Hygon CCP version 5: Union to define the function field (cmd_reg1/dword0) */ +union ccp_function { + struct { + u16 byteswap:2; + u16 bitwise:3; + u16 reflect:2; + u16 rsvd:8; + } pt; + struct { + u16 rand:1; + u16 rsvd:10; + u16 mode:3; + u16 ecc_mode:1; + } sm2_ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; + u16 raw; +}; + +#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) +#define CCP_PT_BITWISE(p) ((p)->pt.bitwise) + +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) + +/* For ccp support both sm2 and ecc */ +#define CCP_SM2_ECC_RAND(p) ((p)->sm2_ecc.rand) +#define CCP_SM2_ECC_MODE(p) ((p)->sm2_ecc.mode) +#define CCP_SM2_ECC_ECC_MODE(p) ((p)->sm2_ecc.ecc_mode) + +#define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) + +/* Word 0 */ +#define CCP5_CMD_DW0(p) ((p)->dw0) +#define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) +#define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) +#define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) +#define CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom) +#define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) +#define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) +#define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) + +/* Word 1 */ +#define CCP5_CMD_DW1(p) ((p)->length) +#define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) + +/* Word 2 */ +#define CCP5_CMD_DW2(p) ((p)->src_lo) +#define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) + +/* Word 3 */ +#define CCP5_CMD_DW3(p) ((p)->dw3) +#define CCP5_CMD_SRC_MEM(p) 
((p)->dw3.src_mem) +#define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) +#define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) +#define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) + +/* Words 4/5 */ +#define CCP5_CMD_DW4(p) ((p)->dw4) +#define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) +#define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) +#define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) +#define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) +#define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) + +/* Word 6/7 */ +#define CCP5_CMD_DW6(p) ((p)->key_lo) +#define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) +#define CCP5_CMD_DW7(p) ((p)->dw7) +#define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) +#define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) + +#define CCP5_COMMANDS_PER_QUEUE 8192 +#define CCP5_QUEUE_SIZE_VAL ((ffs(CCP5_COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define CCP5_Q_PTR_MASK (2 << (CCP5_QUEUE_SIZE_VAL + 5) - 1) +#define CCP5_Q_SIZE(n) (CCP5_COMMANDS_PER_QUEUE * (n)) + +/* indicates whether there is ECC engine for Hygon CCP */ +#define RI_ECC_PRESENT 0x0400 + +/** + * Hygon CCP from 4th generation support both sm2 & ecc, + * but its input content is different from previous version. + * the previous requries only one src buffer which include + * hash + key. Now, hash and key should passed separately. To + * compatible with previous driver, we parse hash and key + * from src buffer which same as previous input + */ +#define SM2_ECC_OPERAND_LEN 32 +#define SM2_ECC_KG_SRC_SIZE 32 +#define SM2_ECC_LP_SRC_SIZE 32 +#define SM2_ECC_SIGN_SRC_SIZE 64 +#define SM2_ECC_VERIFY_SRC_SIZE 96 + +static inline int ccp5_get_keyinfo(struct ccp_op *op, dma_addr_t *kaddr, u32 *slen) +{ + struct ccp_dma_info *sinfo = &op->src.u.dma; + dma_addr_t saddr = sinfo->address + sinfo->offset; + int ret = 0; + + switch (op->u.sm2.mode) { + case CCP_SM2_MODE_SIGN: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_SIGN_SRC_SIZE; + break; + case CCP_SM2_MODE_VERIFY: + *kaddr = saddr + SM2_ECC_VERIFY_SRC_SIZE; + *slen = SM2_ECC_VERIFY_SRC_SIZE; + break; + case CCP_SM2_MODE_KG: + *kaddr = 0; /* unused for KG */ + *slen = SM2_ECC_KG_SRC_SIZE; + break; + case CCP_SM2_MODE_LP: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_LP_SRC_SIZE; + break; + default: + pr_err("Invalid sm2 operation, mode = %d\n", op->u.sm2.mode); + ret = -EINVAL; + break; + } + + return ret; +} + +static inline u32 low_address(unsigned long addr) +{ + return (u64)addr & 0x0ffffffff; +} + +static inline u32 high_address(unsigned long addr) +{ + return ((u64)addr >> 32) & 0x00000ffff; +} + +static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) +{ + unsigned int head_idx, n; + u32 head_lo, queue_start; + + queue_start = low_address(cmd_q->qdma_tail); + head_lo = ioread32(cmd_q->reg_head_lo); + head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); + + n = head_idx + CCP5_COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + + return n % CCP5_COMMANDS_PER_QUEUE; /* Always one unused spot */ +} + +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + int i; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + 
mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_do_cmd(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + __le32 *mP; + u32 *dP; + u32 tail; + int i; + int ret = 0; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + mutex_lock(&cmd_q->q_mutex); + + mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; + dP = (u32 *)desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (CCP5_CMD_IOC(desc)) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, + cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + dma_addr_t kaddr; + unsigned int slen = saddr->length; + int ret = 0; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + + /* + * ccp support both sm2 and ecc, the rand,mode filed are different + * with previous, and run on ecc or sm2 also should be indicated + */ + if (op->cmd_q->ccp->support_sm2_ecc) { + ret = ccp5_get_keyinfo(op, &kaddr, &slen); + if (ret) + return ret; + + CCP_SM2_ECC_RAND(&function) = op->u.sm2.rand; + CCP_SM2_ECC_MODE(&function) = op->u.sm2.mode; + CCP_SM2_ECC_ECC_MODE(&function) = 0; /* 0: SM2 1: ECC */ + } else { + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + } + + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + 
CCP5_CMD_LEN(&desc) = slen; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + if (op->cmd_q->ccp->support_sm2_ecc && + op->u.sm2.mode != CCP_SM2_MODE_KG) { + CCP5_CMD_KEY_LO(&desc) = low_address(kaddr); + CCP5_CMD_KEY_HI(&desc) = high_address(kaddr); + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = 
ccp_addr_lo(&op->src.u.dma);
+	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+	CCP5_CMD_KEY_HI(&desc) = 0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+
+	return ccp5_do_multi_cmds(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_passthru(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	struct ccp_dma_info *saddr = &op->src.u.dma;
+	struct ccp_dma_info *daddr = &op->dst.u.dma;
+
+	op->cmd_q->total_pt_ops++;
+
+	memset(&desc, 0, Q_DESC_SIZE);
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP5_CMD_SOC(&desc) = 0;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = 0;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
+	CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
+	CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	/* Take the length from whichever side lives in system memory */
+	if (op->src.type == CCP_MEMTYPE_SYSTEM)
+		CCP5_CMD_LEN(&desc) = saddr->length;
+	else
+		CCP5_CMD_LEN(&desc) = daddr->length;
+
+	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+		CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+		CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+		CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP5_CMD_LSB_ID(&desc) = op->sb_key;
+	} else {
+		u32 key_addr = op->src.u.sb * CCP_SB_BYTES;
+
+		CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
+		CCP5_CMD_SRC_HI(&desc) = 0;
+		CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
+	}
+
+	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+		CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+		CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+		CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+	} else {
+		u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;
+
+		CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
+		CCP5_CMD_DST_HI(&desc) = 0;
+		CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
+	}
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_aes(struct ccp_op *op)
+{
+	pr_err("AES function not implemented\n");
+	return -EPERM;
+}
+
+static int ccp5_perform_xts_aes(struct ccp_op *op)
+{
+	pr_err("XTS-AES function not implemented\n");
+	return -EPERM;
+}
+
+static int ccp5_perform_sha(struct ccp_op *op)
+{
+	pr_err("SHA function not implemented\n");
+	return -EPERM;
+}
+
+static int ccp5_perform_des3(struct ccp_op *op)
+{
+	pr_err("DES3 function not implemented\n");
+	return -EPERM;
+}
+
+static int ccp5_perform_rsa(struct ccp_op *op)
+{
+	pr_err("RSA function not implemented\n");
+	return -EPERM;
+}
+
+static int ccp5_perform_ecc(struct ccp_op *op)
+{
+	pr_err("ECC function not implemented\n");
+	return -EPERM;
+}
+
+static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int queues = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
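+	 * As a rough illustration: the status word is consumed in
+	 * LSB_REGION_WIDTH-bit slices, one slice per LSB region, and
+	 * whenever this queue's q_mask bit is set in a slice the region
+	 * is recorded in lsbmask for ccp_assign_lsbs() to consume later.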
+ */ + status >>= LSB_REGION_WIDTH; + for (j = 1; j < MAX_LSB_CNT; j++) { + if (status & q_mask) + bitmap_set(cmd_q->lsbmask, j, 1); + status >>= LSB_REGION_WIDTH; + } + queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", + cmd_q->id, queues); + + return queues ? 0 : -EINVAL; +} + +static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, + int lsb_cnt, int n_lsbs, + unsigned long *lsb_pub) +{ + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int bitno; + int qlsb_wgt; + int i; + + /* For each queue: + * If the count of potential LSBs available to a queue matches the + * ordinal given to us in lsb_cnt: + * Copy the mask of possible LSBs for this queue into "qlsb"; + * For each bit in qlsb, see if the corresponding bit in the + * aggregation mask is set; if so, we have a match. + * If we have a match, clear the bit in the aggregation to + * mark it as no longer available. + * If there is no match, clear the bit in qlsb and keep looking. + */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + + if (qlsb_wgt == lsb_cnt) { + bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); + + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + if (test_bit(bitno, lsb_pub)) { + /* We found an available LSB + * that this queue can access + */ + cmd_q->lsb = bitno; + bitmap_clear(lsb_pub, bitno, 1); + dev_dbg(ccp->dev, + "Queue %d gets LSB %d\n", + i, bitno); + break; + } + bitmap_clear(qlsb, bitno, 1); + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + } + if (bitno >= MAX_LSB_CNT) + return -EINVAL; + n_lsbs--; + } + } + return n_lsbs; +} + +/* For each queue, from the most- to least-constrained: + * find an LSB that can be assigned to the queue. If there are N queues that + * can only use M LSBs, where N > M, fail; otherwise, every queue will get a + * dedicated LSB. Remaining LSB regions become a shared resource. + * If we have fewer LSBs than queues, all LSB regions become shared resources. + */ +static int ccp_assign_lsbs(struct ccp_device *ccp) +{ + DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int n_lsbs = 0; + int bitno; + int i, lsb_cnt; + int rc = 0; + + bitmap_zero(lsb_pub, MAX_LSB_CNT); + + /* Create an aggregate bitmap to get a total count of available LSBs */ + for (i = 0; i < ccp->cmd_q_count; i++) + bitmap_or(lsb_pub, + lsb_pub, ccp->cmd_q[i].lsbmask, + MAX_LSB_CNT); + + n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); + + if (n_lsbs >= ccp->cmd_q_count) { + /* We have enough LSBS to give every queue a private LSB. + * Brute force search to start with the queues that are more + * constrained in LSB choice. When an LSB is privately + * assigned, it is removed from the public mask. + * This is an ugly N squared algorithm with some optimization. + */ + for (lsb_cnt = 1; + n_lsbs && (lsb_cnt <= MAX_LSB_CNT); + lsb_cnt++) { + rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, + lsb_pub); + if (rc < 0) + return -EINVAL; + n_lsbs = rc; + } + } + + rc = 0; + /* What's left of the LSBs, according to the public mask, now become + * shared. Any zero bits in the lsb_pub mask represent an LSB region + * that can't be used as a shared resource, so mark the LSB slots for + * them as "in use". 
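+	 * Each region covers LSB_SIZE slots in ccp->lsbmap, so reserving
+	 * a region means setting all of its slots, as the loop below does.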
+	 */
+	bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);
+
+	bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		bitmap_set(qlsb, bitno, 1);
+		bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	u32 status;
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		status = ioread32(cmd_q->reg_interrupt_status);
+
+		if (status & SUPPORTED_INTERRUPTS) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((status & INT_ERROR) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(status, cmd_q->reg_interrupt_status);
+			cmd_q->int_rcvd = 1;
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp5_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+
+	ccp5_disable_queue_interrupts(ccp);
+	ccp->total_interrupts++;
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp5_irq_bh((unsigned long)ccp);
+	return IRQ_HANDLED;
+}
+
+static int ccp5_init(struct ccp_device *ccp)
+{
+	struct device *dev = ccp->dev;
+	struct ccp_cmd_queue *cmd_q;
+	struct dma_pool *dma_pool;
+	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+	unsigned int qmr, i;
+	u64 status;
+	u32 status_lo, status_hi;
+	int ret;
+
+	/* Find available queues */
+	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+	/*
+	 * Check for an access to the registers. If this read returns
+	 * 0xffffffff, it's likely that the system is running a broken
+	 * BIOS which disallows access to the device. Stop here and fail
+	 * the initialization (but not the load, as the PSP could get
+	 * properly initialized).
+	 */
+	if (qmr == 0xffffffff) {
+		dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
+		return 1;
+	}
+
+	/* check if the ccp supports both sm2 and ecc. 
*/ + ccp->support_sm2_ecc = !!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) + & RI_ECC_PRESENT); + + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { + if (!(qmr & (1 << i))) + continue; + + /* Allocate a dma pool for this queue */ + snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", + ccp->name, i); + dma_pool = dma_pool_create(dma_pool_name, dev, + CCP_DMAPOOL_MAX_SIZE, + CCP_DMAPOOL_ALIGN, 0); + if (!dma_pool) { + dev_err(dev, "unable to allocate dma pool\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; + ccp->cmd_q_count++; + + cmd_q->ccp = ccp; + cmd_q->id = i; + cmd_q->dma_pool = dma_pool; + mutex_init(&cmd_q->q_mutex); + + /* Page alignment satisfies our needs for N <= 128 */ + BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); + cmd_q->qsize = CCP5_Q_SIZE(Q_DESC_SIZE); + cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); + if (!cmd_q->qbase) { + dev_err(dev, "unable to allocate command queue\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q->qidx = 0; + /* Preset some register values and masks that are queue + * number dependent + */ + cmd_q->reg_control = ccp->io_regs + + CMD5_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + + CMD5_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + + CMD5_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + + CMD5_Q_INT_STATUS_BASE; + cmd_q->reg_dma_status = cmd_q->reg_control + + CMD5_Q_DMA_STATUS_BASE; + cmd_q->reg_dma_read_status = cmd_q->reg_control + + CMD5_Q_DMA_READ_STATUS_BASE; + cmd_q->reg_dma_write_status = cmd_q->reg_control + + CMD5_Q_DMA_WRITE_STATUS_BASE; + + init_waitqueue_head(&cmd_q->int_queue); + + dev_dbg(dev, "queue #%u available\n", i); + } + + if (ccp->cmd_q_count == 0) { + dev_notice(dev, "no command queues available\n"); + ret = 1; + goto e_pool; + } + + /* Turn off the queues and disable interrupts until ready */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol = 0; /* Start with nothing */ + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + } + + dev_dbg(dev, "Requesting an IRQ...\n"); + /* Request an irq */ + ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_pool; + } + /* Initialize the ISR tasklet */ + if (ccp->use_tasklet) + tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh, + (unsigned long)ccp); + + dev_dbg(dev, "Loading LSB map...\n"); + /* Copy the private LSB mask to the public registers */ + status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); + iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); + status = ((u64)status_hi<<30) | (u64)status_lo; + + dev_dbg(dev, "Configuring virtual queues...\n"); + /* Configure size of each virtual queue accessible to host */ + for (i = 0; i < ccp->cmd_q_count; i++) { + u32 dma_addr_lo; + u32 dma_addr_hi; + + cmd_q = &ccp->cmd_q[i]; + + 
cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); + cmd_q->qcontrol |= CCP5_QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + + cmd_q->qdma_tail = cmd_q->qbase_dma; + dma_addr_lo = low_address(cmd_q->qdma_tail); + iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo); + iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); + + dma_addr_hi = high_address(cmd_q->qdma_tail); + cmd_q->qcontrol |= (dma_addr_hi << 16); + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + /* Find the LSB regions accessible to the queue */ + ccp_find_lsb_regions(cmd_q, status); + cmd_q->lsb = -1; /* Unassigned value */ + } + + dev_dbg(dev, "Assigning LSBs...\n"); + ret = ccp_assign_lsbs(ccp); + if (ret) { + dev_err(dev, "Unable to assign LSBs (%d)\n", ret); + goto e_irq; + } + + /* Optimization: pre-allocate LSB slots for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + } + + dev_dbg(dev, "Starting threads...\n"); + /* Create a kthread for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct task_struct *kthread; + + cmd_q = &ccp->cmd_q[i]; + + kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); + if (IS_ERR(kthread)) { + dev_err(dev, "error creating queue thread (%ld)\n", + PTR_ERR(kthread)); + ret = PTR_ERR(kthread); + goto e_kthread; + } + + cmd_q->kthread = kthread; + } + + dev_dbg(dev, "Enabling interrupts...\n"); + ccp5_enable_queue_interrupts(ccp); + + dev_dbg(dev, "Registering device...\n"); + /* Put this on the unit list to make it available */ + ccp_add_device(ccp); + + ret = ccp_register_rng(ccp); + if (ret) + goto e_kthread; + + /* Register the DMA engine support */ + ret = ccp_dmaengine_register(ccp); + if (ret) + goto e_hwrng; + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* Set up debugfs entries */ + ccp5_debugfs_setup(ccp); +#endif + + return 0; + +e_hwrng: + ccp_unregister_rng(ccp); + +e_kthread: + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + +e_irq: + sp_free_ccp_irq(ccp->sp, ccp); + +e_pool: + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + return ret; +} + +static void ccp5_destroy(struct ccp_device *ccp) +{ + struct ccp_cmd_queue *cmd_q; + struct ccp_cmd *cmd; + unsigned int i; + + /* Unregister the DMA engine */ + ccp_dmaengine_unregister(ccp); + + /* Unregister the RNG */ + ccp_unregister_rng(ccp); + + /* Remove this device from the list of available units first */ + ccp_del_device(ccp); + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* We're in the process of tearing down the entire driver; + * when all the devices are gone clean up debugfs + */ + if (ccp_present()) + ccp5_debugfs_destroy(); +#endif + + /* Disable and clear interrupts */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + /* Turn off the run bit */ + iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + sp_free_ccp_irq(ccp->sp, ccp); + + /* Flush the cmd and backlog queue */ + while (!list_empty(&ccp->cmd)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->cmd, struct 
ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+	while (!list_empty(&ccp->backlog)) {
+		/* Invoke the callback directly with an error code */
+		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+		list_del(&cmd->entry);
+		cmd->callback(cmd->data, -ENODEV);
+	}
+}
+
+static void ccp5_config(struct ccp_device *ccp)
+{
+	/* Public side */
+	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+}
+
+static void ccp5other_config(struct ccp_device *ccp)
+{
+	int i;
+	u32 rnd;
+
+	/* We own all of the queues on the NTB CCP */
+
+	iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
+	iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
+
+	/* According to the spec, the SM4 high-secure module needs 64 bytes
+	 * of data, so the mask register must be initialized with 16 writes,
+	 * or a multiple of 16.
+	 *
+	 * The AES algorithm needs 48 bytes, so 12 writes, or a multiple of
+	 * 12, are required.
+	 */
+	for (i = 0; i < 16; i++) {
+		rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
+		iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
+	}
+
+	iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
+	iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
+	iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);
+
+	iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
+	iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
+
+	iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);
+
+	ccp5_config(ccp);
+}
+
+/* The Hygon v5.1 CCP adds some functions, but is essentially the same as v5 */
+static const struct ccp_actions ccp5_actions = {
+	.aes = ccp5_perform_aes,
+	.xts_aes = ccp5_perform_xts_aes,
+	.sha = ccp5_perform_sha,
+	.des3 = ccp5_perform_des3,
+	.rsa = ccp5_perform_rsa,
+	.passthru = ccp5_perform_passthru,
+	.ecc = ccp5_perform_ecc,
+	.sm2 = ccp5_perform_sm2,
+	.sm3 = ccp5_perform_sm3,
+	.sm4 = ccp5_perform_sm4,
+	.sm4_ctr = ccp5_perform_sm4_ctr,
+	.run_cmd = ccp5_do_run_cmd,
+	.sballoc = ccp_lsb_alloc,
+	.sbfree = ccp_lsb_free,
+	.init = ccp5_init,
+	.destroy = ccp5_destroy,
+	.get_free_slots = ccp5_get_free_slots,
+};
+
+const struct ccp_vdata ccpv5a_hygon = {
+	.version = CCP_VERSION(5, 1),
+	.setup = ccp5_config,
+	.perform = &ccp5_actions,
+	.offset = 0x0,
+	.rsamax = CCP5_RSA_MAX_WIDTH,
+};
+
+const struct ccp_vdata ccpv5b_hygon = {
+	.version = CCP_VERSION(5, 1),
+	.dma_chan_attr = DMA_PRIVATE,
+	.setup = ccp5other_config,
+	.perform = &ccp5_actions,
+	.offset = 0x0,
+	.rsamax = CCP5_RSA_MAX_WIDTH,
+};
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 4f6a0507f7cd..8c5a34019aa2 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -594,7 +594,22 @@ static const struct sp_dev_vdata dev_vdata[] = {
 	{	/* 9 */
 		.bar = 2,
 #ifdef CONFIG_CRYPTO_DEV_SP_CCP
-		.ccp_vdata = &ccpv5a,
+		.ccp_vdata = &ccpv5a_hygon,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+		.psp_vdata = &pspv1,
+#endif
+	},
+	{	/* 10 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5b_hygon,
+#endif
+	},
+	{	/* 11 */
+		.bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+		.ccp_vdata = &ccpv5a_hygon,
 #endif
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
 		.psp_vdata = &psp_csvv1,
@@ -612,11 +627,11 @@ static const struct pci_device_id sp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
 	{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
 	{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
-	{ PCI_VDEVICE(HYGON, 0x1456), 
(kernel_ulong_t)&dev_vdata[1] }, - { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, - { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[9] }, - { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[2] }, - { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[9] }, + { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[9] }, + { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[10] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[11] }, + { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[10] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[11] }, /* Last entry must be zero */ { 0, } }; -- Gitee From ae79e0b471c0b1f05e463d86c982dcfdfe512fee Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Thu, 25 Apr 2024 19:17:41 +0800 Subject: [PATCH 936/953] anolis: Add mediated ccp driver support for hygon crypto technology. ANBZ: #8582 Add the hct.ko driver module to support the HYGON Cryptography Technology (HCT) Engine, which also supports CCP virtualization. Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + drivers/crypto/ccp/Kconfig | 13 + drivers/crypto/ccp/Makefile | 2 + drivers/crypto/ccp/hygon/hct.c | 2185 +++++++++++++++++++++++ 5 files changed, 2202 insertions(+) create mode 100644 drivers/crypto/ccp/hygon/hct.c diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ff275d4fdbc1..58c6d3184e13 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7459,6 +7459,7 @@ CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m +CONFIG_CRYPTO_DEV_HCT=m # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 753475d33452..f04bbb4b7d28 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7450,6 +7450,7 @@ CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m +CONFIG_CRYPTO_DEV_HCT=m # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 726f1a6025eb..7115bf3028d4 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -69,6 +69,19 @@ config TDM_DEV_HYGON help Hygon TDM driver +config CRYPTO_DEV_HCT + tristate "HCT CCP device" + default m + depends on X86_64 + select VFIO_MDEV + help + Provides hygon crypto technology ccp device driver. + Support virtualize ccp devices based on mediated devices. + Support multi-process and virtual machines. + Support host-noiommu mode memory encryption function. + Support compiling hct.ko when mdev module is disabled. + If you choose 'M' here, this module will be called hct ccp. 
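+	  A mediated ccp instance is created through the standard mdev
+	  sysfs interface, for example (the parent device path and the
+	  type id below are illustrative and system-dependent):
+	    echo $UUID > <parent>/mdev_supported_types/<type-id>/create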
+ config TDM_KERNEL_GUARD tristate "Hygon TDM kernel guard" default y diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 69534353c8d4..88086af2412e 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -19,6 +19,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ vpsp.o ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o +obj-$(CONFIG_CRYPTO_DEV_HCT) += hygon/hct.o + obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c new file mode 100644 index 000000000000..dd386fec2b07 --- /dev/null +++ b/drivers/crypto/ccp/hygon/hct.c @@ -0,0 +1,2185 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2022 HYGON Corporation . All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * VERSION_STRING modification instructions: + * 0.1 -- support hct/mdev mode. + * 0.2 -- supoort qemu virtualization. + */ + +#undef pr_fmt +#define pr_fmt(fmt) "hct: " fmt + +#define VERSION_STRING "0.2" +#define DRIVER_AUTHOR "HYGON Corporation" +#define VERSION_SIZE 16 + +#define MCCP_CLASS_NAME "hct" +#define MCCP_NAME "hct" +#define MCCP_STRING_LEN 16 + +#define MCCP_CONFIG_SPACE_SIZE 0xff + +#define MCCP_VFIO_PCI_OFFSET_SHIFT 40 +#define MCCP_VFIO_PCI_OFFSET_TO_INDEX(off) \ + (off >> MCCP_VFIO_PCI_OFFSET_SHIFT) +#define MCCP_VFIO_PCI_INDEX_TO_OFFSET(index) \ + ((u64)(index) << MCCP_VFIO_PCI_OFFSET_SHIFT) +#define MCCP_VFIO_PCI_OFFSET_MASK \ + (((u64)(1) << MCCP_VFIO_PCI_OFFSET_SHIFT) - 1) +#define vdev_to_mdev_state(vdev) \ + container_of((vdev), struct mdev_state, vdev) + +#define MCCP_SHARE_IOC_TYPE 'C' +#define MCCP_SHARE_OP 0x01 +#define MCCP_SHARE_OP_DMA_MAP 0x01 +#define MCCP_SHARE_OP_DMA_UNMAP_ALL 0x02 +#define MCCP_SHARE_OP_GET_ID 0x03 +#define MCCP_SHARE_OP_GET_PASID 0x04 +#define MCCP_SHARE_OP_DMA_UNMAP 0x05 +#define MCCP_SHARE_OP_GET_VERSION 0x06 + +#define MCCP_SHARE_IOMMU_MAGIC 0x3d6a9c5728633b9e + +#define PCI_RESOURCE_BAR2 2 +#define MCCP_DEV_ID_SIZE 8 + +/* fixed iova range for ccp dma. 
*/ +#define MCCP_DMA_IOVA_OFFSET 0 +#define MCCP_DMA_IOVA_SIZE (1ul << 30) + +#define MCCP_INSTANCE_MAX 1024 +#define MCCP_INSTANCE_OFFSET 8 +#define MCCP_INSTANCE_MASK (~((1u << MCCP_INSTANCE_OFFSET) - 1)) +#define MCCP_PASID_SIZE (1 << 8) +#define MCCP_IOVA_MAX_SLOT 1024 +#define MCCP_DEV_MAX 16 +#define MCCP_DEV_QUEUE_MAX 8 +#define MCCP_DEV_QUEUE 5 +#define MCCP_QUEUES_MAX (MCCP_DEV_MAX * MCCP_DEV_QUEUE_MAX) +#define MCCP_QUEUE_NEED_INIT 0x01 +#define MCCP_SHARED_SIZE (MCCP_DEV_MAX * PAGE_SIZE) + +#define MCCP_MSIX_ENTRY_SIZE 2 +#define MCCP_NTB_VECTOR_NUM 1 +#define MCCP_PSP_VECTOR_NUM 2 +#define MCCP_GET_QUEUE_FLAG (0x55) +#define MCCP_PUT_QUEUE_FLAG (0xAA) +#define IRQ_EVENT_SIGNAL (1UL) +#define IRQ_EVENT_SIGFAL (0xFF) + +#define Q_MASK_REG 0x0000 +#define MCMD_Q_STATUS_INCR 0x1000 +#define MCMD_Q_TAIL_LO_BASE 0x0004 +#define MCMD_Q_HEAD_LO_BASE 0x0008 +#define MCMD_Q_INT_ENABLE_BASE 0x000C +#define MCMD_Q_INTERRUPT_STATUS_BASE 0x0010 +#define MCMD_Q_STATUS_BASE 0x0100 +#define MCMD_Q_INT_STATUS_BASE 0x0104 + +#define INT_COMPLETION 0x1 +#define INT_ERROR 0x2 +#define INT_QUEUE_STOPPED 0x4 +#define INT_EMPTY_QUEUE 0x8 +#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR) +#define MCMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) + +#define PHY_ADDR_MASK 0x7FFFFFFFFFFF + +struct hct_shared_cfg { + unsigned int iova_slot[MCCP_IOVA_MAX_SLOT]; + unsigned int ccp_queue_state[MCCP_QUEUES_MAX]; + unsigned int ccps_ref[MCCP_DEV_MAX]; + unsigned int ccps_ref_lock; + int rsvd1[15]; + u64 qidx[MCCP_QUEUES_MAX]; + unsigned int ccp_state[MCCP_DEV_MAX]; +} __aligned(PAGE_SIZE); + +struct hct_dev_ctrl { + unsigned char op; + unsigned char rsvd[3]; + union { + unsigned char version[VERSION_SIZE]; + unsigned int id; + struct { + unsigned long vaddr; + unsigned long iova; + unsigned long size; + }; + }; +}; + +struct hct_dma { + struct list_head next; + unsigned long vaddr; + unsigned long iova; + size_t size; + struct page **pages; + unsigned long npages; + unsigned int pfnmap_flag; +}; + +/* record the register address related to interrupt */ +struct hct_cmd_queue { + void __iomem *reg_control; + void __iomem *reg_tail_lo; + void __iomem *reg_head_lo; + void __iomem *reg_int_enable; + void __iomem *reg_interrupt_status; + void __iomem *reg_status; + void __iomem *reg_int_status; + struct mutex q_lock; + DECLARE_KFIFO_PTR(ectx_fifo, struct eventfd_ctx *); +} ____cacheline_aligned; + +struct hct_dev_ctx { + struct hct_cmd_queue cmd_q[MCCP_DEV_QUEUE_MAX]; + struct tasklet_struct irq_tasklet; + char devname[MCCP_STRING_LEN]; + void __iomem *io_regs; /* for BAR2 memory address */ + u32 q_count; + int irq; +} ____cacheline_aligned; + +struct hct_iommu { + unsigned long magic; + struct mutex lock; + struct pci_dev *pdev; + struct hct_dev_ctx dev_ctx; + unsigned long id; + unsigned long ref; +}; + +static struct hct_data { + struct hct_iommu iommu[MCCP_DEV_MAX]; + struct mutex lock; + unsigned long bitmap; + struct iommu_domain *domain; + int prot; + dma_addr_t dma_share_iova; + size_t dma_share_size; + unsigned long dma_share_ref; + unsigned long mdev_ref; + unsigned long ids[BITS_TO_LONGS(MCCP_INSTANCE_MAX)]; +} hct_data; + +static struct hct_share_cfg { + long ref; + struct mutex lock; + struct page *pages[MCCP_DEV_MAX]; + u64 pagecount; + void *vaddr; + u64 size; +} hct_share; + +static struct hct_dev { + dev_t vd_devt; + struct class *vd_class; + struct cdev vd_cdev; + struct device dev; + struct mdev_parent mdev_parent; +} hct_dev; + +struct mdev_region_info { + u64 start; + u64 phys_start; + u32 size; 
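+	/* region offset within the vfio device fd's offset space, built
+	 * from the BAR index via MCCP_VFIO_PCI_INDEX_TO_OFFSET()
+	 */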
+ u64 vfio_offset; +}; + +struct mdev_state { + struct vfio_device vdev; + struct mutex ops_lock; + struct mdev_device *mdev; + struct hct_iommu *iommu; + struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS]; + struct list_head next; + struct vfio_device_info dev_info; + unsigned long ref; + struct eventfd_ctx *trigger[MCCP_DEV_QUEUE_MAX]; + u8 efd_start; + u8 efd_count; +}; + +struct mdev_type hct_mdev_type = { + .sysfs_name = "1", + .pretty_name = "hct mdev type" +}; +struct mdev_type *hct_mdev_types[] = { + &hct_mdev_type +}; + +static void hct_cmd_queue_enable_interrupt(struct hct_dev_ctx *dev_ctx) +{ + unsigned int i; + + for (i = 0; i < dev_ctx->q_count; i++) + iowrite32(SUPPORTED_INTERRUPTS, dev_ctx->cmd_q[i].reg_int_enable); +} + +static void hct_cmd_queue_disable_interrupt(struct hct_dev_ctx *dev_ctx) +{ + unsigned int i; + + for (i = 0; i < dev_ctx->q_count; i++) + iowrite32(0x00, dev_ctx->cmd_q[i].reg_int_enable); +} + +static void hct_cmd_queue_intr_task(unsigned long data) +{ + struct hct_dev_ctx *dev_ctx = (struct hct_dev_ctx *)data; + u32 i, err, status; + + hct_cmd_queue_disable_interrupt(dev_ctx); + + for (i = 0; i < dev_ctx->q_count; i++) { + struct hct_cmd_queue *cmd_q = &dev_ctx->cmd_q[i]; + struct eventfd_ctx *trigger; + + status = ioread32(cmd_q->reg_interrupt_status); + if (status) { + if (status & INT_ERROR) { + /* print interrupt numbers for debug */ + err = ioread32(cmd_q->reg_status); + pr_err("Irq fail, errcode = %d.\n", MCMD_Q_ERROR(err)); + while (kfifo_get(&cmd_q->ectx_fifo, &trigger)) + eventfd_signal(trigger, IRQ_EVENT_SIGFAL); + } else { + while (kfifo_get(&cmd_q->ectx_fifo, &trigger)) + eventfd_signal(trigger, IRQ_EVENT_SIGNAL); + } + + iowrite32(status, cmd_q->reg_interrupt_status); + } + } + + hct_cmd_queue_enable_interrupt(dev_ctx); +} + +static irqreturn_t hct_cmd_queue_intr_handler(int irq, void *arg) +{ + struct hct_dev_ctx *dev_ctx = (struct hct_dev_ctx *)arg; + + tasklet_schedule(&dev_ctx->irq_tasklet); + return IRQ_HANDLED; +} + +static int hct_dev_cmd_queue_init(struct pci_dev *pdev, struct hct_dev_ctx *dev_ctx, int idx) +{ + struct hct_cmd_queue *cmd_q; + unsigned long addr, len; + unsigned int retval, qmr; + int i, ret; + + if (!pdev || !dev_ctx) + return -EINVAL; + + memset(dev_ctx, 0, sizeof(*dev_ctx)); + + ret = pci_enable_device(pdev); + if (ret) + return -EINVAL; + + addr = pci_resource_start(pdev, PCI_RESOURCE_BAR2); + len = pci_resource_len(pdev, PCI_RESOURCE_BAR2); + dev_ctx->io_regs = ioremap(addr, len); + if (!dev_ctx->io_regs) + return -ENOMEM; + + pci_set_master(pdev); + retval = pci_alloc_irq_vectors(pdev, 1, MCCP_MSIX_ENTRY_SIZE, PCI_IRQ_MSIX); + if (retval != MCCP_NTB_VECTOR_NUM && retval != MCCP_PSP_VECTOR_NUM) + return -ENOMEM; + + snprintf(dev_ctx->devname, MCCP_STRING_LEN, "hct-ccp-%d", idx); + dev_ctx->irq = pci_irq_vector(pdev, retval - 1); + /* To request_irq, the fourth parameter dev_name must be global + * variable or static variable. 
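+	 * Here that name lives in dev_ctx->devname, which remains valid
+	 * for as long as the handler stays registered.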
+ */ + ret = request_irq(dev_ctx->irq, hct_cmd_queue_intr_handler, 0, dev_ctx->devname, dev_ctx); + if (ret) { + pci_free_irq_vectors(pdev); + dev_ctx->irq = 0; + return ret; + } + + tasklet_init(&dev_ctx->irq_tasklet, hct_cmd_queue_intr_task, (unsigned long)dev_ctx); + + qmr = ioread32(dev_ctx->io_regs + Q_MASK_REG); + if (qmr == 0) { + iowrite32(0x1f, dev_ctx->io_regs + Q_MASK_REG); + qmr = ioread32(dev_ctx->io_regs + Q_MASK_REG); + } + for (i = 0; i < MCCP_DEV_QUEUE_MAX; i++) { + if (!(qmr & (1 << i))) + continue; + + cmd_q = &dev_ctx->cmd_q[dev_ctx->q_count++]; + + mutex_init(&cmd_q->q_lock); + ret = kfifo_alloc(&cmd_q->ectx_fifo, MCCP_INSTANCE_MAX, GFP_KERNEL); + if (ret) + return -ENOMEM; + + cmd_q->reg_control = dev_ctx->io_regs + MCMD_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + MCMD_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + MCMD_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + MCMD_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + MCMD_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + MCMD_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + MCMD_Q_INT_STATUS_BASE; + } + + return (dev_ctx->q_count >= 0) ? 0 : -1; +} + +static int hct_iommu_alloc(struct pci_dev *pdev) +{ + unsigned long i; + int ret = -EINVAL; + + mutex_lock(&hct_data.lock); + + i = find_first_zero_bit(&hct_data.bitmap, MCCP_DEV_MAX); + if (i != MCCP_DEV_MAX) + bitmap_set(&hct_data.bitmap, i, 1); + + if (device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) + hct_data.prot |= IOMMU_CACHE; + + mutex_unlock(&hct_data.lock); + + if (i == MCCP_DEV_MAX) + return -EINVAL; + + ret = iommu_attach_device(hct_data.domain, &pdev->dev); + if (ret) { + mutex_lock(&hct_data.lock); + bitmap_clear(&hct_data.bitmap, i, 1); + mutex_unlock(&hct_data.lock); + } else { + mutex_lock(&hct_data.iommu[i].lock); + hct_data.iommu[i].pdev = pdev; + hct_data.iommu[i].id = i; + hct_data.iommu[i].ref = 0; + hct_data.iommu[i].magic = MCCP_SHARE_IOMMU_MAGIC; + pci_set_drvdata(pdev, &hct_data.iommu[i]); + + ret = hct_dev_cmd_queue_init(pdev, &hct_data.iommu[i].dev_ctx, i); + mutex_unlock(&hct_data.iommu[i].lock); + } + + return ret; +} + +static void hct_iommu_free(struct hct_iommu *iommu) +{ + struct iommu_domain *domain; + + if (!iommu || iommu->magic != MCCP_SHARE_IOMMU_MAGIC) + return; + + domain = iommu_get_domain_for_dev(&iommu->pdev->dev); + + mutex_lock(&iommu->lock); + if (iommu->pdev && domain == hct_data.domain) + iommu_detach_device(domain, &iommu->pdev->dev); + iommu->pdev = NULL; + iommu->magic = 0; + mutex_unlock(&iommu->lock); + + mutex_lock(&hct_data.lock); + if (iommu->id < MCCP_DEV_MAX) + bitmap_clear(&hct_data.bitmap, iommu->id, 1); + mutex_unlock(&hct_data.lock); +} + +static int handle_pci_cfg_read(struct mdev_state *mdev_state, int offset, + __le32 *val, int count) +{ + u32 tmp_val = 0; + int ret = -EINVAL; + struct pci_dev *pdev = mdev_state->iommu->pdev; + + if (!mdev_state->mdev || !pdev) { + pr_err("hct: invalid dev or pdev\n"); + return ret; + } + + if (count == 1) { + u8 tmp; + + ret = pci_user_read_config_byte(pdev, offset, &tmp); + tmp_val = tmp; + } else if (count == 2) { + u16 tmp; + + ret = pci_user_read_config_word(pdev, offset, &tmp); + tmp_val = tmp; + } else if (count == 4) + ret = pci_user_read_config_dword(pdev, offset, &tmp_val); + + *val = cpu_to_le32(tmp_val); + + return ret; +} + +static int handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, + u8 *buf, u32 count) +{ + u32 tmp_val = 
le32_to_cpu(*(u32 *)buf); + int ret = -EINVAL; + struct pci_dev *pdev = mdev_state->iommu->pdev; + + + if (!mdev_state->mdev || !pdev) { + pr_err("hct: invalid dev or pdev\n"); + return ret; + } + + if (count == 1) + ret = pci_user_write_config_byte(pdev, offset, tmp_val); + else if (count == 2) + ret = pci_user_write_config_word(pdev, offset, tmp_val); + else if (count == 4) + ret = pci_user_write_config_dword(pdev, offset, tmp_val); + + return ret; +} + +static ssize_t hct_access(struct mdev_device *mdev, u8 *buf, size_t count, + loff_t pos, bool is_write) +{ + struct mdev_state *mdev_state; + unsigned int index; + loff_t offset; + int ret = 0; + + if (!mdev || !buf) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) { + pr_err("%s mdev_state not found\n", __func__); + return -EINVAL; + } + + mutex_lock(&mdev_state->ops_lock); + + index = MCCP_VFIO_PCI_OFFSET_TO_INDEX(pos); + offset = pos & MCCP_VFIO_PCI_OFFSET_MASK; + switch (index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + if (is_write) + ret = handle_pci_cfg_write(mdev_state, offset, buf, count); + else + ret = handle_pci_cfg_read(mdev_state, offset, (__le32 *)buf, count); + break; + default: + ret = -1; + } + + if (!ret) + ret = count; + + mutex_unlock(&mdev_state->ops_lock); + + return ret; +} + +static int hct_mdev_state_init(struct mdev_state *mdev_state) +{ + unsigned long *bitmap = &hct_data.bitmap; + struct hct_iommu *iommu = hct_data.iommu; + unsigned long ref = -1ul; + int i, n = -1; + int ret = 0; + + if (!mdev_state) + return -EINVAL; + + mutex_init(&mdev_state->ops_lock); + + mutex_lock(&hct_data.lock); + if (hct_data.mdev_ref > 0) { + mutex_unlock(&hct_data.lock); + return -EBUSY; + } + + for (i = 0; i < MCCP_DEV_MAX; i++) { + if (test_bit(i, bitmap)) { + if (ref > iommu[i].ref) { + n = i; + ref = iommu[i].ref; + } + } + } + + if (n >= 0 && n < MCCP_DEV_MAX) { + mdev_state->iommu = &iommu[n]; + mdev_state->ref = iommu[n].ref++; + } else + ret = -EINVAL; + mutex_unlock(&hct_data.lock); + + return ret; +} + +static int hct_init_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + int ret = 0; + + if (!mdev || !mdev_state) + return -EINVAL; + + ret = hct_mdev_state_init(mdev_state); + if (ret) + return ret; + + mdev_state->mdev = mdev; + return 0; +} + +static void hct_release_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + + mutex_lock(&hct_data.lock); + if (hct_data.mdev_ref > 0) + pr_warn("The mdev device is in use.\n"); + else { + mdev_state->iommu->ref--; + dev_set_drvdata(&mdev->dev, NULL); + } + mutex_unlock(&hct_data.lock); +} + +static ssize_t hct_read(struct vfio_device *vdev, char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + unsigned int done = 0; + int ret; + u32 val; + size_t filled; + + while (count) { + if (count >= 4 && !(*ppos % 4)) { + ret = hct_access(mdev, (u8 *)&val, sizeof(u32), *ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(u32))) + goto read_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + ret = hct_access(mdev, (u8 *)&val, sizeof(u16), *ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(u16))) + goto read_err; + + filled = 2; + } else { + ret = hct_access(mdev, (u8 *)&val, sizeof(u8), *ppos, false); + if (ret <= 0) + goto 
read_err; + + if (copy_to_user(buf, &val, sizeof(u8))) + goto read_err; + + filled = 1; + } + + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; + +read_err: + return -EFAULT; +} + +static ssize_t hct_write(struct vfio_device *vdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + unsigned int done = 0; + int ret; + u64 val; + u8 idx; + + while (count) { + size_t filled; + + if (count == MCCP_DEV_ID_SIZE && *ppos == MCCP_GET_QUEUE_FLAG) { + struct mdev_state *mdev_state; + struct hct_dev_ctx *dev_ctx; + struct hct_cmd_queue *cmd_q; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + goto write_err; + + if (copy_from_user(&val, buf, sizeof(u64)) || + val >= MCCP_DEV_QUEUE_MAX || + val < mdev_state->efd_start) + goto write_err; + + idx = val - mdev_state->efd_start; + dev_ctx = &mdev_state->iommu->dev_ctx; + cmd_q = &dev_ctx->cmd_q[idx]; + + mutex_lock(&cmd_q->q_lock); + if (kfifo_avail(&cmd_q->ectx_fifo)) + kfifo_put(&cmd_q->ectx_fifo, mdev_state->trigger[idx]); + mutex_unlock(&cmd_q->q_lock); + + filled = MCCP_DEV_ID_SIZE; + } else if (count >= 4 && !(*ppos % 4)) { + if (copy_from_user(&val, buf, sizeof(u32))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u32), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + if (copy_from_user(&val, buf, sizeof(u16))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u16), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 2; + } else { + if (copy_from_user(&val, buf, sizeof(u8))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u8), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 1; + } + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; +write_err: + return -EFAULT; +} + +static int hct_get_region_info(struct mdev_device *mdev, + struct vfio_region_info *region_info, + u16 *cap_type_id, void **cap_type) +{ + struct mdev_state *mdev_state = NULL; + struct pci_dev *pdev = NULL; + unsigned int size = 0; + u32 bar_index; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -ENODEV; + + bar_index = region_info->index; + if (bar_index >= VFIO_PCI_NUM_REGIONS) + return -EINVAL; + + pdev = mdev_state->iommu->pdev; + mutex_lock(&mdev_state->ops_lock); + + switch (bar_index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + size = pdev->cfg_size; + break; + case VFIO_PCI_BAR0_REGION_INDEX ... 
VFIO_PCI_BAR5_REGION_INDEX: + size = pci_resource_len(pdev, bar_index); + break; + default: + size = 0; + break; + } + + mdev_state->region_info[bar_index].size = size; + mdev_state->region_info[bar_index].vfio_offset = + MCCP_VFIO_PCI_INDEX_TO_OFFSET(bar_index); + + region_info->size = size; + region_info->offset = MCCP_VFIO_PCI_INDEX_TO_OFFSET(bar_index); + region_info->flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + if (size >= PAGE_SIZE) + region_info->flags |= VFIO_REGION_INFO_FLAG_MMAP; + + mutex_unlock(&mdev_state->ops_lock); + return 0; +} + +static int hct_get_irq_info(struct mdev_device *mdev, + struct vfio_irq_info *irq_info) +{ + switch (irq_info->index) { + case VFIO_PCI_INTX_IRQ_INDEX: + case VFIO_PCI_MSI_IRQ_INDEX: + case VFIO_PCI_MSIX_IRQ_INDEX: + case VFIO_PCI_REQ_IRQ_INDEX: + break; + + default: + return -EINVAL; + } + + irq_info->flags = VFIO_IRQ_INFO_EVENTFD; + irq_info->count = 1; + + if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX) + irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE | + VFIO_IRQ_INFO_AUTOMASKED); + else + irq_info->flags |= VFIO_IRQ_INFO_NORESIZE; + + return 0; +} + +static int hct_get_device_info(struct mdev_device *mdev, + struct vfio_device_info *dev_info) +{ + dev_info->flags = VFIO_DEVICE_FLAGS_PCI; + dev_info->num_regions = VFIO_PCI_NUM_REGIONS; + dev_info->num_irqs = VFIO_PCI_NUM_IRQS; + + return 0; +} + +/* each ccp vq corresponding to one eventfd */ +static int hct_set_irq_efds(struct mdev_device *mdev, + struct vfio_irq_set *hdr, + void *data) +{ + struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); + int *fd = (int *)data; + int i; + + if (!mdev_state || !data) + return -EINVAL; + + if (hdr->index != VFIO_PCI_MSIX_IRQ_INDEX) + return -EINVAL; + + if ((hdr->flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) != VFIO_IRQ_SET_ACTION_TRIGGER) + return -EINVAL; + + if (hdr->start + hdr->count > MCCP_DEV_QUEUE_MAX) + return -EINVAL; + + mdev_state->efd_start = hdr->start; + for (i = 0; i < hdr->count; i++) { + struct eventfd_ctx *trigger; + + trigger = eventfd_ctx_fdget(fd[i]); + if (IS_ERR(trigger)) + return -1; + + mdev_state->trigger[mdev_state->efd_count++] = trigger; + } + + return 0; +} + +static int hct_reset(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = NULL; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -EINVAL; + + return 0; +} + +static long hct_ioctl(struct vfio_device *vdev, unsigned int cmd, + unsigned long arg) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mdev_state *mdev_state = NULL; + unsigned long minsz; + int ret = 0; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -ENODEV; + + if (!mdev_state->iommu || !mdev_state->iommu->pdev) + return -EIO; + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = hct_get_device_info(mdev, &info); + if (ret) + return ret; + + memcpy(&mdev_state->dev_info, &info, sizeof(info)); + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + case VFIO_DEVICE_GET_REGION_INFO: + { + struct vfio_region_info info; + u16 cap_type_id = 0; + void *cap_type = NULL; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return 
-EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = hct_get_region_info(mdev, &info, &cap_type_id, + &cap_type); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + + case VFIO_DEVICE_GET_IRQ_INFO: + { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if ((info.argsz < minsz) || + (info.index >= mdev_state->dev_info.num_irqs)) + return -EINVAL; + + ret = hct_get_irq_info(mdev, &info); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + case VFIO_DEVICE_SET_IRQS: + { + struct vfio_irq_set hdr; + u8 *data = NULL; + size_t data_size = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + ret = vfio_set_irqs_validate_and_prepare(&hdr, mdev_state->dev_info.num_irqs, + mdev_state->dev_info.num_irqs, &data_size); + if (ret) + return ret; + + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + + mutex_lock(&mdev_state->ops_lock); + ret = hct_set_irq_efds(mdev, &hdr, data); + mutex_unlock(&mdev_state->ops_lock); + kfree(data); + + return ret; + } + case VFIO_DEVICE_RESET: + return hct_reset(mdev); + } + return -ENOTTY; +} + +static int hct_open(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + + if (!mdev_state) + return -ENODEV; + + if (!mdev_state->iommu || !mdev_state->iommu->pdev) + return -EIO; + + mutex_lock(&hct_data.lock); + hct_data.mdev_ref++; + mutex_unlock(&hct_data.lock); + + return 0; +} + +static void hct_close(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + int i; + + if (!mdev_state || !mdev_state->iommu) + return; + + for (i = 0; i < mdev_state->efd_count; i++) + eventfd_ctx_put(mdev_state->trigger[i]); + mdev_state->efd_count = 0; + + mutex_lock(&hct_data.lock); + hct_data.mdev_ref--; + mutex_unlock(&hct_data.lock); +} + +static ssize_t address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_state *mdev_state = dev_get_drvdata(dev); + struct pci_dev *pdev = NULL; + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + pdev = mdev_state->iommu->pdev; + size = sprintf(buf, "%04x:%02x:%02x.%x", + pci_domain_nr(pdev->bus), + pdev->bus->number, + 0x00ff & (pdev->devfn >> 8), + 0x00ff & pdev->devfn); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static ssize_t id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_state *mdev_state = dev_get_drvdata(dev); + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + + size = sprintf(buf, "%lu", mdev_state->iommu->id); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static ssize_t idx_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + 
struct mdev_state *mdev_state = dev_get_drvdata(dev); + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + + size = sprintf(buf, "%lu", mdev_state->ref); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static DEVICE_ATTR_RO(address); +static DEVICE_ATTR_RO(id); +static DEVICE_ATTR_RO(idx); + +static struct attribute *mdev_dev_attrs[] = { + &dev_attr_address.attr, + &dev_attr_id.attr, + &dev_attr_idx.attr, + NULL, +}; + +static const struct attribute_group mdev_dev_group = { + .name = "vendor", + .attrs = mdev_dev_attrs, +}; + +static const struct attribute_group *hct_mdev_groups[] = { + &mdev_dev_group, + NULL, +}; + +static void hct_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void hct_mmap_close(struct vm_area_struct *vma) +{ +} + +static vm_fault_t hct_mmap_fault(struct vm_fault *vmf) +{ + vm_fault_t ret = VM_FAULT_NOPAGE; + struct vm_area_struct *vma = vmf->vma; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, pgprot_decrypted(vma->vm_page_prot))) + ret = VM_FAULT_SIGBUS; + return ret; +} + +static const struct vm_operations_struct hct_mmap_ops = { + .open = hct_mmap_open, + .close = hct_mmap_close, + .fault = hct_mmap_fault, +}; + +static int hct_mmap(struct vfio_device *vdev, struct vm_area_struct *vma) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mdev_state *mds = dev_get_drvdata(&mdev->dev); + struct pci_dev *pdev = mds->iommu->pdev; + unsigned int index; + + index = vma->vm_pgoff >> (40 - PAGE_SHIFT); + vma->vm_private_data = mdev; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = pci_resource_start(pdev, index) >> PAGE_SHIFT; + vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_ops = &hct_mmap_ops; + return 0; +} + +static const struct vfio_device_ops hct_mdev_ops = { + .init = hct_init_dev, + .release = hct_release_dev, + .open_device = hct_open, + .close_device = hct_close, + .read = hct_read, + .write = hct_write, + .ioctl = hct_ioctl, + .mmap = hct_mmap, + .bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, +}; + +static int hct_mdev_probe(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = NULL; + int ret; + + if (!mdev) + return -EINVAL; + + mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, + &hct_mdev_ops); + if (IS_ERR(mdev_state)) + return PTR_ERR(mdev_state); + + ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); + if (ret) { + vfio_put_device(&mdev_state->vdev); + return ret; + } + + dev_set_drvdata(&mdev->dev, mdev_state); + return 0; +} + +static void hct_mdev_remove(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); + + vfio_unregister_group_dev(&mdev_state->vdev); + vfio_put_device(&mdev_state->vdev); +} + +static unsigned int hct_mdev_get_available(struct mdev_type *mtype) +{ + return MCCP_INSTANCE_MAX; +} + +static ssize_t hct_mdev_show_description(struct mdev_type *mtype, char *buf) +{ + return sprintf(buf, "This is HYGON CCP device!"); +} + +struct mdev_driver hct_mdev_driver = { + .device_api = 
VFIO_DEVICE_API_PCI_STRING, + .driver = { + .name = "hct_mdev", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + .dev_groups = hct_mdev_groups, + }, + .probe = hct_mdev_probe, + .remove = hct_mdev_remove, + .get_available = hct_mdev_get_available, + .show_description = hct_mdev_show_description, +}; + +struct hct_private { + struct list_head head; + struct mutex lock; + unsigned int id; +}; + +static int hct_share_open(struct inode *inode, struct file *file) +{ + int ret = 0; + struct hct_private *private; + unsigned int id; + + private = kzalloc(sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + + mutex_lock(&hct_data.lock); + bitmap_set(hct_data.ids, 0, 1); + id = (unsigned int)find_first_zero_bit(hct_data.ids, MCCP_INSTANCE_MAX); + if (id < MCCP_INSTANCE_MAX) + bitmap_set(hct_data.ids, id, 1); + mutex_unlock(&hct_data.lock); + + if (id >= MCCP_INSTANCE_MAX) { + kfree(private); + return -EBUSY; + } + + mutex_lock(&hct_share.lock); + hct_share.ref++; + hct_share.pagecount = MCCP_DEV_MAX; + mutex_unlock(&hct_share.lock); + + file->private_data = private; + private->id = id << MCCP_INSTANCE_OFFSET; + INIT_LIST_HEAD(&private->head); + mutex_init(&private->lock); + + return ret; +} + +static bool is_invalid_reserved_pfn(unsigned long pfn) +{ + if (pfn_valid(pfn)) + return PageReserved(pfn_to_page(pfn)); + + return true; +} + +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + +static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, + int prot, unsigned long *pfn) +{ + struct page *page[1]; + struct vm_area_struct *vma; + unsigned int flags = 0; + int ret; + + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + mmap_read_lock(mm); + ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, + page, NULL); + if (ret == 1) { + *pfn = page_to_pfn(page[0]); + ret = 0; + goto done; + } + + vaddr = untagged_addr(vaddr); + +retry: + vma = find_vma_intersection(mm, vaddr, vaddr + 1); + + if (vma && vma->vm_flags & VM_PFNMAP) { + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; + } +done: + mmap_read_unlock(mm); + + return ret; +} + +struct page **hct_pin_memory(struct hct_private *private, unsigned long uaddr, + unsigned long ulen, unsigned long *n) +{ + unsigned long npages, size; + int npinned; + struct page **pages; + unsigned long first, last; + + if (ulen == 0 || uaddr + ulen < uaddr) + return NULL; + + first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; + last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; + npages = (last - first + 1); + + if (WARN_ON_ONCE(npages > INT_MAX)) + return NULL; + + size = npages * sizeof(struct page *); + if (size > PAGE_SIZE) + pages = vmalloc(size); + else + pages = kmalloc(size, GFP_KERNEL); + + if (!pages) + return NULL; + + /* Pin the user virtual address. 
*/ + npinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); + if (npinned != npages) + goto err; + + *n = npages; + return pages; + +err: + if (npinned > 0) + unpin_user_pages(pages, npinned); + kvfree(pages); + return NULL; +} + +static void hct_unpin_memory(struct hct_private *private, struct page **pages, + unsigned long npages) +{ + unpin_user_pages(pages, npages); + kvfree(pages); +} + +static inline int is_dma_share(dma_addr_t dma_iova, size_t dma_size) +{ + if (dma_iova >= MCCP_DMA_IOVA_OFFSET && + dma_iova + dma_size <= MCCP_DMA_IOVA_OFFSET + MCCP_DMA_IOVA_SIZE) + return 1; + else + return 0; +} + +static int hct_add_dma_share_unsafe(dma_addr_t dma_iova, size_t dma_size) +{ + int ret = 0; + + if (!is_dma_share(dma_iova, dma_size)) + return 0; + + if (!hct_data.dma_share_size) { + hct_data.dma_share_iova = dma_iova; + hct_data.dma_share_size = dma_size; + } + + if (dma_iova != hct_data.dma_share_iova || + dma_size != hct_data.dma_share_size) + ret = -EINVAL; + else + hct_data.dma_share_ref++; + + return ret; +} + +static int hct_unmap_dma_share_unsafe(dma_addr_t dma_iova, size_t dma_size) +{ + if (!is_dma_share(dma_iova, dma_size)) + return -EINVAL; + + if (hct_data.dma_share_size) { + if (hct_data.dma_share_iova == dma_iova && + hct_data.dma_share_size == dma_size) + hct_data.dma_share_ref--; + + if (hct_data.dma_share_ref == 0) { + iommu_unmap(hct_data.domain, hct_data.dma_share_iova, + hct_data.dma_share_size); + hct_data.dma_share_size = 0; + } + } + + return 0; +} + +static int hct_iommu_iova_check_unsafe(dma_addr_t dma_iova, size_t dma_size, + phys_addr_t phys_addr, + struct iommu_domain *domain) +{ + dma_addr_t iova; + int ret = 0; + size_t mapped = 0; + + iova = dma_iova; + while (iova < dma_iova + dma_size) { + phys_addr_t phys; + + phys = iommu_iova_to_phys(domain, iova); + if (phys) { + if ((phys_addr & PHY_ADDR_MASK) != (phys & PHY_ADDR_MASK)) { + pr_err("iova=0x%llx phys_addr=0x%llx phys=0x%llx, check fail.\n", + iova, phys_addr, phys); + ret = -1; + break; + } + mapped += PAGE_SIZE; + } + iova += PAGE_SIZE; + phys_addr += PAGE_SIZE; + } + + if (ret == 0 && mapped == dma_size) + ret = 1; + + return ret; +} + +static unsigned long get_num_contig_pages(unsigned long idx, + struct page **inpages, unsigned long npages) +{ + unsigned long paddr, next_paddr; + unsigned long i = idx + 1, pages = 1; + + /* find the number of contiguous pages starting from idx */ + paddr = page_to_phys(inpages[idx]); + while (i < npages) { + next_paddr = page_to_phys(inpages[i++]); + if ((paddr + PAGE_SIZE) == next_paddr) { + pages++; + paddr = next_paddr; + continue; + } + break; + } + + return pages; +} + +static struct hct_dma *hct_find_dma(struct hct_private *private, + dma_addr_t start, size_t size) +{ + struct hct_dma *dma, *tmp; + + list_for_each_entry_safe(dma, tmp, &private->head, next) { + if (dma->iova <= start && + dma->iova + dma->size >= start + size) + return dma; + } + + return NULL; +} + +/* + * Turns out AMD IOMMU has a page table bug where it won't map large pages + * to a region that previously mapped smaller pages. This should be fixed + * soon, so this is just a temporary workaround to break mappings down into + * PAGE_SIZE. Better to map smaller pages than nothing. 
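+ *
+ * If iommu_map() fails partway below, the pages this helper has
+ * already mapped are unmapped again before the error is returned.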
+ */
+static int map_try_harder(struct iommu_domain *domain, dma_addr_t iova,
+			  unsigned long pfn, long npage, int prot)
+{
+	long i;
+	int ret = 0;
+
+	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
+		ret = iommu_map(domain, iova,
+				(phys_addr_t)pfn << PAGE_SHIFT,
+				PAGE_SIZE, prot, GFP_KERNEL);
+		if (ret)
+			break;
+	}
+
+	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
+		iommu_unmap(domain, iova, PAGE_SIZE);
+
+	return ret;
+}
+
+/*
+ * Only handles I/O memory, i.e. VMAs with VM_PFNMAP set in vm_flags.
+ */
+static int hct_iommu_pfnmap(struct hct_private *private, struct hct_dma *dma)
+{
+	unsigned long pfn;
+	unsigned long vaddr;
+	dma_addr_t iova;
+	size_t mapped_size = 0;
+	size_t size;
+	int ret = 0;
+
+	if (!private || !dma)
+		return -EINVAL;
+
+	dma->pfnmap_flag = 1;
+	vaddr = dma->vaddr;
+	iova = dma->iova;
+	size = dma->size;
+
+	mutex_lock(&hct_data.lock);
+	while (size) {
+		ret = vaddr_get_pfn(current->mm, vaddr, hct_data.prot, &pfn);
+		if (ret)
+			goto map_fail;
+
+		ret = iommu_map(hct_data.domain, iova,
+				(phys_addr_t)pfn << PAGE_SHIFT,
+				1 << PAGE_SHIFT, hct_data.prot,
+				GFP_KERNEL);
+		if (ret)
+			goto map_fail;
+
+		size -= 1 << PAGE_SHIFT;
+		vaddr += 1 << PAGE_SHIFT;
+		iova += 1 << PAGE_SHIFT;
+		mapped_size += 1 << PAGE_SHIFT;
+	}
+	mutex_unlock(&hct_data.lock);
+
+	list_add(&dma->next, &private->head);
+	return 0;
+
+map_fail:
+	mutex_unlock(&hct_data.lock);
+	iommu_unmap(hct_data.domain, dma->iova, mapped_size);
+	return ret;
+}
+
+static int hct_iommu_map(struct hct_private *private, unsigned long vaddr,
+			 dma_addr_t dma_iova, size_t dma_size)
+{
+	struct hct_dma *dma;
+	struct page **pages;
+	unsigned long n, i, npages;
+	dma_addr_t iova, iova_end, iova_next;
+	int ret = 0;
+	size_t mapped_size = 0;
+	size_t iova_size = dma_size;
+
+	if (!dma_size || (vaddr | dma_iova | dma_size) & (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	if (hct_find_dma(private, dma_iova, dma_size))
+		return 0;
+
+	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return -ENOMEM;
+
+	pages = hct_pin_memory(private, vaddr, dma_size, &n);
+	if (!pages) {
+		/* Pinning failed, so assume the VMA is VM_PFNMAP memory. */
+		dma->vaddr = vaddr;
+		dma->iova = dma_iova;
+		dma->size = dma_size;
+		ret = hct_iommu_pfnmap(private, dma);
+		if (ret)
+			kfree(dma);
+		return ret;
+	}
+
+	dma->vaddr = vaddr;
+	dma->iova = dma_iova;
+	dma->pages = pages;
+	dma->size = dma_size;
+	dma->npages = n;
+
+	iova = dma_iova;
+	iova_end = dma_iova + dma_size;
+	iova_size = dma_size;
+
+	mutex_lock(&hct_data.lock);
+	for (i = 0; iova < iova_end && i < n; iova = iova_next, i += npages) {
+		size_t len;
+		phys_addr_t phys;
+
+		npages = get_num_contig_pages(i, pages, n);
+
+		/* When npages is 524288, npages * PAGE_SIZE is 0x80000000
+		 * (bit 31 set). If len had type int, any npages of 524288
+		 * or more would make len negative, so len must be a size_t.
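+		 * For example: 524288 * 4096 = 2^31 = 0x80000000, which read
+		 * as a 32-bit signed int is INT_MIN rather than 2 GiB.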
+ */ + len = min_t(size_t, (npages * PAGE_SIZE), iova_size); + phys = page_to_phys(pages[i]); + + iova_size -= len; + iova_next = iova + len; + + ret = hct_iommu_iova_check_unsafe(iova, len, phys, hct_data.domain); + if (ret < 0) { + ret = -EBUSY; + goto map_fail; + } else if (ret > 0) { + ret = 0; + continue; + } + + ret = iommu_map(hct_data.domain, iova, phys, len, hct_data.prot, GFP_KERNEL); + if (ret) { + if (ret == -EBUSY) + ret = map_try_harder(hct_data.domain, iova, + phys >> PAGE_SHIFT, + len >> PAGE_SHIFT, + hct_data.prot); + if (ret) + goto map_fail; + } + mapped_size += len; + cond_resched(); + } + + ret = hct_add_dma_share_unsafe(dma_iova, dma_size); + if (ret) + goto map_fail; + + mutex_unlock(&hct_data.lock); + list_add(&dma->next, &private->head); + return 0; +map_fail: + if (mapped_size) + iommu_unmap(hct_data.domain, dma_iova, mapped_size); + mutex_unlock(&hct_data.lock); + hct_unpin_memory(private, pages, n); + kfree(dma); + return ret; +} + +static void hct_iommu_unmap(struct hct_private *private, + dma_addr_t iova, size_t size) +{ + struct iommu_domain *domain = hct_data.domain; + struct hct_dma *dma; + + if (!size || (iova | size) & (PAGE_SIZE - 1)) + return; + + dma = hct_find_dma(private, iova, size); + if (!dma) + return; + + mutex_lock(&hct_data.lock); + iommu_unmap(domain, dma->iova, dma->size); + if (dma->pfnmap_flag == 0) + hct_unpin_memory(private, dma->pages, dma->npages); + list_del(&dma->next); + kfree(dma); + mutex_unlock(&hct_data.lock); +} + +static void hct_iommu_unmap_all(struct hct_private *private) +{ + struct iommu_domain *domain = hct_data.domain; + struct hct_dma *dma, *tmp; + + mutex_lock(&hct_data.lock); + list_for_each_entry_safe(dma, tmp, &private->head, next) { + if (hct_unmap_dma_share_unsafe(dma->iova, dma->size)) + iommu_unmap(domain, dma->iova, dma->size); + if (dma->pfnmap_flag == 0) + hct_unpin_memory(private, dma->pages, dma->npages); + cond_resched(); + list_del(&dma->next); + kfree(dma); + } + mutex_unlock(&hct_data.lock); +} + +static struct page *hct_get_page(pgoff_t page_idx) +{ + u64 *node; + + mutex_lock(&hct_share.lock); + if (!hct_share.pages[page_idx]) { + hct_share.pages[page_idx] = + alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0); + if (!hct_share.pages[page_idx]) { + mutex_unlock(&hct_share.lock); + return NULL; + } + } + get_page(hct_share.pages[page_idx]); + + node = page_to_virt(hct_share.pages[page_idx]) + PAGE_SIZE - 8; + *node = hct_data.iommu[page_idx].pdev->dev.numa_node; + mutex_unlock(&hct_share.lock); + + return hct_share.pages[page_idx]; +} + +static void hct_put_pages(void) +{ + int i; + + for (i = 0; i < hct_share.pagecount; i++) { + if (!hct_share.pages[i]) + continue; + + put_page(hct_share.pages[i]); + hct_share.pages[i] = NULL; + } +} + +/* Clear status information when exiting abnormally. */ +static void hct_clear_shared_lock_memory(unsigned int gid) +{ + int *base; + int *queue_lck; + int dev_idx; + int queue_idx; + + for (dev_idx = 0; dev_idx < MCCP_DEV_MAX && + hct_share.pages[dev_idx]; dev_idx++) { + base = (int *)page_to_virt(hct_share.pages[dev_idx]); + for (queue_idx = 0; queue_idx < MCCP_DEV_QUEUE; queue_idx++) { + queue_lck = base + queue_idx; + if (*queue_lck == gid) + *queue_lck = 0; /* vq userid will be changed. 
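+				 * A lock word still holding this gid was owned
+				 * by the exiting process; resetting it lets the
+				 * vq be acquired again.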
*/
+		}
+	}
+}
+
+static long hct_share_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+	struct hct_dev_ctrl dev_ctrl;
+	unsigned int cmd_id;
+	unsigned int len;
+	unsigned int pasid;
+	int ret = 0;
+	struct hct_private *private = file->private_data;
+
+	if (_IOC_TYPE(ioctl) != MCCP_SHARE_IOC_TYPE)
+		return -EINVAL;
+
+	cmd_id = _IOC_NR(ioctl);
+	len = _IOC_SIZE(ioctl);
+
+	if (cmd_id != MCCP_SHARE_OP)
+		return -EINVAL;
+
+	if (len != sizeof(dev_ctrl))
+		return -EINVAL;
+
+	if (copy_from_user(&dev_ctrl, (void __user *)arg, sizeof(dev_ctrl)))
+		return -EINVAL;
+
+	mutex_lock(&private->lock);
+	switch (dev_ctrl.op) {
+	case MCCP_SHARE_OP_DMA_MAP:
+		ret = hct_iommu_map(private, dev_ctrl.vaddr, dev_ctrl.iova, dev_ctrl.size);
+		break;
+	case MCCP_SHARE_OP_DMA_UNMAP:
+		hct_iommu_unmap(private, dev_ctrl.iova, dev_ctrl.size);
+		ret = 0;
+		break;
+	case MCCP_SHARE_OP_DMA_UNMAP_ALL:
+		hct_iommu_unmap_all(private);
+		ret = 0;
+		break;
+	case MCCP_SHARE_OP_GET_ID:
+		dev_ctrl.id = private->id;
+		if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl)))
+			ret = -EINVAL;
+		else
+			ret = 0;
+		break;
+	case MCCP_SHARE_OP_GET_PASID:
+		/* Different virtual machines are distinguished by pasid. */
+		pasid = private->id >> MCCP_INSTANCE_OFFSET;
+		if (pasid >= MCCP_PASID_SIZE) {
+			ret = -EINVAL;
+			break;
+		}
+
+		dev_ctrl.id = pasid;
+		if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl)))
+			ret = -EINVAL;
+		break;
+	case MCCP_SHARE_OP_GET_VERSION:
+		memcpy(dev_ctrl.version, VERSION_STRING, sizeof(VERSION_STRING));
+		if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl)))
+			ret = -EINVAL;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&private->lock);
+
+	return ret;
+}
+
+static int hct_share_close(struct inode *inode, struct file *file)
+{
+	struct hct_private *private = file->private_data;
+	unsigned int id = private->id >> MCCP_INSTANCE_OFFSET;
+
+	mutex_lock(&hct_share.lock);
+	/* In the VM scenario, hct_share.vaddr is NULL.
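+	 * The 64KB region is then backed page-by-page via the fault
+	 * handler, so the cleanup below only applies to the host case.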
*/ + if (hct_share.vaddr) { + struct hct_shared_cfg *cfg = hct_share.vaddr; + int i; + + if (private->id == cfg->ccps_ref_lock) + cfg->ccps_ref_lock = 0; + + for (i = 0; i < MCCP_DEV_MAX; i++) + if (private->id == (MCCP_INSTANCE_MASK & cfg->ccp_state[i])) + cfg->ccp_state[i] = 0; + + for (i = 0; i < MCCP_QUEUES_MAX; i++) + if (private->id == cfg->ccp_queue_state[i]) + cfg->ccp_queue_state[i] = MCCP_QUEUE_NEED_INIT; + + for (i = 0; i < MCCP_IOVA_MAX_SLOT; i++) + if (private->id == cfg->iova_slot[i]) + cfg->iova_slot[i] = 0; + } + + hct_clear_shared_lock_memory(private->id); + + hct_share.ref--; + if (!hct_share.ref) { + hct_put_pages(); + if (hct_share.vaddr) + memset(hct_share.vaddr, 0x00, hct_share.size); + } + mutex_unlock(&hct_share.lock); + + mutex_lock(&hct_data.lock); + if (id < MCCP_INSTANCE_MAX) + bitmap_clear(hct_data.ids, id, 1); + mutex_unlock(&hct_data.lock); + + mutex_lock(&private->lock); + hct_iommu_unmap_all(private); + mutex_unlock(&private->lock); + + kfree(private); + return 0; +} + +static vm_fault_t hct_cdev_vma_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + pgoff_t page_idx = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + + if (page_idx >= hct_share.pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = hct_get_page(page_idx); + if (!vmf->page) + return VM_FAULT_SIGBUS; + + return 0; +} + +static const struct vm_operations_struct hct_cdev_vm_ops = { + .fault = hct_cdev_vma_fault, +}; + +static int hct_share_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long len; + int ret = 0; + + mutex_lock(&hct_share.lock); + len = vma->vm_end - vma->vm_start; + if (len == MCCP_SHARED_SIZE) { + /* The required size for vm is 64KB, + * and will follow the pagefault process. + */ + vma->vm_ops = &hct_cdev_vm_ops; + goto exit; + } + + if (unlikely(!hct_share.vaddr)) { + hct_share.size = (vma->vm_end - vma->vm_start); + hct_share.vaddr = kzalloc(hct_share.size, GFP_KERNEL); + } + + if (!hct_share.vaddr) { + ret = -ENOMEM; + goto exit; + } + + if (hct_share.size != (vma->vm_end - vma->vm_start)) { + ret = -EINVAL; + pr_err("invalid hct share size\n"); + goto exit; + } + + ret = remap_pfn_range(vma, vma->vm_start, + virt_to_phys(hct_share.vaddr) >> PAGE_SHIFT, + hct_share.size, + vma->vm_page_prot); +exit: + mutex_unlock(&hct_share.lock); + return ret; +} + +static const struct file_operations hct_share_fops = { + .owner = THIS_MODULE, + .open = hct_share_open, + .release = hct_share_close, + .mmap = hct_share_mmap, + .unlocked_ioctl = hct_share_ioctl, +}; + +static struct miscdevice hct_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hct_share", + .fops = &hct_share_fops, + .mode = 0666, +}; + +static int hct_share_init(void) +{ + int i; + int ret; + + memset(&hct_data, 0x00, sizeof(hct_data)); + mutex_init(&hct_data.lock); + + for (i = 0; i < MCCP_DEV_MAX; i++) + mutex_init(&hct_data.iommu[i].lock); + + ret = misc_register(&hct_misc); + if (!ret) { + hct_data.domain = iommu_domain_alloc(&pci_bus_type); + if (!hct_data.domain) { + misc_deregister(&hct_misc); + if (!pci_bus_type.iommu_ops) { + pr_err("iommu is disabled\n"); + return -ENODEV; + } + return -ENOMEM; + } + hct_data.prot = IOMMU_READ | IOMMU_WRITE; + } + + return ret; +} + +static void hct_share_exit(void) +{ + int i; + struct hct_iommu *iommu; + struct iommu_domain *domain; + struct pci_dev *pdev; + + mutex_lock(&hct_data.lock); + for (i = 0; i < MCCP_DEV_MAX; i++) { + iommu = &hct_data.iommu[i]; + pdev = iommu->pdev; + if (pdev) { + domain = 
iommu_get_domain_for_dev(&pdev->dev); + if (domain == hct_data.domain) + iommu_detach_device(domain, &pdev->dev); + } + } + mutex_unlock(&hct_data.lock); + + if (hct_data.domain) + iommu_domain_free(hct_data.domain); + + misc_deregister(&hct_misc); + kfree(hct_share.vaddr); +} + +static int hct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return hct_iommu_alloc(pdev); +} + +static void hct_pci_remove(struct pci_dev *pdev) +{ + struct hct_iommu *iommu; + struct hct_dev_ctx *dev_ctx; + int i; + + iommu = pci_get_drvdata(pdev); + if (!iommu) { + pci_set_drvdata(pdev, NULL); + return; + } + + dev_ctx = &iommu->dev_ctx; + for (i = 0; i < dev_ctx->q_count; i++) + kfifo_free(&dev_ctx->cmd_q[i].ectx_fifo); + + if (dev_ctx->io_regs) + iounmap(dev_ctx->io_regs); + if (dev_ctx->irq) { + tasklet_kill(&dev_ctx->irq_tasklet); + free_irq(dev_ctx->irq, dev_ctx); + dev_ctx->irq = 0; + pci_free_irq_vectors(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); + } + hct_iommu_free(iommu); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hct_pci_driver = { + .name = "hct", + .id_table = NULL, + .probe = hct_pci_probe, + .remove = hct_pci_remove, +}; + +static const struct file_operations hct_vd_fops = { + .owner = THIS_MODULE, +}; + +static void hct_device_release(struct device *dev) +{ + dev_dbg(dev, "hct: released\n"); +} + +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +static int __init hct_dev_init(void) +{ + int ret = 0; + u32 vendor_ebx = 0; + u32 vendor_ecx = 0; + u32 vendor_edx = 0; + u32 vendor_eax = 0; + + cpuid(0, &vendor_eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + + /* HygonGenuine */ + if (!(vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx)) { + pr_err("Not hygon hardware\n"); + return -1; + } + + ret = mdev_register_driver(&hct_mdev_driver); + if (ret) + return ret; + + memset(&hct_dev, 0, sizeof(hct_dev)); + + ret = alloc_chrdev_region(&hct_dev.vd_devt, 0, MINORMASK + 1, + MCCP_NAME); + + if (ret < 0) { + pr_err("Error: failed to register hct_dev, err:%d\n", ret); + goto failed0; + } + + cdev_init(&hct_dev.vd_cdev, &hct_vd_fops); + cdev_add(&hct_dev.vd_cdev, hct_dev.vd_devt, MINORMASK + 1); + + hct_dev.vd_class = class_create(MCCP_CLASS_NAME); + if (IS_ERR(hct_dev.vd_class)) { + pr_err("Error: failed to register hct_dev class\n"); + ret = PTR_ERR(hct_dev.vd_class); + goto failed1; + } + + hct_dev.dev.class = hct_dev.vd_class; + hct_dev.dev.release = hct_device_release; + dev_set_name(&hct_dev.dev, "%s", MCCP_NAME); + hct_dev.dev.devt = hct_dev.vd_devt; + + ret = device_register(&hct_dev.dev); + if (ret) + goto failed2; + + ret = mdev_register_parent(&hct_dev.mdev_parent, &hct_dev.dev, + &hct_mdev_driver, hct_mdev_types, + ARRAY_SIZE(hct_mdev_types)); + if (ret) + goto failed3; + + ret = hct_share_init(); + if (ret) + goto failed4; + + memset(&hct_share, 0x00, sizeof(hct_share)); + mutex_init(&hct_share.lock); + + ret = pci_register_driver(&hct_pci_driver); + if (ret) + goto failed5; + + goto all_done; + +failed5: + hct_share_exit(); + +failed4: + mdev_unregister_parent(&hct_dev.mdev_parent); + +failed3: + device_unregister(&hct_dev.dev); + +failed2: + class_destroy(hct_dev.vd_class); + +failed1: + cdev_del(&hct_dev.vd_cdev); + unregister_chrdev_region(hct_dev.vd_devt, MINORMASK + 1); + +failed0: + mdev_unregister_driver(&hct_mdev_driver); + +all_done: + 
return ret;
+}
+
+static void __exit hct_dev_exit(void)
+{
+	hct_share_exit();
+	hct_dev.dev.bus = NULL;
+	mdev_unregister_parent(&hct_dev.mdev_parent);
+
+	device_unregister(&hct_dev.dev);
+	cdev_del(&hct_dev.vd_cdev);
+	unregister_chrdev_region(hct_dev.vd_devt, MINORMASK + 1);
+	class_destroy(hct_dev.vd_class);
+	mdev_unregister_driver(&hct_mdev_driver);
+	hct_dev.vd_class = NULL;
+
+	pci_unregister_driver(&hct_pci_driver);
+}
+
+module_init(hct_dev_init)
+module_exit(hct_dev_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION_STRING);
+MODULE_AUTHOR(DRIVER_AUTHOR);
--
Gitee

From 8161e74fd6192164b636a88d83bfaa0940997f39 Mon Sep 17 00:00:00 2001
From: Yabin Li
Date: Thu, 25 Apr 2024 19:36:07 +0800
Subject: [PATCH 937/953] anolis: HCT: support memory encryption on the host

ANBZ: #8582

1. Support running hct in vfio-noiommu mode on a memory-encrypted host.
2. Change the BAR memory of hct virtual machines to the WB cache
   attribute.

Signed-off-by: Yabin Li
Signed-off-by: yangdepei
Reviewed-by: Tianjia Zhang
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/3119
---
 drivers/crypto/ccp/hygon/hct.c | 132 ++++++++++++++++++++++++++++++++-
 1 file changed, 131 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c
index dd386fec2b07..392371af865a 100644
--- a/drivers/crypto/ccp/hygon/hct.c
+++ b/drivers/crypto/ccp/hygon/hct.c
@@ -27,18 +27,21 @@
 #include
 #include
 #include
+#include
 #include

 /**
  * VERSION_STRING modification instructions:
  * 0.1 -- support hct/mdev mode.
  * 0.2 -- supoort qemu virtualization.
+ * 0.3 -- support host-noiommu mode memory encryption function,
+ *        and performance optimization in virtual machines (enable caching).
  */

 #undef pr_fmt
 #define pr_fmt(fmt) "hct: " fmt

-#define VERSION_STRING "0.2"
+#define VERSION_STRING "0.3"
 #define DRIVER_AUTHOR "HYGON Corporation"

 #define VERSION_SIZE 16
@@ -67,6 +70,11 @@
 #define MCCP_SHARE_OP_DMA_UNMAP 0x05
 #define MCCP_SHARE_OP_GET_VERSION 0x06

+#define MCCP_NOIOMMU_IOC_TYPE MCCP_SHARE_IOC_TYPE
+#define MCCP_NOIOMMU_OP MCCP_SHARE_OP
+#define MCCP_NOIOMMU_SET_MEMORY_WB 0x01
+#define MCCP_NOIOMMU_GET_SME_ACTIVE 0x02
+
 #define MCCP_SHARE_IOMMU_MAGIC 0x3d6a9c5728633b9e

 #define PCI_RESOURCE_BAR2 2
@@ -130,11 +138,16 @@ struct hct_dev_ctrl {
 	union {
 		unsigned char version[VERSION_SIZE];
 		unsigned int id;
+		unsigned long sme_mask;
 		struct {
 			unsigned long vaddr;
 			unsigned long iova;
 			unsigned long size;
 		};
+		struct {
+			unsigned long vt_addr;
+			unsigned int nr_pages;
+		};
 	};
 };

@@ -2064,6 +2077,115 @@ static void hct_device_release(struct device *dev)
 	dev_dbg(dev, "hct: released\n");
 }

+/* Set the PAT, PCD and PWT flags of the page all to 0
+ * so the mapping becomes write-back cacheable.
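+ * Index 0 of the PAT is write-back (WB) in both the power-on default
+ * and the kernel's PAT configuration, so clearing all three bits is
+ * assumed here to select WB.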
+ */
+void hct_noiommu_set_memory_wb(unsigned long address)
+{
+	pgd_t *pgd = current->mm->pgd + pgd_index(address);
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t old_pte;
+	pte_t new_pte;
+	pgprot_t new_prot;
+	unsigned long pfn;
+
+	if (pgd_none(*pgd)) {
+		pr_err("pgd val shouldn't be none\n");
+		return;
+	}
+
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d)) {
+		pr_err("p4d val shouldn't be none\n");
+		return;
+	}
+
+	pud = pud_offset(p4d, address);
+	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud)) {
+		pr_err("pud val is invalid.\n");
+		return;
+	}
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) {
+		pr_err("pmd val is invalid.\n");
+		return;
+	}
+
+	pte = pte_offset_kernel(pmd, address);
+	if (pte_none(*pte)) {
+		pr_err("pte val shouldn't be none\n");
+		return;
+	}
+
+	old_pte = *pte;
+	pfn = pte_pfn(old_pte);
+	new_prot = pte_pgprot(old_pte);
+	pgprot_val(new_prot) &= ~(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT);
+	new_pte = pfn_pte(pfn, new_prot);
+	set_pte_atomic(pte, new_pte);
+}
+
+static DEFINE_MUTEX(hct_noiommu_lock);
+static long hct_noiommu_ioctl(struct file *file,
+			      unsigned int ioctl, unsigned long arg)
+{
+	struct hct_dev_ctrl ctrl;
+	unsigned int cmd_id;
+	unsigned int len;
+	int ret = 0;
+
+	if (_IOC_TYPE(ioctl) != MCCP_NOIOMMU_IOC_TYPE)
+		return -EINVAL;
+
+	cmd_id = _IOC_NR(ioctl);
+	len = _IOC_SIZE(ioctl);
+
+	if (cmd_id != MCCP_SHARE_OP)
+		return -EINVAL;
+
+	if (len != sizeof(ctrl))
+		return -EINVAL;
+
+	if (copy_from_user(&ctrl, (void __user *)arg, sizeof(ctrl)))
+		return -EINVAL;
+
+	mutex_lock(&hct_noiommu_lock);
+	switch (ctrl.op) {
+	case MCCP_NOIOMMU_SET_MEMORY_WB:
+		while (ctrl.nr_pages && ctrl.nr_pages--) {
+			hct_noiommu_set_memory_wb(ctrl.vt_addr);
+			ctrl.vt_addr += PAGE_SIZE;
+		}
+		break;
+	case MCCP_NOIOMMU_GET_SME_ACTIVE:
+		ctrl.sme_mask = sme_me_mask;
+		if (copy_to_user((void __user *)arg, &ctrl, sizeof(ctrl)))
+			ret = -EINVAL;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&hct_noiommu_lock);
+
+	return ret;
+}
+
+const struct file_operations hct_noiommu_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = hct_noiommu_ioctl,
+};
+
+struct miscdevice hct_noiommu_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "hct_noiommu",
+	.fops = &hct_noiommu_fops,
+};
+
 #define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948
 #define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975
 #define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e
@@ -2086,6 +2208,9 @@ static int __init hct_dev_init(void)
 		return -1;
 	}

+	if (!iommu_present(&pci_bus_type))
+		return misc_register(&hct_noiommu_misc);
+
 	ret = mdev_register_driver(&hct_mdev_driver);
 	if (ret)
 		return ret;
@@ -2163,6 +2288,11 @@ static int __init hct_dev_init(void)

 static void __exit hct_dev_exit(void)
 {
+	if (!iommu_present(&pci_bus_type)) {
+		misc_deregister(&hct_noiommu_misc);
+		return;
+	}
+
 	hct_share_exit();
 	hct_dev.dev.bus = NULL;
 	mdev_unregister_parent(&hct_dev.mdev_parent);
--
Gitee

From 3dc331aa4889a77d27f04dc80bac95bc8563e8ae Mon Sep 17 00:00:00 2001
From: Yabin Li
Date: Thu, 25 Apr 2024 19:41:51 +0800
Subject: [PATCH 938/953] anolis: bugfix: fix build issue when the module mdev is disabled.

ANBZ: #8582 Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/hygon/hct.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c index 392371af865a..d2e179a2406b 100644 --- a/drivers/crypto/ccp/hygon/hct.c +++ b/drivers/crypto/ccp/hygon/hct.c @@ -23,12 +23,14 @@ #include #include #include -#include #include #include #include #include #include +#if IS_ENABLED(CONFIG_VFIO_MDEV) +#include +#endif /** * VERSION_STRING modification instructions: @@ -36,12 +38,13 @@ * 0.2 -- supoort qemu virtualization. * 0.3 -- support host-noiommu mode memory encryption function, * and performance optimization in virtual machines (enable caching). + * 0.4 -- support compiling hct.ko when mdev module is disabled. */ #undef pr_fmt #define pr_fmt(fmt) "hct: " fmt -#define VERSION_STRING "0.3" +#define VERSION_STRING "0.4" #define DRIVER_AUTHOR "HYGON Corporation" #define VERSION_SIZE 16 @@ -192,6 +195,7 @@ struct hct_iommu { unsigned long ref; }; +#if IS_ENABLED(CONFIG_VFIO_MDEV) static struct hct_data { struct hct_iommu iommu[MCCP_DEV_MAX]; struct mutex lock; @@ -2076,6 +2080,7 @@ static void hct_device_release(struct device *dev) { dev_dbg(dev, "hct: released\n"); } +#endif /* IS_ENABLED(CONFIG_VFIO_MDEV) */ /* set the flags PAT, PCT and PWT of page all to 0 * for obtaining cache properties. @@ -2192,7 +2197,7 @@ struct miscdevice hct_noiommu_misc = { static int __init hct_dev_init(void) { - int ret = 0; + int __maybe_unused ret = 0; u32 vendor_ebx = 0; u32 vendor_ecx = 0; u32 vendor_edx = 0; @@ -2208,6 +2213,7 @@ static int __init hct_dev_init(void) return -1; } +#if IS_ENABLED(CONFIG_VFIO_MDEV) if (!iommu_present(&pci_bus_type)) return misc_register(&hct_noiommu_misc); @@ -2284,10 +2290,15 @@ static int __init hct_dev_init(void) all_done: return ret; +#else + pr_info("The module mdev is disabled.\n"); + return misc_register(&hct_noiommu_misc); +#endif } static void __exit hct_dev_exit(void) { +#if IS_ENABLED(CONFIG_VFIO_MDEV) if (!iommu_present(&pci_bus_type)) { misc_deregister(&hct_noiommu_misc); return; @@ -2305,6 +2316,9 @@ static void __exit hct_dev_exit(void) hct_dev.vd_class = NULL; pci_unregister_driver(&hct_pci_driver); +#else + misc_deregister(&hct_noiommu_misc); +#endif } module_init(hct_dev_init) -- Gitee From e30962e4266fc1aaa5c692063d248f47291aac12 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Thu, 25 Apr 2024 19:46:13 +0800 Subject: [PATCH 939/953] anolis: Change the maximum number of supported ccps from 16 to 48. ANBZ: #8582 Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/hygon/hct.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c index d2e179a2406b..3a29cd953943 100644 --- a/drivers/crypto/ccp/hygon/hct.c +++ b/drivers/crypto/ccp/hygon/hct.c @@ -39,12 +39,13 @@ * 0.3 -- support host-noiommu mode memory encryption function, * and performance optimization in virtual machines (enable caching). * 0.4 -- support compiling hct.ko when mdev module is disabled. + * 0.5 -- change the maximum number of supported ccps from 16 to 48. 
 */

 #undef pr_fmt
 #define pr_fmt(fmt) "hct: " fmt

-#define VERSION_STRING "0.4"
+#define VERSION_STRING "0.5"
 #define DRIVER_AUTHOR "HYGON Corporation"

 #define VERSION_SIZE 16
@@ -92,7 +93,7 @@
 #define MCCP_INSTANCE_MASK (~((1u << MCCP_INSTANCE_OFFSET) - 1))
 #define MCCP_PASID_SIZE (1 << 8)
 #define MCCP_IOVA_MAX_SLOT 1024
-#define MCCP_DEV_MAX 16
+#define MCCP_DEV_MAX 48
 #define MCCP_DEV_QUEUE_MAX 8
 #define MCCP_DEV_QUEUE 5
 #define MCCP_QUEUES_MAX (MCCP_DEV_MAX * MCCP_DEV_QUEUE_MAX)
--
Gitee

From 147fd39e497ad834a524941f15da4f5580ec0499 Mon Sep 17 00:00:00 2001
From: Yabin Li
Date: Mon, 6 May 2024 14:31:25 +0800
Subject: [PATCH 940/953] anolis: support 1024 processes simultaneously in the hct-mdev mode.

ANBZ: #8582

Signed-off-by: Yabin Li
Signed-off-by: yangdepei
Reviewed-by: Tianjia Zhang
Reviewed-by: Xingrui Yi
Reviewed-by: Artie Ding
Link: https://gitee.com/anolis/cloud-kernel/pulls/3119
---
 drivers/crypto/ccp/hygon/hct.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c
index 3a29cd953943..719b3287d151 100644
--- a/drivers/crypto/ccp/hygon/hct.c
+++ b/drivers/crypto/ccp/hygon/hct.c
@@ -1250,7 +1250,6 @@ static int hct_share_open(struct inode *inode, struct file *file)
 		return -ENOMEM;

 	mutex_lock(&hct_data.lock);
-	bitmap_set(hct_data.ids, 0, 1);
 	id = (unsigned int)find_first_zero_bit(hct_data.ids, MCCP_INSTANCE_MAX);
 	if (id < MCCP_INSTANCE_MAX)
 		bitmap_set(hct_data.ids, id, 1);
@@ -1267,7 +1266,13 @@ static int hct_share_open(struct inode *inode, struct file *file)
 	mutex_unlock(&hct_share.lock);

 	file->private_data = private;
-	private->id = id << MCCP_INSTANCE_OFFSET;
+	/*
+	 * Each user-space process is assigned a distinct non-zero number
+	 * as its identifier. That identifier is derived from id, so id
+	 * must start from 1 and can never be 0.
+	 */
+	private->id = (++id) << MCCP_INSTANCE_OFFSET;
 	INIT_LIST_HEAD(&private->head);
 	mutex_init(&private->lock);

@@ -1891,7 +1896,7 @@ static int hct_share_close(struct inode *inode, struct file *file)
 	mutex_unlock(&hct_share.lock);

 	mutex_lock(&hct_data.lock);
-	if (id < MCCP_INSTANCE_MAX)
+	if (--id < MCCP_INSTANCE_MAX)
 		bitmap_clear(hct_data.ids, id, 1);
 	mutex_unlock(&hct_data.lock);

--
Gitee

From 6d5c1d377bc7d8db3e4b84fc4ee78832aaafdfc2 Mon Sep 17 00:00:00 2001
From: Jack Brennen
Date: Tue, 26 Sep 2023 08:40:44 -0400
Subject: [PATCH 941/953] modpost: Optimize symbol search from linear to
 binary search

ANBZ: #8919

commit 4074532758c5c367d3fcb8d124150824a254659d upstream.

Modify modpost to use binary search for converting addresses back
into symbol references.  Previously it used linear search.

This change saves a few seconds of wall time for defconfig builds,
but can save several minutes on allyesconfigs.
Before: $ make LLVM=1 -j128 allyesconfig vmlinux -s KCFLAGS="-Wno-error" $ time scripts/mod/modpost -M -m -a -N -o vmlinux.symvers vmlinux.o 198.38user 1.27system 3:19.71elapsed After: $ make LLVM=1 -j128 allyesconfig vmlinux -s KCFLAGS="-Wno-error" $ time scripts/mod/modpost -M -m -a -N -o vmlinux.symvers vmlinux.o 11.91user 0.85system 0:12.78elapsed Signed-off-by: Jack Brennen Tested-by: Nick Desaulniers Signed-off-by: Masahiro Yamada Reviewed-by: WU Hao Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3111 --- scripts/mod/Makefile | 4 +- scripts/mod/modpost.c | 70 ++------------ scripts/mod/modpost.h | 25 +++++ scripts/mod/symsearch.c | 199 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 232 insertions(+), 66 deletions(-) create mode 100644 scripts/mod/symsearch.c diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile index c9e38ad937fd..3c54125eb373 100644 --- a/scripts/mod/Makefile +++ b/scripts/mod/Makefile @@ -5,7 +5,7 @@ CFLAGS_REMOVE_empty.o += $(CC_FLAGS_LTO) hostprogs-always-y += modpost mk_elfconfig always-y += empty.o -modpost-objs := modpost.o file2alias.o sumversion.o +modpost-objs := modpost.o file2alias.o sumversion.o symsearch.o devicetable-offsets-file := devicetable-offsets.h @@ -16,7 +16,7 @@ targets += $(devicetable-offsets-file) devicetable-offsets.s # dependencies on generated files need to be listed explicitly -$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h +$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o $(obj)/symsearch.o: $(obj)/elfconfig.h $(obj)/file2alias.o: $(obj)/$(devicetable-offsets-file) quiet_cmd_elfconfig = MKELF $@ diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 5191fdbd3fa2..66589fb4e9ae 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -22,7 +22,6 @@ #include #include "modpost.h" #include "../../include/linux/license.h" -#include "../../include/linux/module_symbol.h" static bool module_enabled; /* Are we using CONFIG_MODVERSIONS? */ @@ -577,11 +576,14 @@ static int parse_elf(struct elf_info *info, const char *filename) *p = TO_NATIVE(*p); } + symsearch_init(info); + return 1; } static void parse_elf_finish(struct elf_info *info) { + symsearch_finish(info); release_file(info->hdr, info->size); } @@ -1042,71 +1044,10 @@ static int secref_whitelist(const char *fromsec, const char *fromsym, return 1; } -/* - * If there's no name there, ignore it; likewise, ignore it if it's - * one of the magic symbols emitted used by current tools. - * - * Otherwise if find_symbols_between() returns those symbols, they'll - * fail the whitelist tests and cause lots of false alarms ... fixable - * only by merging __exit and __init sections into __text, bloating - * the kernel (which is especially evil on embedded platforms). 
- */ -static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym) -{ - const char *name = elf->strtab + sym->st_name; - - if (!name || !strlen(name)) - return 0; - return !is_mapping_symbol(name); -} - -/* Look up the nearest symbol based on the section and the address */ -static Elf_Sym *find_nearest_sym(struct elf_info *elf, Elf_Addr addr, - unsigned int secndx, bool allow_negative, - Elf_Addr min_distance) -{ - Elf_Sym *sym; - Elf_Sym *near = NULL; - Elf_Addr sym_addr, distance; - bool is_arm = (elf->hdr->e_machine == EM_ARM); - - for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) { - if (get_secindex(elf, sym) != secndx) - continue; - if (!is_valid_name(elf, sym)) - continue; - - sym_addr = sym->st_value; - - /* - * For ARM Thumb instruction, the bit 0 of st_value is set - * if the symbol is STT_FUNC type. Mask it to get the address. - */ - if (is_arm && ELF_ST_TYPE(sym->st_info) == STT_FUNC) - sym_addr &= ~1; - - if (addr >= sym_addr) - distance = addr - sym_addr; - else if (allow_negative) - distance = sym_addr - addr; - else - continue; - - if (distance <= min_distance) { - min_distance = distance; - near = sym; - } - - if (min_distance == 0) - break; - } - return near; -} - static Elf_Sym *find_fromsym(struct elf_info *elf, Elf_Addr addr, unsigned int secndx) { - return find_nearest_sym(elf, addr, secndx, false, ~0); + return symsearch_find_nearest(elf, addr, secndx, false, ~0); } static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym) @@ -1119,7 +1060,8 @@ static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym) * Strive to find a better symbol name, but the resulting name may not * match the symbol referenced in the original code. */ - return find_nearest_sym(elf, addr, get_secindex(elf, sym), true, 20); + return symsearch_find_nearest(elf, addr, get_secindex(elf, sym), + true, 20); } static bool is_executable_section(struct elf_info *elf, unsigned int secndx) diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h index 5f94c2c9f2d9..6413f26fcb6b 100644 --- a/scripts/mod/modpost.h +++ b/scripts/mod/modpost.h @@ -10,6 +10,7 @@ #include #include #include +#include "../../include/linux/module_symbol.h" #include "list.h" #include "elfconfig.h" @@ -128,6 +129,8 @@ struct elf_info { * take shndx from symtab_shndx_start[N] instead */ Elf32_Word *symtab_shndx_start; Elf32_Word *symtab_shndx_stop; + + struct symsearch *symsearch; }; /* Accessor for sym->st_shndx, hides ugliness of "64k sections" */ @@ -154,6 +157,28 @@ static inline unsigned int get_secindex(const struct elf_info *info, return index; } +/* + * If there's no name there, ignore it; likewise, ignore it if it's + * one of the magic symbols emitted used by current tools. + * + * Internal symbols created by tools should be ignored by modpost. 
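+ * (For example, ARM mapping symbols such as "$a", "$t" and "$d" are
+ * filtered out here via is_mapping_symbol().)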
+ */ +static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym) +{ + const char *name = elf->strtab + sym->st_name; + + if (!name || !strlen(name)) + return 0; + return !is_mapping_symbol(name); +} + +/* symsearch.c */ +void symsearch_init(struct elf_info *elf); +void symsearch_finish(struct elf_info *elf); +Elf_Sym *symsearch_find_nearest(struct elf_info *elf, Elf_Addr addr, + unsigned int secndx, bool allow_negative, + Elf_Addr min_distance); + /* file2alias.c */ void handle_moddevtable(struct module *mod, struct elf_info *info, Elf_Sym *sym, const char *symname); diff --git a/scripts/mod/symsearch.c b/scripts/mod/symsearch.c new file mode 100644 index 000000000000..aa4ed51f9960 --- /dev/null +++ b/scripts/mod/symsearch.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Helper functions for finding the symbol in an ELF which is "nearest" + * to a given address. + */ + +#include "modpost.h" + +struct syminfo { + unsigned int symbol_index; + unsigned int section_index; + Elf_Addr addr; +}; + +/* + * Container used to hold an entire binary search table. + * Entries in table are ascending, sorted first by section_index, + * then by addr, and last by symbol_index. The sorting by + * symbol_index is used to ensure predictable behavior when + * multiple symbols are present with the same address; all + * symbols past the first are effectively ignored, by eliding + * them in symsearch_fixup(). + */ +struct symsearch { + unsigned int table_size; + struct syminfo table[]; +}; + +static int syminfo_compare(const void *s1, const void *s2) +{ + const struct syminfo *sym1 = s1; + const struct syminfo *sym2 = s2; + + if (sym1->section_index > sym2->section_index) + return 1; + if (sym1->section_index < sym2->section_index) + return -1; + if (sym1->addr > sym2->addr) + return 1; + if (sym1->addr < sym2->addr) + return -1; + if (sym1->symbol_index > sym2->symbol_index) + return 1; + if (sym1->symbol_index < sym2->symbol_index) + return -1; + return 0; +} + +static unsigned int symbol_count(struct elf_info *elf) +{ + unsigned int result = 0; + + for (Elf_Sym *sym = elf->symtab_start; sym < elf->symtab_stop; sym++) { + if (is_valid_name(elf, sym)) + result++; + } + return result; +} + +/* + * Populate the search array that we just allocated. + * Be slightly paranoid here. The ELF file is mmap'd and could + * conceivably change between symbol_count() and symsearch_populate(). + * If we notice any difference, bail out rather than potentially + * propagating errors or crashing. + */ +static void symsearch_populate(struct elf_info *elf, + struct syminfo *table, + unsigned int table_size) +{ + bool is_arm = (elf->hdr->e_machine == EM_ARM); + + for (Elf_Sym *sym = elf->symtab_start; sym < elf->symtab_stop; sym++) { + if (is_valid_name(elf, sym)) { + if (table_size-- == 0) + fatal("%s: size mismatch\n", __func__); + table->symbol_index = sym - elf->symtab_start; + table->section_index = get_secindex(elf, sym); + table->addr = sym->st_value; + + /* + * For ARM Thumb instruction, the bit 0 of st_value is + * set if the symbol is STT_FUNC type. Mask it to get + * the address. + */ + if (is_arm && ELF_ST_TYPE(sym->st_info) == STT_FUNC) + table->addr &= ~1; + + table++; + } + } + + if (table_size != 0) + fatal("%s: size mismatch\n", __func__); +} + +/* + * Do any fixups on the table after sorting. 
+ * For now, this just finds adjacent entries which have + * the same section_index and addr, and it propagates + * the first symbol_index over the subsequent entries, + * so that only one symbol_index is seen for any given + * section_index and addr. This ensures that whether + * we're looking at an address from "above" or "below" + * that we see the same symbol_index. + * This does leave some duplicate entries in the table; + * in practice, these are a small fraction of the + * total number of entries, and they are harmless to + * the binary search algorithm other than a few occasional + * unnecessary comparisons. + */ +static void symsearch_fixup(struct syminfo *table, unsigned int table_size) +{ + /* Don't look at index 0, it will never change. */ + for (unsigned int i = 1; i < table_size; i++) { + if (table[i].addr == table[i - 1].addr && + table[i].section_index == table[i - 1].section_index) { + table[i].symbol_index = table[i - 1].symbol_index; + } + } +} + +void symsearch_init(struct elf_info *elf) +{ + unsigned int table_size = symbol_count(elf); + + elf->symsearch = NOFAIL(malloc(sizeof(struct symsearch) + + sizeof(struct syminfo) * table_size)); + elf->symsearch->table_size = table_size; + + symsearch_populate(elf, elf->symsearch->table, table_size); + qsort(elf->symsearch->table, table_size, + sizeof(struct syminfo), syminfo_compare); + + symsearch_fixup(elf->symsearch->table, table_size); +} + +void symsearch_finish(struct elf_info *elf) +{ + free(elf->symsearch); + elf->symsearch = NULL; +} + +/* + * Find the syminfo which is in secndx and "nearest" to addr. + * allow_negative: allow returning a symbol whose address is > addr. + * min_distance: ignore symbols which are further away than this. + * + * Returns a pointer into the symbol table for success. + * Returns NULL if no legal symbol is found within the requested range. + */ +Elf_Sym *symsearch_find_nearest(struct elf_info *elf, Elf_Addr addr, + unsigned int secndx, bool allow_negative, + Elf_Addr min_distance) +{ + unsigned int hi = elf->symsearch->table_size; + unsigned int lo = 0; + struct syminfo *table = elf->symsearch->table; + struct syminfo target; + + target.addr = addr; + target.section_index = secndx; + target.symbol_index = ~0; /* compares greater than any actual index */ + while (hi > lo) { + unsigned int mid = lo + (hi - lo) / 2; /* Avoids overflow */ + + if (syminfo_compare(&table[mid], &target) > 0) + hi = mid; + else + lo = mid + 1; + } + + /* + * table[hi], if it exists, is the first entry in the array which + * lies beyond target. table[hi - 1], if it exists, is the last + * entry in the array which comes before target, including the + * case where it perfectly matches the section and the address. + * + * Note -- if the address we're looking up falls perfectly + * in the middle of two symbols, this is written to always + * prefer the symbol with the lower address. 
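+ *
+ * A worked example (addresses illustrative): with symbols at 0x100 and
+ * 0x200 in the same section, looking up addr 0x180 with
+ * allow_negative=false returns the 0x100 symbol. Looking up 0x1f8 with
+ * allow_negative=true and min_distance=20 returns the 0x200 symbol,
+ * since it lies only 8 bytes above while 0x100 is 0xf8 below.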
+ */ + Elf_Sym *result = NULL; + + if (allow_negative && + hi < elf->symsearch->table_size && + table[hi].section_index == secndx && + table[hi].addr - addr <= min_distance) { + min_distance = table[hi].addr - addr; + result = &elf->symtab_start[table[hi].symbol_index]; + } + if (hi > 0 && + table[hi - 1].section_index == secndx && + addr - table[hi - 1].addr <= min_distance) { + result = &elf->symtab_start[table[hi - 1].symbol_index]; + } + return result; +} -- Gitee From eeff6f198f6711879210da8b3e4f68b780c41f6d Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Tue, 14 May 2024 09:27:47 +0800 Subject: [PATCH 942/953] anolis: Revert "anolis: net: txgbe: fix i2c dev name cannot match clkdev" ANBZ: #9013 This reverts commit 247b6cf8b8138b345822c0585d04e8a62ce97211. This patch was applied by upstream, so revert it and backport from upstream. Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/3172 --- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 129cd5042180..328d9a9eb3db 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -20,8 +20,6 @@ #include "txgbe_phy.h" #include "txgbe_hw.h" -#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw" - static int txgbe_swnodes_register(struct txgbe *txgbe) { struct txgbe_nodes *nodes = &txgbe->nodes; @@ -553,8 +551,8 @@ static int txgbe_clock_register(struct txgbe *txgbe) char clk_name[32]; struct clk *clk; - snprintf(clk_name, sizeof(clk_name), "%s.%d", - TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev)); + snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d", + pci_dev_id(pdev)); clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000); if (IS_ERR(clk)) @@ -616,7 +614,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe) info.parent = &pdev->dev; info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]); - info.name = TXGBE_I2C_CLK_DEV_NAME; + info.name = "i2c_designware"; info.id = pci_dev_id(pdev); info.res = &DEFINE_RES_IRQ(pdev->irq); -- Gitee From 79096006f89fb692f4bca55a4d1fc9f3a69e3d84 Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Tue, 2 Apr 2024 10:18:43 +0800 Subject: [PATCH 943/953] net: txgbe: fix i2c dev name cannot match clkdev ANBZ: #9013 commit c644920ce9220d83e070f575a4df711741c07f07 upstream. txgbe clkdev shortened clk_name, so i2c_dev info_name also need to shorten. Otherwise, i2c_dev cannot initialize clock. 
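A minimal sketch of the resulting name mismatch (device IDs
illustrative):

    clock:  clk_register_fixed_rate(NULL, "i2c_dw.256", ...)
    device: info.name = "i2c_designware" -> dev_name() is "i2c_designware.256"

so the clkdev lookup performed for the i2c device never finds the
clock registered above.
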
Fixes: e30cef001da2 ("net: txgbe: fix clk_name exceed MAX_DEV_ID limits")
Signed-off-by: Duanqiang Wen
Link: https://lore.kernel.org/r/20240402021843.126192-1-duanqiangwen@net-swift.com
Signed-off-by: Jakub Kicinski
Signed-off-by: Greg Kroah-Hartman
Link: https://gitee.com/anolis/cloud-kernel/pulls/3172
---
 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 328d9a9eb3db..129cd5042180 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -20,6 +20,8 @@
 #include "txgbe_phy.h"
 #include "txgbe_hw.h"

+#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
+
 static int txgbe_swnodes_register(struct txgbe *txgbe)
 {
 	struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -551,8 +553,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
 	char clk_name[32];
 	struct clk *clk;

-	snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
-		 pci_dev_id(pdev));
+	snprintf(clk_name, sizeof(clk_name), "%s.%d",
+		 TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));

 	clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
 	if (IS_ERR(clk))
@@ -614,7 +616,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)

 	info.parent = &pdev->dev;
 	info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
-	info.name = "i2c_designware";
+	info.name = TXGBE_I2C_CLK_DEV_NAME;
 	info.id = pci_dev_id(pdev);

 	info.res = &DEFINE_RES_IRQ(pdev->irq);
--
Gitee

From d57eb265cf12f43507d73c79729dbe75e2a44c49 Mon Sep 17 00:00:00 2001
From: Duanqiang Wen
Date: Mon, 22 Apr 2024 16:41:08 +0800
Subject: [PATCH 944/953] Revert "net: txgbe: fix i2c dev name cannot match
 clkdev"

ANBZ: #9013

commit 8d6bf83f6740ba52a59e25dad360e1e87ef47666 upstream.

This reverts commit c644920ce9220d83e070f575a4df711741c07f07.
When registering the i2c dev, txgbe shortened "i2c_designware" to
"i2c_dw", which prevents the i2c dev from matching the platform
driver i2c_designware_platform.
Signed-off-by: Duanqiang Wen
Reviewed-by: Jacob Keller
Link: https://lore.kernel.org/r/20240422084109.3201-1-duanqiangwen@net-swift.com
Signed-off-by: Jakub Kicinski
Link: https://gitee.com/anolis/cloud-kernel/pulls/3172
---
 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 129cd5042180..328d9a9eb3db 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -20,8 +20,6 @@
 #include "txgbe_phy.h"
 #include "txgbe_hw.h"

-#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
-
 static int txgbe_swnodes_register(struct txgbe *txgbe)
 {
 	struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -553,8 +551,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
 	char clk_name[32];
 	struct clk *clk;

-	snprintf(clk_name, sizeof(clk_name), "%s.%d",
-		 TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
+	snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
+		 pci_dev_id(pdev));

 	clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
 	if (IS_ERR(clk))
@@ -616,7 +614,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)

 	info.parent = &pdev->dev;
 	info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
-	info.name = TXGBE_I2C_CLK_DEV_NAME;
+	info.name = "i2c_designware";
 	info.id = pci_dev_id(pdev);

 	info.res = &DEFINE_RES_IRQ(pdev->irq);
--
Gitee

From 816b03441c939fef4ec934505594d94619105131 Mon Sep 17 00:00:00 2001
From: Duanqiang Wen
Date: Mon, 22 Apr 2024 16:41:09 +0800
Subject: [PATCH 945/953] Revert "net: txgbe: fix clk_name exceed MAX_DEV_ID
 limits"

ANBZ: #9013

commit edd2d250fb3bb5d70419ae82c1f9dbb9684dffd3 upstream.

This reverts commit e30cef001da259e8df354b813015d0e5acc08740.
commit 99f4570cfba1 ("clkdev: Update clkdev id usage to allow for
longer names") can fix clk_name exceeding the MAX_DEV_ID limit, so
this commit is no longer needed.

Signed-off-by: Duanqiang Wen
Reviewed-by: Jacob Keller
Link: https://lore.kernel.org/r/20240422084109.3201-2-duanqiangwen@net-swift.com
Signed-off-by: Jakub Kicinski
Link: https://gitee.com/anolis/cloud-kernel/pulls/3172
---
 drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 328d9a9eb3db..b1b5cdc04a92 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -551,7 +551,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
 	char clk_name[32];
 	struct clk *clk;

-	snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
+	snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
 		 pci_dev_id(pdev));

 	clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
--
Gitee

From bdafe619c9dec4f7632ecb49c6fe295631d79d02 Mon Sep 17 00:00:00 2001
From: Jiawen Wu
Date: Wed, 3 Jan 2024 10:08:53 +0800
Subject: [PATCH 946/953] net: wangxun: add ethtool_ops for channel number

ANBZ: #9013

commit 937d46ecc5f941b26270bdf7ce37495f12b25955 upstream.

Add support to get RX/TX queue number with ethtool -l, and set RX/TX
queue number with ethtool -L. Since interrupts need to be rescheduled,
adjust the allocation of msix entries.

Signed-off-by: Duanqiang Wen
Signed-off-by: Jiawen Wu
Link: https://lore.kernel.org/all/20240103020854.1656604-8-jiawenwu@trustnetic.com
Signed-off-by: David S.
Miller Link: https://gitee.com/anolis/cloud-kernel/pulls/3172 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 57 ++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 4 + drivers/net/ethernet/wangxun/libwx/wx_hw.c | 103 +++++++++++++++++- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 86 ++++++++++----- drivers/net/ethernet/wangxun/libwx/wx_type.h | 31 +++++- .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 15 +++ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 69 +++++++----- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 4 +- .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 15 +++ .../net/ethernet/wangxun/txgbe/txgbe_main.c | 46 +++++++- .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 12 +- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 6 +- 12 files changed, 380 insertions(+), 68 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index f3c7e19dff5c..152049600148 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -364,3 +364,60 @@ void wx_set_msglevel(struct net_device *netdev, u32 data) wx->msg_enable = data; } EXPORT_SYMBOL(wx_set_msglevel); + +static unsigned int wx_max_channels(struct wx *wx) +{ + unsigned int max_combined; + + if (!wx->msix_q_entries) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else { + /* support up to max allowed queues with RSS */ + if (wx->mac.type == wx_mac_sp) + max_combined = 63; + else + max_combined = 8; + } + + return max_combined; +} + +void wx_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct wx *wx = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = wx_max_channels(wx); + + /* report info for other vector */ + if (wx->msix_q_entries) { + ch->max_other = 1; + ch->other_count = 1; + } + + /* record RSS queues */ + ch->combined_count = wx->ring_feature[RING_F_RSS].indices; +} +EXPORT_SYMBOL(wx_get_channels); + +int wx_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + unsigned int count = ch->combined_count; + struct wx *wx = netdev_priv(dev); + + /* verify other_count has not changed */ + if (ch->other_count != 1) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > wx_max_channels(wx)) + return -EINVAL; + + wx->ring_feature[RING_F_RSS].limit = count; + + return 0; +} +EXPORT_SYMBOL(wx_set_channels); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index d79157532d3d..fee7260384ef 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -36,4 +36,8 @@ int wx_set_coalesce(struct net_device *netdev, struct netlink_ext_ack *extack); u32 wx_get_msglevel(struct net_device *netdev); void wx_set_msglevel(struct net_device *netdev, u32 data); +void wx_get_channels(struct net_device *dev, + struct ethtool_channels *ch); +int wx_set_channels(struct net_device *dev, + struct ethtool_channels *ch); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index d11f7d8db194..1db754615cca 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -149,9 +149,9 @@ void wx_irq_disable(struct wx *wx) int vector; for (vector = 0; vector < wx->num_q_vectors; vector++) - synchronize_irq(wx->msix_entries[vector].vector); + 
synchronize_irq(wx->msix_q_entries[vector].vector); - synchronize_irq(wx->msix_entries[vector].vector); + synchronize_irq(wx->msix_entry->vector); } else { synchronize_irq(pdev->irq); } @@ -1597,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx) wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid); } +static void wx_store_reta(struct wx *wx) +{ + u8 *indir_tbl = wx->rss_indir_tbl; + u32 reta = 0; + u32 i; + + /* Fill out the redirection table as follows: + * - 8 bit wide entries containing 4 bit RSS index + */ + for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(wx, WX_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +static void wx_setup_reta(struct wx *wx) +{ + u16 rss_i = wx->ring_feature[RING_F_RSS].indices; + u32 random_key_size = WX_RSS_KEY_SIZE / 4; + u32 i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < random_key_size; i++) + wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]); + + /* Fill out redirection table */ + memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl)); + + for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) { + if (j == rss_i) + j = 0; + + wx->rss_indir_tbl[i] = j; + } + + wx_store_reta(wx); +} + +static void wx_setup_mrqc(struct wx *wx) +{ + u32 rss_field = 0; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = WX_RDB_RA_CTL_RSS_IPV4 | + WX_RDB_RA_CTL_RSS_IPV4_TCP | + WX_RDB_RA_CTL_RSS_IPV4_UDP | + WX_RDB_RA_CTL_RSS_IPV6 | + WX_RDB_RA_CTL_RSS_IPV6_TCP | + WX_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key)); + + wx_setup_reta(wx); + + if (wx->rss_enabled) + rss_field |= WX_RDB_RA_CTL_RSS_EN; + + wr32(wx, WX_RDB_RA_CTL, rss_field); +} + /** * wx_configure_rx - Configure Receive Unit after Reset * @wx: pointer to private structure @@ -1629,6 +1695,8 @@ void wx_configure_rx(struct wx *wx) wr32(wx, WX_PSR_CTL, psrctl); } + wx_setup_mrqc(wx); + /* set_rx_buffer_len must be called before ring initialization */ wx_set_rx_buffer_len(wx); @@ -1826,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) } EXPORT_SYMBOL(wx_get_pcie_msix_counts); +/** + * wx_init_rss_key - Initialize wx RSS key + * @wx: device handle + * + * Allocates and initializes the RSS key if it is not allocated. 
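+ *
+ * Returns 0 on success, or -ENOMEM if the key buffer cannot be
+ * allocated.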
+ **/ +static int wx_init_rss_key(struct wx *wx) +{ + u32 *rss_key; + + if (!wx->rss_key) { + rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); + wx->rss_key = rss_key; + } + + return 0; +} + int wx_sw_init(struct wx *wx) { struct pci_dev *pdev = wx->pdev; @@ -1853,14 +1943,23 @@ int wx_sw_init(struct wx *wx) wx->subsystem_device_id = swab16((u16)ssid); } + err = wx_init_rss_key(wx); + if (err < 0) { + wx_err(wx, "rss key allocation failed\n"); + return err; + } + wx->mac_table = kcalloc(wx->mac.num_rar_entries, sizeof(struct wx_mac_addr), GFP_KERNEL); if (!wx->mac_table) { wx_err(wx, "mac_table allocation failed\n"); + kfree(wx->rss_key); return -ENOMEM; } + wx->msix_in_use = false; + return 0; } EXPORT_SYMBOL(wx_sw_init); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 0481f646a303..8706223a6e5a 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -1568,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all); **/ static void wx_set_rss_queues(struct wx *wx) { - wx->num_rx_queues = wx->mac.max_rx_queues; - wx->num_tx_queues = wx->mac.max_tx_queues; + struct wx_ring_feature *f; + + /* set mask for 16 queue limit of RSS */ + f = &wx->ring_feature[RING_F_RSS]; + f->indices = f->limit; + + wx->num_rx_queues = f->limit; + wx->num_tx_queues = f->limit; } static void wx_set_num_queues(struct wx *wx) @@ -1595,35 +1601,51 @@ static int wx_acquire_msix_vectors(struct wx *wx) struct irq_affinity affd = {0, }; int nvecs, i; - nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); + /* We start by asking for one vector per queue pair */ + nvecs = max(wx->num_rx_queues, wx->num_tx_queues); + nvecs = min_t(int, nvecs, num_online_cpus()); + nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors); - wx->msix_entries = kcalloc(nvecs, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!wx->msix_entries) + wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_q_entries) return -ENOMEM; + /* One for non-queue interrupts */ + nvecs += 1; + + if (!wx->msix_in_use) { + wx->msix_entry = kcalloc(1, sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_entry) { + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + return -ENOMEM; + } + } + nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, nvecs, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd); if (nvecs < 0) { wx_err(wx, "Failed to allocate MSI-X interrupts. 
Err: %d\n", nvecs); - kfree(wx->msix_entries); - wx->msix_entries = NULL; + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + kfree(wx->msix_entry); + wx->msix_entry = NULL; return nvecs; } + wx->msix_entry->entry = 0; + wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0); + nvecs -= 1; for (i = 0; i < nvecs; i++) { - wx->msix_entries[i].entry = i; - wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); + wx->msix_q_entries[i].entry = i; + wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1); } - /* one for msix_other */ - nvecs -= 1; wx->num_q_vectors = nvecs; - wx->num_rx_queues = nvecs; - wx->num_tx_queues = nvecs; return 0; } @@ -1645,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx) if (ret == 0 || (ret == -ENOMEM)) return ret; - wx->num_rx_queues = 1; - wx->num_tx_queues = 1; - wx->num_q_vectors = 1; + /* Disable RSS */ + dev_warn(&wx->pdev->dev, "Disabling RSS support\n"); + wx->ring_feature[RING_F_RSS].limit = 1; + + wx_set_num_queues(wx); /* minimum one for queue, one for misc */ nvecs = 1; @@ -1905,8 +1929,12 @@ void wx_reset_interrupt_capability(struct wx *wx) return; if (pdev->msix_enabled) { - kfree(wx->msix_entries); - wx->msix_entries = NULL; + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + if (!wx->msix_in_use) { + kfree(wx->msix_entry); + wx->msix_entry = NULL; + } } pci_free_irq_vectors(wx->pdev); } @@ -1978,7 +2006,7 @@ void wx_free_irq(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; /* free only the irqs that were actually requested */ if (!q_vector->rx.ring && !q_vector->tx.ring) @@ -1988,7 +2016,7 @@ void wx_free_irq(struct wx *wx) } if (wx->mac.type == wx_mac_em) - free_irq(wx->msix_entries[vector].vector, wx); + free_irq(wx->msix_entry->vector, wx); } EXPORT_SYMBOL(wx_free_irq); @@ -2065,6 +2093,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ + msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2095,7 +2124,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector) itr_reg |= WX_PX_ITR_CNT_WDIS; - wr32(wx, WX_PX_ITR(v_idx), itr_reg); + wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg); } /** @@ -2141,9 +2170,9 @@ void wx_configure_vectors(struct wx *wx) wx_write_eitr(q_vector); } - wx_set_ivar(wx, -1, 0, v_idx); + wx_set_ivar(wx, -1, 0, 0); if (pdev->msix_enabled) - wr32(wx, WX_PX_ITR(v_idx), 1950); + wr32(wx, WX_PX_ITR(0), 1950); } EXPORT_SYMBOL(wx_configure_vectors); @@ -2656,11 +2685,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) netdev_features_t changed = netdev->features ^ features; struct wx *wx = netdev_priv(netdev); - if (changed & NETIF_F_RXHASH) + if (features & NETIF_F_RXHASH) { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, WX_RDB_RA_CTL_RSS_EN); - else + wx->rss_enabled = true; + } else { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0); + wx->rss_enabled = false; + } if (changed & (NETIF_F_HW_VLAN_CTAG_RX | diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 17cdffe388d0..b4dc4f341117 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -147,8 +147,16 @@ #define WX_RDB_PL_CFG_L2HDR
BIT(3) #define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) #define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) +#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) +#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) #define WX_RDB_RA_CTL 0x194F4 #define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */ +#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16) +#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17) +#define WX_RDB_RA_CTL_RSS_IPV6 BIT(20) +#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21) +#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22) +#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23) /******************************* PSR Registers *******************************/ /* psr control */ @@ -921,6 +929,19 @@ struct wx_q_vector { struct wx_ring ring[] ____cacheline_internodealigned_in_smp; }; +struct wx_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +enum wx_ring_f_enum { + RING_F_NONE = 0, + RING_F_RSS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + enum wx_isb_idx { WX_ISB_HEADER, WX_ISB_MISC, @@ -1024,7 +1045,10 @@ struct wx { struct wx_q_vector *q_vector[64]; unsigned int queues_per_pool; - struct msix_entry *msix_entries; + struct msix_entry *msix_q_entries; + struct msix_entry *msix_entry; + bool msix_in_use; + struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE]; /* misc interrupt status block */ dma_addr_t isb_dma; @@ -1032,8 +1056,9 @@ struct wx { u32 isb_tag[WX_ISB_MAX]; #define WX_MAX_RETA_ENTRIES 128 +#define WX_RSS_INDIR_TBL_MAX 64 u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; - + bool rss_enabled; #define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 *rss_key; u32 wol; @@ -1050,7 +1075,7 @@ struct wx { }; #define WX_INTR_ALL (~0ULL) -#define WX_INTR_Q(i) BIT(i) +#define WX_INTR_Q(i) BIT((i) + 1) /* register operations */ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 5800bd8c8696..cdf35733705f 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -92,6 +92,19 @@ static int ngbe_set_ringparam(struct net_device *netdev, return 0; } +static int ngbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + int err; + + err = wx_set_channels(dev, ch); + if (err < 0) + return err; + + /* use setup TC to update any traffic class queue mapping */ + return ngbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + static const struct ethtool_ops ngbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, @@ -115,6 +128,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .set_coalesce = wx_set_coalesce, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_channels = wx_get_channels, + .set_channels = ngbe_set_channels, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 96d80c595cb8..fdd6b4f70b7a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -79,28 +79,6 @@ static void ngbe_init_type_code(struct wx *wx) } } -/** - * ngbe_init_rss_key - Initialize wx RSS key - * @wx: device handle - * - * Allocates and initializes the RSS key if it is not allocated. 
- **/ -static inline int ngbe_init_rss_key(struct wx *wx) -{ - u32 *rss_key; - - if (!wx->rss_key) { - rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); - if (unlikely(!rss_key)) - return -ENOMEM; - - netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); - wx->rss_key = rss_key; - } - - return 0; -} - /** * ngbe_sw_init - Initialize general software structures * @wx: board private structure to initialize @@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx) dev_err(&pdev->dev, "Do not support MSI-X\n"); wx->mac.max_msix_vectors = msix_count; - if (ngbe_init_rss_key(wx)) - return -ENOMEM; + wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES, + num_online_cpus()); + wx->rss_enabled = true; /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; @@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues) if (queues) wx_intr_enable(wx, NGBE_INTR_ALL); else - wx_intr_enable(wx, NGBE_INTR_MISC(wx)); + wx_intr_enable(wx, NGBE_INTR_MISC); } /** @@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) } } - err = request_irq(wx->msix_entries[vector].vector, + err = request_irq(wx->msix_entry->vector, ngbe_msix_other, 0, netdev->name, wx); if (err) { @@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) free_queue_irqs: while (vector) { vector--; - free_irq(wx->msix_entries[vector].vector, + free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } wx_reset_interrupt_capability(wx); @@ -480,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev) } } +/** + * ngbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @dev: net device to configure + * @tc: number of traffic classes to enable + */ +int ngbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct wx *wx = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
+ */ + if (netif_running(dev)) + ngbe_close(dev); + + wx_clear_interrupt_scheme(wx); + + if (tc) + netdev_set_num_tc(dev, tc); + else + netdev_reset_tc(dev); + + wx_init_interrupt_scheme(wx); + + if (netif_running(dev)) + ngbe_open(dev); + + return 0; +} + static const struct net_device_ops ngbe_netdev_ops = { .ndo_open = ngbe_open, .ndo_stop = ngbe_close, @@ -715,6 +727,7 @@ static void ngbe_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(wx->rss_key); kfree(wx->mac_table); wx_clear_interrupt_scheme(wx); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 0a98080a197a..f48ed7fc1805 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -80,7 +80,7 @@ NGBE_PX_MISC_IEN_GPIO) #define NGBE_INTR_ALL 0x1FF -#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define NGBE_INTR_MISC BIT(0) #define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) #define NGBE_CFG_LAN_SPEED 0x14440 @@ -105,6 +105,7 @@ #define NGBE_FW_CMD_ST_FAIL 0x70657376 #define NGBE_MAX_FDIR_INDICES 7 +#define NGBE_MAX_RSS_INDICES 8 #define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) #define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) @@ -132,5 +133,6 @@ extern char ngbe_driver_name[]; void ngbe_down(struct wx *wx); void ngbe_up(struct wx *wx); +int ngbe_setup_tc(struct net_device *dev, u8 tc); #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index fa83cac320d3..084e2faf9db1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -58,6 +58,19 @@ static int txgbe_set_ringparam(struct net_device *netdev, return 0; } +static int txgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + int err; + + err = wx_set_channels(dev, ch); + if (err < 0) + return err; + + /* use setup TC to update any traffic class queue mapping */ + return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + static const struct ethtool_ops txgbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, @@ -79,6 +92,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_coalesce = wx_set_coalesce, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_channels = wx_get_channels, + .set_channels = txgbe_set_channels, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index bcc47bc6264a..3b151c410a5c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues) wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + wx_intr_enable(wx, TXGBE_INTR_MISC); if (queues) wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); } @@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -168,7 +168,7 @@ 
static int txgbe_request_msix_irqs(struct wx *wx) free_queue_irqs: while (vector) { vector--; - free_irq(wx->msix_entries[vector].vector, + free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } wx_reset_interrupt_capability(wx); @@ -378,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx) wx_err(wx, "Do not support MSI-X\n"); wx->mac.max_msix_vectors = msix_count; + wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES, + num_online_cpus()); + wx->rss_enabled = true; + /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; wx->tx_itr_setting = 1; @@ -504,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev) } } +/** + * txgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @dev: net device to configure + * @tc: number of traffic classes to enable + */ +int txgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct wx *wx = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + txgbe_close(dev); + else + txgbe_reset(wx); + + wx_clear_interrupt_scheme(wx); + + if (tc) + netdev_set_num_tc(dev, tc); + else + netdev_reset_tc(dev); + + wx_init_interrupt_scheme(wx); + + if (netif_running(dev)) + txgbe_open(dev); + + return 0; +} + static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, @@ -778,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(wx->rss_key); kfree(wx->mac_table); wx_clear_interrupt_scheme(wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index b1b5cdc04a92..1b84d495d14e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -487,7 +487,7 @@ static void txgbe_irq_handler(struct irq_desc *desc) } /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + wx_intr_enable(wx, TXGBE_INTR_MISC); } static int txgbe_gpio_init(struct txgbe *txgbe) @@ -531,7 +531,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe) sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) return -ENOMEM; - girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector; + + /* now only supported on MSI-X interrupt */ + if (!wx->msix_entry) + return -EPERM; + + girq->parents[0] = wx->msix_entry->vector; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; @@ -749,6 +754,8 @@ int txgbe_init_phy(struct txgbe *txgbe) goto err_unregister_i2c; } + wx->msix_in_use = true; + return 0; err_unregister_i2c: @@ -781,4 +788,5 @@ void txgbe_remove_phy(struct txgbe *txgbe) phylink_destroy(txgbe->wx->phylink); xpcs_destroy(txgbe->xpcs); software_node_unregister_node_group(txgbe->nodes.group); + txgbe->wx->msix_in_use = false; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 801fd0aed1ff..270a6fd9ad0b 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -98,6 +98,7 @@ #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 +#define TXGBE_MAX_RSS_INDICES 63 #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) @@ -122,8 +123,8 @@ #define TXGBE_DEFAULT_RX_WORK 128 #endif -#define
TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) -#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) +#define TXGBE_INTR_MISC BIT(0) +#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1) #define TXGBE_MAX_EITR GENMASK(11, 3) @@ -131,6 +132,7 @@ extern char txgbe_driver_name[]; void txgbe_down(struct wx *wx); void txgbe_up(struct wx *wx); +int txgbe_setup_tc(struct net_device *dev, u8 tc); #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ -- Gitee From 257ed64f27844f704be4fd8cedd7225e1810235e Mon Sep 17 00:00:00 2001 From: Duanqiang Wen Date: Thu, 18 Apr 2024 10:15:56 +0800 Subject: [PATCH 947/953] net: libwx: fix alloc msix vectors failed ANBZ: #9013 commit 69197dfc64007b5292cc960581548f41ccd44828 upstream. The driver needs queue MSI-X vectors and one misc IRQ vector, but only the queue vectors need IRQ affinity. When num_online_cpus is less than the chip's max MSI-X vectors, the driver will acquire (num_online_cpus + 1) vectors and call pci_alloc_irq_vectors_affinity() with affinity params that set neither pre_vectors nor post_vectors, which causes it to return error code -ENOSPC. The misc IRQ vector is vector 0, so the driver needs to set the affinity param .pre_vectors = 1. Fixes: 3f703186113f ("net: libwx: Add irq flow functions") Signed-off-by: Duanqiang Wen Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Link: https://gitee.com/anolis/cloud-kernel/pulls/3172 --- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 8706223a6e5a..08d3e4069c5f 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -1598,7 +1598,7 @@ static void wx_set_num_queues(struct wx *wx) */ static int wx_acquire_msix_vectors(struct wx *wx) { - struct irq_affinity affd = {0, }; + struct irq_affinity affd = { .pre_vectors = 1 }; int nvecs, i; /* We start by asking for one vector per queue pair */ -- Gitee From 9a18a2f5810bf159c3d005ee56bf692cfa1f1751 Mon Sep 17 00:00:00 2001 From: "Michael J. Ruhl" Date: Fri, 23 Feb 2024 15:25:56 -0500 Subject: [PATCH 948/953] clkdev: Update clkdev id usage to allow for longer names ANBZ: #9013 commit 99f4570cfba1e60daafde737cb7e395006d719e6 upstream. clkdev DEV ID information is limited to an array of 20 bytes (MAX_DEV_ID). It is possible that the ID could be longer than that. If so, the lookup will fail because the "real ID" will not match the copied value. For instance, generating a device name for the I2C Designware module using the PCI ID can result in a name of: i2c_designware.39424 clkdev_create() will store: i2c_designware.3942 The stored name is one character short and will not match correctly during probe. Increase the size of the ID to allow for a longer name. Reviewed-by: Russell King (Oracle) Signed-off-by: Michael J.
Ruhl Link: https://lore.kernel.org/r/20240223202556.2194021-1-michael.j.ruhl@intel.com Reviewed-by: Andy Shevchenko Signed-off-by: Stephen Boyd Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/3172 --- drivers/clk/clkdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index ee37d0be6877..9cd80522ca2d 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -144,7 +144,7 @@ void clkdev_add_table(struct clk_lookup *cl, size_t num) mutex_unlock(&clocks_mutex); } -#define MAX_DEV_ID 20 +#define MAX_DEV_ID 24 #define MAX_CON_ID 16 struct clk_lookup_alloc { -- Gitee From 5a03a1122071be2d33dec9ec6ebcfb6c69297f9a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 22 Feb 2024 07:12:11 +0800 Subject: [PATCH 949/953] selftests: net: veth: test syncing GRO and XDP state while device is down MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9028 commit 1a825e4cdf457b7aef7ebbc2f1206654f5beb150 upstream. Test that we keep GRO flag in sync when XDP is disabled while the device is closed. Signed-off-by: Jakub Kicinski Reviewed-by: Toke Høiland-Jørgensen Signed-off-by: David S. Miller Signed-off-by: Xiao Long Signed-off-by: Philo Lu Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3178 --- tools/testing/selftests/net/veth.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh index 27574bbf2d63..5ae85def0739 100755 --- a/tools/testing/selftests/net/veth.sh +++ b/tools/testing/selftests/net/veth.sh @@ -246,6 +246,20 @@ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on chk_gro " - aggregation with TSO off" 1 cleanup +create_ns +ip -n $NS_DST link set dev veth$DST up +ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp +chk_gro_flag "gro vs xdp while down - gro flag on" $DST on +ip -n $NS_DST link set dev veth$DST down +chk_gro_flag " - after down" $DST on +ip -n $NS_DST link set dev veth$DST xdp off +chk_gro_flag " - after xdp off" $DST off +ip -n $NS_DST link set dev veth$DST up +chk_gro_flag " - after up" $DST off +ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp +chk_gro_flag " - after peer xdp" $DST off +cleanup + create_ns chk_channels "default channels" $DST 1 1 -- Gitee From 907405c5f5b37f1cc5f489acf32973c0be70af90 Mon Sep 17 00:00:00 2001 From: Ignat Korchagin Date: Thu, 14 Mar 2024 02:37:59 +0800 Subject: [PATCH 950/953] selftests: net: veth: test the ability to independently manipulate GRO and XDP ANBZ: #9028 commit ba5a6476e3866c97e2c85f64b0c7dfb8fbdda18a upstream. We should be able to independently flip either XDP or GRO states and toggling one should not affect the other. Adjust other tests as well that had implicit expectation that GRO would be automatically enabled. Signed-off-by: Ignat Korchagin Signed-off-by: David S. Miller Signed-off-by: Xiao Long Signed-off-by: Philo Lu Reviewed-by: D. 
Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3178 --- tools/testing/selftests/net/udpgro_fwd.sh | 4 ++++ tools/testing/selftests/net/veth.sh | 24 ++++++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh index 9cd5e885e91f..380cb15e942e 100755 --- a/tools/testing/selftests/net/udpgro_fwd.sh +++ b/tools/testing/selftests/net/udpgro_fwd.sh @@ -217,6 +217,7 @@ for family in 4 6; do cleanup create_ns + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on run_test "GRO frag list" $BM_NET$DST 1 0 cleanup @@ -227,6 +228,7 @@ for family in 4 6; do # use NAT to circumvent GRO FWD check create_ns ip -n $NS_DST addr add dev veth$DST $BM_NET$DST_NAT/$SUFFIX + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $BM_NET$DST_NAT \ -j DNAT --to-destination $BM_NET$DST @@ -240,6 +242,7 @@ for family in 4 6; do cleanup create_vxlan_pair + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on run_test "GRO frag list over UDP tunnel" $OL_NET$DST 1 1 cleanup @@ -247,6 +250,7 @@ for family in 4 6; do # use NAT to circumvent GRO FWD check create_vxlan_pair ip -n $NS_DST addr add dev $VXDEV$DST $OL_NET$DST_NAT/$SUFFIX + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $OL_NET$DST_NAT \ -j DNAT --to-destination $OL_NET$DST diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh index 5ae85def0739..3a394b43e274 100755 --- a/tools/testing/selftests/net/veth.sh +++ b/tools/testing/selftests/net/veth.sh @@ -249,9 +249,9 @@ cleanup create_ns ip -n $NS_DST link set dev veth$DST up ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp -chk_gro_flag "gro vs xdp while down - gro flag on" $DST on +chk_gro_flag "gro vs xdp while down - gro flag off" $DST off ip -n $NS_DST link set dev veth$DST down -chk_gro_flag " - after down" $DST on +chk_gro_flag " - after down" $DST off ip -n $NS_DST link set dev veth$DST xdp off chk_gro_flag " - after xdp off" $DST off ip -n $NS_DST link set dev veth$DST up @@ -260,6 +260,21 @@ ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp chk_gro_flag " - after peer xdp" $DST off cleanup +create_ns +ip -n $NS_DST link set dev veth$DST up +ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp +ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on +chk_gro_flag "gro vs xdp while down - gro flag on" $DST on +ip -n $NS_DST link set dev veth$DST down +chk_gro_flag " - after down" $DST on +ip -n $NS_DST link set dev veth$DST xdp off +chk_gro_flag " - after xdp off" $DST on +ip -n $NS_DST link set dev veth$DST up +chk_gro_flag " - after up" $DST on +ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp +chk_gro_flag " - after peer xdp" $DST on +cleanup + create_ns chk_channels "default channels" $DST 1 1 @@ -327,11 +342,14 @@ if [ $CPUS -gt 2 ]; then fi ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp 2>/dev/null -chk_gro_flag "with xdp attached - gro flag" $DST on +chk_gro_flag "with xdp attached - gro flag" $DST off chk_gro_flag " - 
peer gro flag" $SRC off chk_tso_flag " - tso flag" $SRC off chk_tso_flag " - peer tso flag" $DST on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on +chk_gro " - no aggregation" 10 +ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on +chk_gro_flag " - gro flag with GRO on" $DST on chk_gro " - aggregation" 1 -- Gitee From df99dfd5cf25c6c908a2c09f61dbbcb617ed149f Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 14 May 2024 16:00:03 +0800 Subject: [PATCH 951/953] anolis: crypto: ccp: Return -ENODEV if Hygon PSP is not configured with CSV capability ANBZ: #9023 The rules to determine psp_master are not exactly the same between AMD ASP and Hygon PSP. If a Hygon PSP is not configured with CSV capability, it should not be the psp_master. Fixes: 39e18cb04c1f ("anolis: crypto: ccp: Fixup the capability of Hygon PSP during initialization") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3174 --- drivers/crypto/ccp/psp-dev.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 1566b955730e..b4aea8dbdc28 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -236,11 +236,15 @@ static irqreturn_t psp_irq_handler_hygon(int irq, void *data) } #endif -static void hygon_fixup_psp_caps(struct psp_device *psp) +static int hygon_fixup_psp_caps(struct psp_device *psp) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - psp->capability &= ~(PSP_CAPABILITY_TEE | - PSP_CAPABILITY_PSP_SECURITY_REPORTING); + /* the hygon psp is unavailable if bit0 cleared in feature reg */ + if (!(psp->capability & PSP_CAPABILITY_SEV)) + return -ENODEV; + + psp->capability &= ~(PSP_CAPABILITY_TEE | + PSP_CAPABILITY_PSP_SECURITY_REPORTING); + return 0; } static unsigned int psp_get_capability(struct psp_device *psp) @@ -263,8 +267,13 @@ static unsigned int psp_get_capability(struct psp_device *psp) /* * Fix capability of Hygon psp, the meaning of Hygon psp feature * register is not exactly the same as AMD. + * Return -ENODEV directly if hygon psp not configured with CSV + * capability. */ - hygon_fixup_psp_caps(psp); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (hygon_fixup_psp_caps(psp)) + return -ENODEV; + } /* Detect if TSME and SME are both enabled */ if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING && -- Gitee From 425ac07855c2de16c7d0923a80f4d12571d4a516 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 18 Dec 2023 23:39:32 +0000 Subject: [PATCH 952/953] kselftest/arm64: Don't probe the current VL for unsupported vector types ANBZ: #8856 commit 9a802ddb2123e5adec394d35cd539cc0b15bc830 upstream. The vec-syscfg selftest verifies that setting the VL of the currently tested vector type does not disrupt the VL of the other vector type. To do this it records the current vector length for each type but neglects to guard this with a check for that vector type actually being supported. Add one using a helper function, and update all the other instances of this pattern to use it.
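For reference, vec_type_supported() reduces to a hwcap probe that can be exercised from any user-space program. A minimal standalone sketch follows; it is an illustration, not part of the patch, and the fallback HWCAP bit values are an assumption taken from arch/arm64/include/uapi/asm/hwcap.h in case the libc headers lack them:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SVE
#define HWCAP_SVE	(1UL << 22)	/* AT_HWCAP bit for SVE */
#endif
#ifndef HWCAP2_SME
#define HWCAP2_SME	(1UL << 23)	/* AT_HWCAP2 bit for SME */
#endif

int main(void)
{
	/* Same check the selftest wraps in vec_type_supported() */
	printf("SVE %ssupported\n",
	       (getauxval(AT_HWCAP) & HWCAP_SVE) ? "" : "not ");
	printf("SME %ssupported\n",
	       (getauxval(AT_HWCAP2) & HWCAP2_SME) ? "" : "not ");
	return 0;
}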
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20231218-kselftest-arm64-vec-syscfg-rdvl-v1-1-0ac22d47e81f@kernel.org Signed-off-by: Will Deacon Signed-off-by: Ruidong Tian Reviewed-by: Baolin Wang Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3182 --- tools/testing/selftests/arm64/fp/vec-syscfg.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c index 5f648b97a06f..ea9c7d47790f 100644 --- a/tools/testing/selftests/arm64/fp/vec-syscfg.c +++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c @@ -66,6 +66,11 @@ static struct vec_data vec_data[] = { }, }; +static bool vec_type_supported(struct vec_data *data) +{ + return getauxval(data->hwcap_type) & data->hwcap; +} + static int stdio_read_integer(FILE *f, const char *what, int *val) { int n = 0; @@ -564,8 +569,11 @@ static void prctl_set_all_vqs(struct vec_data *data) return; } - for (i = 0; i < ARRAY_SIZE(vec_data); i++) + for (i = 0; i < ARRAY_SIZE(vec_data); i++) { + if (!vec_type_supported(&vec_data[i])) + continue; orig_vls[i] = vec_data[i].rdvl(); + } for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { vl = sve_vl_from_vq(vq); @@ -594,7 +602,7 @@ static void prctl_set_all_vqs(struct vec_data *data) if (&vec_data[i] == data) continue; - if (!(getauxval(vec_data[i].hwcap_type) & vec_data[i].hwcap)) + if (!vec_type_supported(&vec_data[i])) continue; if (vec_data[i].rdvl() != orig_vls[i]) { @@ -765,7 +773,7 @@ int main(void) struct vec_data *data = &vec_data[i]; unsigned long supported; - supported = getauxval(data->hwcap_type) & data->hwcap; + supported = vec_type_supported(data); if (!supported) all_supported = false; -- Gitee From 4a710e89dafcb1edee5ae7ce1d9173f28796d957 Mon Sep 17 00:00:00 2001 From: kuangmingfu 10101884 Date: Tue, 21 May 2024 22:21:47 -0400 Subject: [PATCH 953/953] anolis: kabi: Add some reserve fields for kABI stability ANBZ: #9274 The change in data struct sizes was analyzed with pahole; the reserve fields added by this modification increase the total struct size by 800 bytes and the total struct hole size by 11 bytes.
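For context when reviewing the hunks below: the CK_KABI_RESERVE macro is defined elsewhere in the tree and is not part of this diff. A common shape for such reserve macros, shown purely as an illustrative assumption rather than the tree's actual definition, is a named placeholder member that consumes a pointer-sized slot:

/* Hypothetical stand-in for the real CK_KABI_RESERVE definition. */
#define CK_KABI_RESERVE(n)	unsigned long ck_kabi_reserved##n;

struct example {
	int used;		/* existing field */
	CK_KABI_RESERVE(1)	/* 8 spare bytes on 64-bit for later use */
	CK_KABI_RESERVE(2)
};

The quoted size and hole deltas can be reproduced by running pahole on a struct before and after the change, e.g. pahole -C task_struct vmlinux.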
Signed-off-by: kuang.mingfu <12550788+kuangmingfu@user.noreply.gitee.com> --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/x86/include/asm/kvm_host.h | 3 +++ drivers/block/brd.c | 4 ++++ drivers/pci/controller/dwc/pcie-designware.h | 7 +++++++ fs/ubifs/debug.h | 4 ++++ fs/ubifs/ubifs.h | 13 +++++++++++++ include/linux/binfmts.h | 4 ++++ include/linux/bpf.h | 4 ++++ include/linux/fs.h | 3 +++ include/linux/if_macvlan.h | 4 ++++ include/linux/iommu.h | 3 +++ include/linux/kvm_host.h | 6 +++++- include/linux/memcontrol.h | 6 ++++++ include/linux/mm_types.h | 6 +++++- include/linux/pci-epc.h | 5 +++++ include/linux/pci-epf.h | 7 +++++++ include/linux/sched.h | 9 +++++++++ include/linux/sched/user.h | 4 ++++ include/linux/sunrpc/svc.h | 7 +++++++ include/linux/task_io_accounting.h | 10 ++++++++++ include/linux/tcp.h | 8 +++++++- include/linux/watchdog.h | 8 +++++++- include/net/netns/ipv4.h | 4 ++++ include/scsi/scsi_cmnd.h | 4 ++++ kernel/workqueue.c | 7 +++++++ mm/ksm.c | 4 ++++ 26 files changed, 143 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..345680cfa9d2 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -11,6 +11,7 @@ #ifndef __ARM64_KVM_HOST_H__ #define __ARM64_KVM_HOST_H__ +#include #include #include #include @@ -279,6 +280,8 @@ struct kvm_arch { * the associated pKVM instance in the hypervisor. */ struct kvm_protected_vm pkvm; + + CK_KABI_RESERVE(1) }; struct kvm_vcpu_fault_info { diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 36cd8eee3f77..33ccd1bfc7d0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -8,6 +8,7 @@ #ifndef _ASM_X86_KVM_HOST_H #define _ASM_X86_KVM_HOST_H +#include #include #include #include @@ -1462,6 +1463,8 @@ struct kvm_arch { */ #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1) struct kvm_mmu_memory_cache split_desc_cache; + + CK_KABI_RESERVE(1) }; struct kvm_vm_stat { diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 970bd6ff38c4..f46f2e513789 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -44,6 +45,9 @@ struct brd_device { */ struct xarray brd_pages; u64 brd_nr_pages; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index ef0b2efa9f93..4c838e46b046 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -11,6 +11,7 @@ #ifndef _PCIE_DESIGNWARE_H #define _PCIE_DESIGNWARE_H +#include #include #include #include @@ -341,6 +342,8 @@ struct dw_pcie_ep_ops { * driver. 
*/ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no); + + CK_KABI_RESERVE(1) }; struct dw_pcie_ep_func { @@ -364,6 +367,8 @@ struct dw_pcie_ep { void __iomem *msi_mem; phys_addr_t msi_mem_phys; struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; + + CK_KABI_RESERVE(1) }; struct dw_pcie_ops { @@ -406,6 +411,8 @@ struct dw_pcie { struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS]; struct gpio_desc *pe_rst; bool suspended; + + CK_KABI_RESERVE(1) }; #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index ed966108da80..3ce0e6b9a9bd 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -11,6 +11,7 @@ #ifndef __UBIFS_DEBUG_H__ #define __UBIFS_DEBUG_H__ +#include /* Checking helper functions */ typedef int (*dbg_leaf_callback)(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *priv); @@ -115,6 +116,9 @@ struct ubifs_debug_info { struct dentry *dfs_chk_fs; struct dentry *dfs_tst_rcvry; struct dentry *dfs_ro_error; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index ebb3ad6b5e7e..b6e743b55dad 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -428,6 +429,9 @@ struct ubifs_inode { pgoff_t read_in_a_row; int data_len; void *data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -1522,6 +1526,15 @@ struct ubifs_info { struct ubifs_debug_info *dbg; struct ubifs_stats_info *stats; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; extern struct list_head ubifs_infos; diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8d51f69f9f5e..ff80dd791aae 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -2,6 +2,7 @@ #ifndef _LINUX_BINFMTS_H #define _LINUX_BINFMTS_H +#include #include #include #include @@ -62,6 +63,9 @@ struct linux_binprm { struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. 
*/ char buf[BINPRM_BUF_SIZE]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __randomize_layout; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 9b08d792fa95..d918f9bc8cea 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -4,6 +4,7 @@ #ifndef _LINUX_BPF_H #define _LINUX_BPF_H 1 +#include #include #include @@ -1474,6 +1475,9 @@ struct bpf_prog_aux { struct work_struct work; struct rcu_head rcu; }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct bpf_prog { diff --git a/include/linux/fs.h b/include/linux/fs.h index 59b0fbaf6eb0..47c79ece0960 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -489,6 +490,8 @@ struct address_space { struct list_head private_list; struct rw_semaphore i_mmap_rwsem; void *private_data; + + CK_KABI_RESERVE(1) } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 523025106a64..87535af70c44 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -2,6 +2,7 @@ #ifndef _LINUX_IF_MACVLAN_H #define _LINUX_IF_MACVLAN_H +#include #include #include #include @@ -35,6 +36,9 @@ struct macvlan_dev { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0225cf7445de..8502f7a4834e 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -7,6 +7,7 @@ #ifndef __LINUX_IOMMU_H #define __LINUX_IOMMU_H +#include #include #include #include @@ -111,6 +112,8 @@ struct iommu_domain { int users; }; }; + + CK_KABI_RESERVE(1) }; static inline bool iommu_is_dma_domain(struct iommu_domain *domain) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d027f8fd23bf..a423e1345702 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -2,7 +2,7 @@ #ifndef __KVM_HOST_H #define __KVM_HOST_H - +#include #include #include #include @@ -392,6 +392,8 @@ struct kvm_vcpu { */ struct kvm_memory_slot *last_used_slot; u64 last_used_slot_gen; + + CK_KABI_RESERVE(1) }; /* @@ -588,6 +590,8 @@ struct kvm_memory_slot { u32 flags; short id; u16 as_id; + + CK_KABI_RESERVE(1) }; static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e8498d088bb2..c2803d210479 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -10,6 +10,8 @@ #ifndef _LINUX_MEMCONTROL_H #define _LINUX_MEMCONTROL_H + +#include #include #include #include @@ -333,6 +335,10 @@ struct mem_cgroup { struct lru_gen_mm_list mm_list; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) struct mem_cgroup_per_node *nodeinfo[]; }; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 36c5b43999e6..3ae4225c3da0 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -3,7 +3,7 @@ #define _LINUX_MM_TYPES_H #include - +#include #include #include #include @@ -660,6 +660,8 @@ struct vm_area_struct { struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; + + CK_KABI_RESERVE(1) } __randomize_layout; #ifdef CONFIG_SCHED_MM_CID @@ -919,6 +921,8 @@ struct mm_struct { 
#endif /* CONFIG_LRU_GEN */ } __randomize_layout; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) /* * The mm_cpumask needs to be at the end of mm_struct, because it * is dynamically sized based on nr_cpu_ids. diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 5cb694031072..e94a4146965d 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -9,6 +9,7 @@ #ifndef __LINUX_PCI_EPC_H #define __LINUX_PCI_EPC_H +#include #include struct pci_epc; @@ -89,6 +90,8 @@ struct pci_epc_ops { const struct pci_epc_features* (*get_features)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); struct module *owner; + + CK_KABI_RESERVE(1) }; /** @@ -150,6 +153,8 @@ struct pci_epc { /* mutex to protect against concurrent access of EP controller */ struct mutex lock; unsigned long function_num_map; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 3f44b6aec477..8e65f41fd524 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -9,6 +9,7 @@ #ifndef __LINUX_PCI_EPF_H #define __LINUX_PCI_EPF_H +#include #include #include #include @@ -51,6 +52,8 @@ struct pci_epf_header { u16 subsys_vendor_id; u16 subsys_id; enum pci_interrupt_pin interrupt_pin; + + CK_KABI_RESERVE(1) }; /** @@ -121,6 +124,8 @@ struct pci_epf_bar { size_t size; enum pci_barno barno; int flags; + + CK_KABI_RESERVE(1) }; /** @@ -180,6 +185,8 @@ struct pci_epf { unsigned long vfunction_num_map; struct list_head pci_vepf; const struct pci_epc_event_ops *event_ops; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/sched.h b/include/linux/sched.h index 5ce57c478069..4b17257e52c8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -37,6 +37,7 @@ #include #include #include +#include #include /* task_struct member predeclarations (sorted alphabetically): */ @@ -1572,6 +1573,14 @@ struct task_struct { }; unsigned long wait_moment; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. 
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 4cc52698e214..f57d86d6ecf8 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -2,6 +2,7 @@ #ifndef _LINUX_SCHED_USER_H #define _LINUX_SCHED_USER_H +#include #include #include #include @@ -34,6 +35,9 @@ struct user_struct { /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern int uids_sysfs_init(void); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index dbf5b21feafe..e40aad7476b3 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -20,6 +20,7 @@ #include #include #include +#include /* * @@ -44,6 +45,9 @@ struct svc_pool { struct percpu_counter sp_threads_woken; unsigned long sp_flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } ____cacheline_aligned_in_smp; /* bits for sp_flags */ @@ -96,6 +100,9 @@ struct svc_serv { * entries in the svc_cb_list */ bool sv_bc_enabled; /* service uses backchannel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h index 6f6acce064de..c82625f477b7 100644 --- a/include/linux/task_io_accounting.h +++ b/include/linux/task_io_accounting.h @@ -9,6 +9,8 @@ * Blame Andrew Morton for all this. */ +#include + struct task_io_accounting { #ifdef CONFIG_TASK_XACCT /* bytes read */ @@ -43,4 +45,12 @@ struct task_io_accounting { */ u64 cancelled_write_bytes; #endif /* CONFIG_TASK_IO_ACCOUNTING */ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 3c5efeeb024f..6e116041204e 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -13,7 +13,7 @@ #ifndef _LINUX_TCP_H #define _LINUX_TCP_H - +#include #include #include #include @@ -452,6 +452,9 @@ struct tcp_sock { */ struct request_sock __rcu *fastopen_rsk; struct saved_syn *saved_syn; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; enum tsq_enum { @@ -497,6 +500,9 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 99660197a36c..11feccdde6e7 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -9,7 +9,7 @@ #ifndef _LINUX_WATCHDOG_H #define _LINUX_WATCHDOG_H - +#include #include #include #include @@ -53,6 +53,9 @@ struct watchdog_ops { unsigned int (*get_timeleft)(struct watchdog_device *); int (*restart)(struct watchdog_device *, unsigned long, void *); long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); + + CK_KABI_RESERVE(1); + CK_KABI_RESERVE(2); }; /** struct watchdog_device - The structure that defines a watchdog device @@ -119,6 +122,9 @@ struct watchdog_device { #define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ #define WDOG_NO_PING_ON_SUSPEND 5 /* Ping worker should be stopped on suspend */ struct list_head deferred; + + CK_KABI_RESERVE(1); + CK_KABI_RESERVE(2); }; #define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 7a41c4791536..34d5b055c720 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -11,6 +11,7 
@@ #include #include #include +#include struct ctl_table_header; struct ipv4_devconf; @@ -239,5 +240,8 @@ struct netns_ipv4 { atomic_t rt_genid; siphash_key_t ip_id_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #endif diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 526def14e7fb..a18734ef323c 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -9,6 +9,7 @@ #include #include #include +#include #include struct Scsi_Host; @@ -141,6 +142,9 @@ struct scsi_cmnd { * to be at an address < 16Mb). */ int result; /* Status code from lower level driver */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index fd7b84b06d92..6af7ce2dbf66 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -53,6 +53,7 @@ #include #include #include +#include #include "workqueue_internal.h" @@ -199,6 +200,9 @@ struct worker_pool { * from get_work_pool(). */ struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -323,6 +327,9 @@ struct workqueue_struct { /* hot fields used during command issue, aligned to cacheline */ unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static struct kmem_cache *pwq_cache; diff --git a/mm/ksm.c b/mm/ksm.c index 981af9c72e7a..cdb24bdaec93 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include "internal.h" @@ -140,6 +141,9 @@ struct ksm_scan { unsigned long address; struct ksm_rmap_item **rmap_list; unsigned long seqnr; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** -- Gitee
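The RSS redirection-table packing introduced by wx_store_reta() earlier in this series stores four 8-bit queue indices per 32-bit WX_RDB_RSSTBL register, so entry i lands in byte (i & 3) of register (i >> 2). The following self-contained sketch mirrors that packing and the round-robin fill of wx_setup_reta() in plain user-space C; the register write is replaced with a print, and an RSS queue count of 8 is assumed (matching NGBE_MAX_RSS_INDICES):

#include <stdint.h>
#include <stdio.h>

#define WX_MAX_RETA_ENTRIES 128	/* same table size as the driver */

/* Mirror of wx_store_reta(): pack four 8-bit entries per register. */
static void store_reta(const uint8_t *indir_tbl)
{
	uint32_t reta = 0;
	uint32_t i;

	for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
		reta |= (uint32_t)indir_tbl[i] << ((i & 0x3) * 8);
		if ((i & 3) == 3) {
			/* stands in for wr32(wx, WX_RDB_RSSTBL(i >> 2), reta) */
			printf("RSSTBL[%u] = 0x%08x\n", i >> 2, reta);
			reta = 0;
		}
	}
}

int main(void)
{
	uint8_t indir_tbl[WX_MAX_RETA_ENTRIES];
	uint32_t i, j;

	/* Mirror of wx_setup_reta()'s fill loop with rss_i == 8 */
	for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
		if (j == 8)
			j = 0;
		indir_tbl[i] = (uint8_t)j;
	}
	store_reta(indir_tbl);
	return 0;
}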